Esempio n. 1
0
def pos_create_ec(sender, instance, **kwargs):
    """post_save signal handler: relocate images uploaded under the
    placeholder 'establishment/0/' directory into the establishment's own
    id-based directory, updating the related ImageUpload rows and the
    establishment's fields.
    """
    establishment = instance

    def _relocate(field_value):
        """Move one uploaded image from the '/0/' placeholder directory to
        '/<establishment.id>/' and return the new relative name."""
        old_name = str(field_value)
        image = ImageUpload.objects.get(upload=field_value)
        img_path = default_storage.location + '/' + old_name
        # Open in *binary* mode: images are not text, and the handle must
        # be closed once the copy has been stored (the original leaked it).
        with open(img_path, 'rb') as fh:
            img_file = File(fh)
            default_storage.delete(img_path)
            default_storage.save(
                img_path.replace('/0/', '/' + str(establishment.id) + '/'),
                img_file)
        new_name = old_name.replace('/0/', '/' + str(establishment.id) + '/')
        image.upload = new_name
        image.save()
        return new_name

    isave = False
    if 'establishment/0/' in str(establishment.img_logo):
        establishment.img_logo = _relocate(establishment.img_logo)
        isave = True
    if 'establishment/0/' in str(establishment.img_vitrin):
        establishment.img_vitrin = _relocate(establishment.img_vitrin)
        isave = True
    if isave:
        establishment.save()
Esempio n. 2
0
def import_all():
    """Import every pending 'arp-YYMMDD-HHMM.txt' file found in ARP_ROOT.

    Returns immediately if another import holds the directory lock;
    otherwise locks the directory, imports each matching file (logging a
    success record per file), deletes it, and always releases the lock.
    """
    if import_dir_locked():
        return

    # Lock the import directory
    lock_import_dir()
    try:
        file_list = default_storage.listdir(settings.ARP_ROOT)[1]
        for file_name in file_list:
            # Expects filename like: arp-111101-0006.txt
            if "arp-" not in file_name:
                continue
            # Extract the timestamp portion.  str.lstrip/rstrip strip
            # *character sets*, not prefixes/suffixes, so the original
            # could eat legitimate leading/trailing characters; slice
            # explicitly instead.
            runtime_str = file_name
            if runtime_str.startswith(settings.ARP_ROOT):
                runtime_str = runtime_str[len(settings.ARP_ROOT):]
            if runtime_str.startswith("arp-"):
                runtime_str = runtime_str[len("arp-"):]
            if runtime_str.endswith(".txt"):
                runtime_str = runtime_str[:-len(".txt")]
            runtime = timezone.make_aware(
                datetime.strptime(runtime_str, "%y%m%d-%H%M"),
                timezone.get_current_timezone())
            full_path = settings.ARP_ROOT + file_name
            file = default_storage.open(full_path)
            log_message("importing %s" % file_name)
            ImportLog.objects.create(file_name=file_name, success=False)
            import_file(file, runtime)
            default_storage.delete(full_path)
            log = ImportLog.objects.filter(
                file_name=file_name).order_by('created')[0]
            log.success = True
            log.save()
    finally:
        # Unlock the import directory even if an import raised, so the
        # next run is not locked out forever.
        unlock_import_dir()
Esempio n. 3
0
def rmtree(prefix):
    """Recursively delete every file and directory below ``prefix``,
    then remove ``prefix`` itself from storage."""
    subdirs, filenames = storage.listdir(prefix)
    for name in filenames:
        storage.delete(os.path.join(prefix, name))
    for subdir in subdirs:
        rmtree(os.path.join(prefix, subdir))
    storage.delete(prefix)
Esempio n. 4
0
def delete_category(sender, instance, **kwargs):
    """Signal handler: when a category is deleted, remove all of its
    products, their local image uploads/files, their Parse-side records,
    and finally the category's own Parse record.

    All Parse deletions are best effort: failures are swallowed so the
    local cleanup always proceeds.
    """
    category = instance
    try:            
        products = Product.objects.filter(category=category.id) 
        for pro in products:         
            if pro.parse_img_product:
                print pro.parse_img_product
                # Best-effort delete of the product image on Parse.
                try:
                    parse.ImageParse(objectId=pro.parse_img_product).delete()
                except:
                    pass   
            # Best-effort delete of the product record on Parse.
            try:
                parse.ProductParse(objectId=pro.parse_id).delete()
            except:
                pass 
            # Remove the local upload row and the stored image file.
            imagem = ImageUpload.objects.get(pk=pro.image.id)
            imagem.delete()
            img_path = default_storage.location+'/'+pro.img_product.__str__()
            default_storage.delete(img_path)
            pro.delete()     
    except:
        # NOTE(review): this bare except silently aborts the whole loop on
        # any error (e.g. a missing ImageUpload) -- consider narrowing.
        pass
    # Best-effort delete of the category record on Parse.
    try:
        parse.ProductCategoryParse(objectId=category.parse_id).delete()
    except:
        pass  
Esempio n. 5
0
def thumbnail(file, size="200x200"):
    """Return the URL of a cached thumbnail for ``file``, creating (or
    refreshing) the miniature in default_storage when needed.

    ``size`` is a "WxH" string; the miniature is stored next to the
    original as ``<basename>_<size><ext>`` and regenerated whenever the
    original is newer than the cached copy.
    """
    # defining the size
    width, height = [int(part) for part in size.split("x")]
    # defining the filename and the miniature filename
    filehead, filetail = os.path.split(file.name)
    basename, ext = os.path.splitext(filetail)
    miniature = basename + "_" + size + ext
    filename = file.name
    miniature_filename = os.path.join(filehead, miniature)
    filehead, filetail = os.path.split(file.url)
    miniature_url = filehead + "/" + miniature

    thumbnail_exist = False
    if default_storage.exists(miniature_filename):
        mt_filename = default_storage.modified_time(filename)
        mt_miniature_filename = default_storage.modified_time(miniature_filename)
        if mt_filename > mt_miniature_filename:
            # The source changed since the miniature was made: remove it
            # so it gets rebuilt below.
            default_storage.delete(miniature_filename)
        else:
            thumbnail_exist = True

    # if the image wasn't already resized, resize it
    if not thumbnail_exist:
        image = Image.open(default_storage.open(filename))
        image.thumbnail([width, height], Image.ANTIALIAS)

        # Write in *binary* mode: image data is bytes, and text mode would
        # corrupt it.  Close the handle even if save() raises.
        f = default_storage.open(miniature_filename, "wb")
        try:
            image.save(f, image.format, quality=90, optimize=1)
        finally:
            f.close()

    return miniature_url
Esempio n. 6
0
 def download_external_thumbnail(self, override_thumbnail=False):
     """Try to download and save an external thumbnail.

     Does nothing when there is no external URL, when a thumbnail is
     already set (unless ``override_thumbnail``), or once the configured
     retry budget has been exhausted.
     """
     if not self.external_thumbnail_url:
         return
     if self.thumbnail and not override_thumbnail:
         return
     from django.conf import settings
     # Retry budget is configurable via settings; defaults to 3 attempts.
     max_retries = getattr(settings,
                           'DJVIDSCRAPER_MAX_DOWNLOAD_RETRIES',
                           3)
     if self.external_thumbnail_tries > max_retries:
         return
     try:
         final_path = download_thumbnail(self.external_thumbnail_url,
                                         self,
                                         'thumbnail')
     except Exception:
         # Download failed: record the attempt so we eventually give up.
         self.external_thumbnail_tries += 1
         self.save()
     else:
         try:
             self.thumbnail = final_path
             self.save()
         except Exception:
             # Saving the model failed: don't leave the downloaded file
             # orphaned in storage.
             default_storage.delete(final_path)
Esempio n. 7
0
def convert(directory, delete=False):
    """Walk ``directory`` in storage and create the standard-size PNG
    variants for every icon that does not already have them.

    Non-PNG originals have their Webapp rows flipped to 'image/png' at the
    end; ``delete=True`` removes the originals after conversion.
    """
    print 'Converting icons in %s' % directory

    pks = []
    k = 0
    for path, names, filenames in walk_storage(directory):
        for filename in filenames:
            old = os.path.join(path, filename)
            pre, ext = os.path.splitext(old)
            # Skip files that are already size variants, or are not icons.
            if (pre[-3:] in size_suffixes or ext not in extensions):
                continue

            if not storage.size(old):
                print 'Icon %s is empty, ignoring.' % old
                continue

            for size, size_suffix in zip(sizes, size_suffixes):
                new = '%s%s%s' % (pre, size_suffix, '.png')
                # NOTE(review): this checks the *local* filesystem while
                # everything else goes through the storage API -- presumably
                # only correct for local storage backends; verify.
                if os.path.exists(new):
                    continue
                resize_image(old, new, (size, size), remove_src=False)

            if ext != '.png':
                pks.append(os.path.basename(pre))

            if delete:
                storage.delete(old)

            k += 1
            # Progress marker every 1000 conversions.
            if not k % 1000:
                print "... converted %s" % k

    # Update icon_type in batches of 100 to keep each query bounded.
    for chunk in chunked(pks, 100):
        Webapp.objects.filter(pk__in=chunk).update(icon_type='image/png')
Esempio n. 8
0
    def test_filetracker_to_django_field(self):
        """Round-trip a filetracker file through a Django FileField."""
        data = 'eloziom'
        rel_path = 'my/path'
        tracker_path = '/' + rel_path

        storage = default_storage
        try:
            self.assertEqual(storage.save(rel_path, ContentFile(data)),
                             rel_path)

            instance = TestFileModel()
            # The field must ignore the preferred name: filetracker files
            # cannot be copied to another location, so saving under a new
            # name has to raise.
            with self.assertRaises(NotImplementedError):
                instance.file_field.save(
                    'xx', filetracker_to_django_file(tracker_path, storage))

            instance.file_field = filetracker_to_django_file(tracker_path,
                                                             storage)
            instance.save()
            self.assertEqual(instance.file_field.name, rel_path)
            saved_pk = instance.pk

            # Drop the instance so the query below really hits the
            # database instead of Django's cache.
            del instance

            instance = TestFileModel.objects.get(pk=saved_pk)
            self.assertEqual(instance.file_field.name, rel_path)
            self.assertEqual(django_to_filetracker_path(instance.file_field),
                             tracker_path)
            self.assertEqual(instance.file_field.read(), data)
        finally:
            default_storage.delete(rel_path)
Esempio n. 9
0
def cleanup_extracted_file():
    """Delete file-viewer extractions older than one hour, along with
    their memoize cache entries."""
    log.info('Removing extracted files for file viewer.')
    root = os.path.join(settings.TMP_PATH, 'file_viewer')
    # Local storage uses local time for file modification. S3 uses UTC time.
    now = datetime.utcnow if storage_is_remote() else datetime.now
    for path in storage.listdir(root)[0]:
        full = os.path.join(root, path)
        # Age is measured from the extracted manifest's mtime.
        age = now() - storage.modified_time(os.path.join(full,
                                                         'manifest.webapp'))
        if age.total_seconds() > (60 * 60):
            log.debug('Removing extracted files: %s, %dsecs old.' %
                      (full, age.total_seconds()))
            for subroot, dirs, files in walk_storage(full):
                for f in files:
                    storage.delete(os.path.join(subroot, f))
            # Nuke out the file and diff caches when the file gets removed.
            id = os.path.basename(path)
            # Only numeric directory names correspond to file ids.
            try:
                int(id)
            except ValueError:
                continue

            # The memoize cache key is keyed on the md5 of the id.
            key = hashlib.md5()
            key.update(str(id))
            cache.delete('%s:memoize:%s:%s' % (settings.CACHE_PREFIX,
                                               'file-viewer', key.hexdigest()))
    def run_test(self, filename, content='Lorem ipsum dolar sit amet'):
        """End-to-end storage check: save, stat, open, url, then delete."""
        content = UnicodeContentFile(content)
        filename = default_storage.save(filename, content)
        self.assert_(default_storage.exists(filename))

        self.assertEqual(default_storage.size(filename), content.size)
        # The stored file's mtime must fall within +/- 5 minutes of now.
        moment = datetime.utcnow()
        window = timedelta(minutes=5)
        mtime = default_storage.getmtime(filename)
        self.assert_(mtime > mktime((moment - window).timetuple()))
        self.assert_(mtime < mktime((moment + window).timetuple()))

        stored = default_storage.open(filename)
        self.assertEqual(stored.size, content.size)
        # The storage URL must match MEDIA_URL plus the quoted file path.
        fileurl = force_unicode(stored).replace('\\', '/')
        fileurl = urlquote_plus(fileurl, '/')
        if fileurl.startswith('/'):
            fileurl = fileurl[1:]
        self.assertEqual(MEDIA_URL + fileurl,
                         default_storage.url(filename))
        stored.close()

        default_storage.delete(filename)
        self.assert_(not default_storage.exists(filename))
Esempio n. 11
0
    def from_upload(cls, upload, addon, platforms, send_signal=True):
        """Create a new Version (plus its Files) from an upload.

        Parses the upload, creates the Version row (inheriting the latest
        license when one exists), records app compatibility ranges, creates
        one File per platform, disables old files, removes the upload, and
        optionally fires the version_uploaded signal.  Returns the Version.
        """
        data = utils.parse_addon(upload, addon)
        try:
            license = addon.versions.latest().license_id
        except Version.DoesNotExist:
            # First version of this addon: no license to inherit.
            license = None
        # Truncate the developer name to fit the model field.
        max_len = cls._meta.get_field_by_name('_developer_name')[0].max_length
        developer = data.get('developer_name', '')[:max_len]
        v = cls.objects.create(addon=addon, version=data['version'],
                               license_id=license, _developer_name=developer)
        log.info('New version: %r (%s) from %r' % (v, v.id, upload))

        AV = ApplicationsVersions
        for app in data.get('apps', []):
            AV(version=v, min=app.min, max=app.max,
               application_id=app.id).save()
        if addon.type == amo.ADDON_SEARCH:
            # Search extensions are always for all platforms.
            platforms = [Platform.objects.get(id=amo.PLATFORM_ALL.id)]
        else:
            platforms = cls._make_safe_platform_files(platforms)

        for platform in platforms:
            File.from_upload(upload, v, platform, parse_data=data)

        v.disable_old_files()
        # After the upload has been copied to all platforms, remove the upload.
        storage.delete(upload.path)
        if send_signal:
            version_uploaded.send(sender=v)

        return v
    def test_send_bad_image_format_cant_open(self):
        """
        Testing bad image format:
        Profile save() opens and scales the photo, so posting a non-image
        file must produce a form error.
        """
        bad_file_path = settings.BASE_DIR + \
            settings.MEDIA_URL + 'test_file.doc'
        # Use a context manager so the handle is closed deterministically:
        # an open handle leaks a descriptor and blocks deletion of the file
        # on some platforms.
        with open(bad_file_path, 'w+') as bad_file:
            form_data = {
                'id': 1,
                'name': 'ad2s',
                'last_name': 'admin',
                'bio': 'my bio',
                'date_of_birth': '1993-11-21',
                'email': '*****@*****.**',
                'jabber': '*****@*****.**',
                'skype': 'sgsfdf',
                'photo': bad_file  # only this bad field
            }
            self.client.login(username='******', password='******')
            response = self.client.post(reverse('hello:edit_profile'),
                                        form_data)
        self.assertIn("error", response.content)

        # delete file from app
        default_storage.delete(bad_file_path)
Esempio n. 13
0
def delete_pic(request):
    """Remove a Test object's image file from storage, delete the record,
    and acknowledge with a plain 'OK' response."""
    from django.core.files.storage import default_storage as ds
    instance = Test.objects.get(id=request.POST['id'])
    image_path = instance.image.path
    ds.delete(image_path)
    instance.delete()
    return HttpResponse('OK')
Esempio n. 14
0
 def test_entry_feed_enclosure(self):
     """Enclosure URL/length/mime track the entry's image source: inline
     <img> tags (relative and absolute), a real stored image file, then an
     extensionless fallback."""
     entry = self.create_published_entry()
     feed = EntryFeed()
     self.assertEquals(
         feed.item_enclosure_url(entry), 'http://example.com/image.jpg')
     self.assertEquals(feed.item_enclosure_length(entry), '100000')
     self.assertEquals(feed.item_enclosure_mime_type(entry), 'image/jpeg')
     # A relative <img src> is resolved against the site domain.
     entry.content = 'My test content with image <img src="image.jpg" />'
     entry.save()
     self.assertEquals(
         feed.item_enclosure_url(entry), 'http://example.com/image.jpg')
     self.assertEquals(feed.item_enclosure_length(entry), '100000')
     self.assertEquals(feed.item_enclosure_mime_type(entry), 'image/jpeg')
     # An absolute <img src> is used verbatim.
     entry.content = 'My test content with image ' \
                     '<img src="http://test.com/image.jpg" />'
     entry.save()
     self.assertEquals(
         feed.item_enclosure_url(entry), 'http://test.com/image.jpg')
     self.assertEquals(feed.item_enclosure_length(entry), '100000')
     self.assertEquals(feed.item_enclosure_mime_type(entry), 'image/jpeg')
     # A real stored image takes precedence; length is its byte size.
     path = default_storage.save('enclosure.png', ContentFile('Content'))
     entry.image = path
     entry.save()
     self.assertEquals(feed.item_enclosure_url(entry),
                       urljoin('http://example.com', entry.image.url))
     self.assertEquals(feed.item_enclosure_length(entry), '7')
     self.assertEquals(feed.item_enclosure_mime_type(entry), 'image/png')
     default_storage.delete(path)
     # With no usable extension the defaults kick back in.
     entry.image = 'invalid_image_without_extension'
     entry.save()
     self.assertEquals(feed.item_enclosure_url(entry),
                       urljoin('http://example.com', entry.image.url))
     self.assertEquals(feed.item_enclosure_length(entry), '100000')
     self.assertEquals(feed.item_enclosure_mime_type(entry), 'image/jpeg')
def put_thumbs(notify_buf, jpeg_dir, prefix, suffix, video_id, store_loc):
    """Upload every JPEG in ``jpeg_dir`` to the per-video thumbnail area.

    For local storage the target directory is wiped and recreated on disk;
    for remote storage the path is simply deleted before the per-file
    uploads, because the Filesystem API differs between the two.
    """
    # I wish Filesystem API worked the same for local and remote, but it don't
    if store_loc == 'local':
        root = getattr(settings, 'MEDIA_ROOT')
        store_path = root + "/" + prefix + "/" + suffix + "/videos/" + str(video_id) + "/jpegs"
        if default_storage.exists(store_path):
            infoLog(notify_buf, "Found prior directory, removing: %s" % store_path)
            dirRemove(store_path)
        os.mkdir(store_path)
    else:
        store_path = prefix + "/" + suffix + "/videos/" + str(video_id) + "/jpegs"
        default_storage.delete(store_path)

    # not doing write to tmp and then mv because the file storage API limitation
    image_list = sorted(os.listdir(jpeg_dir))
    for fname in image_list:
        infoLog(notify_buf, "Uploading: %s" % fname)
        # 'with' closes both handles even if the copy raises, instead of
        # leaking file descriptors on error (the original never closed
        # them on an exception path).
        with open(jpeg_dir + "/" + fname, 'rb') as local_file:
            with default_storage.open(store_path + "/" + fname, 'wb') as store_file:
                store_file.write(local_file.read())
    infoLog(notify_buf, "Uploaded: %s files" % str(len(image_list)))
Esempio n. 16
0
    def test_export_task(self):
        """Test exporting resources task."""
        resources = LearningResource.objects.all()

        result = export_resources.delay(
            resources, self.user.username).get()
        path = result['name']
        collision = result['collision']
        tempdir = mkdtemp()

        # A previous export with the same name must be detected.
        self.assertTrue(collision)

        # HACK: Have to patch in "seekable" attribute for python3 and tar
        # See: https://code.djangoproject.com/ticket/24963#ticket. Remove
        # when updating to Django 1.9
        def seekable():
            """Hacked seekable for django storage to work in python3."""
            return True
        try:
            resource_archive = default_storage.open(path)
            resource_archive.seekable = seekable
            # Extract the exported tarball and verify its layout matches
            # the resources it was built from.
            Archive(resource_archive, ext='.tar.gz').extract(
                to_path=tempdir, method='safe'
            )
            assert_resource_directory(self, resources, tempdir)
        finally:
            # Clean up both the extraction dir and the stored archive.
            rmtree(tempdir)
            default_storage.delete(path)
Esempio n. 17
0
def sign(version_id, reviewer=False):
    """Sign the packaged app behind ``version_id`` and return the signed
    file path.

    Raises SigningError when the version is not a packaged webapp or has
    no file.  Returns the existing path when a signed copy already exists.
    """
    version = Version.objects.get(pk=version_id)
    app = version.addon
    log.info('Signing version: %s of app: %s' % (version_id, app))

    if app.type != amo.ADDON_WEBAPP:
        log.error('Attempt to sign something other than an app.')
        raise SigningError('Not an app')

    if not app.is_packaged:
        log.error('Attempt to sign a non-packaged app.')
        raise SigningError('Not packaged')

    try:
        file_obj = version.all_files[0]
    except IndexError:
        log.error('Attempt to sign an app with no files in version.')
        raise SigningError('No file')

    if reviewer:
        path = file_obj.signed_reviewer_file_path
    else:
        path = file_obj.signed_file_path

    if storage.exists(path):
        log.info('Already signed app exists.')
        return path

    with statsd.timer('services.sign.app'):
        try:
            sign_app(file_obj.file_path, path, reviewer)
        except SigningError:
            # Remove any partial output before propagating the failure.
            if storage.exists(path):
                storage.delete(path)
            raise
    log.info('Signing complete.')
    return path
Esempio n. 18
0
    def delete(self):
        """Soft-delete this version: flag it deleted, disable its file,
        refresh addon metadata when this was the current version, and
        unlink signed packages for packaged apps."""
        log.info(u'Version deleted: %r (%s)' % (self, self.id))
        mkt.log(mkt.LOG.DELETE_VERSION, self.addon, str(self.version))

        models.signals.pre_delete.send(sender=Version, instance=self)

        # Remember whether we are deleting the current version *before*
        # flipping the deleted flag below changes what current_version is.
        was_current = False
        if self == self.addon.current_version:
            was_current = True

        # Soft delete: the row stays, flagged as deleted.
        self.update(deleted=True)

        # Set file status to disabled.
        f = self.all_files[0]
        f.update(status=mkt.STATUS_DISABLED, _signal=False)
        f.hide_disabled_file()

        # If version deleted was the current version and there now exists
        # another current_version, we need to call some extra methods to update
        # various bits for packaged apps.
        if was_current and self.addon.current_version:
            self.addon.update_name_from_package_manifest()
            self.addon.update_supported_locales()

        if self.addon.is_packaged:
            # Unlink signed packages if packaged app.
            storage.delete(f.signed_file_path)
            log.info(u'Unlinked file: %s' % f.signed_file_path)
            storage.delete(f.signed_reviewer_file_path)
            log.info(u'Unlinked file: %s' % f.signed_reviewer_file_path)

        models.signals.post_delete.send(sender=Version, instance=self)
Esempio n. 19
0
def deleteThumb(filename):
    """Delete the cached thumbnail for ``filename``.

    Returns True when a thumbnail existed and was removed, False otherwise.
    """
    thumb_path = '%s/%s.jpg' % (THUMB_DIR, filename)
    if not storage.exists(thumb_path):
        return False
    storage.delete(thumb_path)
    return True
Esempio n. 20
0
def file_cleanup(sender, **kwargs):
    """
    File cleanup callback used to emulate the old delete
    behavior using signals. Initially django deleted linked
    files when an object containing a File/ImageField was deleted.

    Usage:

    >>> from django.db.models.signals import post_delete

    >>> post_delete.connect(file_cleanup, sender=MyModel, dispatch_uid="mymodel.file_cleanup")
    """
    for fieldname in sender._meta.get_all_field_names():
        # Some names returned by get_all_field_names() (e.g. reverse
        # relations) are not real fields; skip those.
        try:
            field = sender._meta.get_field(fieldname)
        except:
            field = None
        if field and isinstance(field, FileField):
            inst = kwargs['instance']
            f = getattr(inst, fieldname)
            m = inst.__class__._default_manager
            # Only delete the file when no *other* row still references it.
            if hasattr(f, 'path') and os.path.exists(f.path) \
                and not m.filter(**{'%s__exact' % fieldname: getattr(inst, fieldname)})\
                .exclude(pk=inst._get_pk_val()):
                    try:
                        #os.remove(f.path)
                        default_storage.delete(f.path)
                    except:
                        # Best effort: a storage failure must not break the
                        # post_delete signal chain.
                        pass
def resize(source, dest, width, height=None):
    """
    Resize an image to the given width/height, calculating the height when
    it is not specified.  Returns the stored name and the new dimensions.
    """
    src = default_storage.open(source)

    # load the source image with PIL
    pil_image = Image.open(src)

    # get a file-like object holding the resized image plus its dimensions
    img_buf, new_width, new_height = _resize(pil_image, width, height)

    src.close()

    if source == dest:
        # replacing in place: drop the old file first (best effort)
        try:
            default_storage.delete(source)
        except IOError:
            pass

    # write out the new file
    stored_name = default_storage.save(dest, ContentFile(img_buf.getvalue()))

    return stored_name, new_width, new_height
Esempio n. 22
0
def classes(request, offset):#Do not delete the student identity, only their course enrolment
    """Admin view: list the students of class ``offset``; when 'box' ids
    are posted via GET, delete those students' assignment files and their
    enrolment records (the Student rows themselves are kept)."""
    # Only logged-in admins (group 'a') may use this view.
    try:
        if (not 'uid' in request.session) or (request.session['group']!='a'):
            return HttpResponseRedirect('/login/')
    except KeyError:
        return HttpResponseRedirect('/login/')
    hint = ''
    if 'box' in request.GET:
        boxlist = request.GET.getlist('box')
        for j in boxlist:
            para = int(j)
            t = AssignmentFile.objects.filter(sID = para)
            for k in t:
                # NOTE(review): the media path is hard-coded to a developer
                # machine -- presumably this should come from
                # settings.MEDIA_ROOT; verify before deploying elsewhere.
                default_storage.delete('/home/tunghsu/workspace/SAMS/media/'+str(AssignmentFile.objects.get(asfID=k.asfID).asFile))
            AssignmentFile.objects.filter(sID = para).delete()
            Student_Class_Relation.objects.filter(sID = para).delete()
        hint = 'Student of the class Deleted'
    #try:
    para = int (offset)
    #except:
        #URL error
        #pass
    m = Student_Class_Relation.objects.filter(clID = para).order_by("sID")
    line = {}
    matrix = []
    # Build one {studentName, studentID} dict per enrolled student.
    for i in m:
        line['studentName'] = Student.objects.get(sID = i.sID).sName
        line['studentID'] = i.sID
        matrix.append(dict(line))
    return  render_to_response('class.html', {'title': offset+"班级学生查看页面", 'matrix':matrix,'hint':hint})
Esempio n. 23
0
def delete(request, id):
    """Delete a debate post: show a confirmation page on GET, and on POST
    remove its comments, the post itself, and any attachment files."""
    if not request.user.is_authenticated():
        return HttpResponseForbidden(render_to_string('forbidden.html', context_instance=RequestContext(request)))

    result = redirect('management:debates_index')
    post = get_object_or_404(DiscussionPost, pk=id)
    if request.method == 'GET':
        # Confirmation page only; nothing is deleted yet.
        result = render(request, "management/debates/delete.html", {'post' : post, 'section' : 'debates'})
    elif request.method == "POST":
        for comment in post.get_comments():
            comment.delete()
        # Collect attachment paths before the row disappears.
        doomed_files = []
        if len(post.attach) > 0:
            doomed_files.append(post.attach_path())
        if len(post.attach_thumb) > 0:
            doomed_files.append(post.attach_thumb_path())

        post.delete()
        for tfile in doomed_files:
            if fs.exists(tfile):
                try:
                    fs.delete(tfile)
                except IOError as e:
                    log.error("Failed to delete file '%s'. %s" % (tfile, e.message))
    return result
Esempio n. 24
0
    def save(self):
        """Persist each form field into the object's settings store.

        Newly uploaded files are written to default_storage under a
        slug-based name; unchanged File values are left alone; a cleared
        FileField deletes the previously stored file.
        """
        for name, field in self.fields.items():
            value = self.cleaned_data[name]

            if isinstance(value, UploadedFile):
                # Build a storage path scoped by organizer/event slug.
                if isinstance(self.obj, Event):
                    fname = '%s/%s/%s.%s' % (
                        self.obj.organizer.slug, self.obj.slug, name, value.name.split('.')[-1]
                    )
                else:
                    fname = '%s/%s.%s' % (self.obj.slug, name, value.name.split('.')[-1])
                with default_storage.open(fname, 'wb+') as destination:
                    for chunk in value.chunks():
                        destination.write(chunk)
                # Point the settings value at the stored file's name.
                value._name = fname
            elif isinstance(value, File):
                # file is unchanged
                continue
            elif isinstance(field, forms.FileField):
                # file is deleted
                fname = self.obj.settings.get(name, as_type=File)
                if fname:
                    try:
                        default_storage.delete(fname.name)
                    except OSError:
                        logger.error('Deleting file %s failed.' % fname.name)

            if value is None:
                del self.obj.settings[name]
            elif self.obj.settings.get(name, as_type=type(value)) != value:
                # Only write when the value actually changed.
                self.obj.settings.set(name, value)
    def handle_noargs(self, **options):
        """Purge stale and duplicate adjusted images, then delete files
        no adjusted image references."""
        # Adjusted images whose storage paths no longer exist.
        self._delete_queryset(self._old_adjustments())

        # Areas whose storage paths no longer exist.
        self._delete_queryset(self._old_areas())

        # Adjusted images pointing at adjustments that are gone.
        self._delete_queryset(self._missing_adjustments(),
                              'reference missing adjustments')

        # Duplicate adjusted images.
        self._delete_queryset(self._duplicate_adjustments(),
                              reason='is a duplicate',
                              reason_plural='are duplicates')

        # Finally, files on disk that no adjusted image references.
        orphans = self._orphaned_files()
        if not orphans:
            self.stdout.write("No orphaned files found.\n")
        else:
            count = len(orphans)
            self.stdout.write(
                "Deleting {0} orphaned file{1}... ".format(
                    count, pluralize(count)))
            self.stdout.flush()
            for filepath in orphans:
                # Best effort: storage errors should not abort the sweep.
                try:
                    default_storage.delete(filepath)
                except IOERRORS:
                    pass
            self.stdout.write("Done.\n")

        self.stdout.write("\n")
Esempio n. 26
0
    def obj_delete(self, bundle, **kwargs):
        """Delete a pin (and its image files) when the requester owns it
        or is a superuser; otherwise respond 403 with a Django message.

        Responses are raised as ImmediateHttpResponse so the message
        payload reaches the client.
        """
        if not hasattr(bundle.obj, "delete"):
            try:
                bundle.obj = self.obj_get(bundle=bundle, **kwargs)
            except ObjectDoesNotExist:
                raise NotFound("A model instance matching the provided arguments could not be found.")
        if bundle.request.user == bundle.obj.submitter or bundle.request.user.is_superuser:
            self.authorized_delete_detail(self.get_object_list(bundle.request), bundle)
            bundle.obj.delete()
            # delete the images
            default_storage.delete(bundle.obj.image.name)
            default_storage.delete(bundle.obj.thumbnail.name)

            # TODO:try += so i dont accendnetly over write the bundle data.
            # TODO: how to add message to normal tasypie responce instead of forcing it here.
            # Also, why is it not getting picked up by middleware, so i can gust use django messages.
            messages.success(bundle.request, "Delete was successfull.")
            mstore = messages.get_messages(bundle.request)
            for m in mstore:
                bundle.data["django_messages"] = [{"extra_tags": m.tags, "message": m, "level": m.level}]
            # using HttpGone in stead of HttpNoContent so success message can be displaied.
            raise ImmediateHttpResponse(self.create_response(bundle.request, bundle, response_class=HttpGone))
        else:
            bundle.data = {
                "django_messages": [
                    {
                        "extra_tags": "alert alert-error fade-out",
                        "message": "You can not delete other users pins.",
                        "level": 25,
                    }
                ]
            }
            raise ImmediateHttpResponse(self.create_response(bundle.request, bundle, response_class=HttpForbidden))
Esempio n. 27
0
 def test_upload(self):
     """POSTing a file to /upload/ redirects and stores it."""
     payload = SimpleUploadedFile('new_file', 'content')
     response = self.c.post('/upload/', {'file': payload})
     self.assertEqual(response.status_code, 302)
     self.assertTrue(default_storage.exists('new_file'))
     # Clean up the stored file so the test leaves no residue.
     default_storage.delete('new_file')
Esempio n. 28
0
def attach_delete(request):
    """Delete a previously uploaded attachment (and its thumbnail) that is
    tracked in the session under the POSTed ``uid``.

    Returns a JSON response: 403 when unauthenticated, 400 on a bad uid or
    expired session, 200 otherwise.
    """
    if not request.user.is_authenticated():
        return HttpResponse(get_json_response(code=403, message='Unauthorized'))

    try:
        if 'uid' in request.POST and len(request.POST['uid']) > 0:
            # Anchor the pattern: re.match() only anchors at the start, so
            # without \Z a uid like 'abc/../x' would pass validation.
            if not re.match(r'[a-zA-Z0-9]+\Z', request.POST['uid']):
                raise ValueError(u'Передано некорректное имя файла')
            uid = request.POST['uid']
            if not uid in request.session:
                raise ValueError(u'Сессия завершена!')

            uploaded_file = request.session[uid][1]
            thumb_file = request.session[uid][2]

            del request.session[uid]

            # Best-effort removal of both the file and its thumbnail.
            for path in (uploaded_file, thumb_file):
                if fs.exists(path):
                    try:
                        fs.delete(path)
                    except IOError as e:
                        log.error("Failed to delete file '%s'. %s" % (path, e.message))
    except ValueError as e:
        # BUG FIX: the error response was constructed but never returned,
        # so callers always received code=200 even on failure.
        return HttpResponse(get_json_response(code=400, message=e.message))
    return HttpResponse(get_json_response(code=200))
Esempio n. 29
0
def edit_avatar(request, group_slug):
    """Edit group avatar.

    On a valid POST, deletes the previous avatar file, thumbnails the
    upload to AVATAR_SIZE as PNG, saves it, and redirects to the profile;
    otherwise renders the edit form.
    """
    prof = get_object_or_404(GroupProfile, slug=group_slug)

    if not _user_can_edit(request.user, prof):
        raise PermissionDenied

    form = GroupAvatarForm(request.POST or None, request.FILES or None,
                           instance=prof)

    old_avatar_path = None

    if prof.avatar and default_storage.exists(prof.avatar.name):
        # Need to store the path, or else django's
        # form.is_valid() messes with it.
        old_avatar_path = prof.avatar.name

    if request.method == 'POST' and form.is_valid():
        # Upload new avatar and replace old one.
        if old_avatar_path:
            default_storage.delete(old_avatar_path)

        content = _create_image_thumbnail(form.instance.avatar.file,
                                          settings.AVATAR_SIZE, pad=True)
        # We want everything as .png
        name = form.instance.avatar.name + ".png"
        prof.avatar.save(name, content, save=True)
        return HttpResponseRedirect(prof.get_absolute_url())

    return render(request, 'groups/edit_avatar.html', {
        'form': form, 'profile': prof})
Esempio n. 30
0
def photo_size(request, id, size, crop=False, quality=90, download=False, constrain=False):
    """
    Render the photo identified by ``id`` at the requested ``size`` and
    return it directly as a JPEG response (no template involved).

    ``size`` is a "WIDTHxHEIGHT" string; ``crop`` and ``constrain`` control
    how the aspect ratio is applied, ``quality`` sets the JPEG quality and
    ``download`` switches the Content-Disposition to an attachment.
    Resized variants of public photos are saved to storage and remembered
    in the cache so later requests can redirect straight to the file.
    Raises Http404 if the image is missing or rendering fails, and Http403
    if the requesting user may not view the photo.
    """

    # URL-captured quality arrives as text; normalize to int for PIL.
    if isinstance(quality, unicode) and quality.isdigit():
        quality = int(quality)

    # A previous request may already have rendered and stored this exact
    # variant -- redirect to it instead of resizing again.
    cache_key = generate_image_cache_key(file=id, size=size, pre_key=PHOTO_PRE_KEY, crop=crop, unique_key=id, quality=quality, constrain=constrain)
    cached_image = cache.get(cache_key)
    if cached_image:
        return redirect(cached_image)

    photo = get_object_or_404(Image, id=id)
    # "800x600" -> [800, 600], then adjust to the photo's aspect ratio.
    size = [int(s) for s in size.split('x')]
    size = aspect_ratio(photo.image_dimensions(), size, constrain)

    # check permissions
    if not has_perm(request.user, 'photos.view_image', photo):
        raise Http403

    attachment = ''
    if download:
        attachment = 'attachment;'


    if not photo.image or not default_storage.exists(photo.image.name):
        raise Http404

    # gets resized image from cache or rebuild
    image = get_image(photo.image, size, PHOTO_PRE_KEY, crop=crop, quality=quality, unique_key=str(photo.pk), constrain=constrain)

    # if image not rendered; quit
    if not image:
        raise Http404

    response = HttpResponse(mimetype='image/jpeg')
    response['Content-Disposition'] = '%s filename=%s' % (attachment, photo.image.file.name)
    image.save(response, "JPEG", quality=quality)

    # Only persist/cache variants of fully public photos; private ones must
    # be permission-checked and re-rendered on every request.
    if photo.is_public_photo() and photo.is_public_photoset():
        file_name = photo.image_filename()
        file_path = 'cached%s%s' % (request.path, file_name)
        default_storage.delete(file_path)
        default_storage.save(file_path, ContentFile(response.content))
        full_file_path = "%s%s" % (settings.MEDIA_URL, file_path)
        cache.set(cache_key, full_file_path)
        # Track every cache key generated for this photo so they can all be
        # invalidated together when the photo changes.
        cache_group_key = "photos_cache_set.%s" % photo.pk
        cache_group_list = cache.get(cache_group_key)

        if cache_group_list is None:
            cache.set(cache_group_key, [cache_key])
        else:
            cache_group_list += [cache_key]
            cache.set(cache_group_key, cache_group_list)

    return response
Esempio n. 31
0
def import_new_contacts_file(account_key, group_key, file_name, file_path,
                             fields, has_header):
    """
    Import a contacts file into the given group, then email the account
    owner the outcome.

    All contacts are written or none are: if any row fails, every contact
    created so far is deleted and a failure mail (with the traceback) is
    sent instead.  The uploaded file is always removed afterwards.
    """
    api = VumiUserApi.from_config_sync(account_key, settings.VUMI_API_CONFIG)
    contact_store = api.contact_store
    group = contact_store.get_group(group_key)

    # Get the profile for this user so we can email them when the import
    # has been completed.
    user_profile = UserProfile.objects.get(user_account=account_key)

    written_contacts = []

    try:
        extension, parser = ContactFileParser.get_parser(file_name)

        contact_dictionaries = parser.parse_file(file_path, fields, has_header)
        for contact_dictionary in contact_dictionaries:

            # Make sure we set this group they're being uploaded in to
            contact_dictionary['groups'] = [group.key]

            contact = contact_store.new_contact(**contact_dictionary)
            written_contacts.append(contact)

        # BUG FIX: the original reported `counter` (enumerate index), which
        # was off by one and raised NameError for an empty file; the real
        # number of imported contacts is len(written_contacts).
        send_mail('Contact import completed successfully.',
                  render_to_string(
                      'contacts/import_completed_mail.txt', {
                          'count': len(written_contacts),
                          'group': group,
                          'user': user_profile.user,
                      }),
                  settings.DEFAULT_FROM_EMAIL, [user_profile.user.email],
                  fail_silently=False)

    except Exception:
        # Clean up if something went wrong, either everything is written
        # or nothing is written
        for contact in written_contacts:
            contact.delete()

        exc_type, exc_value, exc_traceback = sys.exc_info()

        send_mail('Something went wrong while importing the contacts.',
                  render_to_string(
                      'contacts/import_failed_mail.txt', {
                          'user':
                          user_profile.user,
                          'group_key':
                          group_key,
                          'account_key':
                          account_key,
                          'file_name':
                          file_name,
                          'file_path':
                          file_path,
                          'fields':
                          fields,
                          'has_header':
                          has_header,
                          'exception_type':
                          exc_type,
                          'exception_value':
                          mark_safe(exc_value),
                          'exception_traceback':
                          mark_safe(traceback.format_tb(exc_traceback)),
                      }),
                  settings.DEFAULT_FROM_EMAIL, [
                      user_profile.user.email,
                      '*****@*****.**',
                  ],
                  fail_silently=False)
    finally:
        # The uploaded file is no longer needed, whatever the outcome.
        default_storage.delete(file_path)
Esempio n. 32
0
def delete_entry(title):
    """Remove the stored markdown file for the entry named *title*."""
    default_storage.delete("entries/{}.md".format(title))
Esempio n. 33
0
 def tearDown(self):
     """Remove the test's file from storage before the base teardown runs."""
     path = self.file.file_path
     if storage.exists(path):
         storage.delete(path)
     super(TestValidateFile, self).tearDown()
Esempio n. 34
0
    def from_upload(cls,
                    upload,
                    addon,
                    platforms,
                    channel,
                    source=None,
                    parsed_data=None):
        """
        Create a Version instance and corresponding File(s) from a
        FileUpload, an Addon, a list of platform ids, a channel id and the
        parsed_data generated by parse_addon().

        Note that it's the caller's responsability to ensure the file is valid.
        We can't check for that here because an admin may have overridden the
        validation results.
        """
        assert parsed_data is not None

        # Imported here (not at module level), presumably to avoid a
        # circular import with olympia.addons -- TODO confirm.
        from olympia.addons.models import AddonFeatureCompatibility

        if addon.status == amo.STATUS_DISABLED:
            raise VersionCreateError(
                'Addon is Mozilla Disabled; no new versions are allowed.')

        # Carry the license over from the latest listed version, if any;
        # unlisted channels never inherit one.
        license_id = None
        if channel == amo.RELEASE_CHANNEL_LISTED:
            previous_version = addon.find_latest_version(channel=channel,
                                                         exclude=())
            if previous_version and previous_version.license_id:
                license_id = previous_version.license_id
        version = cls.objects.create(
            addon=addon,
            version=parsed_data['version'],
            license_id=license_id,
            source=source,
            channel=channel,
        )
        log.info('New version: %r (%s) from %r' %
                 (version, version.id, upload))
        activity.log_create(amo.LOG.ADD_VERSION, version, addon)
        # Update the add-on e10s compatibility since we're creating a new
        # version that may change that.
        e10s_compatibility = parsed_data.get('e10s_compatibility')
        if e10s_compatibility is not None:
            feature_compatibility = (
                AddonFeatureCompatibility.objects.get_or_create(
                    addon=addon)[0])
            feature_compatibility.update(e10s=e10s_compatibility)

        # Record which applications (and version ranges) this version is
        # compatible with, as declared in the manifest.
        compatible_apps = {}
        for app in parsed_data.get('apps', []):
            compatible_apps[app.appdata] = ApplicationsVersions(
                version=version, min=app.min, max=app.max, application=app.id)
            compatible_apps[app.appdata].save()

        # See #2828: sometimes when we generate the filename(s) below, in
        # File.from_upload(), cache-machine is confused and has trouble
        # fetching the ApplicationsVersions that were just created. To work
        # around this we pre-generate version.compatible_apps and avoid the
        # queries completely.
        version._compatible_apps = compatible_apps

        if addon.type in [amo.ADDON_SEARCH, amo.ADDON_STATICTHEME]:
            # Search extensions and static themes are always for all platforms.
            platforms = [amo.PLATFORM_ALL.id]
        else:
            platforms = cls._make_safe_platform_files(platforms)

        # Create as many files as we have platforms. Update the all_files
        # cached property on the Version while we're at it, because we might
        # need it afterwards.
        version.all_files = [
            File.from_upload(upload,
                             version,
                             platform,
                             parsed_data=parsed_data) for platform in platforms
        ]

        version.inherit_nomination(from_statuses=[amo.STATUS_AWAITING_REVIEW])
        version.disable_old_files()
        # After the upload has been copied to all platforms, remove the upload.
        storage.delete(upload.path)
        version_uploaded.send(sender=version)

        # Generate a preview and icon for listed static themes
        if (addon.type == amo.ADDON_STATICTHEME
                and channel == amo.RELEASE_CHANNEL_LISTED):
            dst_root = os.path.join(user_media_path('addons'), str(addon.id))
            theme_data = parsed_data.get('theme', {})
            version_root = os.path.join(dst_root, unicode(version.id))

            utils.extract_header_img(version.all_files[0].file_path,
                                     theme_data, version_root)
            generate_static_theme_preview(theme_data, version_root, version.pk)

        # Track the time it took from first upload through validation
        # (and whatever else) until a version was created.
        upload_start = utc_millesecs_from_epoch(upload.created)
        now = datetime.datetime.now()
        now_ts = utc_millesecs_from_epoch(now)
        upload_time = now_ts - upload_start

        log.info('Time for version {version} creation from upload: {delta}; '
                 'created={created}; now={now}'.format(delta=upload_time,
                                                       version=version,
                                                       created=upload.created,
                                                       now=now))
        statsd.timing('devhub.version_created_from_upload', upload_time)

        return version
Esempio n. 35
0
def remove_icons(destination):
    """Delete every generated icon size for *destination* from storage."""
    for icon_size in ADDON_ICON_SIZES:
        icon_path = '%s-%s.png' % (destination, icon_size)
        if storage.exists(icon_path):
            storage.delete(icon_path)
Esempio n. 36
0
    def from_upload(cls,
                    upload,
                    addon,
                    selected_apps,
                    channel,
                    parsed_data=None):
        """
        Create a Version instance and corresponding File(s) from a
        FileUpload, an Addon, a list of compatible app ids, a channel id and
        the parsed_data generated by parse_addon().

        Note that it's the caller's responsability to ensure the file is valid.
        We can't check for that here because an admin may have overridden the
        validation results.
        """
        # Imported here (not at module level), presumably to avoid
        # circular imports -- TODO confirm.
        from olympia.addons.models import AddonReviewerFlags
        from olympia.git.utils import create_git_extraction_entry

        assert parsed_data is not None

        if addon.status == amo.STATUS_DISABLED:
            raise VersionCreateError(
                'Addon is Mozilla Disabled; no new versions are allowed.')

        # Carry the license over from the latest listed version, if any;
        # unlisted channels never inherit one.
        license_id = None
        if channel == amo.RELEASE_CHANNEL_LISTED:
            previous_version = addon.find_latest_version(channel=channel,
                                                         exclude=())
            if previous_version and previous_version.license_id:
                license_id = previous_version.license_id
        approval_notes = None
        if parsed_data.get('is_mozilla_signed_extension'):
            approval_notes = (u'This version has been signed with '
                              u'Mozilla internal certificate.')
        version = cls.objects.create(
            addon=addon,
            approval_notes=approval_notes,
            version=parsed_data['version'],
            license_id=license_id,
            channel=channel,
        )
        # Log the creation under the uploader's IP address and attribute the
        # activity to the uploader (falling back to the task user).
        email = upload.user.email if upload.user and upload.user.email else ''
        with core.override_remote_addr(upload.ip_address):
            log.info('New version: %r (%s) from %r' %
                     (version, version.id, upload),
                     extra={
                         'email': email,
                         'guid': addon.guid,
                         'upload': upload.uuid.hex,
                         'user_id': upload.user_id,
                         'from_api': upload.source == amo.UPLOAD_SOURCE_API,
                     })
            activity.log_create(amo.LOG.ADD_VERSION,
                                version,
                                addon,
                                user=upload.user or get_task_user())

        if addon.type == amo.ADDON_STATICTHEME:
            # We don't let developers select apps for static themes
            selected_apps = [app.id for app in amo.APP_USAGE]

        compatible_apps = {}
        for app in parsed_data.get('apps', []):
            if app.id not in selected_apps:
                # If the user chose to explicitly deselect Firefox for Android
                # we're not creating the respective `ApplicationsVersions`
                # which will have this add-on then be listed only for
                # Firefox specifically.
                continue

            compatible_apps[app.appdata] = ApplicationsVersions(
                version=version, min=app.min, max=app.max, application=app.id)
            compatible_apps[app.appdata].save()

        # See #2828: sometimes when we generate the filename(s) below, in
        # File.from_upload(), cache-machine is confused and has trouble
        # fetching the ApplicationsVersions that were just created. To work
        # around this we pre-generate version.compatible_apps and avoid the
        # queries completely.
        version._compatible_apps = compatible_apps

        # For backwards compatibility. We removed specific platform
        # support during submission but we don't handle it any different
        # beyond that yet. That means, we're going to simply set it
        # to `PLATFORM_ALL` and also have the backend create separate
        # files for each platform. Cleaning that up is another step.
        # Given the timing on this, we don't care about updates to legacy
        # add-ons as well.
        # Create relevant file and update the all_files cached property on the
        # Version, because we might need it afterwards.
        version.all_files = [
            File.from_upload(upload=upload,
                             version=version,
                             platform=amo.PLATFORM_ALL.id,
                             parsed_data=parsed_data)
        ]

        version.inherit_nomination(from_statuses=[amo.STATUS_AWAITING_REVIEW])
        version.disable_old_files()

        # After the upload has been copied to all platforms, remove the upload.
        storage.delete(upload.path)
        upload.path = ''
        upload.save()

        version_uploaded.send(instance=version, sender=Version)

        # Associate scanner results produced during validation with the
        # newly-created version, when any scanner is enabled.
        if version.is_webextension:
            if (waffle.switch_is_active('enable-yara')
                    or waffle.switch_is_active('enable-customs')
                    or waffle.switch_is_active('enable-wat')):
                ScannerResult.objects.filter(upload_id=upload.id).update(
                    version=version)

        if waffle.switch_is_active('enable-uploads-commit-to-git-storage'):
            # Schedule this version for git extraction.
            transaction.on_commit(
                lambda: create_git_extraction_entry(version=version))

        # Generate a preview and icon for listed static themes
        if (addon.type == amo.ADDON_STATICTHEME
                and channel == amo.RELEASE_CHANNEL_LISTED):
            theme_data = parsed_data.get('theme', {})
            generate_static_theme_preview(theme_data, version.pk)

        # Reset add-on reviewer flags to disable auto-approval and require
        # admin code review if the package has already been signed by mozilla.
        reviewer_flags_defaults = {}
        is_mozilla_signed = parsed_data.get('is_mozilla_signed_extension')
        if upload.validation_timeout:
            reviewer_flags_defaults['needs_admin_code_review'] = True
        if is_mozilla_signed and addon.type != amo.ADDON_LPAPP:
            reviewer_flags_defaults['needs_admin_code_review'] = True
            reviewer_flags_defaults['auto_approval_disabled'] = True

        if reviewer_flags_defaults:
            AddonReviewerFlags.objects.update_or_create(
                addon=addon, defaults=reviewer_flags_defaults)

        # Authors need to be notified about auto-approval delay again since
        # they are submitting a new version.
        addon.reset_notified_about_auto_approval_delay()

        # Track the time it took from first upload through validation
        # (and whatever else) until a version was created.
        upload_start = utc_millesecs_from_epoch(upload.created)
        now = datetime.datetime.now()
        now_ts = utc_millesecs_from_epoch(now)
        upload_time = now_ts - upload_start

        log.info('Time for version {version} creation from upload: {delta}; '
                 'created={created}; now={now}'.format(delta=upload_time,
                                                       version=version,
                                                       created=upload.created,
                                                       now=now))
        statsd.timing('devhub.version_created_from_upload', upload_time)

        return version
Esempio n. 37
0
def delete(request):
    """
    Delete existing File/Directory.

    When trying to delete a Directory, the Directory has to be empty.
    """

    # Deletion is a state-changing action: only accept POST.
    if request.method != "POST":
        return HttpResponseRedirect(reverse("fb_browse"))

    # QUERY / PATH CHECK
    query = request.GET
    path = get_path(query.get('dir', ''))
    filename = query.get('filename', '')
    # NOTE(review): filename defaults to '' above, so `filename is None`
    # can never be true here -- the File branch of this message looks
    # unreachable.  Verify against get_path()'s contract.
    if path is None or filename is None:
        if path is None:
            msg = _('The requested Folder does not exist.')
        else:
            msg = _('The requested File does not exist.')
        messages.add_message(request, messages.ERROR, msg)
        return HttpResponseRedirect(reverse("fb_browse"))
    abs_path = os.path.join(get_directory(), path)

    # Guard against directory traversal: the normalized target must stay
    # under the filebrowser root and must not contain "..".
    normalized = os.path.normpath(os.path.join(get_directory(), path,
                                               filename))

    if not normalized.startswith(
            get_directory().strip("/")) or ".." in normalized:
        msg = _("An error occurred")
        messages.add_message(request, messages.ERROR, msg)
    elif request.GET.get('filetype') != "Folder":
        # NOTE(review): relative_server_path is computed but never used.
        relative_server_path = os.path.join(get_directory(), path, filename)
        try:
            # PRE DELETE SIGNAL
            filebrowser_pre_delete.send(sender=request,
                                        path=path,
                                        filename=filename)
            # DELETE FILE
            default_storage.delete(os.path.join(abs_path, filename))
            # POST DELETE SIGNAL
            filebrowser_post_delete.send(sender=request,
                                         path=path,
                                         filename=filename)
            # MESSAGE & REDIRECT
            msg = _('The file %s was successfully deleted.') % (
                filename.lower())
            messages.add_message(request, messages.SUCCESS, msg)
        except OSError:
            msg = _("An error occurred")
            messages.add_message(request, messages.ERROR, msg)
    else:
        try:
            # PRE DELETE SIGNAL
            filebrowser_pre_delete.send(sender=request,
                                        path=path,
                                        filename=filename)
            # DELETE FOLDER
            default_storage.rmtree(os.path.join(abs_path, filename))
            # POST DELETE SIGNAL
            filebrowser_post_delete.send(sender=request,
                                         path=path,
                                         filename=filename)
            # MESSAGE & REDIRECT
            msg = _('The folder %s was successfully deleted.') % (
                filename.lower())
            messages.add_message(request, messages.SUCCESS, msg)
        except OSError:
            msg = _("An error occurred")
            messages.add_message(request, messages.ERROR, msg)
    # Redirect back to the listing, dropping the deleted entry from the
    # query string.
    qs = query_helper(query, "", "filename,filetype")
    return HttpResponseRedirect(reverse("fb_browse") + qs)
Esempio n. 38
0
 def delete(self):
     """Delete the model row, best-effort removing its sample file first."""
     try:
         default_storage.delete(self.sample_filename())
     except Exception:
         # BUG FIX: was a bare `except:`, which also swallowed SystemExit
         # and KeyboardInterrupt.  A missing file or storage error still
         # must not block deletion of the database record, so this stays
         # best-effort -- just narrowed to Exception.
         pass
     models.Model.delete(self)
Esempio n. 39
0
def process_export(identifier, user_id):
    """
    Export all active articles to ``export/articles/<identifier>.csv``.

    Rows are streamed to a ``*_temp.csv`` file first, then copied to the
    final name and the temp file deleted.  When finished, the user looked
    up by ``user_id`` is emailed a download link (if they have an email).
    """
    field_list = [
        'guid',
        'slug',
        'timezone',
        'headline',
        'summary',
        'body',
        'source',
        'first_name',
        'last_name',
        'phone',
        'fax',
        'email',
        'website',
        'release_dt',
        'syndicate',
        'featured',
        'design_notes',
        'tags',
        'enclosure_url',
        'enclosure_type',
        'enclosure_length',
        'not_official_content',
        'entity',
    ]

    identifier = identifier or int(ttime.time())
    file_name_temp = 'export/articles/%s_temp.csv' % (identifier)

    with default_storage.open(file_name_temp, 'wb') as csvfile:
        csv_writer = UnicodeWriter(csvfile, encoding='utf-8')
        csv_writer.writerow(field_list)

        articles = Article.objects.filter(status_detail='active')

        for article in articles:
            items_list = []
            for field_name in field_list:
                item = getattr(article, field_name)

                # Normalize temporal values to fixed formats; the datetime
                # check must come before date (datetime subclasses date).
                # Everything else is escaped for CSV safety.
                if isinstance(item, datetime):
                    item = item.strftime('%Y-%m-%d %H:%M:%S')
                elif isinstance(item, date):
                    item = item.strftime('%Y-%m-%d')
                elif isinstance(item, time):
                    item = item.strftime('%H:%M:%S')
                else:
                    item = escape_csv(item)
                items_list.append(item)
            csv_writer.writerow(items_list)

    # Copy the temp file to its final name.  BUG FIX: the source handle was
    # previously opened inline and never closed (leaked file descriptor).
    file_name = 'export/articles/%s.csv' % identifier
    with default_storage.open(file_name_temp, 'rb') as temp_file:
        default_storage.save(file_name, temp_file)

    # delete the temp file
    default_storage.delete(file_name_temp)

    # notify user that export is ready to download
    [user] = User.objects.filter(pk=user_id)[:1] or [None]
    if user and user.email:
        download_url = reverse('article.export_download', args=[identifier])

        site_url = get_setting('site', 'global', 'siteurl')
        site_display_name = get_setting('site', 'global', 'sitedisplayname')
        parms = {
            'download_url': download_url,
            'user': user,
            'site_url': site_url,
            'site_display_name': site_display_name,
            'date_today': datetime.now()
        }

        subject = render_to_string(
            template_name='articles/notices/export_ready_subject.html',
            context=parms)
        subject = subject.strip('\n').strip('\r')

        body = render_to_string(
            template_name='articles/notices/export_ready_body.html',
            context=parms)

        email = Email(recipient=user.email, subject=subject, body=body)
        email.send()
Esempio n. 40
0
        # NOTE(review): this is the truncated tail of a larger import task --
        # the matching ``try``, the loop defining ``key``/``counter``, and
        # the lookups of ``group``/``user_profile`` are above this excerpt.
        except Exception, e:
            errors.append((key, str(e)))

    # Report the outcome (including any per-row errors) to the importing
    # user by email, then remove the uploaded file.
    email = render_to_string(
        'contacts/import_upload_is_truth_completed_mail.txt', {
            'count': counter,
            'errors': errors,
            'group': group,
            'user': user_profile.user,
        })

    send_mail('Contact import completed.',
              email,
              settings.DEFAULT_FROM_EMAIL, [user_profile.user.email],
              fail_silently=False)
    default_storage.delete(file_path)


@task(ignore_result=True)
def import_upload_is_truth_contacts_file(account_key, group_key, file_name,
                                         file_path, fields, has_header):
    """
    Celery task: import a contacts file, merging into existing contacts
    with the uploaded data treated as the source of truth.

    NOTE(review): this excerpt is truncated -- only the beginning of the
    merge helper is visible here.
    """
    def merge_operation(contact, contact_dictionary):
        # NOTE:     The order here is important, the new extra is
        #           the truth which we want to maintain
        new_extra = {}
        new_extra.update(dict(contact.extra))
        new_extra.update(contact_dictionary.pop('extra', {}))

        new_subscription = {}
        new_subscription.update(dict(contact.subscription))
        new_subscription.update(contact_dictionary.pop('subscription', {}))
Esempio n. 41
0
    def __init__(self, file_path, parse_netstream=False, obj=None):
        """
        Parse the Rocket League replay at ``file_path``.

        When ``parse_netstream`` is set, the (expensive) network stream is
        parsed too -- the parsed replay is pickled to storage so repeat
        runs can skip the work -- and a heatmap JSON file is generated.
        ``obj`` is the database record for this replay; its
        ``eligble_for_analysis()`` flag gates the extra location-JSON
        export.  (NOTE(review): ``obj`` defaults to None but is
        dereferenced unconditionally on the netstream path -- confirm
        callers always pass it when parse_netstream is True.)
        """
        self.replay = Replay(file_path)
        self.replay_id = self.replay.header['Id']

        # Per-parse accumulators filled in by the various _get_* helpers.
        self.actor_metadata = {}
        self.goal_metadata = {}
        self.match_metadata = {}
        self.team_metadata = {}
        self.actors = {}
        self.cars = {}
        self.boost_data = {}
        self.heatmap_json_filename = None

        assert len(self.team_metadata) == 0

        # Storage paths are keyed by the replay id.
        pickle_filename = 'uploads/pickle_files/{}.pickle'.format(
            self.replay_id)
        heatmap_json_filename = 'uploads/replay_json_files/{}.json'.format(
            self.replay_id)
        location_json_filename = 'uploads/replay_location_json_files/{}.json'.format(
            self.replay_id)

        if parse_netstream:
            # Prefer a previously pickled parse; otherwise parse and cache.
            try:
                self.replay = pickle.loads(
                    default_storage.open(pickle_filename).read())
            except (FileNotFoundError, OSError, S3ResponseError):
                try:
                    self.replay.parse_netstream()
                    default_storage.save(
                        pickle_filename,
                        ContentFile(pickle.dumps(self.replay)))
                except FrameParsingError:
                    # Bail us out of here early, just provide an 'old school' parse.
                    parse_netstream = False
                    traceback.print_exc()

        if not parse_netstream:
            return

        self._get_actors()

        # If the number of goals in the header doesn't match the number of goals
        # in the game, try to get the missing goal data from the netstream.
        """
         ('3e_Team1',
          {'actor_id': 3,
           'actor_type': 'Archetypes.Teams.Team1',
           'data': {'Engine.TeamInfo:Score': 1},
           'new': False,
           'startpos': 2053839}),
               """
        if len(self.replay.header.get('Goals', [])) < self.replay.header.get(
                'Team0Score', 0) + self.replay.header.get('Team1Score', 0):
            for index, frame in self.replay.netstream.items():
                for _, actor in frame.actors.items():
                    if 'data' not in actor:
                        continue

                    if ('Engine.TeamInfo:Score' in actor['data']
                            and 'TAGame.Team_TA:GameEvent' not in actor['data']
                            and actor['actor_type'].startswith(
                                'Archetypes.Teams.Team')):
                        if 'Goals' not in self.replay.header:
                            self.replay.header['Goals'] = []

                        self.replay.header['Goals'].append({
                            'PlayerName':
                            'Unknown player (own goal?)',
                            'PlayerTeam':
                            actor['actor_type'].replace(
                                'Archetypes.Teams.Team', ''),
                            'frame':
                            index
                        })

        # Extract the goal information.
        if 'Goals' in self.replay.header:
            for goal in self.replay.header['Goals']:
                self._extract_goal_data(goal['frame'])

        if 'NumFrames' in self.replay.header:
            assert len(self.team_metadata) == 2

        # Attach per-frame position data to each player actor; ball actors
        # are merged into a single synthetic 'ball' entry.
        for player in self.actors.copy():
            # Get their position data.
            if 'type' not in self.actors[player]:
                continue

            if self.actors[player]['type'] == 'player':
                self.actors[player][
                    'position_data'] = self._get_player_position_data(player)
            elif self.actors[player]['type'] == 'ball':
                if 'ball' not in self.actors:
                    self.actors['ball'] = {'position_data': {}}

                ball_data = self._get_player_position_data(player)

                self.actors['ball']['position_data'] = {
                    **self.actors['ball']['position_data'],
                    **ball_data
                }

                del self.actors[player]

        # Compress the location data per (player) actor.
        # Run-length encode "x,y" -> frame count for the heatmap JSON.
        compressed_data = {}

        for actor in self.actors:
            if 'type' not in self.actors[actor]:
                continue

            if self.actors[actor]['type'] == 'player':
                compressed_data[actor] = {}

                current_key = ''
                key = ''

                keys = self.actors[actor]['position_data'].keys()

                if len(keys) == 0:
                    continue

                # Gaps in position data reuse the last seen "x,y" key.
                for frame in range(min(keys), max(keys)):
                    if frame in self.actors[actor]['position_data']:
                        data = self.actors[actor]['position_data'][frame]
                        key = '{},{}'.format(data['x'], data['y'])

                    if key == current_key:
                        compressed_data[actor][key] += 1
                    else:
                        if key not in compressed_data[actor]:
                            compressed_data[actor][key] = 1
                        else:
                            compressed_data[actor][key] += 1

                # Sanity check: the run-length totals must cover the full
                # frame span for this actor.
                assert sum([
                    i[1] for i in compressed_data[actor].items()
                ]) == max(self.actors[actor]['position_data'], key=int) - min(
                    self.actors[actor]['position_data'], key=int)

        # Replace any stale heatmap JSON before saving the fresh one.
        if default_storage.exists(heatmap_json_filename):
            default_storage.delete(heatmap_json_filename)

        heatmap_json_filename = default_storage.save(
            heatmap_json_filename,
            ContentFile(json.dumps(compressed_data, separators=(',', ':'))))

        self.heatmap_json_filename = heatmap_json_filename

        if obj.eligble_for_analysis():
            # Advanced replay parsing.
            # Restructure the data so that it's chunkable.
            frame_data = []

            for frame in range(self.replay.header['NumFrames']):
                frame_dict = {
                    'time': self.replay.netstream[frame].current,
                    'actors': []
                }

                for player in self.actors:
                    position_data = self.actors[player]['position_data']

                    if frame in position_data:
                        frame_dict['actors'].append({
                            'id':
                            player,
                            'type':
                            self.actors[player].get('type', 'ball'),
                            **position_data[frame]
                        })

                frame_data.append(frame_dict)

            if default_storage.exists(location_json_filename):
                default_storage.delete(location_json_filename)

            self._get_boost_data()
            self._get_seconds_remaining()
            # pprint(self.boost_data)

            # Strip the bulky per-frame position data before serializing the
            # actor dicts.  NOTE(review): because small_actors holds the same
            # dict objects, this also mutates self.actors.
            small_actors = {}

            for key, value in self.actors.items():
                small_actors[key] = value

                del small_actors[key]['position_data']

            final_data = {
                'frame_data': frame_data,
                'goals': self.replay.header.get('Goals', []),
                'boost': self.boost_data,
                'seconds_mapping': self.seconds_mapping,
                'actors': self.actors,
                'teams': self.team_metadata
            }

            # NOTE(review): self.location_json_filename is only assigned on
            # this analysis path -- readers must not assume it always exists.
            location_json_filename = default_storage.save(
                location_json_filename,
                ContentFile(json.dumps(final_data, separators=(',', ':'))))
            self.location_json_filename = location_json_filename
Esempio n. 42
0
    def post(self, request, *args, **kwargs):
        """Handle the PDF-layout editor's POST actions.

        Three mutually exclusive modes, selected by the request payload:
        * a ``background`` file upload -> validate it and stash it as a
          week-long ``CachedFile``, returning its id and preview URL;
        * ``preview`` in POST -> render a throwaway ticket preview PDF
          inside a rolled-back transaction;
        * ``data`` in POST -> persist the layout JSON (and, if a cached
          background was referenced, move it into permanent storage) and
          invalidate all cached tickets for this provider.
        Anything else is a 400.
        """
        if "background" in request.FILES:
            error, fileobj = self.process_upload()
            if error:
                return JsonResponse({"status": "error", "error": error})
            # Keep the upload around temporarily; it only becomes permanent
            # when the user saves the layout ("data" branch below).
            c = CachedFile()
            c.expires = now() + timedelta(days=7)
            c.date = now()
            c.filename = 'background_preview.pdf'
            c.type = 'application/pdf'
            c.file = fileobj
            c.save()
            c.refresh_from_db()
            return JsonResponse({
                "status":
                "ok",
                "id":
                c.id,
                "url":
                reverse('plugins:ticketoutputpdf:pdf',
                        kwargs={
                            'event': request.event.slug,
                            'organizer': request.organizer.slug,
                            'filename': str(c.id)
                        })
            })

        # Resolve a previously uploaded cached background, if any was named.
        cf = None
        if request.POST.get("background", "").strip():
            try:
                cf = CachedFile.objects.get(id=request.POST.get("background"))
            except CachedFile.DoesNotExist:
                pass

        if "preview" in request.POST:
            # Render inside a transaction that is always rolled back so the
            # preview leaves no trace, using the event's locale.
            with rolledback_transaction(), language(
                    request.event.settings.locale):
                p = self._get_preview_position()

                prov = self.get_output(
                    override_layout=(json.loads(request.POST.get("data"))
                                     if request.POST.get("data") else None),
                    override_background=cf.file if cf else None)
                fname, mimet, data = prov.generate(p)

            resp = HttpResponse(data, content_type=mimet)
            ftype = fname.split(".")[-1]
            resp[
                'Content-Disposition'] = 'attachment; filename="ticket-preview.{}"'.format(
                    ftype)
            return resp
        elif "data" in request.POST:
            if cf:
                # NOTE(review): the existing file is looked up under the
                # '..._layout' settings key but the new file is stored under
                # '..._background' below — looks like a key mismatch; confirm
                # against the settings schema before changing.
                fexisting = request.event.settings.get(
                    'ticketoutput_{}_layout'.format(self.identifier),
                    as_type=File)
                if fexisting:
                    try:
                        default_storage.delete(fexisting.name)
                    except OSError:  # pragma: no cover
                        logger.error('Deleting file %s failed.' %
                                     fexisting.name)

                # Create new file
                nonce = get_random_string(length=8)
                fname = 'pub/%s-%s/%s/%s.%s.%s' % (
                    'event', 'settings', self.request.event.pk,
                    'ticketoutput_{}_layout'.format(
                        self.identifier), nonce, 'pdf')
                newname = default_storage.save(fname, cf.file)
                request.event.settings.set(
                    'ticketoutput_{}_background'.format(self.identifier),
                    'file://' + newname)

            request.event.settings.set(
                'ticketoutput_{}_layout'.format(self.identifier),
                request.POST.get("data"))

            # Drop every cached rendered ticket for this provider so the new
            # layout takes effect immediately.
            CachedTicket.objects.filter(
                order_position__order__event=self.request.event,
                provider=self.identifier).delete()
            CachedCombinedTicket.objects.filter(
                order__event=self.request.event,
                provider=self.identifier).delete()

            return JsonResponse({'status': 'ok'})
        return HttpResponseBadRequest()
Esempio n. 43
0
    def from_upload(cls,
                    upload,
                    addon,
                    selected_apps,
                    channel,
                    parsed_data=None):
        """
        Create a Version instance and corresponding File(s) from a
        FileUpload, an Addon, a list of compatible app ids, a channel id and
        the parsed_data generated by parse_addon().

        Note that it's the caller's responsibility to ensure the file is
        valid. We can't check for that here because an admin may have
        overridden the validation results.

        Raises VersionCreateError if the add-on is Mozilla-disabled. As a
        side effect the FileUpload's file on disk is deleted once its
        content has been copied into the new version's File(s).
        """
        assert parsed_data is not None

        from olympia.addons.models import AddonFeatureCompatibility

        if addon.status == amo.STATUS_DISABLED:
            raise VersionCreateError(
                'Addon is Mozilla Disabled; no new versions are allowed.')

        # Listed versions inherit the license of the most recent listed
        # version, if there is one.
        license_id = None
        if channel == amo.RELEASE_CHANNEL_LISTED:
            previous_version = addon.find_latest_version(channel=channel,
                                                         exclude=())
            if previous_version and previous_version.license_id:
                license_id = previous_version.license_id
        approvalnotes = None
        if parsed_data.get('is_mozilla_signed_extension'):
            approvalnotes = (u'This version has been signed with '
                             u'Mozilla internal certificate.')
        version = cls.objects.create(
            addon=addon,
            approvalnotes=approvalnotes,
            version=parsed_data['version'],
            license_id=license_id,
            channel=channel,
        )
        log.info('New version: %r (%s) from %r' %
                 (version, version.id, upload))
        activity.log_create(amo.LOG.ADD_VERSION, version, addon)
        # Update the add-on e10s compatibility since we're creating a new
        # version that may change that.
        e10s_compatibility = parsed_data.get('e10s_compatibility')
        if e10s_compatibility is not None:
            feature_compatibility = (
                AddonFeatureCompatibility.objects.get_or_create(
                    addon=addon)[0])
            feature_compatibility.update(e10s=e10s_compatibility)

        compatible_apps = {}
        for app in parsed_data.get('apps', []):
            if app.id not in selected_apps:
                # If the user chose to explicitly deselect Firefox for Android
                # we're not creating the respective `ApplicationsVersions`
                # which will have this add-on then be listed only for
                # Firefox specifically.
                continue

            compatible_apps[app.appdata] = ApplicationsVersions(
                version=version, min=app.min, max=app.max, application=app.id)
            compatible_apps[app.appdata].save()

        # See #2828: sometimes when we generate the filename(s) below, in
        # File.from_upload(), cache-machine is confused and has trouble
        # fetching the ApplicationsVersions that were just created. To work
        # around this we pre-generate version.compatible_apps and avoid the
        # queries completely.
        version._compatible_apps = compatible_apps

        # For backwards compatibility. We removed specific platform
        # support during submission but we don't handle it any different
        # beyond that yet. That means, we're going to simply set it
        # to `PLATFORM_ALL` and also have the backend create separate
        # files for each platform. Cleaning that up is another step.
        # Given the timing on this, we don't care about updates to legacy
        # add-ons as well.
        # Create relevant file and update the all_files cached property on the
        # Version, because we might need it afterwards.
        version.all_files = [
            File.from_upload(upload=upload,
                             version=version,
                             platform=amo.PLATFORM_ALL.id,
                             parsed_data=parsed_data)
        ]

        version.inherit_nomination(from_statuses=[amo.STATUS_AWAITING_REVIEW])
        version.disable_old_files()
        # After the upload has been copied to all platforms, remove the upload.
        storage.delete(upload.path)
        version_uploaded.send(sender=version)

        if waffle.switch_is_active('enable-uploads-commit-to-git-storage'):
            # Extract into git repository
            AddonGitRepository.extract_and_commit_from_file_obj(
                file_obj=version.all_files[0],
                channel=channel,
                author=upload.user)

        # Generate a preview and icon for listed static themes
        if (addon.type == amo.ADDON_STATICTHEME
                and channel == amo.RELEASE_CHANNEL_LISTED):
            theme_data = parsed_data.get('theme', {})
            generate_static_theme_preview(theme_data, version.pk)

        # Track the time it took from first upload through validation
        # (and whatever else) until a version was created.
        upload_start = utc_millesecs_from_epoch(upload.created)
        now = datetime.datetime.now()
        now_ts = utc_millesecs_from_epoch(now)
        upload_time = now_ts - upload_start

        log.info('Time for version {version} creation from upload: {delta}; '
                 'created={created}; now={now}'.format(delta=upload_time,
                                                       version=version,
                                                       created=upload.created,
                                                       now=now))
        statsd.timing('devhub.version_created_from_upload', upload_time)

        return version
Esempio n. 44
0
def save_files(files):
    """Persist every uploaded file via the default storage backend.

    Any pre-existing file stored under the same name is removed first so
    the new upload replaces it. The storage URL of each saved file is
    printed for diagnostics.
    """
    for upload in files.values():
        target_name = upload.name
        if default_storage.exists(target_name):
            default_storage.delete(target_name)
        saved_name = default_storage.save(target_name, upload)
        print("Saved uploaded file to: " + default_storage.url(saved_name))
Esempio n. 45
0
def details(request, id, size=None, crop=False, quality=90, download=False, constrain=False, template_name="files/details.html"):
    """
    Return an image response after parameters have been applied.

    Serves the file identified by ``id``; if it is an image and a ``size``
    (``"WxH"``) is given, a resized/cropped rendition is produced, cached to
    storage and redirected to on subsequent requests. Downloads and views
    are recorded as EventLog entries. Raises Http403 for permission
    failures and Http404 for missing files / unsupported formats.
    """
    cache_key = generate_image_cache_key(
        file=id,
        size=size,
        pre_key=FILE_IMAGE_PRE_KEY,
        crop=crop,
        unique_key=id,
        quality=quality,
        constrain=constrain)

    cached_image = cache.get(cache_key)

    # A cached rendition exists; skip all processing and redirect to it.
    if cached_image:
        return redirect(cached_image)

    file = get_object_or_404(File, pk=id)

    # basic permissions
    if not has_view_perm(request.user, 'files.view_file', file):
        raise Http403

    # extra permission
    if not file.is_public:
        if not request.user.is_authenticated():
            raise Http403

    # if string and digit convert to integer
    if isinstance(quality, basestring) and quality.isdigit():
        quality = int(quality)

    # get image binary
    try:
        data = file.file.read()
        file.file.close()
    except IOError:  # no such file or directory
        raise Http404

    if download:  # log download
        attachment = u'attachment;'
        EventLog.objects.log(**{
            'event_id': 185000,
            'event_data': '%s %s (%d) dowloaded by %s' % (file.type(), file._meta.object_name, file.pk, request.user),
            'description': '%s downloaded' % file._meta.object_name,
            'user': request.user,
            'request': request,
            'instance': file,
        })
    else:  # log view
        attachment = u''
        if file.type() != 'image':
            EventLog.objects.log(**{
                'event_id': 186000,
                'event_data': '%s %s (%d) viewed by %s' % (file.type(), file._meta.object_name, file.pk, request.user),
                'description': '%s viewed' % file._meta.object_name,
                'user': request.user,
                'request': request,
                'instance': file,
            })

    # if image size specified
    if file.type() == 'image' and size:  # if size specified

        if file.ext() in ('.tif', '.tiff'):
            raise Http404  # tifs cannot (currently) be viewed via browsers

        size = [int(s) if s.isdigit() else 0 for s in size.split('x')]
        size = aspect_ratio(file.image_dimensions(), size, constrain)

        # check for dimensions
        # greater than zero
        if not all(size):
            raise Http404

        # gets resized image from cache or rebuilds
        image = get_image(file.file, size, FILE_IMAGE_PRE_KEY, cache=True, crop=crop, quality=quality, unique_key=None)
        response = HttpResponse(content_type=file.mime_type())
        response['Content-Disposition'] = '%s filename=%s' % (attachment, file.get_name())

        params = {'quality': quality}
        if image.format == 'GIF':
            params['transparency'] = 0

        image.save(response, image.format, **params)

        if file.is_public_file():
            file_name = "%s%s" % (file.get_name(), ".jpg")
            file_path = 'cached%s%s' % (request.path, file_name)
            default_storage.delete(file_path)
            default_storage.save(file_path, ContentFile(response.content))
            full_file_path = "%s%s" % (settings.MEDIA_URL, file_path)
            cache.set(cache_key, full_file_path)
            cache_group_key = "files_cache_set.%s" % file.pk
            cache_group_list = cache.get(cache_group_key)

            if cache_group_list is None:
                cache.set(cache_group_key, [cache_key])
            else:
                cache_group_list += [cache_key]
                cache.set(cache_group_key, cache_group_list)

        return response

    if file.is_public_file():
        cache.set(cache_key, file.get_file_public_url())
        set_s3_file_permission(file.file, public=True)
        cache_group_key = "files_cache_set.%s" % file.pk
        cache_group_list = cache.get(cache_group_key)

        if cache_group_list is None:
            cache.set(cache_group_key, [cache_key])
        else:
            # BUGFIX: was `cache_group_list += cache_key`, which extends the
            # list with the individual *characters* of the key string. Append
            # the key itself, mirroring the image branch above.
            cache_group_list += [cache_key]
            cache.set(cache_group_key, cache_group_list)

    # set mimetype
    if file.mime_type():
        response = HttpResponse(data, content_type=file.mime_type())
    else:
        raise Http404

    # return response
    if file.get_name().endswith(file.ext()):
        response['Content-Disposition'] = '%s filename=%s' % (attachment, file.get_name())
    else:
        response['Content-Disposition'] = '%s filename=%s' % (attachment, file.get_name_ext())
    return response
Esempio n. 46
0
def _delete_file(sender, instance, **kwargs):
    """Delete the exercise's attachment file once the exercise row is gone."""
    attachment_path = instance.attachment.path
    default_storage.delete(attachment_path)
Esempio n. 47
0
def delete_project_dump(project_id, project_slug, task_id):
    """Remove the exported JSON dump for the given project/task from storage."""
    dump_path = "exports/{}/{}-{}.json".format(
        project_id, project_slug, task_id)
    default_storage.delete(dump_path)
Esempio n. 48
0
 def remove(self):
     """Delete this object's backing file from the default storage."""
     target_path = self.get_full_path()
     default_storage.delete(target_path)
Esempio n. 49
0
 def test_image_status_default(self):
     """Icons are still reported present for a default-style icon type,
     even after the icon file itself has been removed from storage."""
     self.setup_image_status()
     storage.delete(self.icon_dest)
     self.get_addon().update(icon_type='icon/photos')
     payload = json.loads(self.client.get(self.url).content)
     assert payload['icons']
Esempio n. 50
0
    def update(self, request, id=None):
        """Update an existing print job ("impressao") from request data.

        Returns True on a successful update, False when the current user is
        not allowed to edit the job, and None for a missing id, an
        unauthenticated user, or an unknown job id. Accepts both API
        (request.data) and HTML-form (request.POST) payloads.
        """

        if id is None:
            return None

        if not request.user.is_authenticated:
            return None

        impressao = self.impressaoRepository.getById(id=id)

        if impressao is None:
            return None

        request_from = ""

        try:  # API request (DRF-style request exposes .data)
            data = request.data
            request_from = "api"
        except:  # plain form post from the website
            data = request.POST
            request_from = "website"

        # A client may only edit the job while it has not been printed yet.
        if request.user.cliente and impressao.imprimida != True:

            if "colorida" in data:
                colorida = True if (data["colorida"] == 'on'
                                    or data["colorida"] == 'true') else False
                impressao.colorida = colorida
            elif request_from == "website":
                # Field absent from the form and the request came from the
                # website: HTML checkboxes are omitted when unchecked, so
                # absence means the "color" option is off.
                impressao.colorida = False

            if 'comentario' in data:
                impressao.comentario = data["comentario"]

            if 'turma' in data:
                turma = self.turmaRepository.getById(data["turma"])
                impressao.turma = turma

            if request.FILES.get("uri_arquivo"):

                file = request.FILES.get("uri_arquivo")
                default_storage.delete(str(
                    impressao.uri_arquivo))  # delete the old file first
                path = default_storage.save(file.name, ContentFile(
                    file.read()))  # save the upload in the default directory
                impressao.uri_arquivo = path  # point the job at the new file

            if 'qtd_copias' in data:
                impressao.qtd_copias = data["qtd_copias"]

            if 'tipo' in data:
                impressao.tipo = TipoImpressaoRepository().getById(
                    id=int(data["tipo"]))

            # UPDATE FILE

            impressao.save()

            return True

        # Not currently used (dead code kept from the original)
        # if request.user.funcionario:
        #     #campos que o funcionario pode editar
        #     # if "vizualizao_em" in data:
        #     #     impressao.vizualizao_em = data["vizualizao_em"]

        #     if "imprimida" in data:
        #         impressao.imprimida : data["imprimida"]

        #     if "prazo_entrega" in data:
        #         impressao.prazo_entrega : data["prazo_entrega"]

        #     impressao.save()

        #     return True

        return False
Esempio n. 51
0
 def test_preview_status_fails(self):
     """Previews must be reported missing once the thumbnail is deleted."""
     self.setup_image_status()
     storage.delete(self.preview.thumbnail_path)
     payload = json.loads(self.client.get(self.url).content)
     assert not payload['previews']
    def test_adding_file(self):
        """End-to-end check of the file-database round trip: saving a Thing
        lazily loads its file into the DB on first read, deleting via the
        model removes both DB row and filesystem copy, and dump/re-export
        restores files deleted from disk."""
        # Create default thing storing reference to file
        # in the local media directory.
        test_fqfn = os.path.join(self.media_dir, 'test.txt')
        # BUGFIX: the handle from open() was never closed; use a context
        # manager so the write is flushed deterministically.
        with open(test_fqfn, 'w') as fh:
            fh.write('hello there')
        o1 = o = Thing()
        test_fn = 'i/special/test.txt'
        o.upload = test_fn
        o.save()
        obj_id = o.id

        # Confirm thing was saved.
        Thing.objects.update()
        q = Thing.objects.all()
        self.assertEqual(q.count(), 1)
        self.assertEqual(q[0].upload.name, test_fn)

        # Confirm the file only exists on the file system
        # and hasn't been loaded into the database.
        q = File.objects.all()
        self.assertEqual(q.count(), 0)

        # Verify we can read the contents of thing.
        o = Thing.objects.get(id=obj_id)
        self.assertEqual(o.upload.read(), b"hello there")

        # Verify that by attempting to read the file, we've automatically
        # loaded it into the database.
        File.objects.update()
        q = File.objects.all()
        self.assertEqual(q.count(), 1)
        self.assertEqual(q[0].content, b"hello there")

        # Load a dynamically created file outside /media.
        test_file = files.temp.NamedTemporaryFile(
            suffix='.txt',
            dir=os.path.join(settings.PROJECT_DIR, 'media'),
        )
        data0 = b'1234567890'
        test_file.write(data0)
        test_file.seek(0)
        t = Thing.objects.create(upload=files.File(test_file), )
        self.assertEqual(File.objects.count(), 2)
        t = Thing.objects.get(pk=t.pk)
        self.assertEqual(t.upload.file.size, 10)
        self.assertEqual(t.upload.file.name[-4:], '.txt')
        self.assertEqual(t.upload.file.read(), data0)
        t.upload.delete()
        self.assertEqual(File.objects.count(), 1)

        # Delete file from local filesystem and re-export it from the database.
        self.assertEqual(os.path.isfile(test_fqfn), True)
        os.remove(test_fqfn)
        self.assertEqual(os.path.isfile(test_fqfn), False)
        o1.upload.read()  # This forces the re-export to the filesystem.
        self.assertEqual(os.path.isfile(test_fqfn), True)

        # This dumps all files to the filesystem.
        File.dump_files()

        # Confirm when delete a file from the database, we also delete it from
        # the filesystem.
        self.assertEqual(default_storage.exists('i/special/test.txt'), True)
        default_storage.delete('i/special/test.txt')
        self.assertEqual(default_storage.exists('i/special/test.txt'), False)
        self.assertEqual(os.path.isfile(test_fqfn), False)
Esempio n. 53
0
def delete_filefield(sender, **kwargs):
    """Remove the uploaded file from storage when its ArticleFile is deleted."""
    deleted_instance = kwargs.get('instance')
    default_storage.delete(deleted_instance.upload_file.path)
Esempio n. 54
0
 def test_image_status_persona(self):
     """Personas should always report icons present, even with the icon
     file removed from storage."""
     self.setup_image_status()
     storage.delete(self.icon_dest)
     self.get_addon().update(type=amo.ADDON_PERSONA)
     payload = json.loads(self.client.get(self.url).content)
     assert payload['icons']
Esempio n. 55
0
 def delete_photo(self, photo):
     """Delete the given photo's file from storage, if it exists."""
     photo_name = photo.name
     if not default_storage.exists(photo_name):
         return
     default_storage.delete(photo_name)
Esempio n. 56
0
 def test_image_status_fails(self):
     """Icons must be reported missing after the icon file is deleted."""
     self.setup_image_status()
     storage.delete(self.icon_dest)
     payload = json.loads(self.client.get(self.url).content)
     assert not payload['icons']
Esempio n. 57
0
def delete_filefield(**kwargs):
    """Remove a book's uploaded file from storage when the Book is deleted."""
    deleted_book = kwargs.get('instance')
    default_storage.delete(deleted_book.book_file.path)
Esempio n. 58
0
def git_upload(request):
    """Clone/update a user's git repository, run the named build from its
    collie_build.xml, and store the resulting artifact via default_storage.

    Returns a JSON response: {"status": 1, "file_name": ...} on success or
    {"status": -1, "error": ...} on any failure (permissions, clone, build,
    config parsing, or unknown build name).
    """
    username = request.user.username
    logger.info("user %s upload file from git", username)

    if settings.UPLOAD_MODE == "local_only":
        context = {
            "status": -1,
            "error": "only allow to upload from local file"
        }
        return HttpResponse(json.dumps(context),
                            content_type='application/json')
    repository = request.POST.get("repository")
    branch = request.POST.get("branch")
    version = request.POST.get("version")
    buildName = request.POST.get("build")
    # The user must have been explicitly granted access to this repository.
    if UserGit.objects.filter(username=username,
                              git_repository=repository).count() == 0:
        context = {
            "status": -1,
            "error":
            "you doesn't have permission to access [" + repository + "]"
        }
        return HttpResponse(json.dumps(context),
                            content_type='application/json')
    repoFolder = repository.split("/")[-1]
    repoTmpDir = settings.GIT_REPOSITORY_PATH + username + "/" + repoFolder
    try:
        repo = Repo.clone_from(repository, repoTmpDir)
        logger.info("clone from %s successfully", repository)
    except GitCommandError:
        # Can't clone (or the repository already exists locally); fall back
        # to the existing working copy.
        try:
            repo = Repo(repoTmpDir)
            logger.info("find repository from local dir: %s", repoTmpDir)
        except NoSuchPathError:
            context = {
                "status": -1,
                "error": "can't clone from [" + repository + "]"
            }
            return HttpResponse(json.dumps(context),
                                content_type='application/json')

    repo.git.checkout(branch)
    repo.git.pull()
    repo.git.checkout(version)

    confFile = repoTmpDir + "/collie_build.xml"
    # BUGFIX: previously targetFileName was only bound inside the loop, so a
    # build name with no match raised NameError (HTTP 500) at the end.
    targetFileName = None
    try:
        builds = confParser(confFile)
        for build in builds:
            if buildName == build['name']:
                # SECURITY: build['cmd'] comes from a repository the user
                # controls and is passed to a shell via os.system — this is
                # arbitrary command execution by design here; consider
                # subprocess.run with shell=False if that is not intended.
                cmd = "cd " + repoTmpDir + ";" + build['cmd']
                logger.info("build cmd: %s", cmd)
                if os.system(cmd) != 0:
                    context = {"status": -1, "error": "build cmd error"}
                    return HttpResponse(json.dumps(context),
                                        content_type='application/json')
                targetFile = repoTmpDir + "/" + build['target']
                if not os.path.exists(targetFile):
                    raise NoSuchPathError("can't find target file")
                targetFileName = os.path.basename(targetFile)
                if default_storage.exists(targetFileName):
                    default_storage.delete(targetFileName)
                # BUGFIX: read the artifact in binary mode and close the
                # handle; text mode could corrupt or fail on binary output.
                with open(targetFile, 'rb') as artifact:
                    path = default_storage.save(targetFileName,
                                                ContentFile(artifact.read()))
                logger.info(path)
                break

    except (NoSuchPathError, KeyError) as e:
        logger.info(e)
        context = {"status": -1, "error": "can't parse collie_build.xml"}
        return HttpResponse(json.dumps(context),
                            content_type='application/json')

    if targetFileName is None:
        context = {
            "status": -1,
            "error": "can't find build [" + str(buildName) + "]"
        }
        return HttpResponse(json.dumps(context),
                            content_type='application/json')

    context = {"status": 1, "file_name": targetFileName}
    return HttpResponse(json.dumps(context), content_type='application/json')
Esempio n. 59
0
def server_authentication(request):
    """Authenticate a member by face recognition on uploaded images.

    Saves the uploaded face images to tmp/, runs recognition for the member
    identified by ``card_id``, publishes the auth result and in/out state
    over MQTT, toggles the member's in-lab flag, and records a Logs entry
    with the matched (or last uploaded) image attached.
    """
    if request.method == 'POST':
        card_id = request.POST['card_id']
        member = Member.objects.get(card_id=card_id)
        last_image_name = ''
        # save images to /tmp folder
        for face_key in request.FILES:
            last_image_name = face_key
            data = request.FILES[face_key]
            face = ImageFile(data)
            face_path = 'tmp/' + str(data)
            if default_storage.exists(face_path):
                default_storage.delete(face_path)
            default_storage.save(face_path, face)

        # get result of predict list images
        list_predicts = face_recognize.recognition(member.recognize_label)
        # list_predicts = []
        if len(list_predicts):
            last_image_name = list_predicts[0][0]

        # check threshold: the first prediction below the member's threshold
        # counts as a successful authentication.
        result_auth = False
        f_name = None
        for file_name, conf in list_predicts:
            print(conf)
            if conf < member.threshold:
                result_auth = True
                f_name = file_name
                break
        # publish result auth to mqtt topic /pas/mqtt/icse/auth
        result_auth_payload = 'OK' if result_auth else 'FAIL'
        mqtt.publish(const.MQTT_AUTH_TOPIC, result_auth_payload)

        # get latest logs to check user in or out
        try:
            # TODO: check last log for new day, not last day
            last_log = Logs.objects.filter(member_id=member.id).latest('time_stamp')
            is_go_in = False if last_log.is_go_in else True
        except Logs.DoesNotExist:
            is_go_in = True

        member.is_in_lab = True if is_go_in else False
        member.save()

        # publish latest user scan to web browser
        latest_user_scan_payload = {
            'member_name': member.name,
            'state': 'Goes In' if is_go_in else 'Goes Out'
        }
        mqtt.publish(const.MQTT_LATEST_USER_SCAN, json.dumps(latest_user_scan_payload))

        # save logs
        log = Logs(
            time_stamp=timezone.now(),
            member=member,
            result_auth=result_auth,
            is_go_in=is_go_in,
        )
        f_name = f_name if result_auth else last_image_name
        file_path = os.path.join(const.TMP_FOLDER, f_name)
        # BUGFIX: the file handle from open() was never closed; use a
        # context manager so it is released deterministically.
        with open(file_path, 'rb') as image_fh:
            log.image.save(f_name, File(image_fh), save=True)
        log.save()

        return HttpResponse("POST request success")
    return HttpResponse("Not valid request type!")
Esempio n. 60
0
def delete_imagefield(**kwargs):
    """Remove the image file from storage when its model instance is deleted."""
    deleted_image = kwargs.get('instance')
    default_storage.delete(deleted_image.image_file.path)