Example #1
File: tests.py Project: zraurum/django
 def test_file_mode(self):
     # Should not set mode to None if it is not present.
     # See #14681, stdlib gzip module crashes if mode is set to None
     file = SimpleUploadedFile("mode_test.txt", b"content")
     self.assertFalse(hasattr(file, 'mode'))
     g = gzip.GzipFile(fileobj=file)
Example #2
 def test_file_upload_no_data(self):
     f = SimpleUploadedFile("myfile.txt", None)
     response = self.post(self.file_upload_url, {'file': f},
                          format='multipart')
     assert response.status_code == 500
Example #3
def create_path_for_photos_thumbanails(photos, product):
    # Creating path for large photos
    photosgroup = ''
    count = len(photos)
    for uploaded_file in photos:
        count = count - 1
        handle_uploaded_file(uploaded_file, product)
        if count == 0:
            photosgroup = photosgroup + str(product.photos)
        else:
            photosgroup = photosgroup + str(product.photos) + ','
    large_photos = photosgroup
    print "large_photos", large_photos

    # Creating path for thumbnail photos
    photo = str(large_photos)
    photos = photo.split(',')

    imagecount = len(photos)
    print "imagecount", imagecount

    thumbnail_group = ''
    if large_photos:
        try:
            count = len(photos)
            for photo in photos:
                print "photos", photo
                count = count - 1
                THUMBNAIL_SIZE = (400, 400)  # dimensions
                image = ImageObj.open(settings.MEDIA_ROOT + '/' + photo)
                print "image", image
                print "THUMBNAIL_SIZE", THUMBNAIL_SIZE
                # Convert to RGB if necessary
                if image.mode not in ('L', 'RGB'):
                    image = image.convert('RGB')
                # create a thumbnail + use antialiasing for a smoother thumbnail
                image.thumbnail(THUMBNAIL_SIZE, ImageObj.ANTIALIAS)
                # fetch image into memory
                temp_handle = StringIO()
                # print "temp", temp_handle
                image.save(temp_handle, 'png')
                temp_handle.seek(0)
                disassembled = urlparse(photo)
                filename, file_ext = splitext(basename(disassembled.path))
                suf = SimpleUploadedFile(filename + file_ext,
                                         temp_handle.read(),
                                         content_type='image/png')
                product.thumbnail.save(filename + '_thumbnail' + '.png',
                                       suf,
                                       save=False)
                # print product.thumbnail
                if count == 0:
                    thumbnail_group = thumbnail_group + str(product.thumbnail)
                else:
                    thumbnail_group = thumbnail_group + str(
                        product.thumbnail) + ','
            # print thumbnail_group
        except ImportError:
            pass
    thumbnail_photos = thumbnail_group
    return large_photos, imagecount, thumbnail_photos
Example #4
 def as_file(self, filename):
     return SimpleUploadedFile(name=filename, content=self.write().read())
Example #5
 def test_readonly_root(self):
     """Permission errors are not swallowed"""
     os.chmod(MEDIA_ROOT, 0o500)
     self.addCleanup(os.chmod, MEDIA_ROOT, 0o700)
     with self.assertRaises(PermissionError):
         self.obj.testfile.save('foo.txt', SimpleUploadedFile('foo.txt', b'x'), save=False)
Example #6
 def test_invalid_upload(self):
     self.login()
     response = self.client.post(
         reverse('assets-list'),
         {'file': SimpleUploadedFile('teste.mp4', b'')})
     self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
Example #7
def get_mock_photo():
    return SimpleUploadedFile('small.gif', small_gif, content_type='image/gif')
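
SimpleUploadedFile's third positional parameter is content_type (it defaults to 'text/plain'), so the original literal string 'content_type=image/gif' was stored verbatim as the MIME type; the corrected call above passes it by keyword. A minimal self-contained sketch, reusing the 1x1 GIF payload that appears in Example #16:

from django.core.files.uploadedfile import SimpleUploadedFile

small_gif = (b'GIF87a\x01\x00\x01\x00\x80\x01\x00\x00\x00\x00ccc,\x00'
             b'\x00\x00\x00\x01\x00\x01\x00\x00\x02\x02D\x01\x00;')

photo = SimpleUploadedFile('small.gif', small_gif, content_type='image/gif')
assert photo.content_type == 'image/gif'
assert photo.read() == small_gif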
Example #8
    def setUp(self):
        user_details = {
            'username': '******',
            'password': '******',
            'email': '*****@*****.**',
            'first_name': 'Test',
            'last_name': 'User',
        }
        self.user = User.objects.create_user(**user_details)
        self.org, self.org_user, _ = create_organization(self.user)
        self.org_b, self.org_user, _ = create_organization(self.user)
        self.client.login(**user_details)

        cycle_factory = FakeCycleFactory(organization=self.org, user=self.user)
        cycle_a = cycle_factory.get_cycle(name="Cycle A")
        cycle_b = cycle_factory.get_cycle(name="Cycle B")

        property_factory = FakePropertyFactory(organization=self.org)
        self.property_a = property_factory.get_property()
        property_b = property_factory.get_property()

        property_state_factory = FakePropertyStateFactory(
            organization=self.org)
        property_state_a = property_state_factory.get_property_state()
        property_state_b = property_state_factory.get_property_state()
        property_state_c = property_state_factory.get_property_state()
        property_state_d = property_state_factory.get_property_state()

        # create an analysis with two property views, each with the same property but a different cycle
        self.analysis_a = Analysis.objects.create(name='test a',
                                                  service=Analysis.BSYNCR,
                                                  status=Analysis.CREATING,
                                                  user=self.user,
                                                  organization=self.org)
        self.analysis_property_view_a = AnalysisPropertyView.objects.create(
            analysis=self.analysis_a,
            property=self.property_a,
            cycle=cycle_a,
            property_state=property_state_a)
        self.analysis_property_view_b = AnalysisPropertyView.objects.create(
            analysis=self.analysis_a,
            property=self.property_a,
            cycle=cycle_b,
            property_state=property_state_b)

        # create an analysis with two property views, each with the same cycle but a different property
        self.analysis_b = Analysis.objects.create(name='test b',
                                                  service=Analysis.BSYNCR,
                                                  status=Analysis.READY,
                                                  user=self.user,
                                                  organization=self.org)
        self.analysis_property_view_c = AnalysisPropertyView.objects.create(
            analysis=self.analysis_b,
            property=self.property_a,
            cycle=cycle_a,
            property_state=property_state_c)
        self.analysis_property_view_d = AnalysisPropertyView.objects.create(
            analysis=self.analysis_b,
            property=property_b,
            cycle=cycle_a,
            property_state=property_state_d)

        # create an analysis with no property views
        self.analysis_c = Analysis.objects.create(name='test c',
                                                  service=Analysis.BSYNCR,
                                                  status=Analysis.QUEUED,
                                                  user=self.user,
                                                  organization=self.org)

        # create an analysis with a different organization
        self.analysis_d = Analysis.objects.create(name='test d',
                                                  service=Analysis.BSYNCR,
                                                  status=Analysis.RUNNING,
                                                  user=self.user,
                                                  organization=self.org_b)

        # create an output file and add to 3 analysis property views
        self.analysis_output_file_a = AnalysisOutputFile.objects.create(
            file=SimpleUploadedFile('test file a', b'test file a contents'),
            content_type=AnalysisOutputFile.BUILDINGSYNC)
        self.analysis_output_file_a.analysis_property_views.add(
            self.analysis_property_view_a)
        self.analysis_output_file_a.analysis_property_views.add(
            self.analysis_property_view_b)
        self.analysis_output_file_a.analysis_property_views.add(
            self.analysis_property_view_c)

        # create an output file and add to 1 analysis property view
        self.analysis_output_file_b = AnalysisOutputFile.objects.create(
            file=SimpleUploadedFile('test file b', b'test file b contents'),
            content_type=AnalysisOutputFile.BUILDINGSYNC)
        self.analysis_output_file_b.analysis_property_views.add(
            self.analysis_property_view_a)
Example #9
sys.path.append("../server/")
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "dva.settings")
django.setup()
from django.core.files.uploadedfile import SimpleUploadedFile
from dvaui.view_shared import handle_uploaded_file
from dvaapp.models import Video, TEvent, DVAPQL, Retriever, TrainedModel, Export
from django.conf import settings
from dvaapp.processing import DVAPQLProcess
from dvaapp.tasks import perform_dataset_extraction, perform_indexing, perform_export, perform_import, \
    perform_detection, \
    perform_video_segmentation, perform_transformation

if __name__ == '__main__':
    for fname in glob.glob('data/citest*.mp4'):
        name = fname.split('/')[-1].split('.')[0]
        f = SimpleUploadedFile(fname, file(fname).read(), content_type="video/mp4")
        handle_uploaded_file(f, name)
    if settings.DEBUG:
        for fname in glob.glob('data/*.zip'):
            name = fname.split('/')[-1].split('.')[0]
            f = SimpleUploadedFile(fname, file(fname).read(), content_type="application/zip")
            handle_uploaded_file(f, name)
    for i, v in enumerate(Video.objects.all()):
        perform_import(TEvent.objects.get(video=v, operation='perform_import').pk)
        if v.dataset:
            arguments = {'sync': True}
            perform_dataset_extraction(TEvent.objects.create(video=v, arguments=arguments).pk)
        else:
            arguments = {'sync': True}
            perform_video_segmentation(TEvent.objects.create(video=v, arguments=arguments).pk)
        arguments = {'index': 'inception', 'target': 'frames'}
Example #10
    def setUp(self):
        self.client = APIClient(enforce_csrf_checks=True)

        self.user = User.objects.create(
            username='******',
            email="*****@*****.**",
            password='******')

        EmailAddress.objects.create(
            user=self.user,
            email='*****@*****.**',
            primary=True,
            verified=True)

        self.user1 = User.objects.create(
            username='******',
            email="*****@*****.**",
            password='******')

        EmailAddress.objects.create(
            user=self.user1,
            email='*****@*****.**',
            primary=True,
            verified=True)

        self.challenge_host_team = ChallengeHostTeam.objects.create(
            team_name='Test Challenge Host Team',
            created_by=self.user)

        self.participant_team = ParticipantTeam.objects.create(
            team_name='Participant Team for Challenge',
            created_by=self.user1)

        self.participant = Participant.objects.create(
            user=self.user1,
            status=Participant.SELF,
            team=self.participant_team)

        self.challenge = Challenge.objects.create(
            title='Test Challenge',
            description='Description for test challenge',
            terms_and_conditions='Terms and conditions for test challenge',
            submission_guidelines='Submission guidelines for test challenge',
            creator=self.challenge_host_team,
            start_date=timezone.now() - timedelta(days=2),
            end_date=timezone.now() + timedelta(days=1),
            published=False,
            enable_forum=True,
            anonymous_leaderboard=False)

        try:
            os.makedirs('/tmp/evalai')
        except OSError:
            pass

        with self.settings(MEDIA_ROOT='/tmp/evalai'):
            self.challenge_phase = ChallengePhase.objects.create(
                name='Challenge Phase',
                description='Description for Challenge Phase',
                leaderboard_public=False,
                is_public=False,
                start_date=timezone.now() - timedelta(days=2),
                end_date=timezone.now() + timedelta(days=1),
                challenge=self.challenge,
                test_annotation=SimpleUploadedFile('test_sample_file.txt',
                                                   b'Dummy file content', content_type='text/plain')
            )

        self.dataset_split = DatasetSplit.objects.create(name="Test Dataset Split", codename="test-split")

        self.leaderboard = Leaderboard.objects.create(schema=json.dumps({'hello': 'world'}))

        self.challenge_phase_split = ChallengePhaseSplit.objects.create(
            dataset_split=self.dataset_split,
            challenge_phase=self.challenge_phase,
            leaderboard=self.leaderboard,
            visibility=ChallengePhaseSplit.PUBLIC
            )

        self.submission = Submission.objects.create(
            participant_team=self.participant_team,
            challenge_phase=self.challenge_phase,
            created_by=self.challenge_host_team.created_by,
            status='submitted',
            input_file=self.challenge_phase.test_annotation,
            method_name="Test Method",
            method_description="Test Description",
            project_url="http://testserver/",
            publication_url="http://testserver/",
            is_public=True,
        )

        self.client.force_authenticate(user=self.user)
Example #11
    def test_post_shape_file(self):
        """
        testing upload a shape file
        """
        path = os.path.dirname(os.path.realpath(__file__))

        shp = open(path+'/points.shp', 'rb')
        dbf = open(path+'/points.dbf', 'rb')
        shx = open(path+'/points.shx', 'rb')



        params = {
            'shp': SimpleUploadedFile(
            name=shp.name, content=shp.read(), content_type='application/x-esri-shape'),
                  'dbf': SimpleUploadedFile(
                      name=dbf.name, content=dbf.read(),
                      content_type='application/x-dbf'),
                  'shx': SimpleUploadedFile(
                      name=shx.name, content=shx.read(),
                      content_type='application/x-esri-shape-index'),

                  'format_file': 'nt',
                  'class_store': 'type',
                  'name': 'point',
                  'attribute': 'osm_id',
                  'target_store': 'GeoSparql',
                  'ontology_NS_prefix': 'geo',
                  'ignore': 'UNK',
                  'ns_URI': 'http://www.opengis.net/ont/geosparql',
                  'ontology_NS': 'http://www.opengis.net/ont/geosparql',
                  'source_RS': '',
                  'target_RS': '',
                  'ns_prefix': '',
                  'default_lang': 'en',
                  'feature_string': 'points',
                  'input_file': 'ppp',
                  'output_file': 'points',
                  'type_wkt': 'point',


        }

        shp.close()
        shx.close()
        dbf.close()

        #content = encode_multipart('test', params)

       # print content


        request = self.factory.post(self.uri, params)
        #request = self.factory.post(self.uri, content
        #                            ,content_type='multipart/form-data; boundary=test')
        force_authenticate(request, user=self.user)
        response = self.view(request)
        response.render()

        self.assertEqual(response.status_code, 201,
            'Expected Response Code 201, received {0} instead.'
                         .format(response.status_code))
Example #12
def process_recap_zip(self, pk):
    """Process a zip uploaded from a PACER district court

    The general process is to use our existing infrastructure. We open the zip,
    identify the documents inside, and then associate them with the rest of our
    collection.

    :param self: A celery task object
    :param pk: The PK of the ProcessingQueue object to process
    :return: A dict with the new PQ IDs that were created (one per PDF
    enqueued) and the celery tasks processing them.
    """
    pq = ProcessingQueue.objects.get(pk=pk)
    mark_pq_status(pq, "", PROCESSING_STATUS.IN_PROGRESS)

    logger.info("Processing RECAP zip (debug is: %s): %s", pq.debug, pq)
    with ZipFile(pq.filepath_local.path, "r") as archive:
        # Security: Check for zip bombs.
        max_file_size = convert_size_to_bytes("200MB")
        for zip_info in archive.infolist():
            if zip_info.file_size < max_file_size:
                continue
            mark_pq_status(
                pq,
                "Zip too large; possible zip bomb. File in zip named %s "
                "would be %s bytes expanded." %
                (zip_info.filename, zip_info.file_size),
                PROCESSING_STATUS.INVALID_CONTENT,
            )
            return {"new_pqs": [], "tasks": []}

        # For each document in the zip, create a new PQ
        new_pqs = []
        tasks = []
        for file_name in archive.namelist():
            file_content = archive.read(file_name)
            f = SimpleUploadedFile(file_name, file_content)

            file_name = file_name.split(".pdf")[0]
            if "-" in file_name:
                doc_num, att_num = file_name.split("-")
                if att_num == "main":
                    att_num = None
            else:
                doc_num = file_name
                att_num = None

            if att_num:
                # An attachment, ∴ nuke the pacer_doc_id value, since it
                # corresponds to the main doc only.
                pacer_doc_id = ""
            else:
                pacer_doc_id = pq.pacer_doc_id

            # Create a new PQ and enqueue it for processing
            new_pq = ProcessingQueue.objects.create(
                court=pq.court,
                uploader=pq.uploader,
                pacer_case_id=pq.pacer_case_id,
                pacer_doc_id=pacer_doc_id,
                document_number=doc_num,
                attachment_number=att_num,
                filepath_local=f,
                status=PROCESSING_STATUS.ENQUEUED,
                upload_type=UPLOAD_TYPE.PDF,
                debug=pq.debug,
            )
            new_pqs.append(new_pq.pk)
            tasks.append(process_recap_pdf.delay(new_pq.pk))

        # At the end, mark the pq as successful and return the PQ
        mark_pq_status(
            pq,
            f"Successfully created ProcessingQueue objects: {oxford_join(new_pqs)}",
            PROCESSING_STATUS.SUCCESSFUL,
        )

        # Returning the tasks allows tests to wait() for the PDFs to complete
        # before checking assertions.
        return {
            "new_pqs": new_pqs,
            "tasks": tasks,
        }
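
The loop above expects zip entries named like '123-2.pdf' (document 123, attachment 2), '123-main.pdf' (the main document), or plain '123.pdf'. A standalone sketch that mirrors that parsing rule (like the original, it assumes at most one hyphen in the stem):

def parse_recap_name(file_name):
    """Split a '<doc>-<att>.pdf' zip entry into (doc_num, att_num)."""
    stem = file_name.split(".pdf")[0]
    if "-" in stem:
        doc_num, att_num = stem.split("-")
        if att_num == "main":
            att_num = None
    else:
        doc_num, att_num = stem, None
    return doc_num, att_num

assert parse_recap_name("123-2.pdf") == ("123", "2")
assert parse_recap_name("123-main.pdf") == ("123", None)
assert parse_recap_name("123.pdf") == ("123", None)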
Example #13
def ci():
    """
    Used in conjunction with travis for Continuous Integration testing
    :return:
    """
    import django
    sys.path.append(os.path.dirname(__file__))
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "dva.settings")
    django.setup()
    import base64
    from django.core.files.uploadedfile import SimpleUploadedFile
    from dvaapp.views import handle_uploaded_file, handle_youtube_video, \
        pull_vdn_list, import_vdn_dataset_url
    from dvaapp.models import Video, Clusters, IndexEntries, TEvent, VDNServer
    from django.conf import settings
    from dvaapp.operations.query_processing import QueryProcessing
    from dvaapp.tasks import extract_frames, inception_index_by_id, perform_ssd_detection_by_id,\
        perform_yolo_detection_by_id, inception_index_regions_by_id, export_video_by_id, import_video_by_id,\
        execute_index_subquery, perform_clustering, assign_open_images_text_tags_by_id, perform_face_detection,\
        perform_face_indexing
    for fname in glob.glob('tests/ci/*.mp4'):
        name = fname.split('/')[-1].split('.')[0]
        f = SimpleUploadedFile(fname,
                               file(fname).read(),
                               content_type="video/mp4")
        handle_uploaded_file(f, name, False)
    for fname in glob.glob('tests/*.mp4'):
        name = fname.split('/')[-1].split('.')[0]
        f = SimpleUploadedFile(fname,
                               file(fname).read(),
                               content_type="video/mp4")
        handle_uploaded_file(f, name, False)
    for fname in glob.glob('tests/*.zip'):
        name = fname.split('/')[-1].split('.')[0]
        f = SimpleUploadedFile(fname,
                               file(fname).read(),
                               content_type="application/zip")
        handle_uploaded_file(f, name)
    # handle_youtube_video('world is not enough', 'https://www.youtube.com/watch?v=P-oNz3Nf50Q')  # temporarily disabled due to an error in travis
    for i, v in enumerate(Video.objects.all()):
        extract_frames(TEvent.objects.create(video=v).pk)
        inception_index_by_id(TEvent.objects.create(video=v).pk)
        if i == 0:  # save travis time by just running detection on first video
            perform_ssd_detection_by_id(TEvent.objects.create(video=v).pk)
            perform_face_detection(TEvent.objects.create(video=v).pk)
            inception_index_regions_by_id(TEvent.objects.create(video=v).pk)
            assign_open_images_text_tags_by_id(
                TEvent.objects.create(video=v).pk)
        fname = export_video_by_id(
            TEvent.objects.create(video=v, event_type=TEvent.EXPORT).pk)
        f = SimpleUploadedFile(fname,
                               file("{}/exports/{}".format(
                                   settings.MEDIA_ROOT, fname)).read(),
                               content_type="application/zip")
        vimported = handle_uploaded_file(f, fname)
        import_video_by_id(TEvent.objects.create(video=vimported).pk)
    dc = Clusters()
    dc.indexer_algorithm = 'inception'
    dc.included_index_entries_pk = [
        k.pk for k in IndexEntries.objects.all().filter(
            algorithm=dc.indexer_algorithm)
    ]
    dc.components = 32
    dc.save()
    clustering_task = TEvent()
    clustering_task.clustering = dc
    clustering_task.event_type = TEvent.CLUSTERING
    clustering_task.operation = 'perform_clustering'
    clustering_task.save()
    perform_clustering(clustering_task.pk)
    query_dict = {
        'image_data_b64': base64.encodestring(file('tests/query.png').read()),
        'indexers': [{
            'algorithm': 'inception',
            'count': 10,
            'approximate': False
        }]
    }
    qp = QueryProcessing()
    qp.create_from_json(query_dict)
    execute_index_subquery(qp.indexer_queries[0].pk)
    query_dict = {
        'image_data_b64': base64.encodestring(file('tests/query.png').read()),
        'indexers': [{
            'algorithm': 'inception',
            'count': 10,
            'approximate': True
        }]
    }
    qp = QueryProcessing()
    qp.create_from_json(query_dict)
    execute_index_subquery(qp.indexer_queries[0].pk)
    server, datasets, detectors = pull_vdn_list(1)
    for k in datasets:
        if k['name'] == 'MSCOCO_Sample_500':
            print 'FOUND MSCOCO SAMPLE'
            import_vdn_dataset_url(VDNServer.objects.get(pk=1), k['url'], None)
    test_backup()
Example #14
File: tests.py Project: zraurum/django
    def test_files(self):
        temp_storage.save('tests/default.txt', ContentFile('default content'))
        # Attempting to access a FileField from the class raises a descriptive
        # error
        self.assertRaises(AttributeError, lambda: Storage.normal)

        # An object without a file has limited functionality.
        obj1 = Storage()
        self.assertEqual(obj1.normal.name, "")
        self.assertRaises(ValueError, lambda: obj1.normal.size)

        # Saving a file enables full functionality.
        obj1.normal.save("django_test.txt", ContentFile("content"))
        self.assertEqual(obj1.normal.name, "tests/django_test.txt")
        self.assertEqual(obj1.normal.size, 7)
        self.assertEqual(obj1.normal.read(), b"content")
        obj1.normal.close()

        # File objects can be assigned to FileField attributes, but shouldn't
        # get committed until the model it's attached to is saved.
        obj1.normal = SimpleUploadedFile("assignment.txt", b"content")
        dirs, files = temp_storage.listdir("tests")
        self.assertEqual(dirs, [])
        self.assertEqual(sorted(files), ["default.txt", "django_test.txt"])

        obj1.save()
        dirs, files = temp_storage.listdir("tests")
        self.assertEqual(
            sorted(files), ["assignment.txt", "default.txt", "django_test.txt"]
        )

        # Files can be read in a little at a time, if necessary.
        obj1.normal.open()
        self.assertEqual(obj1.normal.read(3), b"con")
        self.assertEqual(obj1.normal.read(), b"tent")
        self.assertEqual(list(obj1.normal.chunks(chunk_size=2)), [b"co", b"nt", b"en", b"t"])
        obj1.normal.close()

        # Save another file with the same name.
        obj2 = Storage()
        obj2.normal.save("django_test.txt", ContentFile("more content"))
        self.assertEqual(obj2.normal.name, "tests/django_test_1.txt")
        self.assertEqual(obj2.normal.size, 12)

        # Push the objects into the cache to make sure they pickle properly
        cache.set("obj1", obj1)
        cache.set("obj2", obj2)
        self.assertEqual(cache.get("obj2").normal.name, "tests/django_test_1.txt")

        # Deleting an object does not delete the file it uses.
        obj2.delete()
        obj2.normal.save("django_test.txt", ContentFile("more content"))
        self.assertEqual(obj2.normal.name, "tests/django_test_2.txt")

        # Multiple files with the same name get _N appended to them.
        objs = [Storage() for i in range(3)]
        for o in objs:
            o.normal.save("multiple_files.txt", ContentFile("Same Content"))
        self.assertEqual(
            [o.normal.name for o in objs],
            ["tests/multiple_files.txt", "tests/multiple_files_1.txt", "tests/multiple_files_2.txt"]
        )
        for o in objs:
            o.delete()

        # Default values allow an object to access a single file.
        obj3 = Storage.objects.create()
        self.assertEqual(obj3.default.name, "tests/default.txt")
        self.assertEqual(obj3.default.read(), b"default content")
        obj3.default.close()

        # But it shouldn't be deleted, even if there are no more objects using
        # it.
        obj3.delete()
        obj3 = Storage()
        self.assertEqual(obj3.default.read(), b"default content")
        obj3.default.close()

        # Verify the fix for #5655, making sure the directory is only
        # determined once.
        obj4 = Storage()
        obj4.random.save("random_file", ContentFile("random content"))
        self.assertTrue(obj4.random.name.endswith("/random_file"))
Example #15
def generate_vdn(fast=False):
    kill()
    import django
    sys.path.append(os.path.dirname(__file__))
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "dva.settings")
    django.setup()
    from django.core.files.uploadedfile import SimpleUploadedFile
    from dvaapp.views import handle_uploaded_file, handle_youtube_video
    from dvaapp import models
    from dvaapp.tasks import extract_frames, perform_face_detection_indexing_by_id, inception_index_by_id, \
        perform_ssd_detection_by_id, perform_yolo_detection_by_id, inception_index_ssd_detection_by_id, \
        export_video_by_id
    dirname = get_coco_dirname()
    local(
        'wget https://www.dropbox.com/s/2dq085iu34y0hdv/coco_input.zip?dl=1 -O coco.zip'
    )
    local('unzip coco.zip')
    with lcd(dirname):
        local("zip coco_input.zip -r *.jpg")
    fname = '{}/coco_input.zip'.format(dirname)
    with open('{}/coco_sample_metadata.json'.format(dirname)) as datafile:
        data = json.load(datafile)
    f = SimpleUploadedFile("coco_input.zip",
                           file(fname).read(),
                           content_type="application/zip")
    v = handle_uploaded_file(f, 'mscoco_sample_500')
    extract_frames(v.pk)
    video = v
    models.Annotation.objects.all().filter(video=video).delete()
    for frame in models.Frame.objects.all().filter(video=video):
        frame_id = str(int(frame.name.split('_')[-1].split('.')[0]))
        annotation = models.Annotation()
        annotation.video = v
        annotation.frame = frame
        annotation.full_frame = True
        annotation.metadata_json = json.dumps(data[frame_id]['image'])
        annotation.label = 'metadata'
        annotation.save()
    for frame in models.Frame.objects.all().filter(video=video):
        frame_id = str(int(frame.name.split('_')[-1].split('.')[0]))
        for a in data[frame_id][u'annotations']:
            annotation = models.Annotation()
            annotation.video = v
            annotation.frame = frame
            annotation.metadata_json = json.dumps(a)
            annotation.full_frame = False
            annotation.x = a['bbox'][0]
            annotation.y = a['bbox'][1]
            annotation.w = a['bbox'][2]
            annotation.h = a['bbox'][3]
            label, _ = models.VLabel.objects.get_or_create(
                video=video,
                label_name='coco_instance/{}/{}'.format(
                    a[u'category'][u'supercategory'], a[u'category'][u'name']))
            annotation.label = label.label_name
            annotation.label_parent = label
            annotation.save()
        for a in data[frame_id][u'keypoints']:
            annotation = models.Annotation()
            annotation.video = v
            annotation.frame = frame
            annotation.metadata_json = json.dumps(a)
            annotation.x = a['bbox'][0]
            annotation.y = a['bbox'][1]
            annotation.w = a['bbox'][2]
            annotation.h = a['bbox'][3]
            label, _ = models.VLabel.objects.get_or_create(
                video=video,
                label_name='coco_keypoints/{}/{}'.format(
                    a[u'category'][u'supercategory'], a[u'category'][u'name']))
            annotation.label = label.label_name
            annotation.label_parent = label
            annotation.save()
        for caption in data[frame_id][u'captions']:
            annotation = models.Annotation()
            annotation.video = v
            annotation.frame = frame
            annotation.metadata_text = caption['caption']
            annotation.full_frame = True
            label, _ = models.VLabel.objects.get_or_create(
                video=video, label_name='coco_caption')
            annotation.label = label.label_name
            annotation.label_parent = label
            annotation.save()
    if not fast:
        inception_index_by_id(v.pk)
        perform_ssd_detection_by_id(v.pk)
        perform_face_detection_indexing_by_id(v.pk)
        inception_index_ssd_detection_by_id(v.pk)
    export_video_by_id(v.pk)
    v = handle_youtube_video("Zelda",
                             "https://www.youtube.com/watch?v=vHiTxNrbB4M")
    extract_frames(v.pk)
    if not fast:
        inception_index_by_id(v.pk)
        perform_ssd_detection_by_id(v.pk)
        perform_face_detection_indexing_by_id(v.pk)
        inception_index_ssd_detection_by_id(v.pk)
    export_video_by_id(v.pk)
    v = handle_youtube_video("Paris",
                             "https://www.youtube.com/watch?v=zEAqJmS6ajk")
    extract_frames(v.pk)
    if not fast:
        inception_index_by_id(v.pk)
        perform_ssd_detection_by_id(v.pk)
        perform_face_detection_indexing_by_id(v.pk)
        inception_index_ssd_detection_by_id(v.pk)
    export_video_by_id(v.pk)
    local(
        'wget https://www.dropbox.com/s/g8dv5yeh9bmflec/lfw_funneled.zip?dl=1 -O lfw.zip'
    )
    f = SimpleUploadedFile("lfw.zip",
                           file("lfw.zip").read(),
                           content_type="application/zip")
    v = handle_uploaded_file(f, 'LFW subset')
    extract_frames(v.pk)
    if not fast:
        inception_index_by_id(v.pk)
        perform_face_detection_indexing_by_id(v.pk)
    export_video_by_id(v.pk)
Example #16
    signals.pre_save.connect(geoserver_pre_save_maplayer, sender=MapLayer)
    signals.post_save.connect(geoserver_post_save_map, sender=Map)
    signals.pre_save.connect(geoserver_pre_save, sender=Layer)
    signals.post_save.connect(geoserver_post_save, sender=Layer)


if 'geonode.geoserver' in settings.INSTALLED_APPS:
    disconnect_signals()

# This is used to populate the database with the search fixture data. This is
# primarily used as a first step to generate the json data for the fixture using
# django's dumpdata

imgfile = StringIO.StringIO('GIF87a\x01\x00\x01\x00\x80\x01\x00\x00\x00\x00ccc,\x00'
                            '\x00\x00\x00\x01\x00\x01\x00\x00\x02\x02D\x01\x00;')
f = SimpleUploadedFile('test_img_file.gif', imgfile.read(), 'image/gif')


def all_public():
    '''ensure all layers, maps and documents are publicly viewable'''
    for l in Layer.objects.all():
        l.set_default_permissions()
    for m in Map.objects.all():
        m.set_default_permissions()
    for d in Document.objects.all():
        d.set_default_permissions()


def create_fixtures():
    biota = TopicCategory.objects.get(identifier='biota')
    location = TopicCategory.objects.get(identifier='location')
Example #17
def product_image():
    img_data = BytesIO()
    image = Image.new('RGB', size=(1, 1))
    image.save(img_data, format='JPEG')
    return SimpleUploadedFile('product.jpg', img_data.getvalue())
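
Pillow cannot write an alpha channel to JPEG, which is why the helper builds the image in 'RGB' mode; a quick illustration of the failure and the usual convert() workaround:

from io import BytesIO
from PIL import Image

buf = BytesIO()
try:
    Image.new('RGBA', (1, 1)).save(buf, format='JPEG')
except OSError:
    pass  # JPEG has no alpha channel; 'RGBA' must be converted first
Image.new('RGBA', (1, 1)).convert('RGB').save(buf, format='JPEG')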
Example #18
File: test_admin.py Project: saadow123/1
    def test_oauth2_provider_edit_icon_image(self):
        """
        Test that we can update an OAuth provider's icon image from the admin
        form.

        OAuth providers are updated using KeyedConfigurationModelAdmin, which
        updates models by adding a new instance that replaces the old one,
        instead of editing the old instance directly.

        Updating the icon image is tricky here because
        KeyedConfigurationModelAdmin copies data over from the previous
        version by injecting its attributes into request.GET, but the icon
        ends up in request.FILES. We need to ensure that the value is
        prepopulated correctly, and that we can clear and update the image.
        """
        # Login as a super user
        user = UserFactory.create(is_staff=True, is_superuser=True)
        user.save()
        self.client.login(username=user.username, password='******')

        # Get baseline provider count
        providers = OAuth2ProviderConfig.objects.all()
        pcount = len(providers)

        # Create a provider
        provider1 = self.configure_dummy_provider(
            enabled=True,
            icon_class='',
            icon_image=SimpleUploadedFile(
                'icon.svg', b'<svg><rect width="50" height="100"/></svg>'),
        )

        # Get the provider instance with active flag
        providers = OAuth2ProviderConfig.objects.all()
        self.assertEqual(len(providers), 1)
        self.assertEqual(providers[pcount].id, provider1.id)

        # Edit the provider via the admin edit link
        admin = OAuth2ProviderConfigAdmin(provider1, AdminSite())
        update_url = reverse('admin:{}_{}_add'.format(
            admin.model._meta.app_label, admin.model._meta.model_name))
        update_url += "?source={}".format(provider1.pk)

        # Remove the icon_image from the POST data, to simulate unchanged icon_image
        post_data = models.model_to_dict(provider1)
        del post_data['icon_image']
        # Remove max_session_length and organization. A default null value must be POSTed
        # back as an absent value, rather than as a "null-like" included value.
        del post_data['max_session_length']
        del post_data['organization']

        # Change the name, to verify POST
        post_data['name'] = 'Another name'

        # Post the edit form: expecting redirect
        response = self.client.post(update_url, post_data)
        self.assertEqual(response.status_code, 302)

        # Editing the existing provider creates a new provider instance
        providers = OAuth2ProviderConfig.objects.all()
        self.assertEqual(len(providers), pcount + 2)
        self.assertEqual(providers[pcount].id, provider1.id)
        provider2 = providers[pcount + 1]

        # Ensure the icon_image was preserved on the new provider instance
        self.assertEqual(provider2.icon_image, provider1.icon_image)
        self.assertEqual(provider2.name, post_data['name'])
Example #19
    def test_plugin_config_attachment(self, mock_get, mock_reload):
        with self.settings(PLUGINS_INSTALL_VIA_API=True,
                           PLUGINS_CONFIGURE_VIA_API=True):
            tmp_file_1 = SimpleUploadedFile(
                "foo-database-1.db",
                base64.b64decode(HELLO_WORLD_PLUGIN_GITHUB_ZIP[1]),
                content_type="application/octet-stream",
            )
            tmp_file_2 = SimpleUploadedFile(
                "foo-database-2.db",
                base64.b64decode(HELLO_WORLD_PLUGIN_GITHUB_ATTACHMENT_ZIP[1]),
                content_type="application/zip",
            )

            self.assertEqual(PluginAttachment.objects.count(), 0)
            response = self.client.post(
                "/api/plugin/",
                {
                    "url":
                    "https://github.com/PostHog/helloworldplugin/commit/{}".
                    format(HELLO_WORLD_PLUGIN_GITHUB_ATTACHMENT_ZIP[0])
                },
                format="multipart",
            )
            plugin_id = response.data["id"]  # type: ignore
            response = self.client.post(
                "/api/plugin_config/",
                {
                    "plugin": plugin_id,
                    "enabled": True,
                    "order": 0,
                    "config": json.dumps({"bar": "moop"}),
                    "add_attachment[foodb]": tmp_file_1,
                },
            )
            plugin_config_id = response.data["id"]  # type: ignore
            plugin_attachment_id = response.data["config"]["foodb"][
                "uid"]  # type: ignore

            response = self.client.get(
                "/api/plugin_config/{}".format(plugin_config_id))
            self.assertEqual(
                response.data["config"],  # type: ignore
                {
                    "bar": "moop",
                    "foodb": {
                        "uid": plugin_attachment_id,
                        "saved": True,
                        "size": 1964,
                        "name": "foo-database-1.db",
                        "type": "application/octet-stream",
                    },
                },
            )

            response = self.client.patch(
                "/api/plugin_config/{}".format(plugin_config_id),
                {"add_attachment[foodb]": tmp_file_2},
                format="multipart",
            )
            self.assertEqual(PluginAttachment.objects.count(), 1)

            self.assertEqual(
                response.data["config"],  # type: ignore
                {
                    "bar": "moop",
                    "foodb": {
                        "uid": plugin_attachment_id,
                        "saved": True,
                        "size": 2279,
                        "name": "foo-database-2.db",
                        "type": "application/zip",
                    },
                },
            )

            response = self.client.patch(
                "/api/plugin_config/{}".format(plugin_config_id),
                {"remove_attachment[foodb]": True},
                format="multipart")
            self.assertEqual(response.data["config"],
                             {"bar": "moop"})  # type: ignore
            self.assertEqual(PluginAttachment.objects.count(), 0)
Example #20
def get_uploaded_file(name):
    data = open(get_image_path(name), 'rb').read()
    return SimpleUploadedFile(name, data,
                              content_type=mimetypes.guess_type(name)[0])
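
mimetypes.guess_type() returns a (type, encoding) tuple, which is why the helper indexes [0]:

import mimetypes

assert mimetypes.guess_type('photo.png') == ('image/png', None)
assert mimetypes.guess_type('archive.tar.gz') == ('application/x-tar', 'gzip')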
Example #21
def object_from_message(message, queue, logger):
    # 'message' must be an RFC822 formatted message.
    message = email.message_from_string(message)

    subject = message.get('subject', _('Comment from e-mail'))
    subject = decode_mail_headers(decodeUnknown(message.get_charset(),
                                                subject))
    for affix in STRIPPED_SUBJECT_STRINGS:
        subject = subject.replace(affix, "")
    subject = subject.strip()

    sender = message.get('from', _('Unknown Sender'))
    sender = decode_mail_headers(decodeUnknown(message.get_charset(), sender))
    # To address bug #832, wrap any display text in front of the email
    # address in double quotes via replace(), then take the first item of
    # the resulting list; the second item of that tuple is the actual email
    # address. The replace is a no-op for a bare address with no real name,
    # and getaddresses() handles the resulting unclosed quote correctly.
    # Not ideal, but it works for now.
    sender_email = email.utils.getaddresses(
        ['\"' + sender.replace('<', '\" <')])[0][1]

    body_plain, body_html = '', ''

    cc = message.get_all('cc', None)
    if cc:
        # first, fixup the encoding if necessary
        cc = [
            decode_mail_headers(decodeUnknown(message.get_charset(), x))
            for x in cc
        ]
        # get_all checks if multiple CC headers, but individual emails may be comma separated too
        tempcc = []
        for hdr in cc:
            tempcc.extend(hdr.split(','))
        # use a set to ensure no duplicates
        cc = set([x.strip() for x in tempcc])

    for ignore in IgnoreEmail.objects.filter(
            Q(queues=queue) | Q(queues__isnull=True)):
        if ignore.test(sender_email):
            if ignore.keep_in_mailbox:
                # Returning False keeps the message in the mailbox;
                # returning True causes it to be deleted.
                return False
            return True

    matchobj = re.match(r".*\[" + queue.slug + r"-(?P<id>\d+)\]", subject)
    if matchobj:
        # This is a reply or forward.
        ticket = matchobj.group('id')
        logger.info("Matched tracking ID %s-%s" % (queue.slug, ticket))
    else:
        logger.info("No tracking ID matched.")
        ticket = None

    body = None
    counter = 0
    files = []

    for part in message.walk():
        if part.get_content_maintype() == 'multipart':
            continue

        name = part.get_param("name")
        if name:
            name = email.utils.collapse_rfc2231_value(name)

        if part.get_content_maintype() == 'text' and name is None:
            if part.get_content_subtype() == 'plain':
                body = part.get_payload(decode=True)
                # https://github.com/django-helpdesk/django-helpdesk/issues/732
                if (part['Content-Transfer-Encoding'] == '8bit'
                        and part.get_content_charset() == 'utf-8'):
                    body = body.decode('unicode_escape')
                body = decodeUnknown(part.get_content_charset(), body)
                body = EmailReplyParser.parse_reply(body)
                # workaround to get unicode text out rather than escaped text
                try:
                    body = body.encode('ascii').decode('unicode_escape')
                except UnicodeEncodeError:
                    pass  # body already holds non-ASCII text; leave it as-is
                logger.debug("Discovered plain text MIME part")
            else:
                try:
                    email_body = encoding.smart_text(
                        part.get_payload(decode=True))
                except UnicodeDecodeError:
                    email_body = encoding.smart_text(
                        part.get_payload(decode=False))

                payload = """
<html>
<head>
<meta charset="utf-8"/>
</head>
%s
</html>""" % email_body
                files.append(
                    SimpleUploadedFile(_("email_html_body.html"),
                                       payload.encode("utf-8"), 'text/html'))
                logger.debug("Discovered HTML MIME part")
        else:
            if not name:
                ext = mimetypes.guess_extension(part.get_content_type())
                name = "part-%i%s" % (counter, ext)
            payload = part.get_payload()
            if isinstance(payload, list):
                payload = payload.pop().as_string()
            payloadToWrite = payload
            # Payloads that are not base64-encoded raise this error type below.
            non_b64_err = TypeError
            try:
                logger.debug("Try to base64 decode the attachment payload")
                payloadToWrite = base64.decodebytes(payload)
            except non_b64_err:
                logger.debug("Payload was not base64 encoded, using raw bytes")
                payloadToWrite = payload
            files.append(
                SimpleUploadedFile(name, part.get_payload(decode=True),
                                   mimetypes.guess_type(name)[0]))
            logger.debug("Found MIME attachment %s" % name)

        counter += 1

    if not body:
        mail = BeautifulSoup(str(message), "html.parser")
        beautiful_body = mail.find('body')
        if beautiful_body:
            try:
                body = beautiful_body.text
            except AttributeError:
                pass
        if not body:
            body = ""

    smtp_priority = message.get('priority', '')
    smtp_importance = message.get('importance', '')
    high_priority_types = {'high', 'important', '1', 'urgent'}
    priority = 2 if high_priority_types & {smtp_priority, smtp_importance} else 3

    payload = {
        'body': body,
        'subject': subject,
        'queue': queue,
        'sender_email': sender_email,
        'priority': priority,
        'files': files,
    }

    return create_object_from_email_message(message,
                                            ticket,
                                            payload,
                                            files,
                                            logger=logger)
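
The bug #832 workaround near the top of this function quotes the display name before calling getaddresses(); seen in isolation, the trick looks like this (a small sketch independent of the helpdesk code):

import email.utils

sender = 'Jane Q. Public <jane@example.com>'
# Quote everything before the angle bracket so punctuation in the display
# name cannot be misparsed as an address separator.
wrapped = '"' + sender.replace('<', '" <')
assert email.utils.getaddresses([wrapped])[0][1] == 'jane@example.com'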
Example #22
def webinar(cohort) -> Webinar:
    image = SimpleUploadedFile(name='renzo-nuccitelli.png', content=open(img_path, 'rb').read(),
                               content_type='image/png')
    return mommy.make(Webinar, cohort=cohort, image=image)
Example #23
 def get_members(self):
     return (SimpleUploadedFile(name=filename,
                                content=self.member_contents(filename))
             for filename in self.members())
Example #24
def create_photo_file():
    data = BytesIO()
    Image.new('RGB', (100, 100)).save(data, 'PNG')
    data.seek(0)
    return SimpleUploadedFile('photo.png', data.getvalue())
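
A typical use of this helper in a test; the '/photos/' endpoint and the 201 status are assumptions for illustration, not part of the original project:

from django.test import TestCase

class PhotoUploadTest(TestCase):
    def test_upload(self):
        # Hypothetical endpoint: adjust the URL and expected status to the app under test.
        response = self.client.post('/photos/', {'photo': create_photo_file()})
        self.assertEqual(response.status_code, 201)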
Example #25
    def get_seeds(self):
        """Return a new instance of SimpleUploadedFile. This file can only
        be used once."""

        # Content must be bytes; the third positional argument is the
        # content_type (assumed text/plain here), not an encoding.
        return SimpleUploadedFile('ht.seeds',
                                  'This is some content.\n'.encode('utf-8'),
                                  content_type='text/plain')
Example #26
 def test_tips_check_input_isoweek_valid(self):
     with open(self.file_path_nl, 'rb') as f:
         error_boo, msg = tips_utils.check_input(
             SimpleUploadedFile('Dyflexis_excel_export', f.read()), True,
             self.data['tips_amount'], self.data['overtips_amount'])
     self.assertFalse(error_boo)
Example #27
 def setUp(self):
     self.authorized_client = Client()
     self.authorized_client.force_login(NewPostFormTests.user)
     self.uploaded = SimpleUploadedFile(name='small.gif',
                                        content=NewPostFormTests.image,
                                        content_type='image/gif')
Example #28
def ci():
    """
    Used in conjunction with travis for Continuous Integration testing
    :return:
    """
    import django
    sys.path.append(os.path.dirname(__file__))
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "dva.settings")
    django.setup()
    import base64
    from django.core.files.uploadedfile import SimpleUploadedFile
    from dvaapp.views import handle_uploaded_file, handle_youtube_video, create_query
    from dvaapp.models import Video, Clusters, IndexEntries
    from django.conf import settings
    from dvaapp.tasks import extract_frames, perform_face_indexing, inception_index_by_id, perform_ssd_detection_by_id,\
        perform_yolo_detection_by_id, inception_index_ssd_detection_by_id, export_video_by_id, import_video_by_id,\
        inception_query_by_image, perform_clustering
    for fname in glob.glob('tests/ci/*.mp4'):
        name = fname.split('/')[-1].split('.')[0]
        f = SimpleUploadedFile(fname,
                               file(fname).read(),
                               content_type="video/mp4")
        handle_uploaded_file(f, name, False)
    for fname in glob.glob('tests/*.zip'):
        name = fname.split('/')[-1].split('.')[0]
        f = SimpleUploadedFile(fname,
                               file(fname).read(),
                               content_type="application/zip")
        handle_uploaded_file(f, name)
    handle_youtube_video('tomorrow never dies',
                         'https://www.youtube.com/watch?v=gYtz5sw98Bc')
    for i, v in enumerate(Video.objects.all()):
        extract_frames(v.pk)
        inception_index_by_id(v.pk)
        if i == 0:  # save travis time by just running detection on first video
            perform_ssd_detection_by_id(v.pk)
            perform_yolo_detection_by_id(v.pk)
            perform_face_indexing(v.pk)
            inception_index_ssd_detection_by_id(v.pk)
        fname = export_video_by_id(v.pk)
        f = SimpleUploadedFile(fname,
                               file("{}/exports/{}".format(
                                   settings.MEDIA_ROOT, fname)).read(),
                               content_type="application/zip")
        vimported = handle_uploaded_file(f, fname)
        import_video_by_id(vimported.pk)
    dc = Clusters()
    dc.indexer_algorithm = 'inception'
    dc.included_index_entries_pk = [
        k.pk for k in IndexEntries.objects.all().filter(
            algorithm=dc.indexer_algorithm)
    ]
    dc.components = 32
    dc.save()
    perform_clustering(dc.pk)
    query, dv = create_query(
        10, False, [
            'inception',
        ], [], 'data:image/png;base64,' +
        base64.encodestring(file('tests/query.png').read()))
    inception_query_by_image(query.pk)
    query, dv = create_query(
        10, True, [
            'inception',
        ], [], 'data:image/png;base64,' +
        base64.encodestring(file('tests/query.png').read()))
    inception_query_by_image(query.pk)
    test_backup()
Example #29
    def test_wronghexdigest(self):
        uploadedfile = SimpleUploadedFile("file", b"filecontents")

        self.assertNotEqual(md5reader(uploadedfile),
                            md5(b"lerolero").hexdigest())
Example #30
 def setup(self):
     self.user = models.User.objects.create(username="******")
     img = SimpleUploadedFile(name="test.jpg", content=b"file data", content_type="image/jpeg")
     self.img = models.OfficerImg.objects.create(officer=self.user, img=img)
     self.img.save()