Code Example #1
    def test_to_xls_export_respects_custom_field_delimiter(self):
        survey = self._create_childrens_survey()
        export_builder = ExportBuilder()
        export_builder.GROUP_DELIMITER = ExportBuilder.GROUP_DELIMITER_DOT
        export_builder.set_survey(survey)
        xls_file = NamedTemporaryFile(suffix='.xls')
        filename = xls_file.name
        export_builder.to_xls_export(filename, self.data)
        xls_file.seek(0)
        wb = load_workbook(filename)

        # check header columns
        main_sheet = wb.get_sheet_by_name('childrens_survey')
        expected_column_headers = [
            u'name', u'age', u'geo.geolocation', u'geo._geolocation_latitude',
            u'geo._geolocation_longitude', u'geo._geolocation_altitude',
            u'geo._geolocation_precision', u'tel.tel.office',
            u'tel.tel.mobile', u'_id', u'meta.instanceID', u'_uuid',
            u'_submission_time', u'_index', u'_parent_index',
            u'_parent_table_name', u'_tags', '_notes', '_version',
            '_duration', '_submitted_by']
        column_headers = [c[0].value for c in main_sheet.columns]
        self.assertEqual(sorted(column_headers),
                         sorted(expected_column_headers))
        xls_file.close()
Code Example #2
    def test_child_record_parent_table_is_updated_when_sheet_is_renamed(self):
        survey = create_survey_from_xls(_logger_fixture_path(
            'childrens_survey_with_a_very_long_name.xls'))
        export_builder = ExportBuilder()
        export_builder.set_survey(survey)
        xls_file = NamedTemporaryFile(suffix='.xlsx')
        filename = xls_file.name
        export_builder.to_xls_export(filename, self.long_survey_data)
        xls_file.seek(0)
        wb = load_workbook(filename)

        # get the children's sheet
        ws1 = wb.get_sheet_by_name('childrens_survey_with_a_very_l1')

        # parent_table is in cell K2
        parent_table_name = ws1.cell('K2').value
        expected_parent_table_name = 'childrens_survey_with_a_very_lo'
        self.assertEqual(parent_table_name, expected_parent_table_name)

        # get cartoons sheet
        ws2 = wb.get_sheet_by_name('childrens_survey_with_a_very_l2')
        parent_table_name = ws2.cell('G2').value
        expected_parent_table_name = 'childrens_survey_with_a_very_l1'
        self.assertEqual(parent_table_name, expected_parent_table_name)
        xls_file.close()
Code Example #3
File: alignment.py Project: jlhg/bdorseq
def multiple_dna(*args):
    """
    Each positional argument is a tuple: (seq_name, seq_frame, seq)
    """
    seq_name_lengths = []
    input_file = NamedTemporaryFile(prefix="mafft_")

    for arg in args:
        seq_name, seq_frame, seq = arg

        if seq_frame < 0:
            seq_name = "%s(%s)" % (seq_name, "-")
            seq = Seq(seq).reverse_complement().tostring()
        elif seq_frame > 0:
            seq_name = "%s(%s)" % (seq_name, "+")

        input_file.write(">%s\n%s\n" % (seq_name, seq.upper()))
        seq_name_lengths.append(len(seq_name))

    input_file.flush()

    namelength = max(seq_name_lengths) + 4

    mafft_cmd = (
        "mafft --genafpair --maxiterate 1000 --preservecase --clustalout --namelength "
        + str(namelength)
        + " "
        + input_file.name
    )
    mafft_proc = Popen(mafft_cmd, stdout=PIPE, stderr=PIPE, shell=True)

    stdout, stderr = mafft_proc.communicate()
    input_file.close()

    return stdout
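
A minimal sketch of the same invocation without shell=True, passing an argument list instead of an interpolated command string (it reuses namelength and input_file from the function above and assumes mafft is on PATH):

    mafft_args = [
        "mafft", "--genafpair", "--maxiterate", "1000", "--preservecase",
        "--clustalout", "--namelength", str(namelength), input_file.name,
    ]
    # No shell parsing is involved, so the temp file name needs no quoting.
    mafft_proc = Popen(mafft_args, stdout=PIPE, stderr=PIPE)
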
Code Example #4
def savefile():
    # Create a local file
    f = NamedTemporaryFile()
    f.write('I am contents')
    new_file = FileManager.objects.create(name='test file')
    # Save local file to FileField in our model
    new_file.file.save('new file name.oeb', File(f))
    f.close()
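
The snippet above writes a str, which works on Python 2; on Python 3 NamedTemporaryFile defaults to mode 'w+b', so the payload must be bytes. A minimal sketch of the same pattern for Python 3, reusing the FileManager model from the snippet:

    from django.core.files import File
    from django.core.files.temp import NamedTemporaryFile

    f = NamedTemporaryFile()
    f.write(b'I am contents')  # bytes, since the default mode is 'w+b'
    f.flush()                  # harmless here; required if the file is reopened by name
    new_file = FileManager.objects.create(name='test file')
    new_file.file.save('new file name.oeb', File(f))
    f.close()
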
Code Example #5
    def copy_oracle_base(self, queryset):
        fichier = NamedTemporaryFile(suffix='.json')
        data = serializers.serialize("json", queryset)
        fichier.writelines(data)
        fichier.flush()

        call_command('loaddata', fichier.name.__str__())
        fichier.close()
Code Example #6
 def copy_oracle_base(self, queryset, usings=['default']):
     fichier = NamedTemporaryFile(suffix='.json')
     data = serializers.serialize("json", queryset)
     fichier.writelines(data)
     fichier.flush()
     for using in usings:
         call_command('loaddata', fichier.name.__str__(), database=using)
     fichier.close()
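
A hypothetical call of this variant, copying a queryset into two configured database aliases (MyModel is a stand-in for any serializable model):

    self.copy_oracle_base(MyModel.objects.all(), usings=['default', 'replica'])
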
Code Example #7
File: views.py Project: sinyawskiy/ryabina
    def post(self, request):
        if 'qqfile' in request.GET:
            request_file_name = request.GET['qqfile']
            base, ext = os.path.splitext(request_file_name)
            if ext.upper() not in self.SUPPORTED_IMAGE_EXTS:
                result = { 'upload': False, 'filename': request_file_name, 'file_id': u'', 'error_message' : u'Тип файла не поддерживается' }
                return HttpResponse(json.dumps(result), content_type='text/html')

            create_dir(self.UPLOAD_DIR)

            destination = NamedTemporaryFile(delete=False, suffix=ext)
            temp_full_name = destination.name

            chunk = self.get_chunk(request)

            counted = 0
            while len(chunk) > 0:
                counted += len(chunk)
                if counted > self.MAX_FILE_SIZE:
                    destination.close()
                    os.remove(temp_full_name)
                    result = { 'upload': False, 'filename': request_file_name, 'file_id': u'', 'error_message' : u'Файл более 512КБ' }
                    return HttpResponse(json.dumps(result), content_type='text/html')

                destination.write(chunk)
                chunk = self.get_chunk(request)

            destination.close()
            file_name = '%s.jpeg' % id_generator()
            img = Image.open(temp_full_name)
            k = 1.0 * self.IMAGE_SIZE[1] / self.IMAGE_SIZE[0]
            h_new = int(k * img.size[0])
            if h_new != img.size[1]:
                if h_new < img.size[1]: # cut top and bottom
                    cut_top = int((img.size[1] - h_new) / 2.0)
                    cropped = img.crop((0, cut_top, img.size[0], cut_top + h_new))
                else:                   # cut left and right
                    w_new = int(img.size[1] / k)
                    cut_left = int((img.size[0] - w_new) / 2.0)
                    cropped = img.crop((cut_left, 0, cut_left + w_new, img.size[1]))
            else:
                cropped = img
            full_path = os.path.join(self.UPLOAD_DIR, file_name)
            finish_file = cropped.resize(self.IMAGE_SIZE, Image.ANTIALIAS)
            finish_file.convert('RGB').save(full_path, quality=100)

            os.unlink(temp_full_name)
            
            cp = CommentPhoto()
            cp.photo = '%s/%s' % (self.IMAGE_PATH, file_name)
            cp.save()
            
            result = { 'upload': True, 'filename': os.path.join(settings.MEDIA_URL, self.IMAGE_PATH, file_name), 'file_id': u'%s' % cp.temp, 'error_message' : u'' }
            # except Exception:
            #     result = { 'upload': False, 'filename': request_file_name, 'error_message' : u'Тип файла не поддерживается' }
            return HttpResponse(json.dumps(result), content_type='text/html')
        else:
            raise Http404()
Code Example #8
 def test_xls_convert_dates_before_1900(self):
     survey = create_survey_from_xls(viewer_fixture_path("test_data_types/test_data_types.xls"))
     export_builder = ExportBuilder()
     export_builder.set_survey(survey)
     data = [{"name": "Abe", "when": "1899-07-03"}]
     # create export file
     temp_xls_file = NamedTemporaryFile(suffix=".xlsx")
     export_builder.to_xls_export(temp_xls_file.name, data)
     temp_xls_file.close()
Code Example #9
File: forms.py Project: reichert/trustsign-portal
 def converte_p7_pem(self, certificado):
     file_in = NamedTemporaryFile(delete=False)
     file_in.write(certificado)
     path_in = file_in.name
     file_in.close()
     # Since the PyOpenSSL library does not handle pkcs7, the system's openssl binary on Linux is used instead.
     cert = self._run("openssl pkcs7 -print_certs -in %s" % path_in)
     os.remove(path_in)
     return cert
Code Example #10
File: export_tools.py Project: KeithDoyle/formhub
def generate_export(export_type, extension, username, id_string,
                    export_id = None, filter_query=None,
                    group_delimiter='/',
                    split_select_multiples=True):
    """
    Create appropriate export object given the export type
    """
    from odk_viewer.models import Export
    xform = XForm.objects.get(user__username=username, id_string=id_string)

    df_builder = _df_builder_for_export_type(
        export_type, username, id_string, group_delimiter,
        split_select_multiples, filter_query)
    if hasattr(df_builder, 'get_exceeds_xls_limits')\
            and df_builder.get_exceeds_xls_limits():
        extension = 'xlsx'

    temp_file = NamedTemporaryFile(suffix=("." + extension))
    df_builder.export_to(temp_file.name)
    basename = "%s_%s" % (id_string,
                             datetime.now().strftime("%Y_%m_%d_%H_%M_%S"))
    filename = basename + "." + extension

    # check filename is unique
    while not Export.is_filename_unique(xform, filename):
        filename = increment_index_in_filename(filename)

    file_path = os.path.join(
        username,
        'exports',
        id_string,
        export_type,
        filename)

    # TODO: if s3 storage, make private - how will we protect local storage??
    storage = get_storage_class()()
    # seek to the beginning as required by storage classes
    temp_file.seek(0)
    export_filename = storage.save(
        file_path,
        File(temp_file, file_path))
    temp_file.close()

    dir_name, basename = os.path.split(export_filename)

    # get or create export object
    if(export_id):
        export = Export.objects.get(id=export_id)
    else:
        export = Export(xform=xform, export_type=export_type)
    export.filedir = dir_name
    export.filename = basename
    export.internal_status = Export.SUCCESSFUL
    # don't persist exports that have a filter
    if filter_query is None:
        export.save()
    return export
Code Example #11
File: models.py Project: kabirh/riotvine
    def _create_resized_images(self, raw_field, save):
        """Generate scaled down images for avatars."""
        if not self.avatar_image:
            return None
        # Derive base filename (strip out the relative directory).
        filename = os.path.split(self.avatar_image.name)[-1]
        ctype = guess_type(filename)[0]
        ext = os.path.splitext(filename)[1]
        if not ext:
            ext = '.jpg'

        t = None
        try:
            try:
                pth = self.avatar_image.path
            except NotImplementedError:
                from django.core.files.temp import NamedTemporaryFile
                t = NamedTemporaryFile(suffix=ext)
                ix = self.avatar_image
                for d in ix.chunks(4000000):
                    t.write(d)
                t.flush()
                t.seek(0)
                pth = t

            # Generate avatar.
            remove_model_image(self, 'avatar')
            self.avatar = None
            avatar_contents = resize_in_memory(pth, settings.AVATAR_IMAGE_CROP, crop=settings.AVATAR_IMAGE_CROP, crop_before_resize=True)
            if avatar_contents:
                avatar_file = str_to_file(avatar_contents)
                avatar_field = InMemoryUploadedFile(avatar_file, None, None, ctype, len(avatar_contents), None)
                self.avatar.save(name='avatar-%s' % filename, content=avatar_field, save=save)
                avatar_file.close()

            # Generate medium-sized avatar.
            remove_model_image(self, 'avatar_medium')
            self.avatar_medium = None
            if t:
                t.seek(0)
            avatar_contents = resize_in_memory(pth, settings.AVATAR_MEDIUM_IMAGE_CROP, crop=settings.AVATAR_MEDIUM_IMAGE_CROP, crop_before_resize=True)
            if avatar_contents:
                avatar_file = str_to_file(avatar_contents)
                avatar_field = InMemoryUploadedFile(avatar_file, None, None, ctype, len(avatar_contents), None)
                self.avatar_medium.save(name='avatar-med-%s' % filename, content=avatar_field, save=save)
                avatar_file.close()

            if t:
                t.close()
            if save:
                super(UserProfile, self).save()
        except Exception:
            raise
        finally:
            if t:
                t.close()
Code Example #12
File: views.py Project: Paddock/paddock-registration
def car_avatar(request, car_id):
    """Handles a POST or PUT to a car for an avatar file upload, returns JSON"""

    car = Car.objects.get(pk=car_id)

    if car.user_profile.user != request.user: 
        return HttpResponse(mimetype="application/json",
                            status=403)

    if request.FILES is None:
        msg = "No Files uploaded!"
        return HttpResponse(content=json.dumps({'msg': msg}),
                            mimetype="application/json",
                            status=415)

    # getting file data for further manipulation
    data = {}
    if not request.FILES: 
        #clear out the images
        car.avatar = None
        car.thumb = None
        car.save()
        data["msg"] = "Upload successful"
        data['avatar'] = None
        data['thumb'] = None
    else:    
        uploaded = request.FILES['file']

        avatar_img = Image.open(uploaded)

        avatar_img.thumbnail((400, 400), Image.ANTIALIAS)
        avatar_file = NamedTemporaryFile()
        avatar_img.save(avatar_file, 'JPEG')
        name = '%d_avatar.jpg'%car.pk
        if os.path.exists(name): 
            os.remove(name)
        car.avatar.save(name, File(avatar_file))
        avatar_file.close()

        uploaded.seek(0)
        thumb_img = Image.open(uploaded)
        thumb_img.thumbnail((100, 100), Image.ANTIALIAS)
        thumb_file = NamedTemporaryFile()
        thumb_img.save(thumb_file, 'JPEG')
        name = '%d_thumb.jpg'%car.pk
        if os.path.exists(name): 
            os.remove(name)
        car.thumb.save(name, File(thumb_file))
        thumb_file.close()

        uploaded.close()
        car.save()
        data["msg"] = "Upload successful"
        data['avatar'] = car.avatar.url
        data['thumb'] = car.thumb.url
    return HttpResponse(json.dumps(data), mimetype='application/json')
Code Example #13
File: models.py Project: nim65s/django-cineclub
 def save(self, *args, **kwargs):
     if self.pk is None or Film.objects.get(pk=self.pk).imdb_poster_url != self.imdb_poster_url:
         img = requests.get(self.imdb_poster_url)
         if img.status_code == requests.codes.ok:
             img_temp = NamedTemporaryFile(delete=True)
             img_temp.write(img.content)
             img_temp.flush()
             self.imdb_poster.save(self.slug, File(img_temp), save=False)
             img_temp.close()
     super().save(*args, **kwargs)
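
The img_temp.flush() before save() matters here: Django's File wrapper reads from the underlying file object, and bytes still sitting in Python's write buffer could otherwise be missed. The same download-into-temp-file pattern recurs in Code Examples #26 and #34 below.
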
Code Example #14
File: sync.py Project: vinitkumar/aldryndemo
def _sync_changed_files(sync_key, last_commit_hash, sync_url, project_dir):
    if not os.path.exists(COMMIT_CACHE_FILEPATH):
        open(COMMIT_CACHE_FILEPATH, 'w').close()
    commit_cache_file = open(COMMIT_CACHE_FILEPATH, 'r+')
    fd = commit_cache_file.fileno()
    temp_file = None
    try:
        fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except IOError as e:
        print "other process is already running the sync:\n%s" % repr(e)
    else:
        last_commit_hash_from_cache = commit_cache_file.read()
        if last_commit_hash_from_cache:
            last_commit_hash = last_commit_hash_from_cache
        temp_file = NamedTemporaryFile(prefix='sync_changed_files', suffix='.tar.gz')
        signer = URLSafeTimedSerializer(sync_key)
        signed_data = signer.dumps(last_commit_hash)
        data = {'last_commit_hash': signed_data}
        response = requests.post(sync_url, data=data, stream=True)
        if response.ok:
            data_signature = hmac.new(key=str(sync_key), digestmod=hashlib.sha1)
            for chunk in response.iter_content(chunk_size=CHUNK_SIZE):
                data_signature.update(chunk)
                temp_file.write(chunk)
            temp_file.seek(0)
            data_signature = data_signature.hexdigest()
            header_signature = response.headers.get('aldryn-sync-signature')
            if not constant_time_compare(header_signature, data_signature):
                # TODO log failed attempt to corrupt the website's data
                raise RuntimeError(
                    'Sync signatures does not match:\ndata:\t%s\nheader:\t%s' %
                    (data_signature, header_signature))
            tarball = tarfile.open(mode='r:gz', fileobj=temp_file)
            for member in tarball.getmembers():
                path = member.name
                if path.startswith(('static/', 'templates/')):
                    full_path = os.path.join(project_dir, path)
                    directory = os.path.dirname(full_path)
                    if not os.path.exists(directory):
                        os.makedirs(directory)
                    tarball.extract(member, project_dir)
            tarball.close()
            # Successfully synced files, storing the newest commit hash
            current_commit_hash = response.headers.get(
                'aldryn-sync-current-commit', last_commit_hash)
            commit_cache_file.seek(0)
            commit_cache_file.truncate()
            commit_cache_file.write(current_commit_hash)
        else:
            response.raise_for_status()
    finally:
        commit_cache_file.close()
        if temp_file:
            temp_file.close()
Code Example #15
File: import_blog.py Project: okfde/fragdenstaat_de
 def get_image(self, image_url):
     resp = requests.get(image_url)
     if resp.status_code != 200:
         print('Warning: %s does not exist' % image_url)
         return None
     img_tmp = NamedTemporaryFile(delete=False)
     img_tmp.write(resp.content)
     img_tmp.flush()
     img_tmp.close()
     filename = os.path.basename(image_url)
     return File(open(img_tmp.name, 'rb'), name=filename)
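
Two details of this pattern are worth noting: delete=False means the temp file is never removed automatically, so a caller may want to os.remove(img_tmp.name) once the returned File has been consumed, and closing before reopening by name is what makes this work on Windows, where an open NamedTemporaryFile cannot be opened a second time.
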
Code Example #16
 def test_zipped_csv_export_works_with_unicode(self):
     """
     csv writer doesn't handle unicode, so we have to encode to ascii
     """
     survey = create_survey_from_xls(_logger_fixture_path(
         'childrens_survey_unicode.xls'))
     export_builder = ExportBuilder()
     export_builder.set_survey(survey)
     temp_zip_file = NamedTemporaryFile(suffix='.zip')
     export_builder.to_zipped_csv(temp_zip_file.name, self.data_utf8)
     temp_zip_file.seek(0)
     temp_dir = tempfile.mkdtemp()
     zip_file = zipfile.ZipFile(temp_zip_file.name, "r")
     zip_file.extractall(temp_dir)
     zip_file.close()
     temp_zip_file.close()
     # check that the children's file (which has the unicode header) exists
     self.assertTrue(
         os.path.exists(
             os.path.join(temp_dir, "children.info.csv")))
     # check file's contents
     with open(os.path.join(temp_dir, "children.info.csv")) as csv_file:
         reader = csv.reader(csv_file)
         expected_headers = ['children.info/name.first',
                             'children.info/age',
                             'children.info/fav_colors',
                             u'children.info/fav_colors/red\u2019s',
                             u'children.info/fav_colors/blue\u2019s',
                             u'children.info/fav_colors/pink\u2019s',
                             'children.info/ice_creams',
                             'children.info/ice_creams/vanilla',
                             'children.info/ice_creams/strawberry',
                             'children.info/ice_creams/chocolate', '_id',
                             '_uuid', '_submission_time', '_index',
                             '_parent_table_name', '_parent_index',
                             u'_tags', '_notes', '_version',
                             '_duration', '_submitted_by']
         rows = [row for row in reader]
         actual_headers = [h.decode('utf-8') for h in rows[0]]
         self.assertEqual(sorted(actual_headers), sorted(expected_headers))
         data = dict(zip(rows[0], rows[1]))
         self.assertEqual(
             data[u'children.info/fav_colors/red\u2019s'.encode('utf-8')],
             'True')
         self.assertEqual(
             data[u'children.info/fav_colors/blue\u2019s'.encode('utf-8')],
             'True')
         self.assertEqual(
             data[u'children.info/fav_colors/pink\u2019s'.encode('utf-8')],
             'False')
         # check that red and blue are set to true
     shutil.rmtree(temp_dir)
Code Example #17
File: tests.py Project: AlfioEmanueleFresta/jorvik
    def test_zip(self):

        p = Persona(
            nome="Mario",
            cognome="Rossi",
            codice_fiscale="FRSSAKJSIKAJD2",
            data_nascita="1994-2-5"
        )
        p.save()

        z = Zip(oggetto=p)

        # Create file 1
        f1 = NamedTemporaryFile(delete=False, mode='wt')
        f1.write(self.CONTENUTO_1)
        f1.close()

        # Create file 2
        f2 = NamedTemporaryFile(delete=False, mode='wt')
        f2.write(self.CONTENUTO_2)
        f2.close()

        # Generate the ZIP file
        z.aggiungi_file(f1.name, self.NOME_1)
        z.aggiungi_file(f2.name, self.NOME_2)
        z.comprimi_e_salva(nome='TestZip.zip')

        with ZipFile(z.file.path, 'r') as zip:

            self.assertIsNone(
                zip.testzip(),
                msg="Il file Zip non e' corrotto"
            )

            r1 = zip.open(self.NOME_1)
            self.assertTrue(
                r1.read().decode() == self.CONTENUTO_1,
                msg="Il contenuto del primo file coincide"
            )

            r2 = zip.open(self.NOME_2)
            self.assertTrue(
                r2.read().decode() == self.CONTENUTO_2,
                msg="Il contenuto del secondo file coincide"
            )

            zip.close()

        self.assertTrue(
            p.allegati.all(),
            msg="Allegato associato correttamente alla persona"
        )
Code Example #18
File: utils.py Project: kabirh/riotvine
def create_photo_versions(sender, instance, **kwargs):
    """Create `PhotoVersion`` objects for the photo object defined by `instance`.

    A version is created for a bounding box defined by each PhotoSize instance.

    """    
    from photo.models import Photo, PhotoSize, PhotoVersion
    photo = instance
    ext = '.jpg'
    t = None
    try:
        pth = photo.image.path
    except NotImplementedError:
        from django.core.files.temp import NamedTemporaryFile
        t = NamedTemporaryFile(suffix=ext)
        ix = photo.image
        if ix.closed:
            # Reload from DB
            photo = Photo.objects.get(pk=photo.pk)
            ix = photo.image
        for d in ix.chunks(4000000):
            t.write(d)
        t.flush()
        t.seek(0)
        pth = t
    for size in PhotoSize.objects.all():
        # Create a suitable filename.
        filename = '%s-%s-%s%s' % (photo.pk, uuid4().hex[::7], slugify(size.name)[:10], ext)
        ctype = guess_type(filename)[0]
        temp_file = TemporaryUploadedFile(name=filename, content_type=ctype, size=0, charset=None)
        if t:
            t.seek(0)
        try:
            version = PhotoVersion.objects.get(photo=photo, size=size)
            remove_model_image(version, 'image')
            version.image = None
        except PhotoVersion.DoesNotExist:
            version = PhotoVersion(photo=photo, size=size)
        if size.do_crop:
            resize_to, crop_box, input_image = get_perfect_fit_resize_crop(size.bounding_box, (photo.width, photo.height))
        else:
            resize_to = size.bounding_box
            crop_box = None
        # Resize to a temporary location.
        resize(pth, resize_to, out_file_path=temp_file, crop=crop_box)
        # Save resized copy to `version` instance.
        temp_file.seek(0) # Prepare file for a re-read.
        version.image.save(name=filename, content=temp_file, save=True)
        temp_file.close()
    if t:
        t.close()
Code Example #19
File: image.py Project: ZhouYunan/moto-moe
    def save(self, r):
        if isinstance(r,ResumableFile):
            file_obj =  NamedTemporaryFile(delete=True)
            for data in r.chunks():
                file_obj.write(data)
        else: #temp file
            file_obj = r
        try:
            img = Image.open(file_obj)
        except Exception:
            # Re-raise: silently passing here would leave `img` undefined below.
            raise
        if (IMAGE_SIZE.size and (img.size[0] > IMAGE_SIZE.size['width'] or
                            img.size[1] > IMAGE_SIZE.size['height'])):
            size = IMAGE_SIZE.size

            if size['force']:
                img = ImageOps.fit(img,
                                   (size['width'],
                                    size['height']),
                                   Image.ANTIALIAS)
            else:
                img.thumbnail((size['width'],
                               size['height']),
                              Image.ANTIALIAS)
            try:
                img.save(self.large_path)
            except FileNotFoundError:
                os.makedirs(os.path.dirname(self.large_path), 0o755)
                img.save(self.large_path)
        else:
            try:
                img.save(self.large_path)
            except FileNotFoundError:
                os.makedirs(os.path.dirname(self.large_path), 0o755)
                img.save(self.large_path)
        if IMAGE_SIZE.thumbnail:
            size = IMAGE_SIZE.thumbnail

            if size['force']:
                thumbnail = ImageOps.fit(img, (size['width'], size['height']), Image.ANTIALIAS)
            else:
                thumbnail = img.copy()
                thumbnail.thumbnail((size['width'],
                                     size['height']),
                                    Image.ANTIALIAS)
            try:
                thumbnail.save(self.thumbnail_path)
            except FileNotFoundError:
                os.makedirs(os.path.dirname(self.thumbnail_path))
                thumbnail.save(self.thumbnail_path)
        file_obj.close()
Code Example #20
File: client.py Project: ArcTanSusan/django
    def runshell_db(cls, conn_params):
        args = [cls.executable_name]

        host = conn_params.get('host', '')
        port = conn_params.get('port', '')
        dbname = conn_params.get('database', '')
        user = conn_params.get('user', '')
        passwd = conn_params.get('password', '')

        if user:
            args += ['-U', user]
        if host:
            args += ['-h', host]
        if port:
            args += ['-p', str(port)]
        args += [dbname]

        temp_pgpass = None
        sigint_handler = signal.getsignal(signal.SIGINT)
        try:
            if passwd:
                # Create temporary .pgpass file.
                temp_pgpass = NamedTemporaryFile(mode='w+')
                try:
                    print(
                        _escape_pgpass(host) or '*',
                        str(port) or '*',
                        _escape_pgpass(dbname) or '*',
                        _escape_pgpass(user) or '*',
                        _escape_pgpass(passwd),
                        file=temp_pgpass,
                        sep=':',
                        flush=True,
                    )
                    os.environ['PGPASSFILE'] = temp_pgpass.name
                except UnicodeEncodeError:
                    # If the current locale can't encode the data, let the
                    # user input the password manually.
                    pass
            # Allow SIGINT to pass to psql to abort queries.
            signal.signal(signal.SIGINT, signal.SIG_IGN)
            subprocess.check_call(args)
        finally:
            # Restore the original SIGINT handler.
            signal.signal(signal.SIGINT, sigint_handler)
            if temp_pgpass:
                temp_pgpass.close()
                if 'PGPASSFILE' in os.environ:  # unit tests need cleanup
                    del os.environ['PGPASSFILE']
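
For reference, the print(..., sep=':') call above writes a standard one-line .pgpass entry. A small self-contained sketch of the format, with illustrative values:

    # Illustrative values; _escape_pgpass above handles ':' and '\' in fields.
    host, port, dbname, user, passwd = 'db.example.com', 5432, 'mydb', 'alice', 's3cret'
    pgpass_line = ':'.join([host or '*', str(port) or '*', dbname or '*',
                            user or '*', passwd])
    print(pgpass_line)  # db.example.com:5432:mydb:alice:s3cret
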
Code Example #21
    def test_to_sav_export(self):
        survey = self._create_childrens_survey()
        export_builder = ExportBuilder()
        export_builder.set_survey(survey)
        temp_zip_file = NamedTemporaryFile(suffix='.zip')
        filename = temp_zip_file.name
        export_builder.to_zipped_sav(filename, self.data)
        temp_zip_file.seek(0)
        temp_dir = tempfile.mkdtemp()
        zip_file = zipfile.ZipFile(temp_zip_file.name, "r")
        zip_file.extractall(temp_dir)
        zip_file.close()
        temp_zip_file.close()

        # generate data to compare with
        index = 1
        indices = {}
        survey_name = survey.name
        outputs = []
        for d in self.data:
            outputs.append(
                dict_to_joined_export(d, index, indices, survey_name))
            index += 1

        # check that each file exists
        self.assertTrue(
            os.path.exists(
                os.path.join(temp_dir, "{0}.sav".format(survey.name))))

        def _test_sav_file(section):
            with SavReader(
                    os.path.join(
                        temp_dir, "{0}.sav".format(section)),
                    returnHeader=True) as reader:
                header = next(reader)
                rows = [r for r in reader]

                # open comparison file
                with SavReader(_logger_fixture_path(
                        'spss', "{0}.sav".format(section)),
                        returnHeader=True) as fixture_reader:
                    fixture_header = next(fixture_reader)
                    self.assertEqual(header, fixture_header)
                    expected_rows = [r for r in fixture_reader]
                    self.assertEqual(rows, expected_rows)

        for section in export_builder.sections:
            section_name = section['name'].replace('/', '_')
            _test_sav_file(section_name)
Code Example #22
 def test_xls_convert_dates_before_1900(self):
     survey = create_survey_from_xls(viewer_fixture_path(
         'test_data_types/test_data_types.xls'))
     export_builder = ExportBuilder()
     export_builder.set_survey(survey)
     data = [
         {
             'name': 'Abe',
             'when': '1899-07-03',
         }
     ]
     # create export file
     temp_xls_file = NamedTemporaryFile(suffix='.xlsx')
     export_builder.to_xls_export(temp_xls_file.name, data)
     temp_xls_file.close()
Code Example #23
 def test_xls_export_works_with_unicode(self):
     survey = create_survey_from_xls(_logger_fixture_path("childrens_survey_unicode.xls"))
     export_builder = ExportBuilder()
     export_builder.set_survey(survey)
     temp_xls_file = NamedTemporaryFile(suffix=".xlsx")
     export_builder.to_xls_export(temp_xls_file.name, self.data_utf8)
     temp_xls_file.seek(0)
     # check that values for red\u2019s and blue\u2019s are set to true
     wb = load_workbook(temp_xls_file.name)
     children_sheet = wb.get_sheet_by_name("children.info")
     data = dict([(r[0].value, r[1].value) for r in children_sheet.columns])
     self.assertTrue(data[u"children.info/fav_colors/red\u2019s"])
     self.assertTrue(data[u"children.info/fav_colors/blue\u2019s"])
     self.assertFalse(data[u"children.info/fav_colors/pink\u2019s"])
     temp_xls_file.close()
Code Example #24
File: views.py Project: NilsJPWerner/Sublet-Uchicago
def ajax_fb_photo(request):
    if request.method == "POST":
        try:
            fb = SocialAccount.objects.get(user=request.user, provider="facebook")
            url = fb.get_avatar_url()
            img_temp = NamedTemporaryFile(delete=True)
            img_temp.write(urllib2.urlopen(url).read())
            img_temp.flush()
            file_name = "profile_picture_" + str(request.user.id)
            request.user.extendeduser.profile_picture.save(file_name, File(img_temp))
            img_temp.close()
            return HttpResponse(request.user.extendeduser.profile_picture.url)
        except:
            return HttpResponse("not_logged_in")
    else:
        return HttpResponseBadRequest()
Code Example #25
File: export_tools.py Project: onaio/onadata
def write_temp_file_to_path(suffix, content, file_path):
    """ Write a temp file and return the name of the file.
    :param suffix: The file suffix
    :param content: The content to write
    :param file_path: The path to write the temp file to
    :return: The filename written to
    """
    temp_file = NamedTemporaryFile(suffix=suffix)
    temp_file.write(content)
    temp_file.seek(0)
    export_filename = default_storage.save(
        file_path,
        File(temp_file, file_path))
    temp_file.close()

    return export_filename
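
A hypothetical invocation, assuming Django's default_storage is configured; the suffix, payload, and destination path are made up for illustration:

    saved_name = write_temp_file_to_path(
        suffix='.csv',
        content=b'name,age\nAbe,34\n',
        file_path='demo/exports/report.csv',
    )
    # The storage backend may adjust the final name to keep the path unique.
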
Code Example #26
File: models.py Project: nim65s/django-cineclub
 def save(self, *args, **kwargs):
     # Check if we need to update the slug
     update = self.pk is None
     if not update:
         orig = Film.objects.get(pk=self.pk)
         if orig.slug != self.slug or orig.imdb_poster_url != self.imdb_poster_url:
             update = True
     if update:
         self.slug = slugify(self.titre)[:48]
         img = requests.get(self.imdb_poster_url)
         if img.status_code == requests.codes.ok:
             img_temp = NamedTemporaryFile(delete=True)
             img_temp.write(img.content)
             img_temp.flush()
             self.imdb_poster.save(self.slug, File(img_temp), save=False)
             img_temp.close()
     super(Film, self).save(*args, **kwargs)
Code Example #27
File: importstrips.py Project: fish2000/achewood
def AWGetTemporaryFileForURL(url, **kwargs):
	if str(url).startswith('http'):
		suffix = "gif"
		if 'suffix' in kwargs:
			suffix = kwargs['suffix']
			del kwargs['suffix']
		
		itemp = NamedTemporaryFile(suffix=(".%s" % suffix), **kwargs)
		
		try:
			itemp.write(urllib2.urlopen(url).read())
		except urllib2.URLError, urlerr:
			itemp.close()
			itemp = None
		else:
			itemp.flush()
		return itemp
Code Example #28
def generate_osm_export(
        export_type, extension, username, id_string, export_id=None,
        filter_query=None):
    # TODO resolve circular import
    from onadata.apps.viewer.models.export import Export
    xform = XForm.objects.get(user__username=username, id_string=id_string)
    attachments = Attachment.objects.filter(
        extension=Attachment.OSM,
        instance__xform=xform
    )
    content = get_combined_osm([a.media_file for a in attachments])

    basename = "%s_%s" % (id_string,
                          datetime.now().strftime("%Y_%m_%d_%H_%M_%S"))
    filename = basename + "." + extension
    file_path = os.path.join(
        username,
        'exports',
        id_string,
        export_type,
        filename)

    storage = get_storage_class()()
    temp_file = NamedTemporaryFile(suffix=extension)
    temp_file.write(content)
    temp_file.seek(0)
    export_filename = storage.save(
        file_path,
        File(temp_file, file_path))
    temp_file.close()

    dir_name, basename = os.path.split(export_filename)

    # get or create export object
    if(export_id):
        export = Export.objects.get(id=export_id)
    else:
        export = Export.objects.create(xform=xform, export_type=export_type)

    export.filedir = dir_name
    export.filename = basename
    export.internal_status = Export.SUCCESSFUL
    export.save()

    return export
Code Example #29
 def test_to_xls_export_generates_valid_sheet_names(self):
     survey = create_survey_from_xls(_logger_fixture_path(
         'childrens_survey_with_a_very_long_name.xls'))
     export_builder = ExportBuilder()
     export_builder.set_survey(survey)
     xls_file = NamedTemporaryFile(suffix='.xls')
     filename = xls_file.name
     export_builder.to_xls_export(filename, self.data)
     xls_file.seek(0)
     wb = load_workbook(filename)
     # check that we have childrens_survey, children, children_cartoons
     # and children_cartoons_characters sheets
     expected_sheet_names = ['childrens_survey_with_a_very_lo',
                             'childrens_survey_with_a_very_l1',
                             'childrens_survey_with_a_very_l2',
                             'childrens_survey_with_a_very_l3']
     self.assertEqual(wb.get_sheet_names(), expected_sheet_names)
     xls_file.close()
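
The truncated names reflect Excel's 31-character limit on sheet names: the parent sheet keeps the first 31 characters of the survey name, and each child sheet swaps the tail for a numeric suffix so the names stay unique within that limit.
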
Code Example #30
def generate_kml_export(
        export_type, extension, username, id_string, export_id = None,
        filter_query=None):
    from odk_viewer.models import Export

    user = User.objects.get(username=username)
    xform = XForm.objects.get(user__username=username, id_string=id_string)
    response = render_to_response(
        'survey.kml', {'data': kml_export_data(id_string, user)})

    basename = "%s_%s" % (id_string,
                             datetime.now().strftime("%Y_%m_%d_%H_%M_%S"))
    filename = basename + "." + extension
    file_path = os.path.join(
        username,
        'exports',
        id_string,
        export_type,
        filename)

    storage = get_storage_class()()
    temp_file = NamedTemporaryFile(suffix=extension)
    temp_file.write(response.content)
    temp_file.seek(0)
    export_filename = storage.save(
        file_path,
        File(temp_file, file_path))
    temp_file.close()

    dir_name, basename = os.path.split(export_filename)

    # get or create export object
    if(export_id):
        export = Export.objects.get(id=export_id)
    else:
        export = Export.objects.create(xform=xform,
            export_type=export_type)

    export.filedir = dir_name
    export.filename = basename
    export.internal_status = Export.SUCCESSFUL
    export.save()

    return export
Code Example #31
    def runshell_db(cls, conn_params):
        args = [cls.executable_name]

        host = conn_params.get('host', '')
        port = conn_params.get('port', '')
        dbname = conn_params.get('database', '')
        user = conn_params.get('user', '')
        passwd = conn_params.get('password', '')

        if user:
            args += ['-U', user]
        if host:
            args += ['-h', host]
        if port:
            args += ['-p', str(port)]
        args += [dbname]

        temp_pgpass = None
        try:
            if passwd:
                # Create temporary .pgpass file.
                temp_pgpass = NamedTemporaryFile(mode='w+')
                try:
                    print(
                        _escape_pgpass(host) or '*',
                        str(port) or '*',
                        _escape_pgpass(dbname) or '*',
                        _escape_pgpass(user) or '*',
                        _escape_pgpass(passwd),
                        file=temp_pgpass,
                        sep=':',
                        flush=True,
                    )
                    os.environ['PGPASSFILE'] = temp_pgpass.name
                except UnicodeEncodeError:
                    # If the current locale can't encode the data, we let
                    # the user input the password manually.
                    pass
            subprocess.check_call(args)
        finally:
            if temp_pgpass:
                temp_pgpass.close()
                if 'PGPASSFILE' in os.environ:  # unit tests need cleanup
                    del os.environ['PGPASSFILE']
Code Example #32
File: client.py Project: yephper/django
    def runshell_db(cls, settings_dict):
        args = [cls.executable_name]

        host = settings_dict.get('HOST', '')
        port = settings_dict.get('PORT', '')
        name = settings_dict.get('NAME', '')
        user = settings_dict.get('USER', '')
        passwd = settings_dict.get('PASSWORD', '')

        if user:
            args += ['-U', user]
        if host:
            args += ['-h', host]
        if port:
            args += ['-p', str(port)]
        args += [name]

        temp_pgpass = None
        try:
            if passwd:
                # Create temporary .pgpass file.
                temp_pgpass = NamedTemporaryFile(mode='w+')
                try:
                    print_(
                        _escape_pgpass(host) or '*',
                        str(port) or '*',
                        _escape_pgpass(name) or '*',
                        _escape_pgpass(user) or '*',
                        _escape_pgpass(passwd),
                        file=temp_pgpass,
                        sep=':',
                        flush=True,
                    )
                    os.environ['PGPASSFILE'] = temp_pgpass.name
                except UnicodeEncodeError:
                    # If the current locale can't encode the data, we let
                    # the user input the password manually.
                    pass
            subprocess.call(args)
        finally:
            if temp_pgpass:
                temp_pgpass.close()
                if 'PGPASSFILE' in os.environ:  # unit tests need cleanup
                    del os.environ['PGPASSFILE']
Code Example #33
File: export_tools.py Project: Mbosco/formhub
def generate_kml_export(
        export_type, extension, username, id_string, export_id = None,
        filter_query=None):
    from odk_viewer.models import Export

    user = User.objects.get(username=username)
    xform = XForm.objects.get(user__username=username, id_string=id_string)
    response = render_to_response(
        'survey.kml', {'data': kml_export_data(id_string, user)})

    basename = "%s_%s" % (id_string,
                             datetime.now().strftime("%Y_%m_%d_%H_%M_%S"))
    filename = basename + "." + extension
    file_path = os.path.join(
        username,
        'exports',
        id_string,
        export_type,
        filename)

    storage = get_storage_class()()
    temp_file = NamedTemporaryFile(suffix=extension)
    temp_file.write(response.content)
    temp_file.seek(0)
    export_filename = storage.save(
        file_path,
        File(temp_file, file_path))
    temp_file.close()

    dir_name, basename = os.path.split(export_filename)

    # get or create export object
    if(export_id):
        export = Export.objects.get(id=export_id)
    else:
        export = Export.objects.create(xform=xform,
            export_type=export_type)

    export.filedir = dir_name
    export.filename = basename
    export.internal_status = Export.SUCCESSFUL
    export.save()

    return export
Code Example #34
File: models.py Project: bouttier/django-cine
    def save(self, *args, **kwargs):
        update = self.pk is None
        if not update:
            orig = Film.objects.get(pk=self.pk)
            if orig.slug != self.slug or orig.imdb_poster_url != self.imdb_poster_url:
                update = True
        if update:
            self.slug = slugify(self.titre)
            img = requests.get(self.imdb_poster_url)
            if img.status_code == requests.codes.ok:
                img_temp = NamedTemporaryFile(delete=True)
                img_temp.write(img.content)
                img_temp.flush()
                self.imdb_poster.save(self.slug, File(img_temp), save=False)
                img_temp.close()
        super(Film, self).save(*args, **kwargs)

        if update:
            self.nouveau()
Code Example #35
File: views.py Project: meetrilok/bottleDetect
def classify_api(request):
    data = {"success": False}
    im = None  # only set when an image64 payload is decoded below

    if request.method == "POST":
        tmp_f = NamedTemporaryFile()

        if request.FILES.get("image", None) is not None:
            image_request = request.FILES["image"]
            image_bytes = image_request.read()
            image = Image.open(io.BytesIO(image_bytes))
            image.save(tmp_f, image.format)
        elif request.POST.get("image64", None) is not None:
            base64_data = request.POST.get("image64", None).split(',', 1)[1]
            plain_data = b64decode(base64_data)
            tmp_f.write(plain_data)
            try:

                im = Image.open(BytesIO(plain_data))

                im.save('bottleImage1.jpg', 'JPEG')

                #print(checkBlueColor(im))
            except Exception as ex:
                template = "An exception of type {0} occurred. Arguments:\n{1!r}"
                message = template.format(type(ex).__name__, ex.args)
                print(message)

        classify_result = tf_classify(tmp_f, int(request.POST.get('k', MAX_K)))
        tmp_f.close()
        if im is not None and checkBlueColor(im) == 'Yes':
            data["blue_color"] = "yes"
        else:
            data["blue_color"] = "No"

        if classify_result:
            data["success"] = True

            data["confidence"] = {}
            for res in classify_result:
                data["confidence"][res[0]] = float(res[1])

    return JsonResponse(data)
Code Example #36
File: tests.py Project: thibaudcolas/django
class SettingsCustomLoggingTest(AdminScriptTestCase):
    """
    Logging defaults are still applied when using a custom
    callable in LOGGING_CONFIG (i.e., logging.config.fileConfig).
    """
    def setUp(self):
        super().setUp()
        logging_conf = """
[loggers]
keys=root
[handlers]
keys=stream
[formatters]
keys=simple
[logger_root]
handlers=stream
[handler_stream]
class=StreamHandler
formatter=simple
args=(sys.stdout,)
[formatter_simple]
format=%(message)s
"""
        self.temp_file = NamedTemporaryFile()
        self.temp_file.write(logging_conf.encode())
        self.temp_file.flush()
        self.write_settings(
            "settings.py",
            sdict={
                "LOGGING_CONFIG": '"logging.config.fileConfig"',
                "LOGGING": 'r"%s"' % self.temp_file.name,
            },
        )

    def tearDown(self):
        self.temp_file.close()

    def test_custom_logging(self):
        out, err = self.run_manage(["check"])
        self.assertNoOutput(err)
        self.assertOutput(out,
                          "System check identified no issues (0 silenced).")
Code Example #37
 def test_to_xls_export_generates_valid_sheet_names(self):
     survey = create_survey_from_xls(
         _logger_fixture_path('childrens_survey_with_a_very_long_name.xls'))
     export_builder = ExportBuilder()
     export_builder.set_survey(survey)
     xls_file = NamedTemporaryFile(suffix='.xls')
     filename = xls_file.name
     export_builder.to_xls_export(filename, self.data)
     xls_file.seek(0)
     wb = load_workbook(filename)
     # check that we have childrens_survey, children, children_cartoons
     # and children_cartoons_characters sheets
     expected_sheet_names = [
         'childrens_survey_with_a_very_lo',
         'childrens_survey_with_a_very_l1',
         'childrens_survey_with_a_very_l2',
         'childrens_survey_with_a_very_l3'
     ]
     self.assertEqual(wb.get_sheet_names(), expected_sheet_names)
     xls_file.close()
Code Example #38
def render_to_latex(template, context, context_instance=None):
    import os, codecs
    body = render_to_string(template, context, context_instance)
    #TODO: there is still a lot of HTML codes to replace here
    body = body.replace('&quote;', '"')
    body = body.replace('&quot;', '"')
    body = body.replace('&apos;', '\'')
    body = body.replace('&amp;', '\&')
    body = body.replace('&#39;', '\'')
    body = body.replace('<br>', '\\\\')  # a LaTeX line break needs two backslashes
    body = body.replace('#', '\\#')
    tempf = NamedTemporaryFile()
    tempf.close()
    tempf = codecs.open(tempf.name, 'w', 'utf-8')
    tempf.write(body)
    tempf.close()
    for i in range(3):
        os.system('pdflatex -interaction nonstopmode -output-directory %s %s' %
                  (os.path.split(tempf.name)[0], tempf.name))
    return open(tempf.name + '.pdf', 'rb').read()
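
Running pdflatex three times is the usual way to let LaTeX stabilize cross-references and any table of contents, which can take more than one pass to resolve.
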
Code Example #39
def export_submissionset_csv(submissionsets, outfilename=None):
    """
        Returns a NamedTemporaryFile with data from each submissionset
        in submissionsets.
    """
    if not outfilename:
        outfile = NamedTemporaryFile(suffix='.csv', delete=False)
    else:
        path = os.path.dirname(outfilename)
        if not os.path.exists(path):
            os.makedirs(path)
        outfile = open(outfilename, 'wb')

    csvWriter = UnicodeWriter(outfile)

    columns = ["Institution",
               "Type",
               "Date Submitted",
               "Version",
               "Rating",
               "Link"]

    csvWriter.writerow(columns)

    for submissionset in submissionsets:

        scorecard_url = ("https://" +
                         REPORTS_HOST +
                         submissionset.get_scorecard_url())

        csvWriter.writerow([submissionset.institution.name,
                            (submissionset.institution.institution_type or
                             "Unknown"),
                            unicode(submissionset.date_submitted),
                            submissionset.creditset.version,
                            submissionset.rating.name,
                            scorecard_url])

    outfile.close()
    print "Closing file: %s" % outfile.name
    return outfile.name
Code Example #40
def set_website_manifest_json(website):
    """
    Set the website's manifest.json. #TODO: look into this later and remove unnecessary data.
    :param website:
    :return:
    """
    context = {
        'short_name': website.title.split(' ')[0],
        'name': website.title,
        'description': website.title,
        'icon_192_url': website.logo128.url,
        'icon_512_url': website.logo128_2x.url
    }
    data = render_template('manifest.json', context=context)
    tempfile = NamedTemporaryFile(delete=True)
    tempfile.write(str.encode(data))
    website.manifest_json.save(
        'websites/' + to_base64(str(website.id)) + '/manifest.json',
        tempfile
    )
    tempfile.close()
Code Example #41
File: export_tools.py Project: smn/onadata
def generate_osm_export(export_type,
                        extension,
                        username,
                        id_string,
                        export_id=None,
                        filter_query=None):
    # TODO resolve circular import
    from onadata.apps.viewer.models.export import Export
    xform = XForm.objects.get(user__username=username, id_string=id_string)
    attachments = Attachment.objects.filter(extension=Attachment.OSM,
                                            instance__xform=xform)
    content = get_combined_osm([a.media_file for a in attachments])

    basename = "%s_%s" % (id_string,
                          datetime.now().strftime("%Y_%m_%d_%H_%M_%S"))
    filename = basename + "." + extension
    file_path = os.path.join(username, 'exports', id_string, export_type,
                             filename)

    storage = get_storage_class()()
    temp_file = NamedTemporaryFile(suffix=extension)
    temp_file.write(content)
    temp_file.seek(0)
    export_filename = storage.save(file_path, File(temp_file, file_path))
    temp_file.close()

    dir_name, basename = os.path.split(export_filename)

    # get or create export object
    if (export_id):
        export = Export.objects.get(id=export_id)
    else:
        export = Export.objects.create(xform=xform, export_type=export_type)

    export.filedir = dir_name
    export.filename = basename
    export.internal_status = Export.SUCCESSFUL
    export.save()

    return export
Code Example #42
    def from_url(creator, url):
        try:
            return Cover.objects.get(original_url=url)
        except Cover.DoesNotExist:
            pass

        img_temp = None

        try:
            cover = Cover(creator=creator,
                          create_date=datetime.now(),
                          original_url=url)

            response = urllib.request.urlopen(url)

            if 'content-length' not in response.headers or int(
                    response.headers['content-length']) > 1000000:
                return None

            data = response.read()

            Image.open(BytesIO(data)).verify()

            img = Image.open(BytesIO(data))
            img = img.resize((150, 150), Image.ANTIALIAS)

            img_temp = NamedTemporaryFile(delete=True)
            ext = url.split('.')[-1].upper()
            if ext == 'JPG':
                ext = 'JPEG'
            img.save(img_temp, format=ext)

            cover.file.save(f(None, url), File(img_temp), save=True)

            return cover
        except:
            return None
        finally:
            if img_temp:
                img_temp.close()
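
The double Image.open is deliberate: Pillow's verify() consumes the parser and leaves the image unusable afterwards, so the data has to be reopened (here via a fresh BytesIO) before it can be resized and saved.
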
Code Example #43
def classify_api(request):
    data = {"success": False}
    message = None
    explanation = None
    status_code = 500
    # Get an instance of a logger
    logger = logging.getLogger(__name__)

    if request.method == "POST":  # and request.is_ajax()
        tmp_f = NamedTemporaryFile()

        if request.FILES.get("image", None) is not None:
            image_request = request.FILES["image"]
            image_bytes = image_request.read()
            image = Image.open(io.BytesIO(image_bytes))
            image.save(tmp_f, image.format)
        elif request.POST.get("image64", None) is not None:
            base64_data = request.POST.get("image64", None).split(',', 1)[1]
            plain_data = b64decode(base64_data)
            tmp_f.write(plain_data)

        classify_result = tf_classify(tmp_f, int(request.POST.get('k', MAX_K)))
        tmp_f.close()

        if classify_result:
            print("SUCCESSS")
            response = HttpResponse("fdfd")
            print(response)
            data["success"] = True
            data["confidence"] = {}
            data['message'] = message
            data['status'] = status_code
            for res in classify_result:
                data["confidence"][res[0]] = float(res[1])
        if not classify_result:
            logger.error('Something went wrong!')
            print("failure")
            response = HttpResponse("fdfd")
            print(response)
    return JsonResponse(data)
Code Example #44
def build_report(rows):
    """
        A dummy report task. DownloadExportView expects a path to a file, so
        one needs to be created.

        I'm using a NamedTemporaryFile here that doesn't automatically delete,
        so you may need a cleanup method if you do it this way.
    """

    outfile = NamedTemporaryFile(suffix='.csv', delete=False)

    with open(outfile.name, 'wb') as csvfile:
        writer = csv.writer(csvfile,
                            delimiter='\t',
                            quotechar='|',
                            quoting=csv.QUOTE_MINIMAL)
        writer.writerow(['Column #1', 'Column #2', 'Column #3'])
        for i in range(int(rows)):
            writer.writerow(['Row #%d' % i, 'from task', 'build_report'])

    outfile.close()
    return outfile.name
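
Because the file is created with delete=False, it survives the function; a minimal cleanup sketch (the consuming code is hypothetical):

    import os

    path = build_report(rows=100)
    try:
        with open(path, 'rb') as report:
            payload = report.read()  # hypothetical consumer of the CSV
    finally:
        os.remove(path)  # delete=False means nothing else removes the file
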
Code Example #45
def classify_api(request):
    data = {"success": False}
    print("my_classify api called")
    if request.method == "POST":
        tmp_f = NamedTemporaryFile()

        if request.FILES.get("image", None) is not None:
            image_request = request.FILES["image"]
            image_bytes = image_request.read()
            image = Image.open(io.BytesIO(image_bytes))
            image.save(tmp_f, image.format)
        elif request.POST.get("image64", None) is not None:
            base64_data = request.POST.get("image64", None).split(',', 1)[1]
            plain_data = b64decode(base64_data)
            tmp_f.write(plain_data)

        print("calling mytf_classify")
        # classify_result = mytf_classify(tmp_f, int(request.POST.get('k', MAX_K)))

        test_batch = get_image_batch(tmp_f)
        print(len(test_batch))
        classify_result = [
            recognize_V3(SESS3, RESULT_V3, INPUT_V3, test_batch)
        ]

        tmp_f.close()
        print(classify_result)
        if classify_result:
            data["success"] = True
            data["confidence"] = {}

            #data["confidence"][res[0]] = float(res[1])
            data["confidence"][classify_result[0]] = float(1)

            return_text = classify_result[0]
    #return JsonResponse(data)
    return HttpResponse(return_text)
Code Example #46
    def test_to_xls_export_respects_custom_field_delimiter(self):
        survey = self._create_childrens_survey()
        export_builder = ExportBuilder()
        export_builder.GROUP_DELIMITER = ExportBuilder.GROUP_DELIMITER_DOT
        export_builder.set_survey(survey)
        xls_file = NamedTemporaryFile(suffix='.xls')
        filename = xls_file.name
        export_builder.to_xls_export(filename, self.data)
        xls_file.seek(0)
        wb = load_workbook(filename)

        # check header columns
        main_sheet = wb.get_sheet_by_name('childrens_survey')
        expected_column_headers = [
            'name', 'age', 'geo.geolocation', 'geo._geolocation_latitude',
            'geo._geolocation_longitude', 'geo._geolocation_altitude',
            'geo._geolocation_precision', 'tel.tel.office', 'tel.tel.mobile',
            '_id', 'meta.instanceID', '_uuid', '_submission_time', '_index',
            '_parent_index', '_parent_table_name', '_tags', '_notes'
        ]
        column_headers = [c[0].value for c in main_sheet.columns]
        self.assertEqual(sorted(column_headers),
                         sorted(expected_column_headers))
        xls_file.close()
コード例 #47
0
ファイル: urlutil.py プロジェクト: artscoop/scoop
def download_url_resource(path, output=None):
    """
    Télécharger un fichier à une URL et renvoyer le chemin du fichier local téléchargé

    :param output: Chemin de sortie, fichier dans le répertoire temporaire si None
    :type output: str | None
    """
    if output and os.path.exists(output):
        logging.warning(
            _("The download destination file at {path} already exists. Skipped."
              ).format(path=output))
        return output
    try:
        resource = get_url_resource(path, False, stream=True)
    except (IOError, OSError):
        raise URLError(
            "The resource at {path} cannot be downloaded.".format(path=path),
            path)
    resource_file = NamedTemporaryFile(
        delete=False) if output is None else open(output, 'wb')
    for chunk in resource.iter_content(16384):
        resource_file.write(chunk)
    resource_file.close()
    return resource_file.name
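
Usage is straightforward: with no output argument the download lands in a NamedTemporaryFile whose path is returned (the URL below is only an example):

local_path = download_url_resource('https://example.com/data.csv')

# explicit destination; skipped with a warning if the file already exists
local_path = download_url_resource('https://example.com/data.csv',
                                   output='/tmp/data.csv')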
コード例 #48
0
ファイル: export_tools.py プロジェクト: asangansi/onadata
def generate_export(export_type,
                    xform,
                    export_id=None,
                    options=None,
                    retries=0):
    """
    Create appropriate export object given the export type.

    param: export_type
    param: xform
    params: export_id: ID of export object associated with the request
    param: options: additional parameters required for the lookup.
        binary_select_multiples: boolean flag
        end: end offset
        ext: export extension type
        dataview_pk: dataview pk
        group_delimiter: "/" or "."
        query: filter_query for custom queries
        remove_group_name: boolean flag
        split_select_multiples: boolean flag
        index_tag: ('[', ']') or ('_', '_')
    """
    username = xform.user.username
    id_string = xform.id_string
    end = options.get("end")
    extension = options.get("extension", export_type)
    filter_query = options.get("query")
    remove_group_name = options.get("remove_group_name", False)
    start = options.get("start")

    export_type_func_map = {
        Export.XLS_EXPORT: 'to_xls_export',
        Export.CSV_EXPORT: 'to_flat_csv_export',
        Export.CSV_ZIP_EXPORT: 'to_zipped_csv',
        Export.SAV_ZIP_EXPORT: 'to_zipped_sav',
        Export.GOOGLE_SHEETS_EXPORT: 'to_google_sheets',
    }

    dataview = None
    if options.get("dataview_pk"):
        dataview = DataView.objects.get(pk=options.get("dataview_pk"))
        records = dataview.query_data(dataview,
                                      all_data=True,
                                      filter_query=filter_query)
        total_records = dataview.query_data(dataview,
                                            count=True)[0].get('count')
    else:
        records = query_data(xform, query=filter_query, start=start, end=end)

        if filter_query:
            total_records = query_data(xform,
                                       query=filter_query,
                                       start=start,
                                       end=end,
                                       count=True)[0].get('count')
        else:
            total_records = xform.num_of_submissions

    if isinstance(records, QuerySet):
        records = records.iterator()

    export_builder = ExportBuilder()

    export_builder.TRUNCATE_GROUP_TITLE = True \
        if export_type == Export.SAV_ZIP_EXPORT else remove_group_name
    export_builder.GROUP_DELIMITER = options.get("group_delimiter",
                                                 DEFAULT_GROUP_DELIMITER)
    export_builder.SPLIT_SELECT_MULTIPLES = options.get(
        "split_select_multiples", True)
    export_builder.BINARY_SELECT_MULTIPLES = options.get(
        "binary_select_multiples", False)
    export_builder.INCLUDE_LABELS = options.get('include_labels', False)
    export_builder.INCLUDE_LABELS_ONLY = options.get('include_labels_only',
                                                     False)
    export_builder.INCLUDE_HXL = options.get('include_hxl', False)

    export_builder.INCLUDE_IMAGES \
        = options.get("include_images", settings.EXPORT_WITH_IMAGE_DEFAULT)

    export_builder.VALUE_SELECT_MULTIPLES = options.get(
        'value_select_multiples', False)

    export_builder.REPEAT_INDEX_TAGS = options.get("repeat_index_tags",
                                                   DEFAULT_INDEX_TAGS)

    # 'win_excel_utf8' is only relevant for CSV exports
    if 'win_excel_utf8' in options and export_type != Export.CSV_EXPORT:
        del options['win_excel_utf8']

    export_builder.set_survey(xform.survey, xform)

    temp_file = NamedTemporaryFile(suffix=("." + extension))

    columns_with_hxl = export_builder.INCLUDE_HXL and get_columns_with_hxl(
        xform.survey_elements)

    # get the export function by export type
    func = getattr(export_builder, export_type_func_map[export_type])
    try:
        func.__call__(temp_file.name,
                      records,
                      username,
                      id_string,
                      filter_query,
                      start=start,
                      end=end,
                      dataview=dataview,
                      xform=xform,
                      options=options,
                      columns_with_hxl=columns_with_hxl,
                      total_records=total_records)
    except NoRecordsFoundError:
        pass
    except SPSSIOError as e:
        export = get_or_create_export(export_id, xform, export_type, options)
        export.error_message = str(e)
        export.internal_status = Export.FAILED
        export.save()
        report_exception("SAV Export Failure", e, sys.exc_info())
        return export

    # generate filename
    basename = "%s_%s" % (id_string,
                          datetime.now().strftime("%Y_%m_%d_%H_%M_%S_%f"))

    if remove_group_name:
        # add 'remove group name' flag to filename
        basename = "{}-{}".format(basename, GROUPNAME_REMOVED_FLAG)
    if dataview:
        basename = "{}-{}".format(basename, DATAVIEW_EXPORT)

    filename = basename + "." + extension

    # check filename is unique
    while not Export.is_filename_unique(xform, filename):
        filename = increment_index_in_filename(filename)

    file_path = os.path.join(username, 'exports', id_string, export_type,
                             filename)

    # TODO: if s3 storage, make private - how will we protect local storage??
    # seek to the beginning as required by storage classes
    temp_file.seek(0)
    export_filename = default_storage.save(file_path,
                                           File(temp_file, file_path))
    temp_file.close()

    dir_name, basename = os.path.split(export_filename)

    # get or create export object
    export = get_or_create_export(export_id, xform, export_type, options)

    export.filedir = dir_name
    export.filename = basename
    export.internal_status = Export.SUCCESSFUL
    # Get the URL of the exported sheet.
    if export_type == Export.GOOGLE_SHEETS_EXPORT:
        export.export_url = export_builder.url

    # only persist exports that cover the full data range
    if start is None and end is None:
        export.save()
    return export
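
A caller passes the lookup parameters through the options dict described in the docstring. A hedged sketch of a typical call (the xform object and option values here are assumptions):

options = {
    "extension": "xlsx",
    "group_delimiter": "/",
    "split_select_multiples": True,
    "remove_group_name": False,
    "query": None,  # filter_query for custom queries
}
export = generate_export(Export.XLS_EXPORT, xform, export_id=None,
                         options=options)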
コード例 #49
0
class CompilerFilter(FilterBase):
    """
    A filter subclass that is able to filter content via
    external commands.
    """
    command = None
    options = ()
    default_encoding = settings.FILE_CHARSET

    def __init__(self, content, command=None, *args, **kwargs):
        super(CompilerFilter, self).__init__(content, *args, **kwargs)
        self.cwd = None

        if command:
            self.command = command
        if self.command is None:
            raise FilterError("Required attribute 'command' not given")

        if isinstance(self.options, dict):
            # turn the options dict into a tuple of items
            new_options = ()
            for item in self.options.items():
                new_options += (item, )
            self.options = new_options

        # append kwargs to self.options
        for item in kwargs.items():
            self.options += (item, )

        self.stdout = self.stdin = self.stderr = subprocess.PIPE
        self.infile = self.outfile = None

    def input(self, **kwargs):
        encoding = self.default_encoding
        options = dict(self.options)

        if self.infile is None and "{infile}" in self.command:
            # create temporary input file if needed
            if self.filename is None:
                self.infile = NamedTemporaryFile(mode='wb')
                self.infile.write(self.content.encode(encoding))
                self.infile.flush()
                options["infile"] = self.infile.name
            else:
                # we use source file directly, which may be encoded using
                # something different than utf8. If that's the case file will
                # be included with charset="something" html attribute and
                # charset will be available as filter's charset attribute
                encoding = self.charset  # or self.default_encoding
                self.infile = open(self.filename)
                options["infile"] = self.filename

        if "{outfile}" in self.command and "outfile" not in options:
            # create temporary output file if needed
            ext = self.type and ".%s" % self.type or ""
            self.outfile = NamedTemporaryFile(mode='r+', suffix=ext)
            options["outfile"] = self.outfile.name

        # Quote infile and outfile for spaces etc.
        if "infile" in options:
            options["infile"] = shell_quote(options["infile"])
        if "outfile" in options:
            options["outfile"] = shell_quote(options["outfile"])

        try:
            command = self.command.format(**options)
            proc = subprocess.Popen(command,
                                    shell=True,
                                    cwd=self.cwd,
                                    stdout=self.stdout,
                                    stdin=self.stdin,
                                    stderr=self.stderr)
            if self.infile is None:
                # if infile is None then send content to process' stdin
                filtered, err = proc.communicate(self.content.encode(encoding))
            else:
                filtered, err = proc.communicate()
            filtered, err = filtered.decode(encoding), err.decode(encoding)
        except (IOError, OSError) as e:
            raise FilterError('Unable to apply %s (%r): %s' %
                              (self.__class__.__name__, self.command, e))
        else:
            if proc.wait() != 0:
                # command failed, raise FilterError exception
                if not err:
                    err = ('Unable to apply %s (%s)' %
                           (self.__class__.__name__, self.command))
                    if filtered:
                        err += '\n%s' % filtered
                raise FilterError(err)

            if self.verbose:
                self.logger.debug(err)

            outfile_path = options.get('outfile')
            if outfile_path:
                with io.open(outfile_path, 'r', encoding=encoding) as file:
                    filtered = file.read()
        finally:
            if self.infile is not None:
                self.infile.close()
            if self.outfile is not None:
                self.outfile.close()

        return smart_text(filtered)
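
Concrete filters subclass CompilerFilter and supply a command template; {infile} and {outfile} are replaced with temporary file paths at run time. A minimal sketch (the lessc binary is an assumption, not something the class above ships with):

class LessFilter(CompilerFilter):
    command = "lessc {infile} {outfile}"

css = LessFilter("a { color: red; }").input()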
コード例 #50
0
    def test_to_xls_export_works(self):
        survey = self._create_childrens_survey()
        export_builder = ExportBuilder()
        export_builder.set_survey(survey)
        xls_file = NamedTemporaryFile(suffix='.xls')
        filename = xls_file.name
        export_builder.to_xls_export(filename, self.data)
        xls_file.seek(0)
        wb = load_workbook(filename)
        # check that we have childrens_survey, children, children_cartoons
        # and children_cartoons_characters sheets
        expected_sheet_names = [
            'childrens_survey', 'children', 'children_cartoons',
            'children_cartoons_characters'
        ]
        self.assertEqual(wb.get_sheet_names(), expected_sheet_names)

        # check header columns
        main_sheet = wb.get_sheet_by_name('childrens_survey')
        expected_column_headers = [
            'name', 'age', 'geo/geolocation', 'geo/_geolocation_latitude',
            'geo/_geolocation_longitude', 'geo/_geolocation_altitude',
            'geo/_geolocation_precision', 'tel/tel.office', 'tel/tel.mobile',
            '_id', 'meta/instanceID', '_uuid', '_submission_time', '_index',
            '_parent_index', '_parent_table_name', '_tags', '_notes'
        ]
        column_headers = [c[0].value for c in main_sheet.columns]
        self.assertEqual(sorted(column_headers),
                         sorted(expected_column_headers))

        childrens_sheet = wb.get_sheet_by_name('children')
        expected_column_headers = [
            'children/name', 'children/age', 'children/fav_colors',
            'children/fav_colors/red', 'children/fav_colors/blue',
            'children/fav_colors/pink', 'children/ice.creams',
            'children/ice.creams/vanilla', 'children/ice.creams/strawberry',
            'children/ice.creams/chocolate', '_id', '_uuid',
            '_submission_time', '_index', '_parent_index',
            '_parent_table_name', '_tags', '_notes'
        ]
        column_headers = [c[0].value for c in childrens_sheet.columns]
        self.assertEqual(sorted(column_headers),
                         sorted(expected_column_headers))

        cartoons_sheet = wb.get_sheet_by_name('children_cartoons')
        expected_column_headers = [
            'children/cartoons/name', 'children/cartoons/why', '_id', '_uuid',
            '_submission_time', '_index', '_parent_index',
            '_parent_table_name', '_tags', '_notes'
        ]
        column_headers = [c[0].value for c in cartoons_sheet.columns]
        self.assertEqual(sorted(column_headers),
                         sorted(expected_column_headers))

        characters_sheet = wb.get_sheet_by_name('children_cartoons_characters')
        expected_column_headers = [
            'children/cartoons/characters/name',
            'children/cartoons/characters/good_or_evil', '_id', '_uuid',
            '_submission_time', '_index', '_parent_index',
            '_parent_table_name', '_tags', '_notes'
        ]
        column_headers = [c[0].value for c in characters_sheet.columns]
        self.assertEqual(sorted(column_headers),
                         sorted(expected_column_headers))

        xls_file.close()
コード例 #51
0
    def test_zipped_csv_export_works(self):
        survey = self._create_childrens_survey()
        export_builder = ExportBuilder()
        export_builder.set_survey(survey)
        temp_zip_file = NamedTemporaryFile(suffix='.zip')
        export_builder.to_zipped_csv(temp_zip_file.name, self.data)
        temp_zip_file.seek(0)
        temp_dir = tempfile.mkdtemp()
        zip_file = zipfile.ZipFile(temp_zip_file.name, "r")
        zip_file.extractall(temp_dir)
        zip_file.close()
        temp_zip_file.close()

        # generate data to compare with
        index = 1
        indices = {}
        survey_name = survey.name
        outputs = []
        for d in self.data:
            outputs.append(
                dict_to_joined_export(d, index, indices, survey_name))
            index += 1

        # check that each file exists
        self.assertTrue(
            os.path.exists(
                os.path.join(temp_dir, "{0}.csv".format(survey.name))))
        with open(os.path.join(temp_dir,
                               "{0}.csv".format(survey.name))) as csv_file:
            reader = csv.reader(csv_file)
            rows = [r for r in reader]

            # open comparison file
            with open(_logger_fixture_path(
                    'csvs', 'childrens_survey.csv')) as fixture_csv:
                fixture_reader = csv.reader(fixture_csv)
                expected_rows = [r for r in fixture_reader]
                self.assertEqual(rows, expected_rows)

        self.assertTrue(os.path.exists(os.path.join(temp_dir, "children.csv")))
        with open(os.path.join(temp_dir, "children.csv")) as csv_file:
            reader = csv.reader(csv_file)
            rows = [r for r in reader]

            # open comparison file
            with open(_logger_fixture_path('csvs',
                                           'children.csv')) as fixture_csv:
                fixture_reader = csv.reader(fixture_csv)
                expected_rows = [r for r in fixture_reader]
                self.assertEqual(rows, expected_rows)

        self.assertTrue(
            os.path.exists(os.path.join(temp_dir, "children_cartoons.csv")))
        with open(os.path.join(temp_dir, "children_cartoons.csv")) as csv_file:
            reader = csv.reader(csv_file)
            rows = [r for r in reader]

            # open comparison file
            with open(_logger_fixture_path(
                    'csvs', 'children_cartoons.csv')) as fixture_csv:
                fixture_reader = csv.reader(fixture_csv)
                expected_rows = [r for r in fixture_reader]
                self.assertEqual(rows, expected_rows)

        self.assertTrue(
            os.path.exists(
                os.path.join(temp_dir, "children_cartoons_characters.csv")))
        with open(os.path.join(
                temp_dir, "children_cartoons_characters.csv")) as csv_file:
            reader = csv.reader(csv_file)
            rows = [r for r in reader]

            # open comparison file
            with open(
                    _logger_fixture_path(
                        'csvs',
                        'children_cartoons_characters.csv')) as fixture_csv:
                fixture_reader = csv.reader(fixture_csv)
                expected_rows = [r for r in fixture_reader]
                self.assertEqual(rows, expected_rows)

        shutil.rmtree(temp_dir)
コード例 #52
0
    def google_products(self, request):
        def prettify_xml(elem):
            """
            Return a pretty-printed XML string for the Element.
            """
            rough_string = tostring(elem)
            reparsed = minidom.parseString(rough_string)
            return reparsed.toprettyxml(indent='\t').encode('utf-8', 'replace')

        products = get_product_model().objects.filter(feed_google=True)
        root = Element('rss')
        root.attrib['xmlns:g'] = 'http://base.google.com/ns/1.0'
        root.attrib['version'] = '2.0'
        channel = SubElement(root, 'channel')
        title = SubElement(channel, 'title')
        title.text = request.settings.name
        link = SubElement(channel, 'link')
        link.text = settings.DOMAIN_NAME
        description = SubElement(channel, 'description')

        for p in products:
            # availability
            if p.is_available and not p.pre_order:
                txt_availability = 'in stock'
            elif p.pre_order:
                txt_availability = 'preorder'
            else:
                txt_availability = 'out of stock'

            # determine delivery charge by placing the product onto the basket
            basket = Basket()
            basket.add_item(p, None, 1)
            delivery_charge = basket.delivery

            # determine feed item attributes
            txt_id = unicode(p.id)
            txt_title = clean_unicode(p.title).strip()
            txt_link = p.get_absolute_url()
            txt_description = text_from_html(p.description, 5000)
            txt_condition = 'new'
            txt_price = '%.2f GBP' % p.price
            txt_google_category = (p.category.google_product_category
                                   if p.category
                                   and p.category.google_product_category
                                   else None)
            txt_category = (p.category.get_taxonomy_path()
                            if p.category else None)
            txt_country = 'GB'
            txt_delivery_price = '%s %s' % (delivery_charge, 'GBP')
            txt_barcode = p.barcode.strip() if p.barcode else None
            txt_part_number = p.part_number.strip() if p.part_number else None
            txt_brand = p.get_brand_title()

            # create item
            item = SubElement(channel, 'item')

            # id
            _id = SubElement(item, 'g:id')
            _id.text = txt_id

            # title
            title = SubElement(item, 'title')
            title.text = txt_title

            # link/url
            link = SubElement(item, 'link')
            link.text = txt_link

            # main text
            description = SubElement(item, 'description')
            description.text = txt_description

            # condition
            condition = SubElement(item, 'g:condition')
            condition.text = txt_condition

            # price
            price = SubElement(item, 'g:price')
            price.text = txt_price

            # availability
            availability = SubElement(item, 'g:availability')
            availability.text = txt_availability

            # google shopping category
            if txt_google_category:
                gcategory = SubElement(item, 'g:google_product_category')
                gcategory.text = txt_google_category

            # product type
            if txt_category:
                category = SubElement(item, 'g:product_type')
                category.text = txt_category

            # shipping
            shipping = SubElement(item, 'g:shipping')

            # country
            country = SubElement(shipping, 'g:country')
            country.text = txt_country

            # delivery price
            delivery_price = SubElement(shipping, 'g:price')
            delivery_price.text = txt_delivery_price

            # barcode, must be a valid UPC-A (GTIN-12), EAN/JAN (GTIN-13)
            # or GTIN-14, so we need to have at least 12 characters.
            if txt_barcode:
                gtin = SubElement(item, 'g:gtin')
                gtin.text = txt_barcode

            # part number
            if txt_part_number:
                _mpn = SubElement(item, 'g:mpn')
                _mpn.text = txt_part_number

            # brand
            if txt_brand:
                brand = SubElement(item, 'g:brand')
                brand.text = txt_brand

            # image
            if p.image:
                image = SubElement(item, 'g:image_link')
                image.text = p.image.large_url

            # additional images
            if len(p.gallery) > 0:
                for m in p.gallery[:10]:
                    additional_image_link = SubElement(
                        item, 'g:additional_image_link')
                    additional_image_link.text = m.large_url

        # get temp. filename
        f = NamedTemporaryFile(delete=False)
        tmp_filename = f.name
        f.close()

        # create tmp file (utf-8)
        f = open(tmp_filename, 'w+b')
        f.write(prettify_xml(root))
        f.seek(0)

        # send response
        filename = 'google_products_%s.xml' % datetime.date.today().strftime(
            '%d_%m_%Y')
        response = HttpResponse(FileWrapper(f), content_type='text/plain')
        response['Content-Disposition'] = 'attachment; filename=%s' % filename
        return response
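
The closing block is a common pattern for serving generated files: grab a temporary path, write the content, seek back, and stream the handle through FileWrapper. The pattern in isolation (a sketch; the helper name and content type are assumptions):

from tempfile import NamedTemporaryFile
from wsgiref.util import FileWrapper
from django.http import HttpResponse

def serve_generated(content_bytes, filename):
    tmp = NamedTemporaryFile(delete=False)
    tmp.write(content_bytes)
    tmp.seek(0)
    response = HttpResponse(FileWrapper(tmp), content_type='text/plain')
    response['Content-Disposition'] = 'attachment; filename=%s' % filename
    return response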
コード例 #53
0
    def perform_create(self, serializer):

        placeid = self.request.data.get('placeid', None)
        #API_KEY=''
        API_KEY = ''  #Api key google maps
        if placeid is not None:
            try:

                try:
                    alreadyexisting = Location.objects.get(
                        origin=Location.GOOGLEPLACES, originid=placeid)
                    queryset = alreadyexisting
                    self.finalserializer = LocationAddGoogleSerializer(
                        alreadyexisting)
                    self.conflict = True
                    return
                except:
                    pass

                self.conflict = False
                self.finalserializer = serializer
                r = requests.get(
                    "https://maps.googleapis.com/maps/api/place/details/json?placeid=%s&key=%s"
                    % (placeid, API_KEY))
                data = json.loads(r.text)
                name = data['result']['name']
                lat = data['result']['geometry']['location']['lat']
                lng = data['result']['geometry']['location']['lng']
                address = data['result'].get('formatted_address', '')
                country = FindAddressComponent(
                    data['result']['address_components'], 'country')
                postcode = FindAddressComponent(
                    data['result']['address_components'], 'postal_code')
                phonenumber = data['result'].get('international_phone_number',
                                                 '')
                if not phonenumber:
                    phonenumber = data['result'].get('formatted_phone_number',
                                                     '')
                website = data['result'].get('website', '')

                #To Do
                #Country and Post Code
                #Place id in new field in databasa

                try:
                    r = requests.get(
                        "http://api.geonames.org/timezoneJSON?lat=%g&lng=%g&username=gohappyhour"
                        % (lat, lng))
                    geodata = json.loads(r.text)
                    timezoneid = geodata['timezoneId']
                except:
                    raise ServiceUnavailable()

            except:
                raise PlaceUnavailable()

            try:
                location = serializer.save(owner=self.request.user,
                                           name=name,
                                           latitude=lat,
                                           longitude=lng,
                                           address=address,
                                           phonenumber=phonenumber,
                                           postcode=postcode,
                                           country=country,
                                           timezoneid=timezoneid,
                                           origin=Location.GOOGLEPLACES,
                                           originid=placeid,
                                           website=website)
                #We try to store the picture
                try:
                    #Decide on a photo
                    maxSizePhoto = 0
                    maxSizeInd = -1
                    for i in range(len(data['result']['photos'])):
                        sizeAux = int(
                            data['result']['photos'][i]['height']) * int(
                                data['result']['photos'][i]['width'])
                        if sizeAux > maxSizePhoto:
                            maxSizePhoto = sizeAux
                            maxSizeInd = i

                    if (maxSizeInd >= 0):
                        photoreference = data['result']['photos'][maxSizeInd][
                            'photo_reference']
                        locationpicture = LocationPicture(
                            owner=self.request.user,
                            location=location,
                            origin=LocationPicture.GOOGLEPLACES,
                            originid=photoreference)
                        locationpicture.save()

                        r = requests.get(
                            "https://maps.googleapis.com/maps/api/place/photo?maxwidth=1000&photoreference=%s&key=%s"
                            % (photoreference, API_KEY))
                        imageurl = r.url
                        filename = str(location.id)
                        extension = os.path.splitext(imageurl)[1]
                        if extension == "":
                            extension = '.jpg'

                        temp = NamedTemporaryFile(suffix=extension)

                        tempfilename = temp.name
                        temp.write(r.content)
                        temp.flush()  # make sure the bytes hit disk

                        im = Image.open(tempfilename)
                        im.load()  # read pixel data before the temp file is deleted
                        temp.close()
                        im.thumbnail(
                            iu.ObtainSizeLocationPicture(
                                im.size[0], im.size[1]))
                        im.save(tempfilename)

                        reopen = open(tempfilename, 'rb')
                        django_picture = ImageFile(reopen)
                        locationpicture.picture.save(filename + extension,
                                                     django_picture,
                                                     save=True)
                        reopen.close()

                        im = Image.open(tempfilename)
                        im.thumbnail(
                            iu.ObtainSizeLocationThumbnail(
                                im.size[0], im.size[1]))
                        im.save(tempfilename)

                        reopen = open(tempfilename, 'rb')
                        django_picture = ImageFile(reopen)
                        locationpicture.thumbnail.save(filename + extension,
                                                       django_picture,
                                                       save=True)
                        reopen.close()

                        os.remove(tempfilename)

                except:
                    pass
            except:
                raise LocationAlreadyExists()

        else:
            raise PlaceUnavailable()
コード例 #54
0
class PugCompilerFilter(CompilerFilter):
    binary = './node_modules/pug-cli/index.js'
    args = '-c -D'
    command = "{binary} {infile} {args} -o {outfile}"

    options = (
        ("binary", binary),
        ("args", args),
    )

    def input(self, **kwargs):

        encoding = self.default_encoding
        options = dict(self.options)

        relative_path = self.filename.split('static/templates/')[1][:-4]

        if self.infile is None and "{infile}" in self.command:
            # we use source file directly, which may be encoded using
            # something different than utf8. If that's the case file will
            # be included with charset="something" html attribute and
            # charset will be available as filter's charset attribute
            encoding = self.charset  # or self.default_encoding
            self.infile = open(self.filename)
            options["infile"] = self.filename

        basename = os.path.basename(self.filename)[:-3]

        if "{outfile}" in self.command and "outfile" not in options:
            # create temporary output file if needed
            ext = self.type and ".%s" % self.type or ""
            self.outfile = NamedTemporaryFile(mode='r+', suffix=ext)
            options["outfile"] = os.path.dirname(self.outfile.name)

        # Quote infile and outfile for spaces etc.
        if "infile" in options:
            options["infile"] = shell_quote(options["infile"])
        if "outfile" in options:
            options["outfile"] = shell_quote(options["outfile"])

        try:
            command = self.command.format(**options)
            proc = subprocess.Popen(command,
                                    shell=True,
                                    cwd=self.cwd,
                                    stdout=self.stdout,
                                    stdin=self.stdin,
                                    stderr=self.stderr)
            if self.infile is None:
                # if infile is None then send content to process' stdin
                filtered, err = proc.communicate(self.content.encode(encoding))
            else:
                filtered, err = proc.communicate()
            filtered, err = filtered.decode(encoding), err.decode(encoding)
        except (IOError, OSError) as e:
            raise FilterError('Unable to apply %s (%r): %s' %
                              (self.__class__.__name__, self.command, e))
        else:
            if proc.wait() != 0:
                # command failed, raise FilterError exception
                if not err:
                    err = ('Unable to apply %s (%s)' %
                           (self.__class__.__name__, self.command))
                    if filtered:
                        err += '\n%s' % filtered
                raise FilterError(err)

            if self.verbose:
                self.logger.debug(err)

            outfile_path = '{}/{}js'.format(options.get('outfile'), basename)
            if outfile_path:
                with io.open(outfile_path, 'r', encoding=encoding) as file:
                    filtered = file.read()
            filtered = '{}window.templates["{}"] = {};'.format(
                'window.templates = window.templates || {};',
                relative_path,
                filtered,
            )
        finally:
            if self.infile is not None:
                self.infile.close()
            if self.outfile is not None:
                self.outfile.close()
        return smart_text(filtered)
コード例 #55
0
def plot_boxes(img, boxes, class_names, plot_labels, color=None):

    # Define a tensor used to set the colors of the bounding boxes
    colors = torch.FloatTensor([[1, 0, 1], [0, 0, 1], [0, 1, 1], [0, 1, 0],
                                [1, 1, 0], [1, 0, 0]])

    # Define a function to set the colors of the bounding boxes
    def get_color(c, x, max_val):
        ratio = float(x) / max_val * 5
        i = int(np.floor(ratio))
        j = int(np.ceil(ratio))

        ratio = ratio - i
        r = (1 - ratio) * colors[i][c] + ratio * colors[j][c]

        return int(r * 255)

    # Get the width and height of the image
    width = img.shape[1]
    height = img.shape[0]

    # Create a figure and plot the image
    fig, a = plt.subplots(1, 1)
    a.imshow(img)

    # Plot the bounding boxes and corresponding labels on top of the image
    for i in range(len(boxes)):

        # Get the ith bounding box
        box = boxes[i]

        # Get the (x, y) pixel coordinates of the top-left and bottom-right
        # corners of the bounding box relative to the size of the image.
        x1 = int(np.around((box[0] - box[2] / 2.0) * width))
        y1 = int(np.around((box[1] - box[3] / 2.0) * height))
        x2 = int(np.around((box[0] + box[2] / 2.0) * width))
        y2 = int(np.around((box[1] + box[3] / 2.0) * height))

        # Set the default rgb value to red
        rgb = (1, 0, 0)

        # Use the same color to plot the bounding boxes of the same object class
        if len(box) >= 7 and class_names:
            cls_conf = box[5]
            cls_id = box[6]
            classes = len(class_names)
            offset = cls_id * 123457 % classes
            red = get_color(2, offset, classes) / 255
            green = get_color(1, offset, classes) / 255
            blue = get_color(0, offset, classes) / 255

            # If a color is given then set rgb to the given color instead
            if color is None:
                rgb = (red, green, blue)
            else:
                rgb = color

        # Calculate the width and height of the bounding box relative to the size of the image.
        width_x = x2 - x1
        width_y = y1 - y2

        # Set the position and size of the bounding box. (x1, y2) is the pixel coordinate of the
        # lower-left corner of the bounding box relative to the size of the image.
        rect = patches.Rectangle((x1, y2),
                                 width_x,
                                 width_y,
                                 linewidth=2,
                                 edgecolor=rgb,
                                 facecolor='none')

        # Draw the bounding box on top of the image
        a.add_patch(rect)

        # If plot_labels = True then plot the corresponding label
        if plot_labels:

            # Create a string with the object class name and the corresponding object class probability
            conf_tx = class_names[cls_id] + ': {:.1f}'.format(cls_conf)

            # Define x and y offsets for the labels
            lxc = (img.shape[1] * 0.266) / 100
            lyc = (img.shape[0] * 1.180) / 100

            # Draw the labels on top of the image
            a.text(x1 + lxc,
                   y1 - lyc,
                   conf_tx,
                   fontsize=24,
                   color='k',
                   bbox=dict(facecolor=rgb, edgecolor=rgb, alpha=0.8))

    tmp_plot = NamedTemporaryFile()
    plt.savefig(tmp_plot.name, format='png')

    response = cloudinary.uploader.upload(tmp_plot.name,
                                          public_id='result',
                                          overwrite=True)
    tmp_plot.close()

    return response['url']
コード例 #56
0
ファイル: forms.py プロジェクト: warcraft12321/INSaFLU_2.0
    def clean(self):
        """
		Clean all together because it's necessary to compare the genbank and fasta files
		"""
        cleaned_data = super(SampleForm, self).clean()
        name = self.cleaned_data['name'].strip()

        try:
            result_filter_name = re.sub('[^A-Za-z0-9_]+', '', name)
            if (len(result_filter_name) != len(name)):
                self.add_error(
                    'name',
                    _("Error: Only letters, numbers and underscores are allowed."
                      ))
                return cleaned_data

            try:
                Sample.objects.get(name__iexact=name,
                                   owner__username=self.request.user.username,
                                   is_obsolete=False,
                                   is_deleted=False)
                self.add_error(
                    'name',
                    ValidationError(
                        _("This name '" + name +
                          "' already exist in database, please choose other."),
                        code='invalid'))
            except Sample.DoesNotExist:
                pass

            ## latitude in degrees is -90 and +90 for the southern and northern hemisphere respectively. Longitude is in the range -180 and +180
            lat = self.cleaned_data.get('lat')
            if (lat != None and (lat > 90 or lat < -90)):
                self.add_error(
                    'lat', _("Latitute must have values between +90 and -90."))
            lng = self.cleaned_data.get('lng')
            if (lng != None and (lng > 180 or lng < -180)):
                self.add_error(
                    'lng',
                    _("Longitude must have values between +180 and -180."))

            ### test file name
            if ('path_name_1' not in self.cleaned_data):
                self.add_error('path_name_1', _("Error: must have a file."))
                return cleaned_data

            ### testing file names
            path_name_1 = self.cleaned_data.get('path_name_1')
            path_name_2 = self.cleaned_data.get('path_name_2')

            ## verbose log...
            self.logger_production.warning(
                'New Sample: {}  Path name1: {}'.format(name, path_name_1))
            self.logger_production.warning(
                'New Sample: {}  Path name2: {}'.format(name, path_name_2))
            self.logger_debug.warning('New Sample: {}  Path name1: {}'.format(
                name, path_name_1))
            self.logger_debug.warning('New Sample: {}  Path name2: {}'.format(
                name, path_name_2))

            if (path_name_2 != None and path_name_1.name == path_name_2.name):
                self.add_error(
                    'path_name_1',
                    _("Error: both files have the same name. Please provide different files."
                      ))
                self.add_error(
                    'path_name_2',
                    _("Error: both files have the same name. Please provide different files."
                      ))
                return cleaned_data

            ## testing fastq
            fastaq_temp_file_name = NamedTemporaryFile(prefix='flu_fq_',
                                                       suffix='.fastq.gz',
                                                       delete=False)
            fastaq_temp_file_name.write(path_name_1.file.read())
            fastaq_temp_file_name.flush()
            fastaq_temp_file_name.close()

            like_dates = self.cleaned_data.get('like_dates')
            date_of_onset = self.cleaned_data.get('date_of_onset')
            date_of_collection = self.cleaned_data.get('date_of_collection')
            date_of_receipt_lab = self.cleaned_data.get('date_of_receipt_lab')

            ## verbose log...
            self.logger_production.warning(
                'New Sample: {} Pass dates...'.format(name))
            self.logger_debug.warning(
                'New Sample: {} Pass dates...'.format(name))

            #####
            if (like_dates == None and date_of_onset != None
                    and date_of_collection != None
                    and date_of_receipt_lab != None):
                self.add_error(
                    'like_dates',
                    _("Please choose a date used to set the day, week and year."
                      ))
            elif (like_dates == 'date_of_onset' and date_of_onset == None):
                self.add_error('like_dates',
                               _("Error, the Onset date is null."))
            elif (like_dates == 'date_of_collection'
                  and date_of_collection == None):
                self.add_error('date_of_collection',
                               _("Error, the Collection date is null."))
            elif (like_dates == 'date_of_receipt_lab'
                  and date_of_receipt_lab == None):
                self.add_error('date_of_receipt_lab',
                               _("Error, the Lab Receipt date is null."))

            try:
                self.utils.is_fastq_gz(fastaq_temp_file_name.name)
            except Exception as e:  ## (e.errno, e.strerror)
                os.unlink(fastaq_temp_file_name.name)
                self.add_error('path_name_1', e.args[0])
                return cleaned_data

            ## verbose log...
            self.logger_production.warning(
                'New Sample: {} Pass is_fastq_gz 1...'.format(name))
            self.logger_debug.warning(
                'New Sample: {} Pass is_fastq_gz 1...'.format(name))

            ## testing fastq
            if (path_name_2 != None):
                fastaq_temp_file_name_2 = NamedTemporaryFile(
                    prefix='flu_fq_', suffix='.fastq.gz', delete=False)
                fastaq_temp_file_name_2.write(path_name_2.file.read())
                fastaq_temp_file_name_2.flush()
                fastaq_temp_file_name_2.close()

                try:
                    self.utils.is_fastq_gz(fastaq_temp_file_name_2.name)
                except Exception as e:  ## (e.errno, e.strerror)
                    os.unlink(fastaq_temp_file_name_2.name)
                    self.add_error('path_name_1', e.args[0])
                    return cleaned_data

            ## verbose log...
            self.logger_production.warning(
                'New Sample: {} end...'.format(name))
            self.logger_debug.warning('New Sample: {} end...'.format(name))

            ## remove temp files
            os.unlink(fastaq_temp_file_name.name)
            if (path_name_2 != None): os.unlink(fastaq_temp_file_name_2.name)
        except:
            self.logger_production.warning(
                "New Sample: {}  Path name2: {} Can't reach second file".
                format(name, path_name_1))
            self.logger_debug.warning(
                "New Sample: {}  Path name2: {} Can't reach second file".
                format(name, path_name_2))
            self.add_error('path_name_2', "Can't reach second file")
        return cleaned_data
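
The write-validate-unlink sequence above is repeated once per uploaded file; factored out it might look like this (a sketch; is_fastq_gz is the same validator used through the form's utils object):

import os
from tempfile import NamedTemporaryFile

def validate_uploaded_fastq(upload, utils):
    # copy the upload to a real path, validate it, and always clean up
    tmp = NamedTemporaryFile(prefix='flu_fq_', suffix='.fastq.gz',
                             delete=False)
    try:
        tmp.write(upload.file.read())
        tmp.flush()
        tmp.close()
        utils.is_fastq_gz(tmp.name)  # raises on invalid input
    finally:
        os.unlink(tmp.name)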
コード例 #57
0
ファイル: export_tools.py プロジェクト: raeesaroj/fieldsight
def generate_export(export_type, extension, username, id_string,
                    export_id=None, filter_query=None, group_delimiter='/',
                    split_select_multiples=True,
                    binary_select_multiples=False,
                    sync_to_gsuit=False, user=None):
    """
    Create appropriate export object given the export type
    """
    time.sleep(5)
    export_type_func_map = {
        Export.XLS_EXPORT: 'to_xls_export',
        Export.CSV_EXPORT: 'to_flat_csv_export',
        Export.CSV_ZIP_EXPORT: 'to_zipped_csv',
        Export.SAV_ZIP_EXPORT: 'to_zipped_sav',
        Export.ANALYSER_EXPORT: 'to_analyser_export'
    }

    xform = XForm.objects.get(
        user__username__iexact=username, id_string__exact=id_string)

    # query mongo for the cursor
    records = query_mongo(username, id_string, filter_query)

    export_builder = ExportBuilder()
    export_builder.GROUP_DELIMITER = group_delimiter
    export_builder.SPLIT_SELECT_MULTIPLES = split_select_multiples
    export_builder.BINARY_SELECT_MULTIPLES = binary_select_multiples
    __version__ = "0"
    try:
        __version__ = filter_query['$and'][0]['__version__']
    except Exception as e:
        print(str(e))
    if __version__:
        survey = build_survey_from_history(xform, __version__)
        if not survey:
            export_builder.set_survey(xform.data_dictionary().survey)
        else:
            export_builder.set_survey(survey)
    else:
        export_builder.set_survey(xform.data_dictionary().survey)

    prefix = slugify('{}_export__{}__{}'.format(export_type, username, id_string))
    temp_file = NamedTemporaryFile(prefix=prefix, suffix=("." + extension))

    # get the export function by export type
    func = getattr(export_builder, export_type_func_map[export_type])

    func.__call__(
        temp_file.name, records, username, id_string, filter_query)

    # generate filename
    basename = "%s_%s" % (
        id_string, datetime.now().strftime("%Y_%m_%d_%H_%M_%S"))
    if export_type == Export.ANALYSER_EXPORT:
        # Analyser exports should be distinguished by more than just their file extension.
        basename = '{}_ANALYSER_{}'.format(id_string, datetime.now().strftime("%Y_%m_%d_%H_%M_%S"))
    filename = basename + "." + extension

    # check filename is unique
    while not Export.is_filename_unique(xform, filename):
        filename = increment_index_in_filename(filename)

    file_path = os.path.join(
        username,
        'exports',
        id_string,
        export_type,
        filename)

    # TODO: if s3 storage, make private - how will we protect local storage??
    storage = get_storage_class()()
    # seek to the beginning as required by storage classes
    
    print(sync_to_gsuit, 'file_url--------->', temp_file, filter_query)

    try:
        if sync_to_gsuit and '__version__' not in filter_query['$and'][0]:
            if not os.path.exists("media/forms/"):
                os.makedirs("media/forms/")

            temporarylocation="media/forms/submissions_{}.xls".format(id_string)
            import shutil
            shutil.copy(temp_file.name, temporarylocation)
            fxf_form = FieldSightXF.objects.get(pk=filter_query['$and'][0]['fs_project_uuid'])
            upload_to_drive(temporarylocation, str(fxf_form.id) + '_' +id_string, None, fxf_form.project, user)
        
            os.remove(temporarylocation)
        
    except Exception as e:
        print(e.__dict__)
    # get or create export object
    temp_file.seek(0)
    export_filename = storage.save(
        file_path,
        File(temp_file, file_path))
    
    dir_name, basename = os.path.split(export_filename)
    temp_file.close()
    if export_id:
        export = Export.objects.get(id=export_id)
    else:
        fsxf = filter_query.values()[0]
        # print("fsxf", fsxf)
        export = Export(xform=xform, export_type=export_type, fsxf_id=fsxf)
    export.filedir = dir_name
    export.filename = basename
    export.internal_status = Export.SUCCESSFUL
    # don't persist exports that have a filter
    if filter_query is None:
        export.save()
    return export
コード例 #58
0
ファイル: forms.py プロジェクト: warcraft12321/INSaFLU_2.0
    def clean(self):
        """
		Clean all together because it's necessary to compare the genbank and fasta files
		"""
        cleaned_data = super(ReferenceForm, self).clean()
        name = cleaned_data['name']
        try:
            Reference.objects.get(name__iexact=name,
                                  owner=self.request.user,
                                  is_obsolete=False,
                                  is_deleted=False)
            self.add_error(
                'name',
                _("This name '" + name +
                  "' already exist in database, please choose other."))
        except Reference.DoesNotExist:
            pass

        ## test reference_fasta
        if ('reference_fasta' not in cleaned_data):
            self.add_error('reference_fasta', _("Error: Must have a file."))
            return cleaned_data

        ### testing file names
        reference_fasta = cleaned_data['reference_fasta']
        reference_genbank = cleaned_data['reference_genbank']
        if (reference_genbank != None
                and reference_fasta.name == reference_genbank.name):
            self.add_error(
                'reference_fasta',
                _("Error: both files have the same name. Please provide different files."
                  ))
            self.add_error(
                'reference_genbank',
                _("Error: both files have the same name. Please provide different files."
                  ))
            return cleaned_data

        ## testing fasta
        some_error_in_files = False
        reference_fasta_temp_file_name = NamedTemporaryFile(prefix='flu_fa_',
                                                            delete=False)
        reference_fasta_temp_file_name.write(reference_fasta.read())
        reference_fasta_temp_file_name.flush()
        reference_fasta_temp_file_name.close()
        self.software.dos_2_unix(reference_fasta_temp_file_name.name)
        try:
            number_locus = self.utils.is_fasta(
                reference_fasta_temp_file_name.name)
            self.request.session[
                Constants.NUMBER_LOCUS_FASTA_FILE] = number_locus

            ## test the max numbers
            if (number_locus > Constants.MAX_SEQUENCES_FROM_FASTA):
                self.add_error(
                    'reference_fasta',
                    _('Max allow number of sequences in fasta: {}'.format(
                        Constants.MAX_SEQUENCES_FROM_FASTA)))
                some_error_in_files = True
            total_length_fasta = self.utils.get_total_length_fasta(
                reference_fasta_temp_file_name.name)
            if (not some_error_in_files and total_length_fasta >
                    Constants.MAX_LENGTH_SEQUENCE_TOTAL_FROM_FASTA):
                some_error_in_files = True
                self.add_error(
                    'reference_fasta',
                    _('The total length of the sequences in the fasta exceeds the maximum: {}'.format(
                        Constants.MAX_LENGTH_SEQUENCE_TOTAL_FROM_FASTA)))

            n_seq_name_bigger_than = self.utils.get_number_seqs_names_bigger_than(
                reference_fasta_temp_file_name.name,
                Constants.MAX_LENGTH_SEQ_NAME)
            if (not some_error_in_files and n_seq_name_bigger_than > 0):
                some_error_in_files = True
                if (n_seq_name_bigger_than == 1):
                    self.add_error(
                        'reference_fasta',
                        _('There is one sequence name longer than {0}. The max. name length is {0} (a Prokka constraint).'
                          .format(Constants.MAX_LENGTH_SEQ_NAME)))
                else:
                    self.add_error(
                        'reference_fasta',
                        _('There are {0} sequences with names longer than {1}. The max. name length is {1} (a Prokka constraint).'
                          .format(n_seq_name_bigger_than,
                                  Constants.MAX_LENGTH_SEQ_NAME)))

                ## if some errors in the files, fasta or genBank, return
                if (some_error_in_files): return cleaned_data

            if (not self.utils.test_sequences_same_length(
                    reference_fasta_temp_file_name.name)):
                self.add_error(
                    'reference_fasta',
                    _('There are sequences that do not have the same length. This produces errors for samtools faidx.'
                      ))
                return cleaned_data

        except IOError as e:  ## (e.errno, e.strerror)
            os.unlink(reference_fasta_temp_file_name.name)
            some_error_in_files = True
            self.add_error('reference_fasta', e.args[0])
        except:
            os.unlink(reference_fasta_temp_file_name.name)
            some_error_in_files = True
            self.add_error('reference_fasta', "Not a valid 'fasta' file.")

        ### test if it has degenerated bases
        if (os.path.exists(reference_fasta_temp_file_name.name)):
            try:
                self.utils.has_degenerated_bases(
                    reference_fasta_temp_file_name.name)
            except Exception as e:
                os.unlink(reference_fasta_temp_file_name.name)
                some_error_in_files = True
                self.add_error('reference_fasta', e.args[0])

        ### testing genbank
        reference_genbank_temp_file_name = NamedTemporaryFile(prefix='flu_gb_',
                                                              delete=False)
        reference_genbank = cleaned_data['reference_genbank']
        if (reference_genbank != None):
            reference_genbank_temp_file_name.write(reference_genbank.read())
            reference_genbank_temp_file_name.flush()
            reference_genbank_temp_file_name.close()
            self.software.dos_2_unix(reference_genbank_temp_file_name.name)
            try:
                self.utils.is_genbank(reference_genbank_temp_file_name.name)
            except IOError as e:
                some_error_in_files = True
                os.unlink(reference_genbank_temp_file_name.name)
                self.add_error('reference_genbank', e.args[0])
            except:
                os.unlink(reference_genbank_temp_file_name.name)
                some_error_in_files = True
                self.add_error('reference_genbank',
                               "Not a valid 'genbank' file.")

        ## if some errors in the files, fasta or genBank, return
        if (some_error_in_files): return cleaned_data

        ## test locus names and length of sequences
        if (reference_genbank != None):
            try:
                self.utils.compare_locus_fasta_gb(
                    reference_fasta_temp_file_name.name,
                    reference_genbank_temp_file_name.name)
            except ValueError as e:
                self.add_error('reference_fasta', e.args[0])
                self.add_error('reference_genbank', e.args[0])

        ## remove temp files
        os.unlink(reference_genbank_temp_file_name.name)
        os.unlink(reference_fasta_temp_file_name.name)
        return cleaned_data
コード例 #59
0
ファイル: export_tools.py プロジェクト: ventureslab/kobocat
def generate_export(export_type,
                    extension,
                    username,
                    id_string,
                    export_id=None,
                    filter_query=None,
                    group_delimiter='/',
                    split_select_multiples=True,
                    binary_select_multiples=False):
    """
    Create appropriate export object given the export type
    """

    export_type_func_map = {
        Export.XLS_EXPORT: 'to_xls_export',
        Export.CSV_EXPORT: 'to_flat_csv_export',
        Export.CSV_ZIP_EXPORT: 'to_zipped_csv',
        Export.SAV_ZIP_EXPORT: 'to_zipped_sav',
        Export.ANALYSER_EXPORT: 'to_analyser_export'
    }

    xform = XForm.objects.get(user__username__iexact=username,
                              id_string__exact=id_string)

    # query mongo for the cursor
    records = query_mongo(username, id_string, filter_query)

    export_builder = ExportBuilder()
    export_builder.GROUP_DELIMITER = group_delimiter
    export_builder.SPLIT_SELECT_MULTIPLES = split_select_multiples
    export_builder.BINARY_SELECT_MULTIPLES = binary_select_multiples
    export_builder.set_survey(xform.data_dictionary().survey)

    prefix = slugify('{}_export__{}__{}'.format(export_type, username,
                                                id_string))
    temp_file = NamedTemporaryFile(prefix=prefix, suffix=("." + extension))

    # get the export function by export type
    func = getattr(export_builder, export_type_func_map[export_type])

    func(temp_file.name, records, username, id_string, filter_query)

    # generate filename
    basename = "%s_%s" % (id_string,
                          datetime.now().strftime("%Y_%m_%d_%H_%M_%S"))
    if export_type == Export.ANALYSER_EXPORT:
        # Analyser exports should be distinguished by more than just their file extension.
        basename = '{}_ANALYSER_{}'.format(
            id_string,
            datetime.now().strftime("%Y_%m_%d_%H_%M_%S"))
    filename = basename + "." + extension

    # check filename is unique
    while not Export.is_filename_unique(xform, filename):
        filename = increment_index_in_filename(filename)

    file_path = os.path.join(username, 'exports', id_string, export_type,
                             filename)

    # TODO: if s3 storage, make private - how will we protect local storage??
    storage = get_storage_class()()
    # seek to the beginning as required by storage classes
    temp_file.seek(0)
    export_filename = storage.save(file_path, File(temp_file, file_path))
    temp_file.close()

    dir_name, basename = os.path.split(export_filename)

    # get or create export object
    if export_id:
        export = Export.objects.get(id=export_id)
    else:
        export = Export(xform=xform, export_type=export_type)
    export.filedir = dir_name
    export.filename = basename
    export.internal_status = Export.SUCCESSFUL
    # don't persist exports that have a filter
    if filter_query is None:
        export.save()
    return export
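
A hedged usage sketch for the function above: dispatching a CSV export for one form. Export.CSV_EXPORT and the keyword names come from the signature; the username and id_string values are placeholders, and a real call needs the Django app, database, and Mongo connection in place:

export = generate_export(
    Export.CSV_EXPORT,             # resolves to to_flat_csv_export via the map
    'csv',                         # extension for the temp and stored files
    username='demo_user',          # placeholder account name
    id_string='household_survey',  # placeholder form id_string
    group_delimiter='.',           # '.' instead of the default '/'
    split_select_multiples=True)
print(export.filename)             # e.g. household_survey_2024_01_01_00_00_00.csv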
Code example #60
File: views.py Project: HarryKim93/LearningMachine
def classify_api(request):
    data = {"success": False}
    clean_directory()

    if request.method == "POST":
        model = request.POST.get("model", None)
        if model == 'imagenet':
            tmp_f = NamedTemporaryFile()
            tmp_adver = NamedTemporaryFile()

            if request.FILES.get("image", None) is not None:
                image_request = request.FILES["image"]
                image_bytes = image_request.read()
                # decode the uploaded bytes into a PIL image before saving
                image = Image.open(io.BytesIO(image_bytes))
                image.save(tmp_f, image.format)

            elif request.POST.get("image64", None) is not None:
                base64_data = request.POST.get("image64", None).split(',',
                                                                      1)[1]
                plain_data = base64.b64decode(base64_data)
                image = Image.open(io.BytesIO(plain_data))
                image.save(
                    os.path.join(current_dir,
                                 'imagenet/dataset/images/testtest.png'))
                tmp_f.write(plain_data)

            tmp_f.close()

            # Backend session for attack
            print('Building Backend Session.')
            K.set_learning_phase(0)
            sess = tf.Session()
            backend.set_session(sess)

            # Image preprocess
            print('Modifying image')
            x = np.expand_dims(preprocess(image.resize((299, 299))), axis=0)
            img_shape = [1, 299, 299, 3]
            x_input = tf.placeholder(tf.float32, shape=img_shape)

            # Define model
            d = discriminator()

            # Prediction of original image
            print('prediction of original image')
            classify_result = get_predictions(d, x, 10)

            # Select attack algorithm and iteration

            attack_algorithm = request.POST.get("attack", None)
            n = int(request.POST.get("iterate", None))

            # Start attack
            result, attack_speed = attack(attack_algorithm, n, d, x_input, x,
                                          sess)
            print("attack speed: %s seconds" % (round(attack_speed, 5)))
            print('original image:', classify_result[0][1])
            print('adversarial example is classified by', result[0][1])

            # Print image to web site
            with open(
                    os.path.join(current_dir, 'imagenet/output/testtest.png'),
                    'rb') as img_file:
                img_str = base64.b64encode(img_file.read())
            tmp_adver.write(base64.b64decode(img_str))
            tmp_adver.close()
        elif model == 'mnist':
            tmp_adver = NamedTemporaryFile()
            tmp_f = NamedTemporaryFile()
            mnist_sample = int(request.POST.get("sample", None))
            mnist_target = int(request.POST.get("target", None))
            mnist_algorithm = request.POST.get("mnist_algorithm", None)
            result, attack_speed = mnist_attack_func(mnist_sample,
                                                     mnist_target,
                                                     mnist_algorithm)
            print("attack speed: %s seconds" % (round(attack_speed, 5)))
            print('original class:', mnist_sample, 'target class:',
                  mnist_target)
            print('adversarial example is classified by', np.argmax(result))

            result = result.tolist()
            with open(
                    os.path.join(current_dir,
                                 'mnist/dataset/images/testtest.png'),
                    'rb') as input_file:
                input_str = base64.b64encode(input_file.read())
            tmp_f.write(base64.b64decode(input_str))
            tmp_f.close()
            with open(os.path.join(current_dir, 'mnist/output/testtest.png'),
                      'rb') as img_file:
                img_str = base64.b64encode(img_file.read())
            tmp_adver.write(base64.b64decode(img_str))
            tmp_adver.close()

        # Make Graph
        data["attack_speed"] = attack_speed
        data["success"] = True
        data["confidence"] = {}
        if model == 'imagenet':
            data["model"] = 'imagenet'
            for i in range(len(classify_result)):
                data["confidence"][classify_result[i][1]] = float(
                    classify_result[i][2])
            data["adverimage"] = 'data:image/png;base64,' + img_str.decode(
                'utf-8')
            data["adversarial"] = {}
            for i in range(len(result)):
                data["adversarial"][result[i][1]] = float(result[i][2])
                #print('iter:', i, 'name:', result[i][1], 'pred:', result[i][2])

            sess.close()

        elif model == 'mnist':
            data["model"] = 'mnist'
            for i in range(10):
                if i == mnist_sample:
                    data["confidence"][str(i)] = float(1)
                else:
                    data["confidence"][str(i)] = float(0)
            data["input_image"] = 'data:image/png;base64,' + input_str.decode(
                'utf-8')
            data["adverimage"] = 'data:image/png;base64,' + img_str.decode(
                'utf-8')
            data["adversarial"] = {}
            for i in range(len(result[0])):
                data["adversarial"][str(i)] = float(result[0][i])

        # Close the session
        # sess.close()
    return JsonResponse(data)
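
Both branches above shuttle images to the client as base64 data URIs and parse uploads the same way in reverse. A minimal sketch of that round trip, with a placeholder file name standing in for the view's output path:

import base64

# encode a PNG from disk into the 'data:image/png;base64,...' form the
# view returns in data['adverimage']
with open('testtest.png', 'rb') as img_file:        # placeholder path
    img_str = base64.b64encode(img_file.read())
data_uri = 'data:image/png;base64,' + img_str.decode('utf-8')

# decode it back, mirroring the view's handling of the 'image64' field
header, b64_payload = data_uri.split(',', 1)
raw_bytes = base64.b64decode(b64_payload)
assert raw_bytes[:8] == b'\x89PNG\r\n\x1a\n'        # PNG magic, if input was PNG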