def create_thumbnail(file_path):
    thumbnail_filename = utils.get_thumb_filename(os.path.basename(file_path))
    thumbnail_format = utils.get_image_format(os.path.splitext(file_path)[1])
    file_format = thumbnail_format.split('/')[1]

    # Fetch the source image into an in-memory buffer (Python 3: io.BytesIO + urllib.request)
    image_from_url = BytesIO(urllib.request.urlopen(file_path).read())
    image = Image.open(image_from_url)

    # Convert to RGB if necessary
    # Thanks to Limodou on DjangoSnippets.org
    # http://www.djangosnippets.org/snippets/20/
    if image.mode not in ('L', 'RGB'):
        image = image.convert('RGB')

    # scale and crop to thumbnail
    imagefit = ImageOps.fit(image, THUMBNAIL_SIZE, Image.ANTIALIAS)
    thumbnail_io = BytesIO()
    imagefit.save(thumbnail_io, format=file_format)

    thumbnail = InMemoryUploadedFile(
        thumbnail_io,
        None,
        thumbnail_filename,
        thumbnail_format,
        len(thumbnail_io.getvalue()),
        None)
    thumbnail.seek(0)

    cc = CloudContainer('mediaplan-images')
    data = thumbnail.read()
    cc.upload_data(filename=thumbnail_filename, data=data)
    return thumbnail_filename
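
Most of the snippets below share the pattern used here: render the processed bytes into an in-memory buffer, then wrap that buffer in InMemoryUploadedFile so the rest of Django treats it like a normal upload. A minimal, self-contained sketch of just that wrapping step (the filename and content type are illustrative, not taken from the example above):

from io import BytesIO

from django.core.files.uploadedfile import InMemoryUploadedFile


def wrap_bytes_as_upload(data: bytes, filename: str = "thumb.jpg") -> InMemoryUploadedFile:
    buffer = BytesIO(data)  # seekable in-memory file object holding the payload
    upload = InMemoryUploadedFile(
        file=buffer,
        field_name=None,        # not bound to a specific form field here
        name=filename,
        content_type="image/jpeg",
        size=len(data),         # report the real payload size
        charset=None,
    )
    upload.seek(0)              # consumers should start reading at the beginning
    return upload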
Example #2
def check_number_of_samples(file: InMemoryUploadedFile):
    with tempfile.NamedTemporaryFile(suffix=".vcf") as f:
        f.write(file.read())
        f.flush()  # make sure the bytes are on disk before pysam reopens the file by name
        file.seek(0)  # rewind the upload so later validators can read it again

        logger.info("Saved VCF to the temporary file {}", f.name)
        logger.debug("Is file readable: {}", f.readable())

        try:
            vcf = VariantFile(f.name)
        except (ValueError, OSError) as e:
            raise ValidationError(
                _("Reading of the file has failed. Probably, the file has a wrong format"),
                code="format.invalid",
            ) from e

        record = next(vcf.fetch())
        samples = record.samples

        if len(samples) > 1:
            raise ValidationError(
                _("Uploaded file has more than 1 sample. Number of samples: %(n_samples)s"),
                params={"n_samples": len(samples)},
                code="samples.number.invalid",
            )
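
Validators like this one (and check_vcf_format in Example #11 further down) are normally attached to a form or model FileField through its validators argument; Django then calls each one with the uploaded file object. A minimal sketch, assuming the functions are importable from a local validators module:

from django import forms

from .validators import check_vcf_format, check_number_of_samples  # hypothetical import path


class VcfUploadForm(forms.Form):
    # Each validator receives the InMemoryUploadedFile and raises ValidationError on failure
    vcf_file = forms.FileField(validators=[check_vcf_format, check_number_of_samples])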
Example #3
def handle_uploaded_file(f: InMemoryUploadedFile):
    
    # name = get_random_string(10) + '-' + f.name

    name = f.name
    fname = '/tmp/' + name
    mapper = Map()
    mapper.name = name.split('.')[0]

    cont = 0
    with open(fname, 'wb+') as destination:
        while True:
            chunk = f.read(1024)
            if not chunk:
                break
            destination.write(chunk)
            if cont == 0:
                mapper.apply(chunk, True)
            else:
                mapper.apply(chunk)
            cont = cont +1

        # the with-block closes the file; only the mapper needs explicit finalisation
        mapper.end()

    s3client = boto3.client('s3')
    # Upload the file contents; passing the path string as Body would store the literal path text
    with open(fname, 'rb') as payload:
        s3client.put_object(
            Body=payload,
            Bucket=S3_BUCKET,
            Key='uploads/' + name
        )
Example #4
class Avatar(models.Model):
    user = models.ForeignKey(User, unique=True)
    avatar = models.ImageField(max_length=1024, upload_to=content_file_name, blank=True, null=True)
    thumbnail = models.ImageField(max_length=1024, upload_to=content_file_name, blank=True, null=True)

    def save(self, *args, **kwargs):
        if self.avatar:
            avatar = Img.open(StringIO.StringIO(self.avatar.read()))
            avatar.thumbnail((100, 100), Img.ANTIALIAS)
            output = StringIO.StringIO()
            avatar.save(output, format='PNG', quality=75)
            output.seek(0)
            self.avatar = InMemoryUploadedFile(output, 'ImageField', "%s.png" % self.user.username, 'image/png',
                                               output.len, None)
            thumbnail = Img.open(StringIO.StringIO(self.avatar.read()))
            thumbnail.thumbnail((25, 25), Img.ANTIALIAS)
            output = StringIO.StringIO()
            thumbnail.save(output, format='PNG', quality=75)
            output.seek(0)
            self.thumbnail = InMemoryUploadedFile(output, 'ImageField', "mini_%s.png" % self.user.username, 'image/png',
                                                  output.len, None)
        super(Avatar, self).save(*args, **kwargs)

    class Meta:
        verbose_name = 'Avatar'
        verbose_name_plural = 'Avatary'
Example #5
    def parse_account_statement(filename: InMemoryUploadedFile):
        """
        Parses Holvi account statement Excel in new or old header format

        Expected fields:
        "Date"/"Payment date", "Amount", "Currency", "Counterparty", "Description", "Reference",
        "Message", "Filing ID"

        Unused fields:
        "Execution date" after "Payment date"
        """
        sheet = xlrd.open_workbook(
            file_contents=filename.read()).sheet_by_index(0)

        date_fields = ["Payment date", "Date"]
        headers = []
        items = []
        for row_index, row in enumerate(sheet.get_rows()):
            # Skip summary rows
            if headers == [] and row[0].value not in date_fields:
                continue

            if headers == []:
                # Collect header row
                headers = [field.value for field in row]
            else:
                # Extract row data as dictionary with header row as keys
                item = dict(zip(headers, [field.value for field in row]))

                # Parse payment date
                try:
                    # Try new field name first, present since around 2021-03-21
                    date_parsed = datetime.strptime(
                        item["Payment date"],
                        "%d %b %Y"  # "9 Mar 2021"
                    )

                    # Set time to noon as new format has no payment time
                    item["Date_parsed"] = date_parsed.replace(hour=12,
                                                              minute=00)
                except KeyError:
                    # Fallback: try old field name, present in files from 2020-06-10
                    # If we get second KeyError, file header format is invalid and we let import crash out
                    item["Date_parsed"] = datetime.strptime(
                        item["Date"],
                        "%d %b %Y, %H:%M:%S"  # "8 Jan 2020, 09:35:43"
                    )

                # Force reference field to be strings
                item["Reference"] = str(item["Reference"])
                item["Message"] = str(item["Message"])

                # Add meta fields
                item["source_file"] = filename
                item["source_row"] = row_index + 1

                items.append(item)

        return items
Example #6
    def parse_account_statement(uploaded_file: InMemoryUploadedFile):
        """
        Parses Holvi account statement Excel in new or old header format. The Holvi report system
        has been updated in 05/2022 to use XLSX files instead of XLS, and the Excel reading library
        has been updated to handle both file types.

        Expected fields:
        "Date"/"Payment date", "Amount", "Currency", "Counterparty", "Description", "Reference",
        "Message", "Filing ID"

        Unused fields:
        "Execution date" after "Payment date"
        """
        sheet = load_workbook(filename=BytesIO(uploaded_file.read())).active

        date_fields = ["Payment date", "Date"]
        headers = []
        items = []
        for row_index, row in enumerate(sheet.values):
            # Skip summary rows
            if headers == [] and row[0] not in date_fields:
                continue

            if headers == []:
                # Collect header row
                headers = list(row)
            else:
                # Extract row data as dictionary with header row as keys
                item = dict(zip(headers, list(row)))

                # Parse payment date
                try:
                    # Try new field name first, present since around 2021-03-21
                    date_parsed = datetime.strptime(
                        item["Payment date"], "%d %b %Y"  # "9 Mar 2021"
                    )

                    # Set time to noon as new format has no payment time
                    item["Date_parsed"] = date_parsed.replace(hour=12, minute=00)
                except KeyError:
                    # Fallback: try old field name, present in files from 2020-06-10
                    # If we get second KeyError, file header format is invalid and we let import crash out
                    item["Date_parsed"] = datetime.strptime(
                        item["Date"], "%d %b %Y, %H:%M:%S"  # "8 Jan 2020, 09:35:43"
                    )

                # Force reference field to be strings
                item["Reference"] = str(item["Reference"])
                item["Message"] = str(item["Message"])

                # Add meta fields
                item["source_file"] = uploaded_file.name
                item["source_row"] = row_index + 1

                items.append(item)

        return items
Example #7
    def save(self, commit=True):
        """
        Saves the uploaded image in the designated location.

        If the image type is not SVG, a BytesIO of the image content_type is created and
        subsequently saved; otherwise, the SVG is saved in its existing ``charset``
        as an ``image/svg+xml``.

        *Note*: The dimension of image files (excluding SVG) are set using ``PIL``.

        :param commit: If ``True``, the file is saved to the disk;
                       otherwise, it is held in the memory.
        :type commit: bool
        :return: An instance of saved image if ``commit is True``,
                 else ``namedtuple(path, image)``.
        :rtype: bool, namedtuple
        """
        image = self.files.get("image")
        content_type = image.content_type
        file_name = image.name
        image_extension = content_type.split("/")[-1].upper()
        image_size = image.size

        if content_type.lower() != self._SVG_TYPE:
            # Processing the raster graphic image.
            # Note that vector graphics in SVG format
            # do not require additional processing and
            # may be stored as uploaded.
            image = self._process_raster(image, image_extension)
            image_size = image.tell()
            image.seek(0, SEEK_SET)

        # Processed file (or the actual file in the case of SVG) is now
        # saved in the memory as a Django object.
        uploaded_image = InMemoryUploadedFile(
            file=image,
            field_name=None,
            name=file_name,
            content_type=content_type,
            size=image_size,
            charset=None,
        )

        if (
            content_type.lower() == self._SVG_TYPE
            and MARKDOWNX_SVG_JAVASCRIPT_PROTECTION
            and xml_has_javascript(uploaded_image.read())
        ):

            raise MarkdownxImageUploadError(
                "Failed security monitoring: SVG file contains JavaScript."
            )

        return self._save(uploaded_image, file_name, commit)
Example #8
    def read_license_plate(image: InMemoryUploadedFile):
        file_name = 'samochod.jpg'
        with open(file_name, 'wb') as f:
            f.write(image.read())

        output = get_command(f"alpr /code/{file_name}")

        m = re.search(r'-\s*(.*)\s*\t\s*confidence', output.decode('utf-8'))
        plate_text = m.group(1)

        return [plate_text]
Example #9
    def save(self, commit=True):
        """
        Saves the uploaded image in the designated location.

        If the image type is not SVG, a BytesIO of the image content_type is created and
        subsequently saved; otherwise, the SVG is saved in its existing ``charset``
        as an ``image/svg+xml``.

        *Note*: The dimension of image files (excluding SVG) are set using ``PIL``.

        :param commit: If ``True``, the file is saved to the disk;
                       otherwise, it is held in the memory.
        :type commit: bool
        :return: An instance of saved image if ``commit is True``,
                 else ``namedtuple(path, image)``.
        :rtype: bool, namedtuple
        """
        image = self.files.get('image')
        content_type = image.content_type
        file_name = image.name
        image_extension = content_type.split('/')[-1].upper()
        image_size = image.size

        if content_type.lower() != self._SVG_TYPE:
            # Processing the raster graphic image.
            # Note that vector graphics in SVG format
            # do not require additional processing and
            # may be stored as uploaded.
            image = self._process_raster(image, image_extension)
            image_size = image.tell()
            image.seek(0, SEEK_SET)

        # Processed file (or the actual file in the case of SVG) is now
        # saved in the memory as a Django object.
        uploaded_image = InMemoryUploadedFile(
            file=image,
            field_name=None,
            name=file_name,
            content_type=content_type,
            size=image_size,
            charset=None
        )

        if (content_type.lower() == self._SVG_TYPE
                and MARKDOWNX_SVG_JAVASCRIPT_PROTECTION
                and xml_has_javascript(uploaded_image.read())):

            raise MarkdownxImageUploadError(
                'Failed security monitoring: SVG file contains JavaScript.'
            )

        return self._save(uploaded_image, file_name, commit)
Example #10
def save_file(file: InMemoryUploadedFile) -> List[str]:
    root = settings.MEDIA_ROOT
    if is_ebook(file):
        path = default_storage.save(file.name, ContentFile(file.read()))
        filepath = os.path.join(root, path)
        return [filepath]
    elif is_archive(file):
        zip = zipfile.ZipFile(file, 'r')
        filepaths = []
        for file_name in zip.infolist():
            content = zip.read(file_name)
            path = default_storage.save(file_name.filename,
                                        ContentFile(content))
            filepath = os.path.join(root, path)
            filepaths.append(filepath)
        return filepaths
    # Neither an ebook nor an archive: return an empty list so the annotated return type holds
    return []
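
A hedged sketch of the view side, assuming a multi-file input named "files"; each ebook contributes one path and each archive one path per member, which is why the empty-list fallback above matters:

saved_paths = []
for uploaded in request.FILES.getlist("files"):  # field name is an assumption
    saved_paths.extend(save_file(uploaded))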
Example #11
def check_vcf_format(file: InMemoryUploadedFile):
    with tempfile.NamedTemporaryFile(suffix=".vcf") as f:
        f.write(file.read())
        f.seek(0)

        logger.info("Saved VCF to the temporary file {}", f.name)
        logger.debug("Is file readable: {}", f.readable())

        try:
            VariantFile(f.name)
        except (ValueError, OSError) as e:
            raise ValidationError(
                _("Reading of the file has failed. Probably, the file has a wrong format"),
                code="format.invalid",
            ) from e
Example #12
    def split_text(self, textfile: uploadedfile.InMemoryUploadedFile,
                   max_lines):
        """
        Splits the textfile into smaller files with at most max_lines sentences. 
        A list of SimpleUploadedFile objects is returned.
        """
        filename = textfile.name
        textfiles = []
        # get encoding
        textfile.open(mode='rb')
        encoding = chardet.detect(textfile.read())['encoding']

        # put all sentences in a list
        filecontent = []  # list of all sentences in the textfile
        sentence = ''  # one sentence in the textfile; reset after every \n\n and added to filecontent
        # the open method simply does seek(0). This needs to be done, because the file was already opened to find the encoding
        textfile.open()
        for line in textfile:
            line = line.decode(encoding=encoding)
            # this will not work if the newline character is just '\r'
            line = line.replace('\r', '')

            if line == '\n':
                if sentence != '':
                    filecontent.append(sentence)
                    sentence = ''
            else:
                sentence += line.replace('\n', '')
        if sentence != '':
            filecontent.append(sentence)
        # end of gathering filecontent
        # validate max_lines
        self.check_max_lines(max_lines, len(filecontent))
        # create SimpleUploadedFiles with max_lines of content from the textfile
        for i in range(math.ceil(len(filecontent) / max_lines)):
            filesentences, filecontent = filecontent[:max_lines], filecontent[
                max_lines:]
            content = ''
            for sentence in filesentences:
                content += sentence + '\n\n'
            new_filename = f'{filename[:-4]}_{i + 1}{filename[-4:]}'
            textfiles.append(
                uploadedfile.SimpleUploadedFile(new_filename,
                                                content.encode('utf-8-sig')))

        return textfiles
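
The pieces come back as SimpleUploadedFile objects, so they can be fed to anything that already handles a single uploaded text file. An illustrative call, assuming the method lives on a handler instance and the upload field is named "textfile":

parts = handler.split_text(request.FILES["textfile"], max_lines=100)  # handler and field name are assumptions
for part in parts:
    print(part.name, len(part.read()))  # each part holds at most max_lines sentences, utf-8-sig encoded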
Example #13
    def parse_account_statement(filename: InMemoryUploadedFile):
        """
        Parses Holvi account statement Excel

        Expected fields:
        "Date", "Amount", "Currency", "Counterparty", "Description", "Reference",
        "Message", "Filing ID"
        """
        sheet = xlrd.open_workbook(
            file_contents=filename.read()).sheet_by_index(0)

        headers = []
        items = []
        for row_index, row in enumerate(sheet.get_rows()):
            # Skip summary rows
            if headers == [] and row[0].value != "Date":
                continue

            if headers == []:
                # Collect header row
                headers = [field.value for field in row]
            else:
                # Extract row data as dictionary with header row as keys
                item = dict(zip(headers, [field.value for field in row]))

                # Parse payment date
                item["Date_parsed"] = datetime.strptime(
                    item["Date"],
                    "%d %b %Y, %H:%M:%S"  # 8 Jan 2020, 09:35:43
                )

                # Force reference field to be strings
                item["Reference"] = str(item["Reference"])
                item["Message"] = str(item["Message"])

                # Add meta fields
                item["source_file"] = filename
                item["source_row"] = row_index + 1

                items.append(item)

        return items
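
A hedged usage sketch for the parser above; the class it belongs to is not shown here, so the name is an assumption, but the returned dictionaries carry the documented columns plus the added Date_parsed, source_file and source_row keys:

items = StatementImporter.parse_account_statement(request.FILES["statement"])  # class name is hypothetical
for item in items:
    print(item["Date_parsed"], item["Amount"], item["Counterparty"])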
Example #14
def create_trackpoints(track, uploaded_file: InMemoryUploadedFile, model):
    """Create list of ActivityTrackpoints for SBN file"""
    data = Parser()
    data.process(uploaded_file.read())
    # filter out Nones
    data = [x for x in data.pktq if x is not None and x['fixtype'] != 'none']

    insert = []
    app = insert.append  # cache append method for speed.. maybe?
    fmt = '%H:%M:%S %Y/%m/%d'
    for track_point in data:
        app(
            model(lat=track_point['latitude'],
                  lon=track_point['longitude'],
                  sog=track_point['sog'],
                  timepoint=datetime.strptime(
                      '{} {}'.format(track_point['time'], track_point['date']),
                      fmt).replace(tzinfo=pytz.UTC),
                  track=track))
    return insert
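
The function only builds unsaved model instances, so the caller is presumably expected to persist them in one round trip; a short sketch with assumed names:

trackpoints = create_trackpoints(track, request.FILES["sbn_file"], ActivityTrackpoint)  # model and field names assumed
ActivityTrackpoint.objects.bulk_create(trackpoints)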
Example #15
def handle_csv_file(file: InMemoryUploadedFile, form_data: Dict,
                    request: HttpRequest) -> List:
    data_list = []

    with StringIO(file.read().decode("latin-1")) as csv_file:
        data = csv.reader(csv_file, delimiter=",")

        next(data)  # skip the header row

        try:
            upload = Upload(user=request.user,
                            file_name=form_data.get("file_csv"),
                            data_referencia=form_data.get("data"))
            upload.save()

        except IntegrityError:
            messages.error(request, "A data referenciada já possui registros")

        else:
            for row in data:
                if (row[0] and row[3] and row[4] and row[6] and row[11]):
                    name = row[4].split(" ", maxsplit=1)

                    user_to, created = User.objects.get_or_create(
                        username=row[3],
                        defaults={
                            "first_name": name[0],
                            "last_name": name[1]
                        })

                    if created:
                        user_to.save()

                    Cartao(user_to=user_to,
                           user_to_name=row[4],
                           user_to_cpf=row[0],
                           user_to_birthday=date(day=int(row[6][:2]),
                                                 month=int(row[6][2:4]),
                                                 year=int(row[6][4:6])),
                           value=float(row[11]) if row[11] else 0,
                           upload=upload).save()
Example #16
    def parse_fit(self, file: InMemoryUploadedFile):
        # TODO: Bulk update?
        self.decoded = FitFile(file.read())

        records = [r for r in self.decoded.get_messages(name='record')]
        self.set_offset()

        self.load_records(records)

        self.load_activity()

        self.load_laps()

        Lap.objects.bulk_create(self.laps)

        for j in range(len(self.laps)):
            for i in range(self.laps[j].record_start,
                           self.laps[j].record_end + 1):
                self.records[i].activity = self.activity
                self.records[i].lap = self.laps[j]

        Record.objects.bulk_create(self.records)
Example #17
def create_trackpoints(track, uploaded_file: InMemoryUploadedFile, model):
    """Parse GPX trackpoints"""
    gpx = uploaded_file.read().decode('utf-8')
    gpx = gpxpy.parse(gpx)

    insert = []
    app = insert.append  # cache append method for speed.. maybe?

    prev_point = None
    speed = 0

    for gps_track in gpx.tracks:
        for segment in gps_track.segments:
            for point in segment.points:
                if prev_point is not None:
                    speed = point.speed_between(prev_point)
                prev_point = point
                app(
                    model(lat=point.latitude,
                          lon=point.longitude,
                          sog=speed,
                          timepoint=point.time.replace(tzinfo=pytz.UTC),
                          track=track))
    return insert
Example #18
class Student(models.Model):
    user = models.OneToOneField(User, on_delete=models.CASCADE, limit_choices_to=Q(groups__name='Students'),
                                related_name='student')
    first_name_en = models.CharField(max_length=50)
    last_name_en = models.CharField(max_length=50)
    first_name_ua = models.CharField(max_length=50)
    last_name_ua = models.CharField(max_length=50)
    course = models.IntegerField(default=3, choices=((1, _('first')), (2, _('second')), (3, _('third')), (4, _('fourth')),
                                                     (5, _('fifth')), (6, _('sixth'))))
    institution = models.CharField(max_length=100,
                                   choices=(('TSNUK', _('Taras Shevchenko National University of Kyiv')),
                                            ('UKMA', _('National University of Kyiv-Mohyla Academy'))))
    group = models.CharField(max_length=100, choices=(('HEP', _('HEP')), ('nuclear', _('Nuclear physics')), ))
    interests_en = models.TextField()
    interests_ua = models.TextField()
    join_date = models.DateField('date joined', auto_now=True)
    photo = models.ImageField(upload_to='students', default='default.jpg')
    photo_small = models.ImageField(upload_to='students_small', default='default_small.jpg', editable=False)

    def __unicode__(self):  # __unicode__ on Python 2
        return '%s  %s' % (self.first_name_en, self.last_name_en)

    def name_en(self):
        return '%s  %s' % (self.first_name_en, self.last_name_en)

    def name_ua(self):
        return '%s  %s' % (self.first_name_ua, self.last_name_ua)

    def save(self, *args, **kwargs):
        if self.photo:
            if Student.objects.filter(pk=self.pk).exists():
                if self.photo != Student.objects.get(pk=self.pk).photo:
                    image = Img.open(StringIO.StringIO(self.photo.read()))
                    image.thumbnail((200, 200), Img.ANTIALIAS)
                    output = StringIO.StringIO()
                    image.save(output, format='JPEG', quality=75)
                    output.seek(0)
                    self.photo = InMemoryUploadedFile(output, 'ImageField', "%s" % self.photo.name, 'image/jpeg',
                                                      output.len, None)
                    image = Img.open(StringIO.StringIO(self.photo.read()))
                    image.thumbnail((50, 50), Img.ANTIALIAS)
                    output = StringIO.StringIO()
                    image.save(output, format='JPEG', quality=75)
                    output.seek(0)
                    self.photo_small = InMemoryUploadedFile(output, 'ImageField', "%s" % self.photo.name, 'image/jpeg',
                                                            output.len, None)
            else:
                image = Img.open(StringIO.StringIO(self.photo.read()))
                image.thumbnail((200, 200), Img.ANTIALIAS)
                output = StringIO.StringIO()
                image.save(output, format='JPEG', quality=75)
                output.seek(0)
                self.photo = InMemoryUploadedFile(output, 'ImageField', "%s" % self.photo.name, 'image/jpeg',
                                                  output.len, None)
                image = Img.open(StringIO.StringIO(self.photo.read()))
                image.thumbnail((50, 50), Img.ANTIALIAS)
                output = StringIO.StringIO()
                image.save(output, format='JPEG', quality=75)
                output.seek(0)
                self.photo_small = InMemoryUploadedFile(output, 'ImageField', "%s" % self.photo.name, 'image/jpeg',
                                                        output.len, None)
        super(Student, self).save(*args, **kwargs)
Example #19
def file_to_strlines(file: InMemoryUploadedFile) -> list:
    # Decode the upload instead of str()-ing the raw bytes, which would keep the b'...' repr and escape sequences
    return file.read().decode('utf-8').split('\r\n')
Example #20
 def test_open_resets_file_to_start_and_returns_context_manager(self):
     uf = InMemoryUploadedFile(StringIO('1'), '', 'test', 'text/plain', 1, 'utf8')
     uf.read()
     with uf.open() as f:
         self.assertEqual(f.read(), '1')
Example #21
class Thread(models.Model):
    """
    Thread is the main content object of the application
    """
    # codes for types of posts
    QUESTION = "QQ"
    DISCUSSION = "DD"
    LINK = "LL"
    YOUTUBE = "YT"
    DUPLICATE = "DU"
    VIDEOSTREAM = "VS"

    # iterable collection for types of posts
    # must consist of iterables of exactly two items
    TYPES_OF_THREAD = (
        #(QUESTION, _('Question')),
        (DISCUSSION, _('Discussion')),
        (LINK, _('Link')),
        (YOUTUBE, _('Youtube video')),
        (DUPLICATE, _('Duplicate thread')),
        (VIDEOSTREAM, _('Video stream')),
    )
    TYPES_WITH_LINK = [LINK, YOUTUBE, DUPLICATE, VIDEOSTREAM]

    #many to many relationship with tags. When a post is created, it needs to be saved and then tags can be added
    tags = models.ManyToManyField(Tag, blank=True, verbose_name=_('tags'))

    # these fields are taken into account only if the post is thread starter
    hidden = models.BooleanField(
        default=False
    )  # the thread is visible only to the staff and the author
    closed = models.BooleanField(
        default=False)  # no one can post comments / answers in this thread
    sticky = models.DateField(
        null=True,
        blank=True)  # this thread will be sticky until the given date
    featured = models.BooleanField(
        default=False)  # hopefully one day there will be sponsored threads...
    deleted = models.BooleanField(
        default=False
    )  # the thread is marked as deleted, usually on user blocking

    # reference to the user who created the post
    user = models.ForeignKey(settings.AUTH_USER_MODEL,
                             models.CASCADE,
                             default=1)

    # automatically added timestamp field when the record is created
    created = models.DateTimeField(auto_now_add=True)

    # automatically added timestamp field when the record is modified
    modified = models.DateTimeField(auto_now=True)

    # Thread must have one of the types defined in TYPES_OF_THREAD
    thread_type = models.CharField(max_length=2,
                                   choices=TYPES_OF_THREAD,
                                   default=LINK,
                                   null=True)

    # thread body with HTML markup
    text = MarkdownxField(null=True)

    # link field for the Threads of the type Link
    link = models.URLField(null=True, blank=True, unique=True)

    # link's domain. Used for /domains page by the task #66
    domain = models.CharField(max_length=255, null=True, blank=True)

    # thread title can be null if the post is not a thread starter
    title = models.CharField(max_length=255, null=True)

    #image that illustrates the thread
    image = models.ImageField(upload_to='uploads/images/%Y/%m/%d',
                              null=True,
                              blank=True)

    #smaller version of the image
    thumbnail = models.ImageField(upload_to='uploads/images/%Y/%m/%d',
                                  null=True,
                                  blank=True)

    # the current score of the post. It is only calculated for thread posts (no parents)
    # that are not older than one week old
    score = models.IntegerField(default=0)

    # when thread type is "duplicate" this is a link to the original, "main" thread
    original = models.ForeignKey("self",
                                 blank=True,
                                 null=True,
                                 on_delete=models.SET_NULL)

    def __init__(self, *args, **kwargs):
        super(Thread, self).__init__(*args, **kwargs)
        self._old = model_to_dict(self,
                                  fields=[
                                      'id', 'hidden', 'closed', 'sticky',
                                      'sponsored', 'deleted', 'text', 'title'
                                  ])

    def __unicode__(self):
        return self.title

    def save(self, *args, **kwargs):
        self.prepare_images()
        self.update_link()
        super(Thread, self).save()
        AuditThread.audit(self)  # log all changes applied to the thread

    def resize_image(self, content, size, format='JPEG'):
        im = Image.open(BytesIO(content)).convert('RGBA')
        if im.size[0] > size[0] or im.size[1] > size[1]:
            im.thumbnail(size)
        new_image = Image.new('RGBA', im.size, 'WHITE')
        new_image.paste(im, (0, 0), im)
        new_image = new_image.convert('RGB')
        result = BytesIO()
        new_image.save(result, format)
        return result

    def _delete_old_image(self):
        try:
            this = Thread.objects.get(id=self.id)
            if this.image != self.image:
                # delete old image explicitly, as new image will have different name
                this.image.delete(False)
                this.thumbnail.delete(False)
        except Exception as ex:
            pass

    @cached_property
    def youtube_id(self):
        # url = url.split(/(vi\/|v%3D|v=|\/v\/|youtu\.be\/|\/embed\/)/);
        # return undefined !== url[2]?url[2].split(/[^0-9a-z_\-]/i)[0]:url[0];
        r = r"(vi\/|v%3D|v=|\/v\/|youtu\.be\/|\/embed\/)"
        video_id = re.split(r, self.link)
        if len(video_id) == 3:
            video_id = re.split(r"(?i)[^0-9a-z_\-]", video_id[2])  # inline flag must lead the pattern (Python 3.11+)
        else:
            return None
        return video_id[0] if video_id else None

    def parse_youtube_url(self):
        id = self.youtube_id
        item = {}
        if id and settings.GOOGLE_API_KEY:
            snippet = requests.get(
                f'https://www.googleapis.com/youtube/v3/videos?part=snippet&id={id}&key={settings.GOOGLE_API_KEY}'
            )
            snippet = snippet.json()
            if snippet.get('items'):
                item = snippet['items'][0]['snippet']
                item['image'] = item['thumbnails']['default']['url']
        if not item and id:  # failed to get video info from googleapis, trying 3rd party service
            snippet = requests.get(
                f'https://noembed.com/embed?url=https://www.youtube.com/watch?v={id}'
            )
            snippet = snippet.json()
            if snippet.get('title'):
                item = snippet
                item['image'] = item['thumbnail_url']
                item['description'] = ''
        result = {'id': id} if id else None
        if item:
            result.update(
                **{k: item[k]
                   for k in ['title', 'description', 'image']})
        return result

    def _load_youtube_thumbnail(self):
        yt_info = self.parse_youtube_url()
        if yt_info:
            filename = os.path.basename(yt_info['image'])
            ext = filename.split('.', 1)[-1]
            filename = '%s.%s' % (yt_info['id'], ext)
            response = requests.get(yt_info['image'])
            self.image = SimpleUploadedFile(filename, response.content,
                                            response.headers['content-type'])

    def prepare_images(self):
        if self.thread_type == self.YOUTUBE and not self.image:
            self._load_youtube_thumbnail()
        self._delete_old_image()
        if self.image:
            img = self.resize_image(self.image.read(),
                                    size=settings.MAX_IMAGE_SIZE,
                                    format='JPEG')
            img.seek(0)
            self.image = InMemoryUploadedFile(
                img, 'ImageField', "%s.jpg" % self.image.name.split('.')[0],
                'image/jpeg', img.getbuffer().nbytes, None)  # actual payload size, not sys.getsizeof() of the buffer

    def update_link(self):
        """
        extract domain name for threads of type "link"
        """
        if self.thread_type not in self.TYPES_WITH_LINK:
            self.link = None
        if self.thread_type not in [self.LINK, self.DUPLICATE]:
            self.domain = None
        else:
            hostname = urlparse(self.link)
            self.domain = hostname.netloc

    @cached_property
    def comments(self):
        if self.thread_type == self.QUESTION:  # filter out comments marked as answers, they'll come in another property
            params = {
                'is_answer': False,
                'deleted': False,
                'parent_id__isnull': True,
            }
            result = self.post_set.filter(**params)
        else:
            result = self.post_set.all()
        return result

    @cached_property
    def answers(self):
        x = self.post_set.filter(is_answer=True, deleted=False)
        return x

    @cached_property
    def num_comments(self):
        return self.post_set.filter(deleted=False).count()

    @cached_property
    def points(self):
        result = self.threadlike_set.all().aggregate(
            sum=models.Sum('points'))['sum']
        return result or 0

    @cached_property
    def author(self):
        return self.user.username

    @cached_property
    def answered(self):
        """
        check whether the thread of type "question" has an answer, to prevent marking another comment as the answer
        """
        return self.answers.filter(accepted__isnull=False).count() > 0

    @cached_property
    def duplicates(self):
        if self.original:
            q = models.Q(original__in=[self.original.id, self.id]) | models.Q(
                id=self.original.id)
            q = q & ~models.Q(id=self.id)
        else:
            q = models.Q(original=self.id)
        return Thread.objects.filter(q, deleted=False)
Example #22
 def write_file(file: InMemoryUploadedFile, folder: str):
     ext = file.name.split('.')[-1]
     filename = "{}.{}".format(Tools.get_uuid(), ext)
     return default_storage.save("{}/{}".format(folder, filename), ContentFile(file.read()))
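
A typical call site passes a file from request.FILES plus a destination folder and keeps the storage path that default_storage returns. A sketch, assuming the method lives on the Tools helper it already references:

stored_path = Tools.write_file(request.FILES["document"], "uploads")  # field and folder names are assumptions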
Example #23
 def test_open_resets_file_to_start_and_returns_context_manager(self):
     uf = InMemoryUploadedFile(StringIO("1"), "", "test", "text/plain", 1,
                               "utf8")
     uf.read()
     with uf.open() as f:
         self.assertEqual(f.read(), "1")
Example #24
def parse_taxonomy_file(file: InMemoryUploadedFile) -> TaxonomyParserResult:
    tax_parser = TaxonomyParser(file.read().decode("UTF-8"))
    tax_result = tax_parser.parse()
    return tax_result
Example #25
def parse_taxonomy_merged_file(
        file: InMemoryUploadedFile) -> List[TaxonomyParserResult]:
    parser = TaxonomyMergedParser(file.read().decode("UTF-8"))
    return parser.parse()
Example #26
class Professor(models.Model):
    user = models.OneToOneField(User, on_delete=models.CASCADE, limit_choices_to=Q(groups__name='Professors'),
                                related_name='professor')
    first_name_en = models.CharField(max_length=50)
    last_name_en = models.CharField(max_length=50)
    first_name_ua = models.CharField(max_length=50)
    last_name_ua = models.CharField(max_length=50)
    academic_title = models.CharField(max_length=100, choices=(('doctor', _('Doktor of Physics and Mathematics')),
                                                               ('kandidat', _('Kandidat of Physics and Mathematics')),))
    institution = models.CharField(max_length=100, choices=(('TSNUK', _('Taras Shevchenko National University of Kyiv')),))
    position = models.CharField(max_length=100, choices=(('assistant', _('Teaching assistant')),
                                                         ('academic', _('Academic')), ('docent', _('Docent')),
                                                         ('professor', _('Professor')), ('headOfDepartment', _('Head of Department')), ))
    interests_en = models.TextField()
    interests_ua = models.TextField()
    join_date = models.DateField('date joined', auto_now=True)
    photo = models.ImageField(upload_to='professors', default='default.jpg')
    photo_small = models.ImageField(upload_to='professors_small', default='default_small.jpg', editable=False)

    def __unicode__(self):  # __unicode__ on Python 2
        return '%s  %s' % (self.first_name_en, self.last_name_en)

    def name_en(self):
        return '%s  %s' % (self.first_name_en, self.last_name_en)

    def name_ua(self):
        return '%s  %s' % (self.first_name_ua, self.last_name_ua)

    def save(self, *args, **kwargs):
        # Change image size and create 50x50 thumbnail
        if self.photo:
            if Professor.objects.filter(pk=self.pk).exists():
                if self.photo != Professor.objects.get(pk=self.pk).photo:
                    image = Img.open(StringIO.StringIO(self.photo.read()))
                    image.thumbnail((200, 200), Img.ANTIALIAS)
                    output = StringIO.StringIO()
                    image.save(output, format='JPEG', quality=75)
                    output.seek(0)
                    self.photo = InMemoryUploadedFile(output, 'ImageField', "%s" % self.photo.name, 'image/jpeg',
                                                      output.len, None)
                    image = Img.open(StringIO.StringIO(self.photo.read()))
                    image.thumbnail((50, 50), Img.ANTIALIAS)
                    output = StringIO.StringIO()
                    image.save(output, format='JPEG', quality=75)
                    output.seek(0)
                    self.photo_small = InMemoryUploadedFile(output, 'ImageField', "%s" % self.photo.name, 'image/jpeg',
                                                            output.len, None)
            else:
                image = Img.open(StringIO.StringIO(self.photo.read()))
                image.thumbnail((200, 200), Img.ANTIALIAS)
                output = StringIO.StringIO()
                image.save(output, format='JPEG', quality=75)
                output.seek(0)
                self.photo = InMemoryUploadedFile(output, 'ImageField', "%s" % self.photo.name, 'image/jpeg',
                                                  output.len, None)
                image = Img.open(StringIO.StringIO(self.photo.read()))
                image.thumbnail((50, 50), Img.ANTIALIAS)
                output = StringIO.StringIO()
                image.save(output, format='JPEG', quality=75)
                output.seek(0)
                self.photo_small = InMemoryUploadedFile(output, 'ImageField', "%s" % self.photo.name, 'image/jpeg',
                                                        output.len, None)
        super(Professor, self).save(*args, **kwargs)
Example #28
 def raw_csv_to_dict(cls, raw_data: InMemoryUploadedFile) -> list:
     csv_data = list()
     decoded_data = io.StringIO(raw_data.read().decode('utf-8'))
     for row in csv.DictReader(decoded_data):
         csv_data.append(row)
     return csv_data
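
A hedged usage sketch for the class method above (the owning class is not shown, so its name is an assumption); each row comes back as a dict keyed by the CSV header:

rows = CsvImporter.raw_csv_to_dict(request.FILES["csv_file"])
for row in rows:
    print(row)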