Пример #1
0
def create_product_image(product, placeholder_dir):
    """Attach a random placeholder image to *product*.

    Picks a random file from *placeholder_dir*, wraps it in a Django
    ``File`` and saves it as a new ``ProductImage``.

    Returns:
        The saved ``ProductImage`` instance.  (The original returned
        ``None``, because ``Model.save()`` returns ``None``.)
    """
    img_path = '%s/%s' % (placeholder_dir,
                          random.choice(os.listdir(placeholder_dir)))
    # Keep the handle open only while the image is saved; the original
    # leaked the open file object.
    with open(img_path, 'rb') as fh:
        image = ProductImage(product=product, image=File(fh))
        image.save()
    return image
def update_profile(request):
    """AJAX view: update the requesting user's ``ProfileInfo``.

    Reads ``user_id`` and ``size`` from POST; depending on ``size`` the
    fields arrive as ``about1``/``website1``/``email1`` or the ``...2``
    variants.  Optionally stores an uploaded profile picture.  Only the
    owner of the profile may update it.

    Returns:
        ``JsonResponse`` with a boolean ``success`` flag.  (The original
        returned ``None`` when the requester was not the profile owner,
        which makes Django raise; that path now reports failure.)
    """
    current_profile_info = request.user
    if not current_profile_info.is_anonymous():
        current_profile_info = ProfileInfo.objects.get(
            user=current_profile_info)
    else:
        current_profile_info = None

    try:
        user_id = request.POST['user_id']
        user = ProfileInfo.objects.get(id=user_id)
        if current_profile_info == user:
            size = int(request.POST['size'])
            # The form suffixes each field name with ``size``; missing
            # fields default to the empty string (this replaces the
            # original's duplicated try/except ladders).
            about = website = email = ""
            if size in (1, 2):
                about = request.POST.get('about%d' % size, "")
                website = request.POST.get('website%d' % size, "")
                email = request.POST.get('email%d' % size, "")

            user.about = about
            user.website = website
            user.user.email = email

            picture = request.FILES.get('picture')
            if picture is not None:
                try:
                    dest_path = settings.MEDIA_ROOT + picture.name
                    # Copy the upload to MEDIA_ROOT, then attach it to
                    # the profile.  Both handles are now closed
                    # deterministically (the original leaked them).
                    with open(dest_path, 'wb+') as destination:
                        for chunk in picture.chunks():
                            destination.write(chunk)
                    with open(dest_path, 'rb') as fh:
                        user.profile_image.save(picture.name, File(fh))
                except OSError:
                    # Best effort: a failed picture write must not block
                    # the rest of the profile update (original behaviour).
                    pass
            user.save()
            user.user.save()
            return JsonResponse({'success': True})
        # Requester is not the profile owner.
        return JsonResponse({'success': False})
    except Exception:
        return JsonResponse({'success': False})
Пример #3
0
def upload_product_complete(request):
    """Bulk-import products from an uploaded Excel file.

    Reads the ``database_file`` upload into a pandas DataFrame, creates
    one ``Product`` per row via ``Product.objects.get_or_new`` and tries
    to attach the image files referenced by each row.  Rows that could
    not be imported are collected in ``no_updated_dict`` and rendered on
    the result page.

    NOTE(review): if the request is not a POST, ``file`` is never bound
    and ``pd.read_excel(file)`` below raises NameError -- confirm this
    view is only reachable via POST.
    """
    if request.method == 'POST':
        try:
            file = request.FILES['database_file']
        except:
            # No file selected: the message text is user-facing Korean
            # ("no file was selected") and is left untouched.
            messages.success(request, '파일이 선택되지 않았습니다.')
            return redirect('staff:upload_product_by_file')
    try:
        df = pd.read_excel(file)
        # Normalise NaN cells to None so the ORM receives real NULLs.
        df = df.where(pd.notnull(df), None)

        print('성공적으로 파일 가져옴')
    except:
        # NOTE(review): raising ObjectDoesNotExist for a parse failure is
        # surprising -- presumably caught upstream; confirm.
        raise ObjectDoesNotExist
    
    print(df)
    # Column order must match the spreadsheet layout exactly.
    df.columns = ["number", "title", "brand", 'category', 'list_price',  # required
                'info_made_country', 'info_product_number', 'info_delivery', 'combined_delivery', 'main_image',  # required
                'image1', 'image2', 'image3', 'image4', 'image5', 
                'image6', 'image7', 'image8', 'image9',
                'info_product_kind', 'info_material', 'info_feature', 'info_product_person', 'info_alert', # optional
                'info_quality_standard', 'info_as', 'description', 'video_link' # optional
                ]
    no_updated_dict = {}
    # NOTE(review): ``engine`` is created but never used in this view.
    engine = create_engine(DATABASE_URL, echo=False)
    now1 = timezone.now()
    for index, row in df.iterrows():
        data = dict(row)
        number = data['number']
        brand = data['brand']
        # ``created`` is either a bool or an error-string sentinel.
        # obj = Test.objects.create(full_name=row['full_name'], phone_number=row['phone_number'])
        obj, created = Product.objects.get_or_new(request, number=number, data=data)
        if not created:
            no_updated_dict[data['number']] = 'Already Exist'
        elif created == 'Brand DoesNotExist':
            no_updated_dict[data['number']] = created
        elif created == 'Category DoesNotExist':
            no_updated_dict[data['number']] = created

        image_cols = ['main_image', 'image1', 'image2', 'image3', 'image4', 'image5', 
                      'image6', 'image7', 'image8', 'image9']
        
                    
        # ``== True`` (not truthiness) on purpose: created may also be a
        # non-empty error string, which is truthy.
        if created == True:
            obj_images = [obj.main_image, obj.image1, obj.image2, obj.image3, obj.image4, obj.image5, 
                          obj.image6, obj.image7, obj.image8, obj.image9]
            # NOTE(review): obj_images_link is assigned but never used --
            # the loop that would fill it is commented out below.
            obj_images_link = [obj.main_image_link, obj.image1_link, obj.image2_link]

            image_data_link = False
            for i, col in enumerate(image_cols):
                if data[col] is not None:
                    # Cells may contain either a local file path or a URL;
                    # OSError from open() signals the URL case.
                    try:
                        with open(data[col], 'rb') as f:
                            filename = upload_main_image_path(obj, os.path.basename(f.name))
                            # NOTE(review): the File wraps a handle that is
                            # closed once this ``with`` exits, but obj.save()
                            # runs later -- verify the images actually persist.
                            obj_images[i] = File(f, name=filename)
                            # obj.save()
                            # self.license_file.save(upload_main_image_path, File(f))
                    except OSError:
                        image_data_link = True
                        # if data[col][0:4] == 'http':
                        # obj_images_link[i] = data[col]
                        # print("obj_images_link[{}] : {}".format(i, data[col]))
                        # obj.save()

                        # response = urllib.request.urlretrieve(data[col])
                        # with open(response[0], 'rb') as f:
                        #     file_url = urlparse(data[col])
                        #     filename = os.path.basename(file_url.path)
                        #     obj_images[i].save(filename, f)
                        #     print(i, "image check", obj_images[i])
                        #     obj.save()
            if image_data_link:
                # Fall back to storing the raw links for the first three images.
                obj.main_image_link = data['main_image']
                obj.image1_link = data['image1']
                obj.image2_link = data['image2']
            obj.save()

    product_qs = Product.objects.all()
    now2 = timezone.now()
    # Elapsed wall-clock time for the whole import.
    print(now2-now1)
    context = {
        'qs' : product_qs,
        'no_updated_dict': no_updated_dict
    }
    return render(request, 'staff/upload_product_complete.html', context)
Пример #4
0
    def authenticate(self, request):
        """Facebook OAuth authentication backend entry point.

        Exchanges the ``code`` query parameter for an access token,
        fetches the Facebook profile, and either returns an existing
        user, creates a new ``UserProfile`` + ``FacebookUserProfile``
        pair, or redirects to the account-sync flow when the Facebook
        account does not match the logged-in user.

        NOTE(review): this uses Python-2-only APIs (``urllib.urlencode``,
        ``urllib.urlopen``, ``cgi.parse_qs``, ``User.is_anonymous()`` as
        a method) and returns ``HttpResponseRedirect`` objects from an
        auth backend, which callers must special-case -- confirm both.
        """
        user = request.user or None
        access_token = None
        # assume logging in normal way
        params = {}
        params["client_id"] = CONSUMER_KEY
        params["client_secret"] = CONSUMER_SECRET
        params["redirect_uri"] = request.build_absolute_uri(reverse("facebook_login_done"))
        params["code"] = request.GET.get('code', '')

        url = ("https://graph.facebook.com/oauth/access_token?"
               + urllib.urlencode(params))
        from cgi import parse_qs
        userdata = urllib.urlopen(url).read()
        res_parse_qs = parse_qs(userdata)
        # Could be a bot query
        if not ('access_token') in res_parse_qs:
            return None
        access_token = res_parse_qs['access_token'][-1]

        url = "https://graph.facebook.com/me?access_token=" + access_token

        fb_data = json.loads(urllib.urlopen(url).read())
        # NOTE(review): ``fb_data["id"]`` is read before the emptiness
        # check below, so an empty response raises before returning None.
        uid = fb_data["id"]

        if not fb_data:
            return None

        # A user registered by e-mail may already exist for this account.
        try:
            same_email_user = UserProfile.objects.get(email=fb_data.get('email', None))
        except:
            same_email_user = None

        if user.is_anonymous() and not same_email_user:
            try:
                # Known Facebook account: refresh the token and log in.
                fb_user = FacebookUserProfile.objects.get(facebook_uid=uid)
                fb_user.accesstoken = access_token
                fb_user.save()
                return fb_user.user
            except FacebookUserProfile.DoesNotExist:
                # First login via Facebook: create a local profile.
                fb_picture_url = "http://graph.facebook.com/%s/picture?type=large" % uid

                username = fb_data.get('username')
                if not username:
                    username = uid
                userProfile = UserProfile.objects.create(username=username)
                userProfile.first_name = fb_data['first_name']
                userProfile.last_name = fb_data['last_name']
                if fb_data['gender'] == "male":
                    userProfile.gender = 'M'
                else:
                    if fb_data['gender'] == "female":
                        userProfile.gender = 'F'
                userProfile.email = fb_data.get('email', None)
                userProfile.isVerified_email = True
                userProfile.location = fb_data.get('location', fb_data).get('name', None)
                userProfile.save()

                # Fetch and store the profile picture via a temp file.
                img = urllib.urlretrieve(fb_picture_url)
                userProfile.profile_picture.save("Facebook-profile.jpg", File(open(img[0])))
                urllib.urlcleanup()

                userProfile.facebook_link = fb_data.get('link', None)
                from django.contrib.auth.hashers import make_password
                # Random throwaway password; the account is OAuth-only.
                raw_pass = ''.join(random.choice(string.ascii_uppercase + string.digits) for x in range(12))
                tmp_pass = make_password(raw_pass)
                userProfile.password = tmp_pass
                userProfile.save()

                fb_profile = FacebookUserProfile(facebook_uid=uid, user=userProfile, email=fb_data['email'],
                    url=fb_data['link'], location=userProfile.location, accesstoken=access_token)
                fb_profile.save()

                return userProfile
        else:
            # Logged in already, or a user with the same e-mail exists.
            try:
                if same_email_user:
                    user = same_email_user
                user_facebook = FacebookUserProfile.objects.get(user=user)
                if user_facebook.facebook_uid == uid:
                    return user_facebook.user
                else:
                    # Different Facebook account: hand off to sync flow.
                    request.session['fb_accesstoken'] = access_token
                    next = request.session['next'] or ""
                    if next:
                        del request.session['next']
                        return HttpResponseRedirect(next)
                    else:
                        return HttpResponseRedirect(reverse('sync_facebook'))
            except FacebookUserProfile.DoesNotExist:
                try:
                    # The uid is linked to some other local user.
                    user_facebook = FacebookUserProfile.objects.get(facebook_uid=uid)
                    request.session['fb_accesstoken'] = access_token
                    next = request.session['next'] or ""
                    if next:
                        del request.session['next']
                        return HttpResponseRedirect(next)
                    else:
                        return HttpResponseRedirect(reverse('sync_facebook'))
                except FacebookUserProfile.DoesNotExist:
                    # Link this Facebook uid to the current local user.
                    fb_profile = FacebookUserProfile(facebook_uid=uid, user=UserProfile.objects.get(username=user.username), email=fb_data['email'],
                    url=fb_data['link'], location=fb_data.get('location', fb_data).get('name', None), accesstoken=access_token)
                    fb_profile.save()
                    return fb_profile.user
Пример #5
0
 def test_read_from_storage(self):
     """A file registered in the fake storage can be read back intact."""
     stored = File(six.BytesIO(b'bar'))
     HANDLED_FILES['written_files'].append(['foo', stored])
     retrieved = self.command.read_from_storage('foo')
     self.assertEqual(retrieved.read(), b'bar')
Пример #6
0
    def populate(self, entry: Entry, form: EntryForm) -> Entry:
        """Copy validated form data onto *entry* and its related objects.

        Handles the scalar fields (via ``form.populate``), the author,
        categories, acquisitions with prices, contributors, feeds, and
        the cover image (plus a generated thumbnail).  Related objects
        are created lazily with ``get_or_create`` scoped to the
        handler's catalog/creator.

        Returns the saved *entry*.
        """
        form.populate(entry)

        if 'author' in form.cleaned_data.keys():
            author, created = Author.objects.get_or_create(
                catalog=self._catalog,
                name=form.cleaned_data['author']['name'],
                surname=form.cleaned_data['author']['surname'])
            entry.author = author

        # Save before touching M2M relations (entry needs a pk).
        entry.save()

        if 'categories' in form.cleaned_data.keys():
            entry.categories.clear()
            for record in form.cleaned_data.get('categories', []):
                category, created = Category.objects.get_or_create(
                    creator=self._creator,
                    catalog=self._catalog,
                    term=record['term'])
                if created:
                    # label/scheme are optional refinements of the term.
                    category.label = record.get('label')
                    category.scheme = record.get('scheme')
                    category.save()
                entry.categories.add(category)

        if 'category_ids' in form.cleaned_data.keys():
            # NOTE(review): this branch clears *contributors* but adds to
            # *categories* -- looks like a copy-paste slip; confirm intent.
            entry.contributors.clear()
            for contributor in form.cleaned_data.get('category_ids', []):
                entry.categories.add(contributor)

        for record in form.cleaned_data.get('acquisitions', []):
            # NOTE(review): ``record['content']`` is read unconditionally
            # here, but guarded with ``'content' in record`` just below --
            # a record without content raises KeyError on this line.
            acquisition = Acquisition(entry=entry,
                                      relation=record.get('relation'),
                                      mime=record['content'].content_type)

            if 'content' in record.keys():
                # FieldFile.save persists the acquisition as a side effect.
                acquisition.content.save(
                    f"{uuid.uuid4()}{mimetypes.guess_extension(acquisition.mime)}",
                    record['content'])

            for price in record.get('prices', []):
                # NOTE(review): if no content was saved above, the
                # acquisition has no pk here -- verify this FK creation.
                Price.objects.create(acquisition=acquisition,
                                     currency=price['currency_code'],
                                     value=price['value'])

        if 'contributors' in form.cleaned_data:
            entry.contributors.clear()
            for record in form.cleaned_data.get('contributors', []):
                contributor, is_created = Author.objects.get_or_create(
                    catalog=self._catalog,
                    name=record['name'],
                    surname=record['surname'])
                entry.contributors.add(contributor)

        if 'contributor_ids' in form.cleaned_data.keys():
            entry.contributors.clear()
            for contributor in form.cleaned_data.get('contributor_ids', []):
                entry.contributors.add(contributor)

        if 'feeds' in form.cleaned_data:
            entry.feeds.clear()
            for feed in form.cleaned_data.get('feeds', []):
                entry.feeds.add(feed)

        if 'image' in form.cleaned_data:
            if form.cleaned_data['image'] is None:
                # Explicit None clears the cover image entirely.
                entry.image = None
                entry.image_mime = None
                entry.thumbnail = None
            else:
                entry.image_mime = form.cleaned_data['image'].content_type

                entry.image.save(
                    f"cover{mimetypes.guess_extension(entry.image_mime)}",
                    form.cleaned_data['image'])

                # Derive an in-memory thumbnail from the uploaded image,
                # preserving its original format.
                buffer = BytesIO()
                thumbnail = form.cleaned_data['image'].image.copy()
                thumbnail.thumbnail(settings.OPDS['IMAGE_THUMBNAIL'])
                thumbnail.save(buffer,
                               format=form.cleaned_data['image'].image.format)
                entry.thumbnail.save(
                    f"thumbnail{mimetypes.guess_extension(entry.image_mime)}",
                    File(buffer))

        return entry
 def _open(self, name, mode='rb'):
     """Open *name* from media storage, or the placeholder image if absent."""
     full_path = os.path.join(settings.MEDIA_ROOT, name)
     if not self.exists(full_path):
         fallback = os.path.join(settings.STATICFILES_DIRS[0],
                                 FILE_NOT_FOUND_IMAGE)
         return File(open(fallback, mode))
     return super()._open(name, mode)
Пример #8
0
def create_uploaded_file_in_db_with_copy(values, uploaded_file):
    """Persist *uploaded_file* as a ``TemporaryUploadedFile`` row, then
    delete the on-disk source.

    ``values`` supplies the remaining model fields; ``uploaded_file`` is
    a filesystem path (``str`` or ``pathlib.Path``).
    """
    # os.path.basename works for both str and Path arguments; the
    # original relied on a ``.name`` attribute, which plain string
    # paths do not have (AttributeError).
    with open(uploaded_file, "rb") as fh:
        values["uploaded_file"] = File(
            file=fh, name=os.path.basename(os.fspath(uploaded_file)))
        TemporaryUploadedFile.objects.create(**values)

    # The database now owns a copy; remove the temporary source file.
    os.remove(uploaded_file)
Пример #9
0
def upload_dataset(ground, train, test):
    """Save a document file onto a Django FileField.

    NOTE(review): ``filepath``, ``doc`` and ``filename`` are not defined
    in this scope, and the ``ground``/``train``/``test`` parameters are
    never used -- as written this raises NameError.  Confirm the
    intended module-level globals before relying on this function.
    """
    from django.core.files import File
    # Opened 'wb+' (write/truncate) then handed to save(): presumably
    # the storage backend writes into it -- verify the mode is intended.
    with open(filepath, 'wb+') as doc_file:
        doc.documen.save(filename, File(doc_file), save=True)
Пример #10
0
 def _open(self, name, mode='rb'):
     """Return the stored file *name* wrapped in a Django ``File``."""
     fs_path = self.path(name)
     return File(open(fs_path, mode))
Пример #11
0
    def get_rendition(self, filter):
        """Return (and cache) this image's rendition for *filter*.

        ``filter`` may be a ``Filter`` instance or a filter-spec string.
        Lookup order: the optional 'renditions' cache backend, then the
        database; the rendition image is generated only when neither
        has it.
        """
        if isinstance(filter, str):
            filter = Filter(spec=filter)

        cache_key = filter.get_cache_key(self)
        Rendition = self.get_rendition_model()

        try:
            rendition_caching = True
            cache = caches['renditions']
            rendition_cache_key = Rendition.construct_cache_key(
                self.id, cache_key, filter.spec)
            cached_rendition = cache.get(rendition_cache_key)
            if cached_rendition:
                return cached_rendition
        except InvalidCacheBackendError:
            # No 'renditions' cache configured; fall through to the DB.
            rendition_caching = False

        try:
            rendition = self.renditions.get(
                filter_spec=filter.spec,
                focal_point_key=cache_key,
            )
        except Rendition.DoesNotExist:
            # Generate the rendition image
            try:
                # Pass the %-args individually: the original wrapped them
                # in a tuple, which defeats logging's lazy formatting and
                # renders the placeholders incorrectly.
                logger.debug("Generating '%s' rendition for image %d",
                             filter.spec, self.pk)

                start_time = time.time()
                generated_image = filter.run(self, BytesIO())

                logger.debug("Generated '%s' rendition for image %d in %.1fms",
                             filter.spec, self.pk,
                             (time.time() - start_time) * 1000)
            except Exception as exc:
                # The original supplied two args for three placeholders,
                # which itself raised a formatting error while logging.
                logger.debug(
                    "Failed to generate '%s' rendition for image %d: %s",
                    filter.spec, self.pk, exc)
                raise

            # Generate filename
            input_filename = os.path.basename(self.file.name)
            input_filename_without_extension, input_extension = os.path.splitext(
                input_filename)

            # A mapping of image formats to extensions
            FORMAT_EXTENSIONS = {
                'jpeg': '.jpg',
                'png': '.png',
                'gif': '.gif',
                'webp': '.webp',
            }

            output_extension = filter.spec.replace(
                '|', '.') + FORMAT_EXTENSIONS[generated_image.format_name]
            if cache_key:
                output_extension = cache_key + '.' + output_extension

            # Truncate filename to prevent it going over 60 chars
            output_filename_without_extension = input_filename_without_extension[:(
                59 - len(output_extension))]
            output_filename = output_filename_without_extension + '.' + output_extension

            rendition, created = self.renditions.get_or_create(
                filter_spec=filter.spec,
                focal_point_key=cache_key,
                defaults={
                    'file': File(generated_image.f, name=output_filename)
                })

        if rendition_caching:
            cache.set(rendition_cache_key, rendition)

        return rendition
Пример #12
0
    def _import_stix_file(self, path, **kwargs):
        """Import one stix file, persist the convention and attach the raw
        file (named by the convention's uuid hex).

        The file handle is closed deterministically; the original opened
        it five times in near-identical copy-pasted loops and never
        closed it.
        """
        convention = import_convention(path, **kwargs)
        convention.save()
        filename = convention.id.hex + '.txt'
        with open(path, 'r') as fh:
            convention.stix_file.save(
                filename,
                File(fh),
                save=True,
            )
        self.stdout.write("{0}".format(convention))

    def handle(self, *args, **options):
        """Rebuild all 2015 convention data from the stix fixture files."""
        # Start from a clean slate for the target year.
        vs = Convention.objects.filter(year=2015)
        vs.delete()

        self.stdout.write("Database Flushed")

        # Fixture files are numbered fall00011..fall00027 etc.; the
        # ranges below reproduce the original while/increment loops.
        for i in range(11, 28):
            self._import_stix_file("stix/fall000{0}.txt".format(i),
                                   season='fall')

        for i in range(11, 22):
            self._import_stix_file("stix/spring000{0}.txt".format(i),
                                   season='spring')

        for i in range(11, 26):
            self._import_stix_file("stix/combo000{0}.txt".format(i),
                                   season='spring',
                                   division=True)

        self._import_stix_file("stix/international.txt", season='summer')
        self._import_stix_file("stix/midwinter.txt", season='midwinter')

        self.stdout.write("Conventions Loaded")

        vs = Convention.objects.filter(year=2015)

        # Each pass walks every convention; the order matters (sessions
        # before rounds, rounds before panel, and so on).
        extraction_steps = [
            (extract_sessions, "Sessions Extracted"),
            (extract_rounds, "Rounds Extracted"),
            (extract_panel, "Panel Extracted"),
            (extract_entries, "Entries Extracted"),
            (extract_contests, "Contests Extracted"),
            (extract_contestants, "Contestants Extracted"),
            (extract_appearances, "Appearances Extracted"),
            (extract_songs, "Songs Extracted"),
            (extract_scores, "Scores Extracted"),
        ]
        for step, message in extraction_steps:
            for v in vs:
                step(v)
            self.stdout.write(message)

        # Disabled in the original; preserved for reference.
        # for v in vs:
        #     fill_parents(v)
        # self.stdout.write("Parents Added")

        # for v in vs:
        #     denormalize(v)
        # self.stdout.write("Convention Denormalized")

        for v in vs:
            rank(v)
        self.stdout.write("Convention Ranked")

        return "Rebuild Complete"
Пример #13
0
    def post(self, request):
      """Handle the final (10th) interview answer: persist the answer,
      run sentiment analysis, decode the base64 video, store it on the
      Video model, and kick off blink/emotion detection.

      NOTE(review): ``q10`` (used below) is never defined in this scope
      and raises NameError; ``account_instance`` is unbound when the
      session is not logged in.  Confirm both before relying on this
      view.
      """
      if request.method == "POST":
        # save answer to Answer models
        a10 = request.POST['note-textarea']
        v10 = request.POST['video']
        t = request.POST['time']
        t = int(t)
        t10 = request.POST['time']

        if 'is_login' in request.session and request.session['is_login']==True:
            account_name = request.session['account']
            account_instance = Member.objects.get(Account=account_name)        
        
        # retreive the user's id
        # NOTE(review): ``uid`` is a sliced values-queryset used directly
        # as a pk lookup -- presumably resolved as a subquery; verify.
        uid = Answer.objects.filter(userID=account_instance).order_by('-id')[:1].values('id')   
        unit = Answer.objects.get(id=uid)
        unit.q10 = q10
        unit.a10 = a10
        unit.v10 = v10
        unit.t10 = t10
        unit.save()

        # retrieve video instance
        vid_unit = Video.objects.get(userID=account_instance, id=uid)
        vid_id = Video.objects.filter(userID=account_instance).order_by('-id')[:1].values('id')  
        

        # save result to Result models
        r10 = sentiment(10, account_name)
        res = Result.objects.get(id=uid)
        res.r10 = r10
        res.time10 = t10
        res.save()

        # decode base64 to mp4 file
        # The first 23 chars are the data-URL prefix ("data:video/mp4;...").
        text = unit.v10
        text = text[23:]
        fh = open('interview_vid.mp4', 'wb')
        fh.write(base64.b64decode(text))
        fh.close()
        print('VIDEO DECODED!', '\n')

        # save to django video model
        f = open('interview_vid.mp4', 'rb')
        vid_unit.vid10.save('interview_vid.mp4', File(f), True)
        f.close()
        print('VIDEO SAVED TO MODEL!', '\n')

        # retrieve video file from django model
        # NOTE(review): reads ``vid1`` although ``vid10`` was just saved --
        # confirm which field the detection step should use.
        vid_instance = Video.objects.get(id=uid).vid1
        print(vid_instance)
        vid_instance = str(vid_instance)
        vidname = str(vid_instance[7:])
        print(vidname)
        # NOTE(review): Windows-only path separators; breaks on POSIX.
        vid_path = os.path.join(BASE_DIR + '\\media\\videos\\' + vidname)
        
        # do blink detection and save to Result model
        path = request.path
        print('PATH =====> ', path)
        blink10(vid_path, account_name, t, path)
        emotion10(vid_path, account_name, t, path)

        return redirect('/')
      
      return render(request, self.template_name,locals())   
Пример #14
0
def thumbnail(image_url, width, height, quality=95, left=.5, top=.5,
              padding=False, padding_color="#fff"):
    """
    Given the URL to an image, resizes the image using the given width and
    height on the first time it is requested, and returns the URL to the new
    resized image. if width or height are zero then original ratio is
    maintained.

    Arguments:
        image_url: URL (or MEDIA_URL-relative path) of the source image.
        width, height: target size in pixels; 0 for either dimension
            preserves the source aspect ratio.
        quality: quality value passed through to PIL on save.
        left, top: crop focal point, as fractions clamped to [0, 1].
        padding: pad (instead of crop) to reach the target ratio.
        padding_color: fill colour used when padding.

    Returns the thumbnail URL, or the original URL (or "") whenever the
    thumbnail cannot be produced -- failures are deliberately silent.
    """

    if not image_url:
        return ""
    try:
        from PIL import Image, ImageFile, ImageOps
    except ImportError:
        # PIL not installed: degrade gracefully to no thumbnailing.
        return ""

    # Strip any query string and the MEDIA_URL prefix to get a
    # storage-relative path.
    image_url = unquote(str(image_url)).split("?")[0]
    if image_url.startswith(settings.MEDIA_URL):
        image_url = image_url.replace(settings.MEDIA_URL, "", 1)
    image_dir, image_name = os.path.split(image_url)
    image_prefix, image_ext = os.path.splitext(image_name)
    filetype = {".png": "PNG", ".gif": "GIF"}.get(image_ext, "JPEG")
    # The thumbnail name encodes every parameter that affects the
    # output, so differing requests never collide.
    thumb_name = "%s-%sx%s" % (image_prefix, width, height)
    if left != .5 or top != .5:
        left = min(1, max(0, left))
        top = min(1, max(0, top))
        thumb_name = "%s-%sx%s" % (thumb_name, left, top)
    thumb_name += "-padded-%s" % padding_color if padding else ""
    thumb_name = "%s%s" % (thumb_name, image_ext)

    # `image_name` is used here for the directory path, as each image
    # requires its own sub-directory using its own name - this is so
    # we can consistently delete all thumbnails for an individual
    # image, which is something we do in filebrowser when a new image
    # is written, allowing us to purge any previously generated
    # thumbnails that may match a new image name.
    thumb_dir = os.path.join(settings.MEDIA_ROOT, image_dir,
                             settings.THUMBNAILS_DIR_NAME, image_name)
    if not os.path.exists(thumb_dir):
        os.makedirs(thumb_dir)
    thumb_path = os.path.join(thumb_dir, thumb_name)
    thumb_url = "%s/%s/%s" % (settings.THUMBNAILS_DIR_NAME,
                              quote(image_name.encode("utf-8")),
                              quote(thumb_name.encode("utf-8")))
    image_url_path = os.path.dirname(image_url)
    if image_url_path:
        thumb_url = "%s/%s" % (image_url_path, thumb_url)

    try:
        thumb_exists = os.path.exists(thumb_path)
    except UnicodeEncodeError:
        # The image that was saved to a filesystem with utf-8 support,
        # but somehow the locale has changed and the filesystem does not
        # support utf-8.
        from mezzanine.core.exceptions import FileSystemEncodingChanged
        raise FileSystemEncodingChanged()
    if thumb_exists:
        # Thumbnail exists, don't generate it.
        return thumb_url
    elif not default_storage.exists(image_url):
        # Requested image does not exist, just return its URL.
        return image_url

    f = default_storage.open(image_url)
    try:
        image = Image.open(f)
    except:
        # Invalid image format.
        return image_url

    image_info = image.info
    to_width = int(width)
    to_height = int(height)
    from_width = image.size[0]
    from_height = image.size[1]

    # Set dimensions.
    # A zero dimension means "derive it from the aspect ratio".
    if to_width == 0:
        to_width = from_width * to_height // from_height
    elif to_height == 0:
        to_height = from_height * to_width // from_width
    if image.mode not in ("P", "L", "RGBA"):
        try:
            image = image.convert("RGBA")
        except:
            # Conversion failure: serve the original untouched.
            return image_url
    # Required for progressive jpgs.
    ImageFile.MAXBLOCK = 2 * (max(image.size) ** 2)

    # Padding.
    # Paste the source centred onto a padding_color canvas whose ratio
    # matches the target, so the later fit() never crops content.
    if padding and to_width and to_height:
        from_ratio = float(from_width) / from_height
        to_ratio = float(to_width) / to_height
        pad_size = None
        if to_ratio < from_ratio:
            pad_height = int(to_height * (float(from_width) / to_width))
            pad_size = (from_width, pad_height)
            pad_top = (pad_height - from_height) // 2
            pad_left = 0
        elif to_ratio > from_ratio:
            pad_width = int(to_width * (float(from_height) / to_height))
            pad_size = (pad_width, from_height)
            pad_top = 0
            pad_left = (pad_width - from_width) // 2
        if pad_size is not None:
            pad_container = Image.new("RGBA", pad_size, padding_color)
            pad_container.paste(image, (pad_left, pad_top))
            image = pad_container

    # Create the thumbnail.
    to_size = (to_width, to_height)
    to_pos = (left, top)
    try:
        image = ImageOps.fit(image, to_size, Image.ANTIALIAS, 0, to_pos)
        image = image.save(thumb_path, filetype, quality=quality, **image_info)
        # Push a remote copy of the thumbnail if MEDIA_URL is
        # absolute.
        if "://" in settings.MEDIA_URL:
            with open(thumb_path, "rb") as f:
                default_storage.save(thumb_url, File(f))
    except Exception:
        # If an error occurred, a corrupted image may have been saved,
        # so remove it, otherwise the check for it existing will just
        # return the corrupted image next time it's requested.
        try:
            os.remove(thumb_path)
        except Exception:
            pass
        return image_url
    return thumb_url
Пример #15
0
 def get_upload_file(filename='repo'):
     """Load a fixture tarball and wrap it as an uploaded file.

     Returns a ``SimpleUploadedFile`` named *filename* containing the
     bytes of ``./tests/fixtures_static/<filename>.tar.gz``.
     """
     fixture_path = './tests/fixtures_static/{}.tar.gz'.format(filename)
     # Read within a context manager so the handle is closed; the
     # original left the open file object dangling.
     with open(fixture_path, 'rb') as fh:
         content = fh.read()
     return SimpleUploadedFile(filename,
                               content,
                               content_type='multipart/form-data')
Пример #16
0
    def import_entries(self, feed_entries):
        """
        Import entries.

        For each feedparser entry: derive a creation date and slug, skip
        entries already imported for that date+slug, build an ``Entry``
        with categories/tags, optionally download an image enclosure,
        and attach an author (the command default or one created from
        the feed's author details).
        """
        for feed_entry in feed_entries:
            self.write_out('> %s... ' % feed_entry.title)
            if feed_entry.get('published_parsed'):
                creation_date = datetime(*feed_entry.published_parsed[:6])
                if settings.USE_TZ:
                    creation_date = timezone.make_aware(
                        creation_date, timezone.utc)
            else:
                # No publish date in the feed: fall back to "now".
                creation_date = timezone.now()
            slug = slugify(feed_entry.title)[:255]

            # Same slug on the same calendar day => already imported.
            if Entry.objects.filter(creation_date__year=creation_date.year,
                                    creation_date__month=creation_date.month,
                                    creation_date__day=creation_date.day,
                                    slug=slug):
                self.write_out(self.style.NOTICE(
                    'SKIPPED (already imported)\n'))
                continue

            categories = self.import_categories(feed_entry)
            entry_dict = {'title': feed_entry.title[:255],
                          'content': feed_entry.description,
                          'excerpt': strip_tags(feed_entry.get('summary')),
                          'status': PUBLISHED,
                          'creation_date': creation_date,
                          'start_publication': creation_date,
                          'last_update': timezone.now(),
                          'slug': slug}

            if not entry_dict['excerpt'] and self.auto_excerpt:
                # Build a 50-word excerpt from the stripped description.
                entry_dict['excerpt'] = Truncator(
                    strip_tags(feed_entry.description)).words(50)

            if self.tags:
                entry_dict['tags'] = self.import_tags(categories)

            entry = Entry(**entry_dict)
            entry.save()
            entry.categories.add(*categories)
            entry.sites.add(self.SITE)

            if self.image_enclosure:
                # Download the first image enclosure into a temp file and
                # attach it as the entry image.
                for enclosure in feed_entry.enclosures:
                    if ('image' in enclosure.get('type') and
                            enclosure.get('href')):
                        img_tmp = NamedTemporaryFile(delete=True)
                        img_tmp.write(urlopen(enclosure['href']).read())
                        img_tmp.flush()
                        entry.image.save(os.path.basename(enclosure['href']),
                                         File(img_tmp))
                        break

            if self.default_author:
                entry.authors.add(self.default_author)
            elif feed_entry.get('author_detail'):
                try:
                    author = Author.objects.create_user(
                        slugify(feed_entry.author_detail.get('name')),
                        feed_entry.author_detail.get('email', ''))
                except IntegrityError:
                    # Username collision: the author already exists.
                    author = Author.objects.get(**{
                        Author.USERNAME_FIELD:
                        slugify(feed_entry.author_detail.get('name'))})
                entry.authors.add(author)

            self.write_out(self.style.ITEM('OK\n'))
Пример #17
0
    def synchronize(self,
                    url=None,
                    filename=None,
                    content_type=None,
                    file_extras=None,
                    with_user=None):
        """Synchronize the dataset with the CKAN instance.

        Builds the CKAN "package" payload from this resource, maps the
        access-restriction level, attaches the uploaded/downloaded file
        when present, then publishes the resource (as ``with_user`` when
        given, otherwise through the default handler).
        """
        # CKAN resource identifier:
        id = str(self.ckan_id)

        # "Package" properties:
        data = {
            'crs': self.crs and self.crs.description or '',
            'name': self.title,
            'description': self.description,
            'data_type': self.data_type,
            'extracting_service': 'False',  # I <3 CKAN
            'format': self.format_type and self.format_type.ckan_format,
            'view_type': self.format_type and self.format_type.ckan_view,
            'id': id,
            'lang': self.lang,
            'restricted_by_jurisdiction': str(self.geo_restriction),
            'url': url and url or '',
            'api': '{}'
        }

        # Map the restriction level onto CKAN's "restricted" payload.
        # (0) No restriction
        if self.restricted_level == 'public':
            restricted = json.dumps({'level': 'public'})
        # (1) Logged-in users only
        elif self.restricted_level == 'registered':
            restricted = json.dumps({'level': 'registered'})
        # (2) Only the listed users
        elif self.restricted_level == 'only_allowed_users':
            restricted = json.dumps({
                'allowed_users': ','.join(
                    p.user.username for p in self.profiles_allowed.all()),
                'level': 'only_allowed_users'
            })
        # (3) Users of this organisation / of the listed organisations
        # (the two original branches were byte-identical, so merged)
        elif self.restricted_level in ('same_organization',
                                       'any_organization'):
            restricted = json.dumps({
                'allowed_users': ','.join(
                    get_all_users_for_organisations(
                        self.organisations_allowed.all())),
                'level': 'only_allowed_users'
            })
        else:
            # The original raised NameError on `restricted` further down
            # for an unknown level; fail early with a clear message.
            raise ValueError(
                'Unknown restricted_level: %r' % self.restricted_level)

        data['restricted'] = restricted

        if self.referenced_url:
            data['url'] = self.referenced_url

        if self.dl_url and filename:
            # The handle stays open on purpose: CKAN reads it when the
            # resource is published below.
            downloaded_file = File(open(filename, 'rb'))
            data['upload'] = downloaded_file
            data['size'] = downloaded_file.size
            data['mimetype'] = content_type

        if self.up_file and file_extras:
            data['upload'] = self.up_file.file
            data['size'] = file_extras.get('size')
            data['mimetype'] = file_extras.get('mimetype')

        if self.ftp_file:
            if not url:
                data['upload'] = self.ftp_file.file
            data['size'] = self.ftp_file.size
            data['mimetype'] = None  # TODO

        if self.data_type == 'raw':
            if self.ftp_file or self.dl_url or self.up_file:
                data['resource_type'] = 'file.upload'
            elif self.referenced_url:
                data['resource_type'] = 'file'
        if self.data_type == 'annexe':
            data['resource_type'] = 'documentation'
        if self.data_type == 'service':
            data['resource_type'] = 'api'

        ckan_package = CkanHandler.get_package(str(self.dataset.ckan_id))

        if with_user:
            username = with_user.username

            apikey = CkanHandler.get_user(username)['apikey']
            with CkanUserHandler(apikey=apikey) as ckan:
                ckan.publish_resource(ckan_package, **data)
        else:
            return CkanHandler.publish_resource(ckan_package, **data)
Пример #18
0
    def crawl_recipe(self, recipe_num=None):
        """Crawl one recipe page and persist it as Recipe/RecipeStep rows.

        Scrapes the title, description, ingredients, step images and step
        texts from the rendered page, then downloads the images into the
        model image fields.
        """
        url_recipe = self._url_base.format(
            recipe_num=recipe_num
        )

        driver = webdriver.PhantomJS(
            '/Users/hongdonghyun/projects/team_project/wps-picky/django_app/utils/crawl/phantomjs')
        try:
            driver.get(url_recipe)
            html = driver.page_source
        finally:
            # Always release the browser process (the original leaked it).
            driver.quit()

        soup = BeautifulSoup(html)

        # Pull the <title> text; its ':'-separated pieces hold the recipe
        # title and description.
        title = soup.select_one('title').get_text()
        empty_list = [t.strip(' ') for t in title.split(':')]

        # Main recipe image.
        div_editor = soup.find("div", {"class": "edtitor_img_uio"})
        recipe_img = div_editor.img['src']

        # Recipe title and description.
        recipe_title = empty_list[0]
        recipe_description = empty_list[1]

        div_container_inner = soup.find("div", {"class": "__viewer_container_inner"})
        div_na_summary_info = div_container_inner.find_all("div", {"class": "na_summary_info"})

        # select() returns a list, so this builds a list of lists,
        # flattened below before text extraction.
        _list = []
        for before_list in div_na_summary_info:
            _list.append(before_list.select("ul > li > span"))
        _list = flatten(_list)

        # Text of each <span>, joined with ','.
        ingredient_list = ','.join(span.get_text() for span in _list)

        # Step images / step texts.
        div_tptype_5 = soup.find_all("div", {"class": "t_ptype5"})
        div_p = soup.find_all("p", {"class": "t_txt"})

        img_list = [image.img['src'] for image in div_tptype_5]

        text_list = [text.get_text() for text in div_p]
        # The first t_txt paragraph is not a step description.
        del text_list[0]

        # Strip newlines/tabs, then split on "STEP n" markers, dropping
        # empty pieces and surrounding whitespace.
        i = ' '.join(text_list).replace('\n', '').replace('\t', '')
        pattern = re.compile(r'STEP \d+\s?')
        text_list = [text.strip() for text in pattern.split(i) if text]
        # NOTE(review): deleting from text_list while iterating it skips
        # elements; behavior kept as-is pending confirmation of intent.
        for text in text_list:
            # Remove an unwanted piece seen during crawling.
            if '(숫자)' in text:
                del text_list[0]

        # Download the main image into a self-deleting temporary file.
        file_name = '{}.jpg'.format(recipe_title)
        temp_file = NamedTemporaryFile(delete=True)
        response = requests.get(recipe_img)
        temp_file.write(response.content)
        # Flush so the bytes are on disk before the field reads the file
        # (the original saved without flushing).
        temp_file.flush()

        # Create the recipe.
        _2bab_recipe = Recipe.objects.create(
            title=recipe_title,
            description=recipe_description,
            ingredient=ingredient_list,
            # TODO: currently uses the first PickyUser; should use the
            # admin account once finalized - hong 8/21
            user=PickyUser.objects.first()
        )

        # Store the downloaded image on the recipe's image field.
        _2bab_recipe.img_recipe.save(file_name, File(temp_file))

        # Pair each step image with its description.
        for x, y in zip(img_list, text_list):
            # Create the step bound to the recipe's pk.
            _2bab_recipe_step = RecipeStep.objects.create(description=y, recipe_id=_2bab_recipe.pk)
            # Same download-and-save dance as the main image above.
            file_name = '{}.{}.jpg'.format(_2bab_recipe.pk, recipe_title)
            temp_file = NamedTemporaryFile(delete=True)
            response = requests.get(x)
            temp_file.write(response.content)
            temp_file.flush()
            _2bab_recipe_step.img_step.save(file_name, File(temp_file))
Пример #19
0
def train(recogntype, facesdbname, facesdbpath, size):
    """Train (or update) a face recognizer and store the result in the db.

    Reads the CSV face index for ``facesdbpath``, trains a recognizer of
    kind ``recogntype`` ('LBPH', 'KNN', 'EF', 'FF', ...) on faces resized
    to ``size``, writes the YAML model file, and saves it together with
    the ordered list of recognized people as a RecognizerPreTrainedData
    row.  Returns the label->person dict; re-raises on failure.
    """
    # Read CSV face file and populate Face training set and labels
    frontal_cascade = Cascade.objects.get(pk=1)
    cv_frontal_cascade = cv2.CascadeClassifier(frontal_cascade.xml_file.path)
    # CSV path e.g.: /data/media/faces/SD_Faces/faces/lala.csv
    csv_path = create_csv_file(facesdbpath)
    (face_labelsDict, npfaces, labels) = read_csv_file(recogntype, csv_path,
                                                       cv_frontal_cascade,
                                                       size)
    trained_data_path = os.path.join(settings.MEDIA_ROOT,
                                     'recognizer_train_data')
    if (recogntype == 'LBPH'):
        pretrained_filepath = os.path.join(trained_data_path, 'MyFaces.yml')
    elif (recogntype == 'KNN'):
        pretrained_filepath = os.path.join(trained_data_path, 'MyKNNFaces.yml')
    else:
        # Derived name, e.g. "mydb_100x100_EF.yml".
        pretrained_filepath = os.path.join(
            trained_data_path,
            facesdbname.replace('.zip', '') + '_' + str(size[0]) + 'x' +
            str(size[1]) + '_' + recogntype + '.yml')

    recognizer = create_recognizer(recogntype)
    try:
        if ((recogntype == 'EF' or recogntype == 'FF')
                or (recogntype == 'LBPH'
                    and not os.path.isfile(pretrained_filepath))):
            log.debug("Creating trained file: {}".format(pretrained_filepath))
            recognizer.train(npfaces, numpy.array(labels))
        else:
            # Retrain from scratch, replacing the previous model file.
            log.debug(
                "Updating the trained file: {}".format(pretrained_filepath))
            os.remove(pretrained_filepath)
            recognizer.train(npfaces, numpy.array(labels))
        recognizer.write(pretrained_filepath)
        # Save the YAML pretrained file to db
        # NOTE(review): the full path is passed as the stored file name;
        # os.path.basename may have been intended — confirm.
        prtrdata = RecognizerPreTrainedData()
        prtrdata.name = os.path.basename(pretrained_filepath)
        prtrdata.recognizer = recogntype
        with open(pretrained_filepath) as f:
            prtrdata.yml_file.save(pretrained_filepath, File(f))

        # Save a list of faces that this database recognizes,
        # keeping the order of the labels:
        # label 1 is ptrdata.faces[0], label 2 is ptrdata.faces[1], etc.
        # (.items() replaces the Python-2-only .iteritems(); str.join
        # replaces the quadratic "+=" loop.)
        prtrdata.faces = ", ".join(
            "{}".format(person) for person in face_labelsDict.values())

        # Delete previous entry of MyFaces.yml
        qs = RecognizerPreTrainedData.objects.filter(name='MyFaces.yml')
        if (qs.count() > 0):
            qs.delete()

        prtrdata.save()

    except Exception as e:
        log.error(str(e))
        # Bare raise keeps the original traceback; the unreachable
        # `return` that followed `raise e` has been removed.
        raise
    return face_labelsDict
Пример #20
0
 def handle_events(self, f):
     """Create Event rows from the CSV file-like object ``f``.

     Each row: name, deadline, start, end, organizing-committee team,
     category, description, (unused), scope, max participants.  Training
     events get a thumbnail chosen by keywords in the name and a
     date-suffixed (possibly randomized) unique name.
     """
     eventreader = csv.reader(f)
     for event in eventreader:
         messages.success(self.request, " ".join([item for item in event]))
         try:
             t_oc = Team.objects.get(name=event[4])
         except Exception:
             # Tell the user, then let the error propagate (the original
             # re-queried unconditionally and crashed the same way).
             messages.success(self.request, "Cant find team " + event[4])
             raise
         new_event = Event.objects.create(
             name=event[0] + "tempobject" + str(random.randint(1, 50000)),
             deadline=event[1],
             start_date=event[2],
             end_date=event[3],
             category=event[5],
             description=event[6],
             scope=event[8],
             max_participants=event[9],
         )
         new_event.save()
         new_event.organizers.add(self.request.user)
         new_event.organizing_committee.add(t_oc)
         if new_event.category == "training":
             # Ordered keyword rules; the first rule whose fragments all
             # appear in the name wins.  Leading letters are dropped so
             # the first character matches case-insensitively.
             thumb_rules = [
                 (("ommunication",), "communication-skills.jpg"),
                 (("motional",), "emotional-intelligence.jpg"),
                 (("eedback",), "feedback.jpg"),
                 (("resentation",), "presentation-skills.jpg"),
                 (("rganizational",), "organizational-management.jpg"),
                 (("eadership",), "leadership.jpg"),
                 (("roject",), "project-management.jpg"),
                 (("ime", "anagement"), "time-management.jpg"),
                 (("eambuilding",), "teambuilding.JPG"),
                 (("acilitation",), "facilitation.jpg"),
                 (("ynamics",), "group-dynamics.jpg"),
                 (("ody", "anguage"), "body-language.jpg"),
             ]
             thumbname = "trtlogo.png"
             for fragments, candidate in thumb_rules:
                 if all(fragment in new_event.name
                        for fragment in fragments):
                     thumbname = candidate
                     break
             with open('eestecnet/training/' + thumbname, 'rb') as doc_file:
                 new_event.thumbnail.save("thumbname.jpg",
                                          File(doc_file),
                                          save=True)
             randstring = ""
             try:
                 Event.objects.get(name=event[0] + "-" +
                                   str(new_event.start_date))
                 # Name clash: append a random suffix for uniqueness.
                 randstring = str(random.randint(1, 500))
             except Exception:
                 pass
             new_event.name = event[0] + "-" + str(
                 new_event.start_date) + randstring
         else:
             new_event.name = event[0]
         new_event.save()
Пример #21
0
    def post(self, request, token: str):
        """Validate the bot configuration for ``token`` and persist the Bot.

        Rejects an empty configuration, runs the builder pipeline, and —
        when no Bot with this access token exists for the user — saves the
        generated script and config files onto a new Bot record.
        """
        path = open_configuration(request, token)
        with open(path, 'r', encoding='utf-8') as file:
            data = json.load(file)

        # The configuration is usable if at least one section is non-empty.
        has_content = any(
            key in data and data[key] != []
            for key in ('text', 'reply_markup', 'inline_markup', 'callback'))
        if not has_content:
            messages.error(
                request,
                'You having empty configuration... Create some config!')
            return redirect('create_bot_second_step_text_url', token=token)

        username = str(request.user.username)
        text_builder = TextBuilder(token, username)
        reply_markup_builder = ReplyMarkupBuilder(token, username)
        inline_markup_builder = InlineMarkupBuilder(token, username)
        callback_builder = CallbackBuilder(token, username)

        program = BotFacade(text_builder, reply_markup_builder,
                            inline_markup_builder, callback_builder, token,
                            username, data, request)
        callback = program.operation()
        if callback is not None:
            messages.error(request,
                           'You have a problem buttons.' + callback[0])
            if callback[1] == 'reply':
                return redirect('create_bot_second_step_reply_buttons_url',
                                token)
            return redirect('create_bot_second_step_inline_buttons_url',
                            token)

        # NOTE(review): this read/rewrite round-trip writes the same
        # content back (a no-op apart from touching the file); kept in
        # case the side effect matters — confirm before removing.
        some_path = open_test_bot(request=request, token=token)
        with open(some_path, 'r+', encoding='utf-8') as file:
            content_code = file.read()
            file.seek(0)
            file.truncate()
            file.write(content_code)

        file_script_path = open_test_bot(request=request, token=token)
        file_config_path = open_configuration(request=request, token=token)
        # One Profile lookup is enough (the original queried it twice).
        current_user = Profile.objects.get(user=request.user)

        access_token = data['access_token']
        # exists() avoids materializing the queryset into a list.
        if not Bot.objects.filter(access_token=access_token,
                                  owner=current_user).exists():
            bot_object = Bot(owner=current_user,
                             access_token=access_token,
                             title=data['name'],
                             username=data['username'],
                             date_created=timezone.now())
            # Context managers close the source files
            # (the originals leaked the handles).
            with open(file_script_path, encoding="utf8") as script_file:
                bot_object.file_script.save(
                    f"{request.user.username}_{token.replace(':', '_')}"
                    "_test_bot.py", File(script_file))
            with open(file_config_path, encoding="utf8") as config_file:
                bot_object.file_config.save(
                    f"{request.user.username}_{token.replace(':', '_')}"
                    "_configuration.json", File(config_file))
            bot_object.save()
        return redirect('create_bot_third_step_url', token=token)
Пример #22
0
def create_default_image(apps, schema_editor):
    """Data-migration helper: create the 'default' Image row.

    Creates the Image record and stores the bundled default.jpg fixture
    on its file field.
    """
    file_name = 'default.jpg'
    Image = apps.get_model('images', 'image')
    img = Image.objects.create(title='default', value=file_name)
    # Context manager closes the fixture file (the original leaked it).
    with open('images/fixtures/images/default.jpg', 'rb') as fh:
        img.value.save(file_name, File(fh))
Пример #23
0
def new_html():
    """Modify the html template for data vizualization."""
    global template_name
    base_dir = os.path.dirname(os.path.abspath(__file__))
    template_path = os.path.join(base_dir, "templates/blog", template_name)
    # Load the template text once; File() simply wraps the handle.
    with open(template_path, "r",) as handle:
        html_string = File(handle).read()

    palette = color_span()

    # Annotate each tracked link with a percentage bubble and append the
    # matching CSS rules (normal + hover) to the page's <style> element.
    for rule_index, (url, pct) in enumerate(data_prep()[0], start=1):
        diameter = diameter_size(pct)
        shade = palette[pct - 1]
        annotated = re.sub(
            r'(<a href="{}".*?>.*?</a>)'.format(url),
            r'\1<div class="url{}">{}%</div>'.format(rule_index, pct),
            html_string,
            flags=re.MULTILINE | re.DOTALL,
        )

        dom = BeautifulSoup(annotated, features="html.parser")
        dom.style.append(
            """.url{} {{
            height: {}px;
            width: {}px;
            background-color: {};
            border-radius: 50%;
            display: inline-block;
            line-height: {}px;
            text-align: center;
            vertical-align: middle;
            color: {};}}
        """.format(
                rule_index,
                diameter,
                diameter,
                shade,
                diameter,
                shade,
            )
        )
        dom.style.append(
            """.url{}:hover {{
            height: {}px;
            width: {}px;
            background-color: {};
            border-radius: 50%;
            display: inline-block;
            line-height: {}px;
            text-align: center;
            vertical-align: middle;
            color: black;
            font-weight: bold;}}
        """.format(
                rule_index,
                diameter,
                diameter,
                shade,
                diameter,
            )
        )
        html_string = str(dom)
    return html_string
Пример #24
0
def _archive_config(logged_instance, tftp_path):
    """Store the freshly downloaded config and update the instance status.

    Saves a LoggedHistory row from the TFTP file, then copies the file
    into the media tree.  Sets ``logged_instance.status`` to "OK" on
    success or to the matching "ERROR: ..." value.  Factored out of
    log_host, where this code appeared twice verbatim.
    """
    logged_instance.date_updated = logged_instance.date_checked
    try:
        with open(f"{settings.TFTP_ROOT}{tftp_path}") as new_file:
            LoggedHistory(
                filelink=File(new_file),
                logged=logged_instance,
                date=logged_instance.date_updated,
            ).save()
    except OSError:
        logged_instance.status = "ERROR: WRITE FILE"
    if logged_instance.status != "ERROR: WRITE FILE":
        try:
            source_file = f"{settings.TFTP_ROOT}{tftp_path}"  # flake8 start
            pr = settings.PROJECT_ROOT
            mr = settings.MEDIA_URL
            ip = logged_instance.host.ip_address
            # NOTE(review): `filename` is unused below; it looks like it
            # was meant to be part of dest_file — confirm before changing.
            filename = logged_instance.date_updated.strftime("%Y%m%d%H%M%S")
            dest_file = f'{pr}{mr}{ip}/(unknown)_config.txt'
            shutil.copyfile(source_file, dest_file)  # had to use variables for flake8 compliance
            logged_instance.status = "OK"
        except OSError:
            logged_instance.status = "ERROR: COPY FILE"


def log_host(ipaddress):
    """Back up the running config of the host at ``ipaddress``.

    Pings the host, pulls its config over SSH/TFTP, rotates old
    LoggedHistory entries, stores the config when it changed, and records
    a status on the Logged row.  Returns True when the config was stored
    or is identical to the last backup, False otherwise.
    """
    logged_instance = Logged.objects.get(host__ip_address=ipaddress)
    logged_instance.status = ""
    if logged_instance.host.vendor.lower() == "other":
        logged_instance.status = "Model not supported"
        logged_instance.save()
    else:
        logged_instance.date_checked = timezone.now()
        if ping(ipaddress):
            # Timestamped file name the device writes to via TFTP.
            tftp_path = (
                logged_instance.host.ip_address
                + "_"
                + logged_instance.date_checked.strftime("%Y%m%d%H%M%S")
                + "_config.txt"
            )
            ssh_status = ssh_host(
                ipaddress,
                logged_instance.ssh_username,
                logged_instance.ssh_password,
                logged_instance.host.vendor,
                tftp_path,
            )
            if ssh_status != "":
                logged_instance.status = ssh_status
                logged_instance.save()
                try:
                    os.remove(f"{settings.TFTP_ROOT}{tftp_path}")
                except OSError:
                    pass
            else:
                qs_logged_history = LoggedHistory.objects.filter(
                    logged=logged_instance
                ).order_by("-date")
                if qs_logged_history.exists():
                    # Rotate: keep at most l_saves - 1 old backups.
                    if len(qs_logged_history) >= logged_instance.l_saves:
                        for i in range(
                            logged_instance.l_saves - 1, len(qs_logged_history)
                        ):
                            os.remove(qs_logged_history[i].filelink.path)
                            qs_logged_history[i].delete()
                    if not filecmp.cmp(
                        f"{settings.TFTP_ROOT}{tftp_path}",
                        f"{settings.PROJECT_ROOT}{qs_logged_history.first().filelink.url}",
                    ):
                        # Config changed since the last backup: archive it.
                        _archive_config(logged_instance, tftp_path)
                    else:
                        logged_instance.status = "IDENTIC FILE"
                else:
                    # First backup for this host.
                    _archive_config(logged_instance, tftp_path)
                try:
                    os.remove(f"{settings.TFTP_ROOT}{tftp_path}")
                except OSError:
                    logged_instance.status = "ERROR: DELETE FILE"
                logged_instance.save()
        else:
            logged_instance.status = "DOWN"
            logged_instance.save()
    return logged_instance.status in ("OK", "IDENTIC FILE")
Пример #25
0
 def test_write_local_file(self):
     """write_local_file should create the destination file on disk."""
     fd, path = File(six.BytesIO(b"foo")), '/tmp/foo.bak'
     try:
         self.command.write_local_file(fd, path)
         self.assertTrue(os.path.exists(path))
     finally:
         # Clean up even when the assertion fails
         # (the original left the file behind on failure).
         if os.path.exists(path):
             os.remove(path)
Пример #26
0
    def partial_update(self, request, *args, **kwargs):
        """Handle a tus PATCH request: append one chunk to a resumable upload.

        Validates the tus headers, offset and optional checksum, writes the
        chunk, and — once the upload is complete — verifies the assembled
        file and saves it onto the model's file field.  Returns 201 on
        success, 400/460 with a message on validation failure, 415 when
        verification fails, 500 when the final save raises.
        """
        # Validate tus header
        if not has_required_tus_header(request):
            msg = 'Missing "{}" header.'.format('Tus-Resumable')
            log.warning("Chunked uploader (%d): %s" % (request.user.pk, msg))
            return HttpResponse(msg, status=status.HTTP_400_BAD_REQUEST)

        # Validate content type
        if not self._is_valid_content_type(request):
            msg = 'Invalid value for "Content-Type" header: {}. Expected "{}".'.format(
                request.META['CONTENT_TYPE'], TusUploadStreamParser.media_type)
            log.warning("Chunked uploader (%d): %s" % (request.user.pk, msg))
            return HttpResponse(msg, status=status.HTTP_400_BAD_REQUEST)

        # Retrieve the model instance ("obj" avoids shadowing the builtin)
        obj = self.get_object()

        # Get upload_offset
        upload_offset = int(request.META.get(constants.UPLOAD_OFFSET_NAME, 0))

        # Validate upload_offset
        if upload_offset != self.get_cached_property("offset", obj):
            log.warning("Chunked uploader (%d) (%d): offset conflict" %
                        (request.user.pk, obj.pk))
            raise Conflict

        temporary_file = get_or_create_temporary_file(obj)
        if not os.path.isfile(temporary_file):
            # Initial request in the series of PATCH request was handled on a different server instance.
            msg = 'Previous chunks not found on this server.'
            log.warning("Chunked uploader (%d) (%d): %s" %
                        (request.user.pk, obj.pk, msg))
            return HttpResponse(msg, status=status.HTTP_400_BAD_REQUEST)

        # Get chunk from request
        chunk_bytes = self.get_chunk(request)

        # Check for data
        if not chunk_bytes:
            msg = 'No data.'
            log.warning("Chunked uploader (%d) (%d): %s" %
                        (request.user.pk, obj.pk, msg))
            return HttpResponse(msg, status=status.HTTP_400_BAD_REQUEST)

        # Check checksum (http://tus.io/protocols/resumable-upload.html#checksum)
        upload_checksum = request.META.get(
            constants.UPLOAD_CHECKSUM_FIELD_NAME, None)
        if upload_checksum is not None:
            if upload_checksum[0] not in TUS_API_CHECKSUM_ALGORITHMS:
                msg = 'Unsupported Checksum Algorithm: {}.'.format(
                    upload_checksum[0])
                log.warning("Chunked uploader (%d) (%d): %s" %
                            (request.user.pk, obj.pk, msg))
                return HttpResponse(msg, status=status.HTTP_400_BAD_REQUEST)
            elif not checksum_matches(upload_checksum[0], upload_checksum[1],
                                      chunk_bytes):
                msg = 'Checksum Mismatch.'
                log.warning("Chunked uploader (%d) (%d) : %s" %
                            (request.user.pk, obj.pk, msg))
                # 460: non-standard "Checksum Mismatch" status used by tus
                return HttpResponse(msg, status=460)

        # Run chunk validator
        chunk_bytes = self.validate_chunk(upload_offset, chunk_bytes)

        # Check for data
        if not chunk_bytes:
            msg = 'No data. Make sure "validate_chunk" returns data.'
            log.warning("Chunked uploader (%d) (%d): %s" %
                        (request.user.pk, obj.pk, msg))
            return HttpResponse(msg, status=status.HTTP_400_BAD_REQUEST)

        # Write file
        try:
            write_data(obj, chunk_bytes)
            log.debug("Chunked uploader (%d) (%d): wrote %d bytes" %
                      (request.user.pk, obj.pk, len(chunk_bytes)))
        except Exception as e:
            msg = str(e)
            log.warning(
                "Chunked uploader (%d) (%d): exception writing data: %s" %
                (request.user.pk, obj.pk, msg))
            return HttpResponse(msg, status=status.HTTP_400_BAD_REQUEST)

        headers = {'Upload-Offset': self.get_cached_property("offset", obj)}

        # Upload complete once the offset reaches the declared length.
        if self.get_cached_property("upload-length",
                                    obj) == self.get_cached_property(
                                        "offset", obj):
            log.debug("Chunked uploader (%d) (%d): chunks completed" %
                      (request.user.pk, obj.pk))

            # Trigger signal
            signals.saving.send(obj)

            # Save file
            temporary_file = get_or_create_temporary_file(obj)

            if not self.verify_file(temporary_file):
                os.remove(temporary_file)
                msg = "file verification failed"
                log.warning("Chunked uploader (%d) (%d): %s" %
                            (request.user.pk, obj.pk, msg))
                return HttpResponse(
                    msg, status=status.HTTP_415_UNSUPPORTED_MEDIA_TYPE)

            log.debug(
                "Chunked uploader (%d) (%d): saving object to temporary file %s"
                % (request.user.pk, obj.pk, temporary_file))

            try:
                # Context manager closes the handle (the original leaked it).
                # NOTE(review): the file is opened in text mode; 'rb' may be
                # required for binary uploads — confirm against verify_file.
                with open(temporary_file) as tmp_fh:
                    getattr(obj, self.get_file_field_name()).save(
                        self.get_upload_path_function()(
                            obj, self.get_cached_property("name", obj)),
                        File(tmp_fh))
            except Exception as e:
                # str(e) instead of e.message: BaseException has no
                # .message attribute on Python 3.
                log.error("Chunked uploader (%d) (%d): exception: %s" %
                          (request.user.pk, obj.pk, str(e)))
                os.remove(temporary_file)

                delete_kwargs = {}
                if issubclass(type(obj), SafeDeleteModel):
                    delete_kwargs['force_policy'] = HARD_DELETE
                obj.delete(**delete_kwargs)
                return HttpResponse(
                    str(e), status=status.HTTP_500_INTERNAL_SERVER_ERROR)

            signals.saved.send(obj)

            # Clean up
            os.remove(temporary_file)
            signals.finished.send(obj)

            log.debug("Chunked uploader (%d) (%d): finished" %
                      (request.user.pk, obj.pk))

        # Add upload expiry to headers
        add_expiry_header(self.get_cached_property("expires", obj), headers)

        # By default, don't include a response body
        if not constants.TUS_RESPONSE_BODY_ENABLED:
            response = HttpResponse(content_type='application/javascript',
                                    status=status.HTTP_201_CREATED)
            response = apply_headers_to_response(response, headers)
            return response

        # Create serializer
        serializer = self.get_serializer(instance=obj)

        response = HttpResponse(simplejson.dumps(serializer.data),
                                content_type='application/javascript',
                                status=status.HTTP_201_CREATED)
        response = apply_headers_to_response(response, headers)
        return response
Пример #27
0
<p>Fusce scelerisque vehicula elit, imperdiet maximus ligula dignissim non. Curabitur molestie erat ornare ex vulputate varius. Duis suscipit enim libero, non ultricies nisi porttitor eget. Proin a lacus sit amet lacus tincidunt viverra ut non ipsum. Duis semper iaculis consectetur. Praesent vel massa lacus. Praesent in malesuada ligula, in ornare tortor. Nam nec neque ipsum. Quisque vel quam nulla. Nulla viverra posuere lacus, ut faucibus sapien ultricies ut. Nullam convallis ullamcorper turpis, vitae condimentum dui ultricies et. Quisque eu venenatis turpis, tempus accumsan enim. Cras aliquet justo ante, id tempor nisl aliquet vestibulum.</p>

<p>Praesent vestibulum metus eu egestas scelerisque. Nullam tempor lectus quis nunc cursus, nec porttitor augue luctus. Quisque placerat dolor eget nulla tempus dignissim. Suspendisse mollis iaculis sem a fringilla. Proin sapien erat, gravida sed est at, bibendum mollis leo. Aenean ullamcorper nisl nisi, in sollicitudin ipsum tempus sit amet. Phasellus convallis tellus pellentesque ex malesuada, sed pulvinar orci imperdiet. Pellentesque sit amet lorem nibh. Proin arcu odio, luctus at diam at, mattis eleifend eros. Nunc vulputate enim quis semper auctor. Duis urna lacus, molestie consectetur lacus non, vulputate mattis nisl. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. Integer nec venenatis diam, sit amet rhoncus massa. Nunc a malesuada odio.
"""

# Seed the database with `num_movies` Movie rows using titles from
# `movieList` and a shared placeholder image / description.
for i in range(num_movies):
    title = movieList[i]
    # The release year is embedded at the end of the title, e.g.
    # "Some Film (1999)" -> characters [-5:-1] == "1999".
    released_date = date(int(movieList[i][-5:-1]), randint(1, 12),
                         randint(1, 28))
    try:
        print('Creating movie {0}.'.format(title))
        movie = Movie()
        movie.title = title
        # Suffix with the 1-based index so repeated titles stay unique.
        movie.slug = slugify(title) + '-' + str(i + 1)
        movie.released_date = released_date
        movie.description = description
        placeholder_path = os.path.join(os.path.dirname(settings.BASE_DIR),
                                        "root", "media_cdn", "500x500.png")
        # Use a context manager so the placeholder image handle is closed
        # instead of leaked on every iteration.
        with open(placeholder_path, 'rb') as image_file:
            movie.photo.save('{0}.png'.format(i + 1), File(image_file))
        movie.save()
        print('Movie {0} successfully created.'.format(title))

    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # are no longer swallowed; the seed loop still continues past
        # individual failures.
        print(released_date)
        print(
            'There was a problem creating the Movie: {0}.  Error: {1}.'.format(
                title, sys.exc_info()))
Пример #28
0
    def handle(self, *args, **options):
        """Download Sentinel products for an area and time window, then
        store the derived weekly band rasters and analysis outputs.

        Options used: ``user``/``password`` (SciHub credentials),
        ``date``/``week``/``year`` (time-window selectors), ``area``
        (numeric id or name) and ``clouds`` (maximal cloud cover).
        Exits with status 1 when the requested area does not exist.
        """

        user = options['user']
        passw = options['password']
        # Original chained assignment listed required_date twice; once is
        # enough.
        required_date = required_week = required_year = 0
        if options['date']:
            required_date = int(options['date'])
        if options['week']:
            required_week = int(options['week'])
        if options['year']:
            required_year = int(options['year'])
        area_name = options['area']
        clouds = options['clouds']

        self.api = SentinelAPI(user, passw,
                               'https://scihub.copernicus.eu/dhus')

        # `area` may be given either as a numeric primary key or a name.
        # The exceptions are narrowed from bare `except:` so signals like
        # KeyboardInterrupt are no longer swallowed.
        try:
            self.area = Area.objects.get(id=int(area_name))
        except (ValueError, Area.DoesNotExist):
            try:
                self.area = Area.objects.get(name=area_name)
            except Area.DoesNotExist:
                self.stdout.write(
                    self.style.ERROR(
                        'Given area <{}> does not exist'.format(area_name)))
                sys.exit(1)

        (starting_date, end_date,
         week_nr) = self._get_dates(required_year, required_week,
                                    required_date)

        products = self.get_products(starting_date,
                                     end_date,
                                     self.area,
                                     clouds=clouds)

        if not len(products.items()):
            # TODO save empty week maybe?
            self.stdout.write(
                self.style.WARNING(
                    'There is no data for given time period ' +
                    '<{start}, {end}>, '.format(start=starting_date,
                                                end=end_date) +
                    'maximal cloud cover <{cloud}%> and area <{area}>'.format(
                        area=area_name, cloud=clouds)))
            return

        # Register the scratch directory for the module-level cleanup hook.
        self.tempdir = tempfile.mkdtemp()
        _TO_BE_CLEANED.append(self.tempdir)

        self.api.download_all(products, self.tempdir)
        products_data = self.get_bands(products)
        patched_bands = self._patch_rasters(products_data)

        analysed_data = self._analyse(patched_bands)

        # Reuse the existing Week row for this date/area when present.
        if Week.objects.filter(date=starting_date,
                               area=self.area).exists():
            week = Week.objects.get(date=starting_date, area=self.area)
        else:
            week = Week(
                date=starting_date,
                area=self.area,
            )

        week.cutline = self.cutline_geom.wkt
        for band in patched_bands:
            band_key = band.lower()
            # getattr() replaces the original eval("week.{}"...): same
            # attribute lookup without compiling generated source.
            band_field = getattr(week, band_key)
            # Context manager closes the raster handle instead of leaking it.
            with open(patched_bands[band], "rb") as raster_file:
                band_field.save(os.path.basename(patched_bands[band]),
                                File(raster_file),
                                save=True)
        week.save()

        for an in analysed_data:
            at = AnalysisType.objects.get(name=an)
            if Analysis.objects.filter(week=week, type=at).exists():
                analysis = Analysis.objects.get(week=week, type=at)
            else:
                analysis = Analysis(week=week, type=at)

            # raster / image / vector are stored the same way; loop over the
            # three field names instead of repeating the save block.
            for kind in ("raster", "image", "vector"):
                path = analysed_data[an][kind]
                if path:
                    with open(path, "rb") as data_file:
                        getattr(analysis, kind).save(os.path.basename(path),
                                                     File(data_file),
                                                     save=True)

            analysis.save()

        self.stdout.write(
            self.style.SUCCESS('Successfully create data for week {}'.format(
                week.week)))
Пример #29
0
 def download_image(self, url):
     """Fetch the image at *url* and store it on ``self.image``.

     The response bytes are staged in an anonymous temporary file so the
     storage backend can stream them; the last path segment of the URL
     becomes the stored filename.
     """
     payload = requests.get(url).content
     with tempfile.TemporaryFile() as staging:
         staging.write(payload)
         filename = url.rsplit('/', 1)[-1]
         self.image.save(filename, File(staging))
Пример #30
0
    def _process_test(
        self,
        test,
        order,
        names_re,
        indir,
        outdir,
        collected_ins,
        scored_groups,
        outs_to_make,
    ):
        """Responsible for saving test in and out files,
        setting test limits, assigning test kinds and groups.

        :param test: Test name.
        :param order: Test number.
        :param names_re: Compiled regex to match test details from name.
               Should extract basename, test name,
               group number and test type.
        :param indir: Directory with tests inputs.
        :param outdir: Directory with tests outputs.
        :param collected_ins: List of inputs that were generated,
               not taken from archive as a file.
        :param scored_groups: Accumulator for score groups.
        :param outs_to_make: Accumulator for name of output files to
               be generated by model solution.
        :return: Test instance or None if name couldn't be matched.
        """
        match = names_re.match(test)
        if not match:
            if test.endswith('.in'):
                raise ProblemPackageError(_("Unrecognized test: %s") % (test))
            return None

        # Examples for odl0ocen.in:
        basename = match.group(1)  # odl0ocen
        name = match.group(2)  # 0ocen
        group = match.group(3)  # 0
        suffix = match.group(4)  # ocen

        instance, created = Test.objects.get_or_create(
            problem_instance=self.main_problem_instance, name=name)

        inname_base = basename + '.in'
        inname = os.path.join(indir, inname_base)
        outname_base = basename + '.out'
        outname = os.path.join(outdir, outname_base)

        if test in collected_ins:
            self._save_to_field(instance.input_file, collected_ins[test])
        else:
            # Context manager closes the handle instead of leaking it.
            with open(inname, 'rb') as in_file:
                instance.input_file.save(inname_base, File(in_file))

        if os.path.isfile(outname):
            # BUG FIX: previously written as File(open(outname), 'rb'),
            # which opened the file in *text* mode and passed 'rb' to
            # File() as its name argument. The mode belongs to open().
            with open(outname, 'rb') as out_file:
                instance.output_file.save(outname_base, File(out_file))
        else:
            # No reference output yet; schedule it for generation by the
            # model solution.
            outs_to_make.append((
                _make_filename_in_job_dir(self.env, 'out/%s' % (outname_base)),
                instance,
            ))

        if group == '0' or 'ocen' in suffix:
            # Example tests
            instance.kind = 'EXAMPLE'
            instance.group = name
        else:
            instance.kind = 'NORMAL'
            instance.group = group
            scored_groups.add(group)

        # Limits are only (re)assigned when the config provides them.
        time_limit = self._get_time_limit(created, name, group)
        if time_limit:
            instance.time_limit = time_limit

        memory_limit = self._get_memory_limit(created, name, group)
        if memory_limit:
            instance.memory_limit = memory_limit

        instance.order = order
        instance.save()
        return instance