class CumulusTests(TestCase):
    def setUp(self):
        "Set up tiny files to test with."
        image_path = os.path.join(os.path.dirname(__file__), "image_300x200.gif")
        document_path = os.path.join(os.path.dirname(__file__), "text_file.txt")
        self.image = ImageFile(open(image_path, "rb"))
        self.document = File(open(document_path, "r"))

    def test_file_api(self):
        """
        Make sure we can perform the following using the Django File API:

        - Upload the test files
        - Access common file attributes
        - Delete the test files
        """
        self.thing = Thing.objects.create(image=self.image, document=self.document)
        self.assertEqual(self.thing.image.width, 300)
        self.assertEqual(self.thing.image.height, 200)
        self.assertEqual(self.thing.image.size, 976)
        self.assertTrue(
            "cdn.cloudfiles.rackspacecloud.com" in self.thing.image.url,
            "URL is not a valid Cloud Files CDN URL."
        )
        self.assertEqual(self.thing.document.size, 31)
        self.assertTrue(
            "cdn.cloudfiles.rackspacecloud.com" in self.thing.document.url,
            "URL is not a valid Cloud Files CDN URL."
        )
        self.thing.delete()

    def tearDown(self):
        self.document.close()
        self.image.close()
def get_placeholder_image(self):
    storage_class = get_storage_class(settings.STATICFILES_STORAGE)
    storage = storage_class()
    placeholder = storage.open(settings.PLACEHOLDER_PATH)
    image = ImageFile(placeholder)
    image.storage = storage
    return image
def insertPerson(self, pers):
    try:
        pers.personID
    except AttributeError:
        # pers is not a valid Person object, maybe an IMDbPY error
        pers = DumbPerson()
    act = Person.objects.filter(externalid=pers.personID, source='I')
    if act:
        #print "ip ", act
        return act[0]
    if not QUICK_INSERTS_IMDB:
        try:
            self.con.update(pers)
        except:
            raise
    act = Person(
        name=pers.get('name', "")[:255],
        namesort=pers.get('canonical name', "")[:255],
        birthstr=pers.get('birth date', None),
        deathstr=pers.get('death date', None),
        bio=listdilemma(pers.get('mini biography', None)),
        birthname=pers.get('birth name', "")[:255],
        externalid=pers.personID,
        source='I',
        imgurl=pers.get('headshot', None),
    )
    if pers.get('headshot', None):
        ph = urllib.urlopen(pers.get('headshot'))
        ih = ImageFile(ph)
        ih.size = int(ph.info().dict['content-length'])
        ext = CONTENT_EXTENSION.get(ph.info().dict['content-type'], "")
        act.img.save("%s%s" % (str(pers.personID), ext), ih)
    act.save()
    return act
def _image(self):
    """Returns the current avatar or the "unknown.jpg" stock image."""
    if self.avatar:
        return self.avatar
    path = os.path.join('chords', 'img', 'unknown.jpg')
    f = ImageFile(open(os.path.join(settings.STATIC_ROOT, path), 'rb'))
    f.url = os.path.join(settings.STATIC_URL, path)
    return f
def create_image_model(model: models.Model, filename: str, slug: str):
    image_file = ImageFile(open_file(filename))
    image_model = Image.objects.create(
        model=model,
        slug=slug,
        image=image_file
    )
    image_file.close()
    return image_model
def entryupload_post_save(sender, instance, **kwargs):
    '''Creates an Entry instance and deletes the EntryUpload instance.'''
    if instance.name:
        name = instance.name
    else:
        name, ext = os.path.splitext(os.path.basename(instance.image.path))
    entry = Entry(name=name, owner=instance.owner, category=instance.category)
    entry.save()
    img = ImageFile(open(instance.image.path))
    img.content_type = content_type(os.path.basename(img.name))
    entry.image = img
    instance.delete()
def get_placeholder_image(self):
    if not hasattr(self.__class__, '_PlaceholderImageMixin__placeholder_image_cache'):
        path = finders.find(self.PLACEHOLDER_IMAGE_PATH)
        if path:
            location, file_name = os.path.split(path)
            fs = FileSystemStorage(location=location)
            image = ImageFile(fs.open(file_name))
            image.storage = fs
            self.__class__.__placeholder_image_cache = image
        else:
            self.__class__.__placeholder_image_cache = None
    return self.__class__.__placeholder_image_cache
def insertTitle (self, sresult): rerun = re.compile("\d+") runtime = listdilemma(sresult.get('runtimes',None)) if runtime: allrun = rerun.findall(runtime) runtime = allrun[0] if allrun else runtime #print sresult # if type(sresult.get('runtimes',[None])[0]) == types.IntType: # runtime = sresult['runtimes'][0] # else: # runtime = None #print sresult.get('color info') title = sresult.get('episode title',"").encode('utf-8') if not( sresult.get('episode title',"") and sresult['kind']=='episode') else sresult.get('title',"??").encode('utf-8') t = Title(title = sresult.get('title',"??").encode('utf-8')[:255], year = listdilemma(sresult.get('year', None)), type = listdilemma(sresult.get('kind', None)), titlesort = listdilemma(sresult.get('canonical title', ""))[:255], #aka = "; ".join(sresult.get('akas', None)), externalid = sresult.movieID, source = 'I', plot = listdilemma(sresult.get('plot', None)), plotoutline = sresult.get('plot outline', None), rating = floatdilemma(listdilemma(sresult.get('rating', None))), runtime = runtime, color = listdilemma(sresult.get('color info', None)) , coverurl = listdilemma(sresult.get('cover url', None)) ) if listdilemma(sresult.get('cover url', None)): tc = urllib.urlopen(listdilemma(sresult.get('cover url', None))) ic = ImageFile(tc) ic.size = int(tc.info().dict['content-length']) ext = CONTENT_EXTENSION.get(tc.info().dict['content-type'], "") t.cover.save("%s%s"%(str(sresult.movieID), ext), ic) try: t.save() except : #print sresult.values raise if 'akas' in sresult.keys(): self._insertAkas(t, sresult['akas']) if 'languages' in sresult.keys(): self._insertLanguage(t, sresult['languages']) if 'country' in sresult.keys(): self._insertCountry(t, sresult['country']) if 'genres' in sresult.keys(): self._insertGenre(t, sresult['genres']) return t
def create_entry_if_not_present(old_photo):
    if not len(photos_m.Entry.objects.filter(name=old_photo.title)):
        # get or create owner
        first_name, sep, last_name = old_photo.photographer.rpartition(' ')
        owner = create_user_if_not_present(first_name, last_name)
        # get or create category
        contest, category = old_photo.tags.split(' ', 1)
        category = create_category_if_not_present(contest, category)
        entry = photos_m.Entry(name=old_photo.title, owner=owner, category=category)
        entry.save()
        img = ImageFile(open(old_photo.image.path))
        img.content_type = content_type(os.path.basename(img.name))
        entry.image = img
        return entry
    else:
        return photos_m.Entry.objects.filter(name=old_photo.title)
def mce_upload(request):
    if request.method == 'POST':
        try:
            print('image')
            file = request.FILES['image']
            print('convert')
            image = ImageFile(file)
            print('path')
            path = default_storage.save('article_images/%s' % image.name, ContentFile(image.read()))
            print('data')
            data = {
                'message': 'success',
                'path': os.path.join(settings.MEDIA_URL, path),
                'width': image.width,
                'height': image.height
            }
        except IOError:
            data = {
                'message': 'error'
            }
    else:
        data = {
            'message': 'fail'
        }
    return createJSONResponse(data)
def get_context_data(self, **kwargs):
    context = super(FlexibleImageTestView, self).get_context_data(**kwargs)
    filename1 = os.path.join(settings.MEDIA_ROOT, "responsive-test-image-1.jpg")
    fd1 = open(filename1)
    image_file1 = ImageFile(fd1)
    filename2 = os.path.join(settings.MEDIA_ROOT, "responsive-test-image-2.jpg")
    fd2 = open(filename2)
    image_file2 = ImageFile(fd2)
    # XXX Change this for your environment.
    image_file1.url = "/static/responsive-test-image-1.jpg"
    image_file2.url = "/static/responsive-test-image-2.jpg"
    context = {
        "image_1": image_file1,
        "image_2": image_file2,
    }
    return context
def __getattr__(self, variation):
    if variation in IMAGE_VARIATIONS:
        name = self._variation_name(variation)
        try:
            image = ImageFile(self.storage.open(name), name)
            try:
                image.path = self.storage.path(image.name)
            except NotImplementedError:
                image.path = None
            try:
                image.url = self.storage.url(image.name)
            except NotImplementedError:
                image.url = None
            return image
        except IOError:
            pass
    raise AttributeError
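A small, hedged illustration of the attribute-style lookup above; the names here are assumptions (an instance `rendition` of the class defining `__getattr__`, and 'thumbnail' assumed to be listed in IMAGE_VARIATIONS):

# Illustrative only: attribute access falls through to __getattr__ above.
variant = rendition.thumbnail        # an ImageFile opened from self.storage
print(variant.url, variant.path)     # either may be None if the backend lacks support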
def saveImage(tempImage, nameOfMedia):
    """saves the raw content of files"""
    # open in binary mode since raw image bytes are written
    fOrig = open(settings.MEDIA_DIR + nameOfMedia + ".jpg", 'wb')
    imgTemp = ImageFile(fOrig)
    imgTemp.write(tempImage)
    imgTemp.flush()
    imgTemp.close()
def save(self, commit=True):
    instance = forms.ModelForm.save(self, False)
    dir_path = self.cleaned_data.get('directory_path')
    dir_category = self.cleaned_data.get('directory_category')
    dir_date_taken = self.cleaned_data.get('date_taken')
    dir_description = self.cleaned_data.get('description')
    dir_category_ids = list(dir_category.values_list('id', flat=True))
    dir_file_list = os.listdir(dir_path)
    if not dir_date_taken:
        dir_name = os.path.basename(dir_path)
        re_pattern = r'(\d{4})[-](\d{2})[-](\d{2})'
        search_in_dirname = re.search(re_pattern, dir_name)
        if search_in_dirname:
            dir_date_taken = search_in_dirname.group()
    images_added = []
    for f in dir_file_list:
        filename, file_extension = os.path.splitext(f)
        if file_extension == '.jpg':
            image_form_data = {
                'name': filename,
                'description': dir_description,
                'date_taken': dir_date_taken,
                'image_category': dir_category_ids,
            }
            image_data = ImageFile(open(os.path.join(dir_path, f), 'rb'))
            image_form_file_data = {'image': SimpleUploadedFile(filename, image_data.read())}
            image_form = ImageForm(data=image_form_data, files=image_form_file_data)
            if image_form.is_valid():
                new_dir_image = image_form.save()
                images_added.append(new_dir_image.id)
    instance.directory_related_images = images_added
    if commit:
        instance.save()
    return instance
def makeThumbnail(tempImage, nameOfMedia):
    """makes thumbnails of max size 200x1000 from raw content and save it"""
    #imgTemp = Image.open("/opt/sources/code/pinry/media/pins/pin/"+nameOfMedia+".jpg")
    #imgTemp.thumbnail(MAX_THUMBNAIL_SIZE , Image.ANTIALIAS )
    #imgTemp.save("/opt/sources/code/pinry/media/pins/pin/"+nameOfMedia+".200x1000.jpg", "JPEG")
    # open in binary mode since raw image bytes are written
    fThumb = open(settings.MEDIA_DIR + nameOfMedia + ".200x1000.jpg", 'wb')
    imgTemp = ImageFile(fThumb)
    imgTemp.write(tempImage)
    imgTemp.flush()
    imgTemp.close()
def save_candidate_image_locally(c, url):
    current_site = Site.objects.get_current()
    img_temp = NamedTemporaryFile(delete=True)
    try:
        downloaded_image = urllib2.urlopen(url)
    except:
        print c.name, c.election
        # c.image = None
        # c.save()
        return
    d = downloaded_image.read()
    img_temp.write(d)
    img_temp.flush()
    i = ImageFile(img_temp.file)
    storage = get_storage_class()()
    data = i.read()
    extension = guess_extension(downloaded_image.info().type)
    file_name = u'candidatos/' + c.id + u'-' + c.election.slug + extension
    path = default_storage.save(file_name, i)
    url = u'http://' + current_site.domain + '/cache/' + file_name
    c.image = url
    c.save()
class Coffee(models.Model):
    name = models.CharField(max_length=128)
    description = models.CharField(max_length=1024)
    one_pound_price = models.DecimalField(max_digits=5, decimal_places=2)
    two_pound_price = models.DecimalField(max_digits=5, decimal_places=2)
    five_pound_price = models.DecimalField(max_digits=5, decimal_places=2)
    image = models.ImageField(upload_to='images')
    active = models.BooleanField(default=True)

    class BadSizeException(Exception):
        pass

    def __unicode__(self):
        return self.name

    def __str__(self):
        return self.__unicode__()

    def value(self, pounds, quantity):
        if pounds == 1:
            return quantity * self.one_pound_price
        elif pounds == 2:
            return quantity * self.two_pound_price
        elif pounds == 5:
            return quantity * self.five_pound_price
        else:
            raise Coffee.BadSizeException

    def set_image(self, image_data):
        iof = BytesIO(image_data)
        self.image = ImageFile(iof)
        self.image.name = os.path.join('images', '%s.png' % self.name)

    def image_data_url(self):
        self.image.open()
        b64 = b64encode(self.image.read()).decode('utf-8')
        self.image.close()
        return 'data:image/png;base64,%s' % b64
def _render(self, context):
    storage = SafeStaticFilesStorage()
    file_ = self.file_.resolve(context)
    absolute_path = finders.find(file_)
    try:
        file_ = ImageFile(staticfiles_storage.open(file_))
    except:
        file_ = ImageFile(open(absolute_path))
    file_.storage = storage
    geometry = self.geometry.resolve(context)
    options = {}
    for key, expr in self.options:
        noresolve = {'True': True, 'False': False, 'None': None}
        value = noresolve.get(text_type(expr), expr.resolve(context))
        if key == 'options':
            options.update(value)
        else:
            options[key] = value
    thumbnail = get_thumbnail(file_, geometry, **options)
    if not thumbnail or (isinstance(thumbnail, DummyImageFile) and self.nodelist_empty):
        if self.nodelist_empty:
            return self.nodelist_empty.render(context)
        else:
            return ''
    if self.as_var:
        context.push()
        context[self.as_var] = thumbnail
        output = self.nodelist_file.render(context)
        context.pop()
    else:
        output = thumbnail.url
    return output
def add_user(username, firstName, lastName, bio, location):
    print("New user: "******"*****@*****.**")[0]
    newUser.set_password("Test")
    newUser.save()
    newUserProfile = UserProfile.objects.get_or_create(
        user=newUser,
        bio=bio,
        location=location,
    )[0]
    newUserProfile.age = random.randint(5, 80)
    newUserProfile.sellCount = random.randint(0, 50)
    newUserProfile.profilePicture.save(
        "default",
        ImageFile(open(settings.MEDIA_ROOT + "/population/default.jpg", 'rb')))
    newUserProfile.save()
    return newUserProfile
def make_thumbnail(img_path, width=200, height=200):
    image = Image.open(img_path)
    img_ext = img_path.split('.')[-1]
    pillow_ext = {
        'png': 'PNG',
        'jpg': 'JPEG',
        'jpeg': 'JPEG',
    }.get(img_ext, None)
    if img_ext != 'png':
        for orientation in ExifTags.TAGS.keys():
            if ExifTags.TAGS[orientation] == 'Orientation':
                break
        if image._getexif() is not None:
            exif = dict(image._getexif().items())
            if exif[orientation] == 3:
                image = image.rotate(180, expand=True)
            elif exif[orientation] == 6:
                image = image.rotate(270, expand=True)
            elif exif[orientation] == 8:
                image = image.rotate(90, expand=True)
    image.thumbnail((width, height), Image.ANTIALIAS)
    in_memory_img = BytesIO()
    image.save(in_memory_img, pillow_ext, quality=90)
    return ImageFile(in_memory_img, name=('thumbnail.' + img_ext))
def create_from_request(cls, data: QueryDict, files: {}, donation_page, image_key="DImage") -> [dict]:
    """Takes a request.data and builds a MediaImage instance from the json blob data found in the files dict.

    Expected Schemas:
        data = [{"uuid": str, "type": "DImage", "content": {}, n...]
        files = {"str(<UUID>)": Blob}

    :param data: A copy of the request POST data.
    :param files: A list of dicts. Key=UUID in the request.data for the image element
    :param donation_page: the page that these images are referenced on.
    :param image_key: The key that identifies an Image element.
    :return: The data["sidebar_elements"] updated with the storage locations for the image and the thumbnail.
    """
    ## TODO: Duplicate detection
    mutable = data.copy()
    if sbe := mutable.get("sidebar_elements"):
        elements = json.loads(sbe)
        for index, element in enumerate(elements):
            if element.get("type") == image_key:
                if f := files.get(element.get("uuid"), None):
                    img = ImageFile(f)
                    thumb = get_thumbnail(img)
                    media_image = cls(
                        spa_key=element.get("uuid"),
                        image=img,
                        thumbnail=thumb,
                        page_id=DonationPage.objects.get(pk=donation_page),
                        image_attrs={},
                    )
                    media_image.save()
                    elements[index] = media_image.get_as_dict()
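To make the docstring's schema concrete, a hypothetical payload might look like this (the UUID value and the blob key are invented for illustration, and `request` stands in for the incoming Django request):

from django.http import QueryDict

# Hypothetical payload matching the docstring above (values are made up):
data = QueryDict(mutable=True)
data["sidebar_elements"] = '[{"uuid": "3f1c0b", "type": "DImage", "content": {}}]'
files = {"3f1c0b": request.FILES["3f1c0b"]}  # the uploaded blob keyed by the element UUID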
def test_ref_image_is_none(self):
    """
    Check no error is raised when image data is missing
    """
    with open("snapshotServer/tests/data/test_Image1.png", 'rb') as imgFile:
        img = ImageFile(imgFile)
        s1 = Snapshot(stepResult=StepResult.objects.get(id=1), refSnapshot=None, pixelsDiff=None)
        s1.save()
        s2 = Snapshot(stepResult=StepResult.objects.get(id=2), refSnapshot=None, pixelsDiff=b'')
        s2.save()
        s2.image.save("img", img)
        s2.save()

        DiffComputer.get_instance().compute_now(s1, s2)

        # something has been computed
        self.assertIsNone(s2.pixelsDiff)
        self.assertEqual(s2.refSnapshot, s1, "refSnapshot should have been updated")
def thumbnail(self, size=None, **kwargs):
    size = size or self.settings['thumbnail_size']
    if not size:
        raise Exception('No thumbnail size supplied')
    attrs = {
        'format': self._thumbnail_file_format(),
        'upscale': False,
    }
    attrs.update(self.settings['thumbnail_attrs'])
    attrs.update(kwargs)
    all_attrs = {
        'size': size
    }
    all_attrs.update(attrs)
    key = hash(frozenset(all_attrs))
    if key not in self._thumbnails:
        #self._thumbnails[key] = get_thumbnail(self._get_local_path_or_file(), size, **attrs)
        try:
            self._thumbnails[key] = get_thumbnail(self.local_path, size, **attrs)
        except Exception:
            return SImageFile(ImageFile(open(self.local_path, 'r')))
    return self._thumbnails[key]
def create(self, request, *args, **kwargs):
    """Create Video object when user uploads."""
    user = self._get_user(kwargs.get("user_id"))
    video = request.FILES.get('video')
    if not video:
        raise ValidationError("Video is required to upload.")
    request.data.update({'title': video.name})
    verify_video(video)
    if not request.data.get('video_type'):
        request.data['video_type'] = choices.GENERIC
    serializer = self.serializer_class(data=request.data)
    serializer.is_valid(raise_exception=True)
    serializer.validated_data.update({
        'content_object': user,
    })
    try:
        video = Video.objects.create(**serializer.validated_data)
        video_path = "{0}/{1}".format(settings.MEDIA_ROOT, video.video)
        thumbnail_path = os.path.join(settings.MEDIA_ROOT, "videos")
        ffmpeg = VIDEO_THUMBNAIL_GENERATOR.format(video_path, thumbnail_path)
        try:
            subprocess.call(ffmpeg, shell=True)
        except Exception as e:
            raise e
        temp_file = open("{0}/thumb01.jpg".format(thumbnail_path), 'rb')
        video.video_thumbnail.save(
            "thumb_for_{0}.jpg".format(video.title.split('.')[0]),
            ImageFile(temp_file))
        os.remove("{0}/thumb01.jpg".format(thumbnail_path))
    except Exception as e:
        raise e
    serialized_video = self.get_serializer(video)
    return Response(serialized_video.data, status=status.HTTP_201_CREATED)
def setUp(self):
    """
    Sets up a test individual prize for the rest of the tests.
    This prize is not saved, as the round field is not yet set.
    """
    self.saved_rounds = settings.COMPETITION_ROUNDS
    start = datetime.date.today()
    end = start + datetime.timedelta(days=7)

    settings.COMPETITION_ROUNDS = {
        "Round 1": {
            "start": start.strftime("%Y-%m-%d"),
            "end": end.strftime("%Y-%m-%d"),
        },
    }

    # Create a test user
    self.user = User.objects.create_user("user", "*****@*****.**", password="******")

    # Set up raffle deadline
    self.deadline = RaffleDeadline(
        round_name="Round 1",
        pub_date=datetime.datetime.today() - datetime.timedelta(hours=1),
        end_date=datetime.datetime.today() + datetime.timedelta(days=5),
    )
    self.deadline.save()

    image_path = os.path.join(settings.PROJECT_ROOT, "fixtures", "test_images", "test.jpg")
    image = ImageFile(open(image_path, "r"))
    self.prize = RafflePrize(
        title="Super prize!",
        description="A test prize",
        image=image,
        value=5,
        deadline=self.deadline,
    )
def fetch_hut_images(): for hpage in HutPage.objects.all(): if hpage.link_url: try: r = requests.get(hpage.link_url, timeout=settings.API_TIMEOUT) except requests.exceptions.RequestException as e: logger.exception(str(e)) else: soup = BeautifulSoup(r.content, 'html5lib') a_tag = soup.find_all("a", {"class": "fancybox-gallery"}) if a_tag: img_tag = a_tag[0].find_all("img") if img_tag: img_url = 'http://www.doc.govt.nz/%s' % img_tag[0].get( 'src') logger.debug("Hut %s using img %s from HTML body.", str(hpage.pk), img_url) else: page = metadata_parser.MetadataParser(url=hpage.link_url) img_url = page.get_metadata_link('image') logger.debug("Hut %s using img %s from HTML meta", str(hpage.pk), img_url) if img_url: try: response = requests.get(img_url, timeout=settings.API_TIMEOUT) except requests.exceptions.RequestException as e: logger.exception(str(e)) image = Image(title=hpage.title, file=ImageFile(BytesIO(response.content), name=img_url.split('/')[-1])) image.save() hpage.meta_image = image hpage.save() else: logger.debug("No img found for hut %s", str(hpage.pk))
def create_user(request):
    """create user"""
    data = json.loads(request.body.decode())
    data.update({
        "likes": 0,
        "dislikes": 0,
        "profile_image": ImageFile(open('/home/cuzkov/www.m-skull.ru/users/unnamed.jpg', 'rb')),
        "status": ""
    })
    serializes_user = UserSerializer(data=data)
    if (serializes_user.is_valid(raise_exception=True)):
        if User.objects.last() is not None:
            buffer_id = User.objects.last().id + 1
        else:
            buffer_id = 1
        response = requests.post(url=subscribers_create_user, json={"user": {"id": buffer_id}})
        if json.loads(response.text)['response'] == 'success':
            serializes_user.save()
            return Response({"response": "Success create user"})
        else:
            return Response({"response": "friend service error, try again"})
    if json.loads(response.text)['response'] != 'success':
        response_str = 'server error, try again'
    else:
        response_str = 'bad data'
    return Response({
        "response": response_str,
        "needs": serializes_user.errors
    })
def post(self, request, pk):
    try:
        target = find_target(request, int(pk))
    except Target.DoesNotExist:
        return HttpResponseNotFound('Target %s not found' % pk)
    except ValueError as e:
        return HttpResponseForbidden(str(e))

    # Request body is the file
    f = io.BytesIO(request.body)

    # Verify that this is a valid image
    try:
        i = Image.open(f)
        i.verify()
    except IOError as e:
        return HttpResponseBadRequest(str(e))
    if i.format not in ['JPEG', 'PNG']:
        return HttpResponseBadRequest(
            'Invalid image format %s, only JPEG and PNG allowed' % i.format)

    old_path = target.thumbnail.path if target.thumbnail else None
    target.thumbnail.save('%d.%s' % (target.pk, i.format), ImageFile(f))

    if old_path and target.thumbnail.path != old_path:
        # We didn't overwrite the old thumbnail, we should delete it,
        # but ignore deletion errors.
        try:
            os.remove(old_path)
        except OSError as e:
            logger.warning("Unable to delete old thumbnail: %s", e)

    return HttpResponse("Image uploaded.")
def test_thumbnails_are_generated_on_save(self):
    product = models.Product(
        name="The cathedral and the bazaar",
        price=Decimal("10.00"),
    )
    product.save()

    with open("media/product-images/3.1.0.png", "rb") as f:
        image = models.ProductImage(
            product=product,
            image=ImageFile(f, name="tctb.jpg"),
        )
        with self.assertLogs("main", level="INFO") as cm:
            image.save()

    self.assertGreaterEqual(len(cm.output), 1)
    image.refresh_from_db()

    with open(
        "media/product-thumbnails/3.1.0.png",
        "rb",
    ) as f:
        expected_content = f.read()
        assert image.thumbnail.read() == expected_content

    image.thumbnail.delete(save=False)
    image.image.delete(save=False)
def test_thumbnails_are_generated_on_save(self):
    product = models.Product(
        name="The cathedral and the bazaar",
        price=Decimal('10.00'),
    )
    product.save()

    with open('main/fixtures/the-cathedral-the-bazaar.jpg', 'rb') as f:
        image = models.ProductImage(
            product=product,
            image=ImageFile(f, name='tctb.jpg'),
        )
        with self.assertLogs('main', level='INFO') as cm:
            image.save()

    self.assertGreaterEqual(len(cm.output), 1)
    image.refresh_from_db()

    with open(
        "main/fixtures/the-cathedral-the-bazaar.thumb.jpg",
        "rb",
    ) as f:
        expected_content = f.read()
        assert image.thumbnail.read() == expected_content

    image.thumbnail.delete(save=False)
    image.image.delete(save=False)
def handle(self, *args, **options):
    # Get the only instance of Magazine Index Page
    magazine_index_page = MagazineIndexPage.objects.get()

    with open(options["file"]) as import_file:
        issues = csv.DictReader(import_file)
        issues_list = list(issues)

        for issue in tqdm(issues_list, desc="Issues", unit="row"):
            response = requests.get(issue["cover_image_url"])
            image_file = BytesIO(response.content)

            image = Image(
                title=issue["title"] + " cover image",
                file=ImageFile(image_file, name=issue["cover_image_file_name"]),
            )
            image.save()

            publication_date_tz_aware = make_aware(
                datetime.strptime(issue["publication_date"], "%Y-%m-%d"))

            import_issue = MagazineIssue(
                title=issue["title"],
                publication_date=publication_date_tz_aware,
                first_published_at=publication_date_tz_aware,
                issue_number=issue["issue_number"],
                cover_image=image,
            )

            # Add issue to site page hierarchy
            magazine_index_page.add_child(instance=import_issue)
            magazine_index_page.save()

    self.stdout.write("All done!")
def get_image(self, name):
    """
    Get one of the test images from the test data directory.
    """
    return ImageFile(open(TEST_DATA_ROOT / 'badges' / name + '.png'))
def get_test_image_file(filename='test.png'):
    f = BytesIO()
    image = PIL.Image.new('RGB', (640, 480), 'white')
    image.save(f, 'PNG')
    return ImageFile(f, name=filename)
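A minimal usage sketch for a helper like the one above; the `Photo` model and its `image` ImageField are hypothetical:

# Hypothetical test using the in-memory PNG generated by get_test_image_file().
def test_photo_dimensions(self):
    photo = Photo.objects.create(image=get_test_image_file('sample.png'))
    self.assertEqual((photo.image.width, photo.image.height), (640, 480))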
def sync(self, *args, **kwargs):
    try:
        post_type = PostType.objects.get(post_type_slug=self.type)
    except:
        post_type = PostType.objects.create(post_type_slug=self.type, post_type_name=self.type)

    try:
        post = Post.objects.get(slug=self.slug, post_type=post_type)
    except Post.DoesNotExist:
        post = Post.objects.create(title=self.title, slug=self.slug, content=self.content,
                                   content_rendered=self.content, pub_date=self.date,
                                   publication_date=self.date, post_type=post_type)

    self.post = post
    self.post.author = self.author.sync()

    # sync categories
    for category in self.categories:
        category.sync()
        self.post.category.add(category.category)

    # sync tags
    for tag in self.tags:
        tag.sync()
        self.post.tags.add(tag.tag)

    if self.media_content.source:
        ct = ContentType.objects.get_for_model(self.post)
        mc = MediaContent.objects.filter(content_type=ct, object_pk=self.post.pk,
                                         title=self.media_content.title)
        # Only attach an image if the post does not already have one associated
        if not mc:
            try:
                # download the associated image
                media_file = self.media_content.source.download()
            except urllib2.HTTPError:
                pass
                #raise DjblogImporterException("Encoding error while downloading the file")
            else:
                try:
                    m = MediaContent(content_type=ct, object_pk=self.post.pk,
                                     title=self.media_content.title)
                    print u"Attaching image {0} to post {1}".format(media_file, self.post.slug)
                    m.content.save(media_file, ImageFile(open(media_file, 'r')))
                    log.info("Image loaded/updated %s", media_file)
                    log.info("Removing temporary file %s", media_file)
                    try:
                        os.remove(media_file)
                    except (OSError, ValueError):
                        log.warning("ERROR removing temporary file %s", media_file)
                        pass
                except IOError:
                    log.info("IOError")
                except DjblogImporterException:
                    pass

    self.post.save()
    return self.post
def get_magnetic_data(latitude, longitude, year, month, day, hour, minute, entry_time, name, e_datetime): connection = psycopg2.connect(user="******", password="******", host="1.7.151.13", port="5432", database="cosmosis") regions = getRegions(connection) oobs = [] for region in regions: region_id = region[0] region_name = region[1] region_api = region[2] obs = getObs(connection, region_id, year) for ob in obs: code = ob[0] ob_name = ob[1] year = ob[2] obj_id = ob[3] lat = float(ob[4]) if (float(ob[5]) > 180): lon = float(ob[5]) - 360 else: lon = float(ob[5]) oobs.append([code, ob_name, year, obj_id, lat, lon]) code, obname = get_obs(oobs, latitude, longitude) datass, hour_data = getData(year, month, day, hour, minute, code) hour_data = pd.DataFrame( hour_data, columns=['DateTime', 'Xn(T)', 'Yn(T)', 'Zn(T)', 'Fn(T)', 'code']) # print(hour_data.head(10)) # Plot Data x_axis = hour_data['DateTime'] x = [] for i in x_axis: x.append(datetime.fromisoformat(i)) y = [] labels = ['Xn(T)', 'Yn(T)', 'Zn(T)', 'Fn(T)'] for i in labels: temp = hour_data[i] temp = temp.astype('float64') y.append(temp) fig, ax = plt.subplots(nrows=4, ncols=1) date_format = mpl_dates.DateFormatter('%H:%M') st = fig.suptitle('Data from ' + obname + ' observatory') # st.set_y(1.01) for i in range(4): ax[i].plot(x, y[i], linewidth=1) ax[i].plot(datetime.fromisoformat(datass[0]), datass[i + 1], 'ro') ax[i].text(datetime.fromisoformat(datass[0]), datass[i + 1], " - " + labels[i] + "=" + str(datass[i + 1])) ax[i].xaxis.set_major_formatter(date_format) ax[i].set_ylabel(labels[i], rotation=0, labelpad=30, fontsize=15) plt.xlabel('Observation time (within the hour)') fig.set_size_inches(12, 8) fig.tight_layout() # plt.savefig('MagneticXYZ.jpg') mag = Magnetic_Data() mag.lat = latitude mag.lon = longitude mag.entry_time = entry_time mag.name = name mag.datetime = e_datetime mag.X = datass[1] mag.Y = datass[2] mag.Z = datass[3] mag.F = datass[4] mag.area_code = str(datass[5]) mag.observatory = str(obname) file_name = "MagneticXYZ.png" figure = io.BytesIO() plt.savefig(figure, format="png") content_file = ImageFile(figure) mag.graph.save(file_name, content_file)
def set_image(self, image_data):
    iof = BytesIO(image_data)
    self.image = ImageFile(iof)
    self.image.name = os.path.join('images', '%s.png' % self.name)
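A brief usage sketch for set_image on the Coffee model shown earlier; the PNG path is illustrative, and the model's other required fields are omitted for brevity:

# Illustrative only: read raw PNG bytes and attach them via set_image().
with open('house-blend.png', 'rb') as fh:
    coffee = Coffee(name='House Blend')
    coffee.set_image(fh.read())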
def generate_predictions(self, user): df = pd.read_csv('{}/data/disease.csv'.format(self.BASE_DIR)) df = df.drop(columns=['location']) if self.disease == 'Measles': model = load_model('{}/data/measlesmodel.h5'.format(self.BASE_DIR)) elif self.disease == 'Malaria': model = load_model('{}/data/malariamodel.h5'.format(self.BASE_DIR)) else: print("Invalid model") df.Month = pd.to_datetime(df.Date) df = df.set_index("Date") train, test = df[:-8], df[-8:] scaler = MinMaxScaler() scaler.fit(train) train = scaler.transform(train) test = scaler.transform(test) n_input = 8 n_features = 1 pred_list = [] batch = train[-n_input:].reshape((1, n_input, n_features)) for i in range(n_input): pred_list.append(model.predict(batch)[0]) batch = np.append(batch[:, 1:, :], [[pred_list[i]]], axis=1) df.index = pd.to_datetime(df.index) from pandas.tseries.offsets import DateOffset add_dates = [df.index[-3] + DateOffset(months=x) for x in range(0, 10)] future_dates = pd.DataFrame(index=add_dates[1:], columns=df.columns) df_predict = pd.DataFrame(scaler.inverse_transform(pred_list), index=future_dates[-n_input:].index, columns=['Prediction']) df_proj = pd.concat([df, df_predict], axis=1) plt.figure(figsize=(20, 10)) plt.plot(df_proj.index, df_proj['Number of Cases']) plt.plot(df_proj.index, df_proj['Prediction'], color='r') plt.legend(loc='best', fontsize='xx-large') plt.xticks(fontsize=18) plt.yticks(fontsize=16) plt.xlabel("Date",fontsize=20) plt.ylabel("No. of cases",fontsize=20) plt.title('{} cases in {}'.format(self.disease,self.location),fontsize=28) # plt.show() plt.savefig('disease.jpg') # plot_data = [ # go.Scatter( # x=df_proj.index, # y=df_proj['Number of Cases'], # name='actual' # ), # go.Scatter( # x=df_proj.index, # y=df_proj['Prediction'], # name='predicted' # ) # # ] # plot_layout = go.Layout( # title='Cases Prediction' # ) # fig = go.Figure(data=plot_data, layout=plot_layout) # pyoff.iplot(fig) # # fig.write_image('{}/data/image.jpeg'.format(self.BASE_DIR)) # plt.style.use('fivethirtyeight') # # convert the dataframe to nparray # df = pd.read_csv('{}/data/location.csv'.format(self.BASE_DIR)) # df = df.tail(100) # data = df.filter(['Number of cases']) # dataset = data.values # # get the number of rows to train the model # training_data_len = math.ceil(len(dataset) * 0.8) # print(data) # # Scaling the data # scaler = MinMaxScaler(feature_range=(0, 1)) # scaled_data = scaler.fit_transform(dataset) # # scaled_data # # Create the training dataset # # create the scaled training dataset # train_data = scaled_data[0:training_data_len, :] # # Split the data into x_train and y_train datasets # x_train = [] # y_train = [] # # for i in range(60, len(train_data)): # x_train.append(train_data[i - 60:i, 0]) # y_train.append(train_data[i, 0]) # model = load_model('{}/data/model.h5'.format(self.BASE_DIR)) # # # Create the testing dataset # test_data = scaled_data[training_data_len - 60:, :] # # create the x_test and y_test datasets # x_test = [] # y_test = dataset[training_data_len:, :] # for i in range(60, len(test_data)): # x_test.append(test_data[i - 60:i, 0]) # # # convert the data to nparray # x_test = np.array(x_test) # # Reshape the data # x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], 1)) # # Get the models predicted disease cases # predictions = model.predict(x_test) # predictions = scaler.inverse_transform(predictions) # # Get the root mean squared error # rmse = np.sqrt(np.mean(predictions - y_test) ** 2) # rmse # # Plot the data # train = data[:training_data_len] # valid = 
data[training_data_len:] # valid['predictions'] = predictions # # visualize the data # plt.figure(figsize=(16, 8)) # plt.title('{} prediction for {}'.upper().format(self.disease.upper(), self.location.upper())) # plt.xlabel('date') # plt.ylabel('{} cases'.format(self.disease)) # plt.plot(train['Number of cases']) # plt.plot(valid[['Number of cases', 'predictions']]) # plt.show() figure = io.BytesIO() plt.savefig(figure, format="png") content_file = ImageFile(figure) content_file = ContentFile(figure.getvalue()) user_obj = User.objects.get(email=user) try: img_obj = Images.objects.get(user=user_obj) img_obj.image.save("image_file.png", content_file) img_obj.save() except: plot_instance = Images(user=user_obj) plot_instance.image.save("image_file.png", content_file) plot_instance.save()
def create_overview_image( video_id, source, duration, nb_img, image_width, overviewimagefilename, overviewfilename ): msg = "\ncreate overview image file" for i in range(0, nb_img): stamp = "%s" % i if nb_img == 99: stamp += "%" else: stamp = time.strftime('%H:%M:%S', time.gmtime(i)) cmd_ffmpegthumbnailer = "ffmpegthumbnailer -t \"%(stamp)s\" \ -s \"%(image_width)s\" -i %(source)s -c png \ -o %(overviewimagefilename)s_strip%(num)s.png" % { "stamp": stamp, 'source': source, 'num': i, 'overviewimagefilename': overviewimagefilename, 'image_width': image_width } # subprocess.getoutput(cmd_ffmpegthumbnailer) subprocess.run( cmd_ffmpegthumbnailer, shell=True) cmd_montage = "montage -geometry +0+0 %(overviewimagefilename)s \ %(overviewimagefilename)s_strip%(num)s.png \ %(overviewimagefilename)s" % { 'overviewimagefilename': overviewimagefilename, 'num': i } # subprocess.getoutput(cmd_montage) subprocess.run( cmd_montage, shell=True) if os.path.isfile("%(overviewimagefilename)s_strip%(num)s.png" % { 'overviewimagefilename': overviewimagefilename, 'num': i }): os.remove("%(overviewimagefilename)s_strip%(num)s.png" % {'overviewimagefilename': overviewimagefilename, 'num': i}) if check_file(overviewimagefilename): msg += "\n- overviewimagefilename :\n%s" % overviewimagefilename # Overview VTT overview = ImageFile(open(overviewimagefilename, 'rb')) image_height = int(overview.height) overview.close() image_url = os.path.basename(overviewimagefilename) image = { 'image_width': image_width, 'image_height': image_height, 'image_url': image_url } msg += create_overview_vtt( video_id, nb_img, image, duration, overviewfilename) msg += save_overview_vtt(video_id, overviewfilename) # else: msg = "overviewimagefilename Wrong file or path : "\ + "\n%s" % overviewimagefilename add_encoding_log(video_id, msg) change_encoding_step(video_id, -1, msg) send_email(msg, video_id) return msg
def get_image(name):
    """
    Get one of the test images from the test data directory.
    """
    return ImageFile(open(TEST_DATA_ROOT / 'badges' / name + '.png', mode='rb'))  # pylint: disable=open-builtin
def create_sponsor(apps, schema_editor): Sponsor(title='LFF.lv', url='https://lff.lv/', image=ImageFile(open("static/images/sponsor-1.jpg", "rb"))).save() Sponsor(title='Fédération Internationale de Football Association', url='http://www.fifa.com/futsal/index.html', image=ImageFile(open("static/images/sponsor-2.jpg", "rb"))).save() Sponsor(title='JOMA', url='http://www.joma-sport.lv/', image=ImageFile(open("static/images/sponsor-3.jpg", "rb"))).save() Sponsor(title='OPTIBET', url='http://optibet.lv', image=ImageFile(open("static/images/sponsor-4.jpg", "rb"))).save() Sponsor(title='UEFA', url='http://www.uefa.com/futsalcup/index.html', image=ImageFile(open("static/images/sponsor-5.jpg", "rb"))).save() Sponsor(title='Evolution Gaming', url='https://www.evolutiongaming.com/', image=ImageFile(open("static/images/sponsor-6.jpg", "rb"))).save() Sponsor(title='Futsal Planet', url='http://www.futsalplanet.com', image=ImageFile(open("static/images/sponsor-7.jpg", "rb"))).save() Sponsor(title='Sportacentrs', url='http://sportacentrs.com/futzals/', image=ImageFile(open("static/images/sponsor-8.jpg", "rb"))).save() Sponsor(title='Memory Water', url='http://www.memorywater.com', image=ImageFile(open("static/images/sponsor-9.jpg", "rb"))).save() Sponsor(title='Sportlex', url='https://www.sportlex.lv/lv/', image=ImageFile(open("static/images/sponsor-10.jpg", "rb"))).save() Sponsor(title='XL Print', url='http://www.xlprint.lv/', image=ImageFile(open("static/images/sponsor-11.jpg", "rb"))).save() Sponsor(title='Restorāns Armenia ', url='http://www.restoranarmenia.lv/', image=ImageFile(open("static/images/sponsor-12.jpg", "rb"))).save() Sponsor( title='Midland oil', url= 'http://www.midlandoil.lv/index.php?lng=lv&part=start&part2=&part3=&part4=', image=ImageFile(open("static/images/sponsor-13.jpg", "rb"))).save() Sponsor(title='RISEBA', url='http://www.riseba.lv/lv/', image=ImageFile(open("static/images/sponsor-14.jpg", "rb"))).save() Sponsor(title='Rīgas Futbola federācija', url='http://riga.lff.lv/', image=ImageFile(open("static/images/sponsor-15.jpg", "rb"))).save() Sponsor(title='Ghetto Games', url='https://www.ghetto.lv/', image=ImageFile(open("static/images/sponsor-16.jpg", "rb"))).save() Sponsor(title='Mercure Hotel Riga', url='http://www.mercureriga.lv/', image=ImageFile(open("static/images/sponsor-17.jpg", "rb"))).save()
def setUp(self):
    path = mommy.mock_file_jpeg
    self.fixture_img_file = ImageFile(open(path))
        member.certifications.set(certifications)
        member.save()
        members_ported += 1
    except Exception as ex:
        print(f"Failed to create member! {ex}")
        skipped_members.append(email)
        continue

    try:
        # Get the member photo and prepare it for moving to the server
        old_photo_name = f"{old_id}.jpg"
        old_photo_path = os.path.join(OLD_PHOTO_DIR, old_photo_name)
        if os.path.exists(old_photo_path):
            new_name = get_profile_pic_upload_location(member, old_photo_name)
            photo = ImageFile(open(old_photo_path, "rb"))
            member.picture.save(new_name, photo)
    except Exception as ex:
        print(f"Failed to port photo for {email}! {ex}")

    # If this user is a staffer, make the related staffer model
    try:
        staff_name = user[10]
        if staff_name:
            all_staffers.append(Staffer.objects.upgrade_to_staffer(member, staff_name))
            staffers_ported += 1
    except Exception as ex:
        print(f"Failed to make {email} into a staffer! {ex}")
        skipped_staffers.append(email)
def setUp(self):
    "Set up tiny files to test with."
    image_path = os.path.join(os.path.dirname(__file__), "image_300x200.gif")
    document_path = os.path.join(os.path.dirname(__file__), "text_file.txt")
    self.image = ImageFile(open(image_path, "rb"))
    self.document = File(open(document_path, "r"))
def save(self, *args, **kwargs): if not self.pdf: # output path output_path = os.path.join(settings.MEDIA_ROOT, 'output') # empty the output directory for entity in os.listdir(output_path): file_path = os.path.join(output_path, entity) if os.path.isfile(file_path) and not '.empty' in file_path: # don't remove .empty files os.remove(file_path) # initial save super(Edition, self).save(*args, **kwargs) # open zip, extract and close zip = ZipFile(self.output.path) zip.extractall(output_path) zip.close() # take pdf file pdf_file = File(open(os.path.join(output_path, 'file.pdf'), 'rb')) # save pdf file and edition object self.pdf.save( self.date.strftime("%Y-%m-%d.pdf"), pdf_file, save=False ) super(Edition, self).save(*args, **kwargs) pdf_file.close() # remove pdf file from output path os.remove(pdf_file.name) i = 1 for entry in os.listdir(output_path): if entry.endswith('.txt'): # create new Page object page = Page() page.edition = self page.number = i # get txt file with page text contents txt_file = open(os.path.join(output_path, entry)) page.text = txt_file.read() txt_file.close() # attach screenshot of pdf page img_file = ImageFile( open( os.path.join( output_path, 'scr-%d.jpg' % (i - 1) ), 'rb' ) ) page.screenshot.save( self.date.strftime("%Y-%m-%d.jpg")[:-4] + '-%d.jpg' % i, img_file ) img_file.close() # take pdf file pdf_page_file = File(open(os.path.join(output_path, 'file-%d.pdf' % (i - 1)), 'rb')) # save pdf file and edition object page.pdf.save( self.date.strftime("%Y-%m-%d.pdf")[:-4] + '-%d.pdf' % i, pdf_page_file ) pdf_page_file.close() # increase counter i = i + 1 # empty the output directory for entity in os.listdir(output_path): file_path = os.path.join(output_path, entity) print file_path if os.path.isfile(file_path) and not '.empty' in file_path: # don't remove .empty files os.remove(file_path) else: # super(Edition, self).save(*args, **kwargs) pass
def test_unbalanced_image(self):
    """
    Verify that setting an image with an uneven width and height raises an error.
    """
    unbalanced = ImageFile(self.get_image('unbalanced'))
    self.assertRaises(ValidationError, self.create_clean, unbalanced)
def get_image(self, name):
    """Get one of the test images from the test data directory."""
    return ImageFile(open(TEST_DATA_ROOT + name + ".png"))
def get_test_image_file():
    from django.core.files.images import ImageFile
    file = tempfile.NamedTemporaryFile(suffix='.png')
    return ImageFile(file, name=file.name)
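The helper above returns an empty placeholder file. If a test needs a decodable image instead, a variant along these lines (a sketch, assuming Pillow is available; the function name is made up) writes real PNG bytes first:

import tempfile
import PIL.Image
from django.core.files.images import ImageFile

def get_test_image_file_with_content(size=(64, 64)):
    # Sketch: write a real PNG into the temporary file so width/height can be read.
    file = tempfile.NamedTemporaryFile(suffix='.png')
    PIL.Image.new('RGB', size, 'white').save(file, 'PNG')
    file.seek(0)
    return ImageFile(file, name=file.name)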
def profile(request): u = request.session['username'] data = {} member = Member.objects.get(pk=u) if request.POST: # Get all the posted data text = request.POST['text'] country = request.POST['country'] city = request.POST['city'] workplace = request.POST['workplace'] phone = request.POST['phone'] # If a picture was uploaded, treat it... otherwise, just insert a null object if 'picture' in request.FILES: # Create a ImageFile object picture = ImageFile(request.FILES['picture']) # Check if the picture is inside the configured settings # If is not, it just delete the ImageFile object if checkPictureSize(picture) == False: picture = None flash.error( request, 'Picture dimensions not allowed! Only up to 800x600.') elif checkPictureExtension(picture) == False: picture = None flash.error(request, 'Format not allowed! Only gif, jpeg and png.') else: picture = None if member.profile: member.profile.text = text member.profile.country = country member.profile.city = city member.profile.workplace = workplace member.profile.phone = phone # If there was a uploaded picture which respects the settings if picture != None: member.profile.picture.save(request.FILES['picture'].name, picture) member.profile.save() else: profile = Profile(text=text, country=country, city=city, workplace=workplace, phone=phone, picture=picture) profile.save() member.profile = profile member.save() # Put all data which will be used on view on the data array (if there is a profile created) if member.profile: data['text'] = member.profile.text data['country'] = member.profile.country data['city'] = member.profile.city data['workplace'] = member.profile.workplace data['phone'] = member.profile.phone data['picture'] = member.profile.picture else: data['text'] = None data['country'] = None data['city'] = None data['workplace'] = None data['phone'] = None data['picture'] = None return render(request, 'social/profile.html', { 'appname': appname, 'username': u, 'data': data, 'loggedin': True })
def seed_data(apps, schema_editor): ############### # Seed Groups # ############### Permission = apps.get_model('auth', 'Permission') article_permissions = Permission.objects.filter( codename__contains='article') Group = apps.get_model('auth', 'Group') authors_group = Group.objects.create(name='Authors') authors_group.permissions.set(article_permissions) authors_group.save() ############### # Seed Author # ############### User = apps.get_model('auth', 'User') user = User.objects.create( username='******', email='*****@*****.**', first_name='Skip', last_name='Heitzig', ) #user.set_password('scarletcordconnectsbible') #user.groups.set([authors_group]) user.save() Author = apps.get_model('article', 'Author') author = Author.objects.create(user=user) ################ # Seed Article # ################ Article = apps.get_model('article', 'Article') article_path = os.path.join( settings.BASE_DIR, 'home', 'static', 'scarlet-cord-connects-bible.html') scarlet_cord = open(article_path, 'r+').readlines() scarlet_cord_html = ''.join(scarlet_cord).replace('\n', '<br>') cover_image_path = os.path.join( settings.BASE_DIR, 'article', 'static', 'media', 'scarlet-bible.jpg' ) cover_image_file = open(cover_image_path, 'r+b') cover_image = ImageFile(cover_image_file) stock_article = Article.objects.create( title='How a Scarlet Cord Connects the Bible', html=scarlet_cord_html, author=author, cover_image=cover_image, cover_image_height=cover_image.height, cover_image_width=cover_image.width, ) #################### # Seed ReadingList # #################### ReadingList = apps.get_model('article', 'ReadingList') names_and_descriptions = { 'Inspiration': 'Renew your faith with these inspirational thoughts', "Pastor's Pick": 'A selection of curated articles picked by pastors', 'Family': 'How to live a Christian life with your family', 'Diligence': "Small acts you can do every day in Christ's name", 'Holidays': 'Pause for reflection around the holidays', } article_queryset = Article.objects.filter(uuid=stock_article.uuid) for name, description in names_and_descriptions.items(): reading_list = ReadingList.objects.create( name=name, description=description, ) reading_list.articles.set(article_queryset) stock_article.reading_lists.add(reading_list) stock_article.save()
def static_image(url):
    storage_class = get_storage_class(settings.STATICFILES_STORAGE)
    storage = storage_class()
    image = ImageFile(storage.open(url))
    image.storage = storage
    return image, image.url
def server_authentication(request): if request.method == 'POST': card_id = request.POST['card_id'] try: member = Member.objects.get(card_id=card_id) last_image_name = '' # save images to /tmp folder for face_key in request.FILES: last_image_name = face_key data = request.FILES[face_key] face = ImageFile(data) face_path = 'tmp/' + str(data) if default_storage.exists(face_path): default_storage.delete(face_path) default_storage.save(face_path, face) # get result of predict list images list_predicts = face_recognize.recognition(member.recognize_label) # list_predicts = [] if len(list_predicts): last_image_name = list_predicts[0][0] # check threshold result_auth = False f_name = None for file_name, conf in list_predicts: print(conf) if conf < member.threshold: result_auth = True f_name = file_name break # publish result auth to mqtt topic /pas/mqtt/icse/auth result_auth_payload = 'OK' if result_auth else 'FAIL' mqtt.publish(const.MQTT_AUTH_TOPIC, result_auth_payload) print("ok") # get latest logs to check user in or out try: # TODO: check last log for new day, not last day last_log = Logs.objects.filter( member_id=member.id).latest('time_stamp') is_go_in = False if last_log.is_go_in else True except Logs.DoesNotExist: is_go_in = True member.is_in_lab = True if is_go_in else False member.save() # publish latest user scan to web browser latest_user_scan_payload = { 'member_name': member.name, 'state': 'Goes In' if is_go_in else 'Goes Out' } mqtt.publish(const.MQTT_LATEST_USER_SCAN, json.dumps(latest_user_scan_payload)) # save logs log = Logs( time_stamp=timezone.now(), member=member, result_auth=result_auth, is_go_in=is_go_in, ) f_name = f_name if result_auth else last_image_name file_path = os.path.join(const.TMP_FOLDER, f_name) file_data = File(open(file_path, 'rb')) log.image.save(f_name, file_data, save=True) log.save() except Member.DoesNotExist: print("member does not exist") mqtt.publish(const.MQTT_AUTH_TOPIC, 'FAIL') mqtt.publish(const.MQTT_MEMBER_DOES_NOT_EXIST, '1') return HttpResponse("POST request success") return HttpResponse("Not valid request type!")
def main(): base = "http://www.nolafront.org/pages/" req = urllib.urlopen('%sarchive.htm' % base) html = req.read() pq = PyQuery(html) ass = pq("a.style2") a_iter = ass.items() a = a_iter.next() count = 0 last_year = "2013" while count < len(ass) - 1: href = "%s%s" % ( base, a.attr('href') ) name = a.text() if len(name) > 100: name = name[:14] artists_info = a.parent().text() artists_info = artists_info[len(name)+1:] print href nq = PyQuery(urllib.urlopen(href).read()) imgs = nq.find('img') n_iter = imgs.items() img = n_iter.next() text = "" i = 0 news, created = NewsArticle.objects.get_or_create(name=name) news.save() while i < len(imgs) - 1: i += 1 img_orig = n_iter.next() src = "%s%s" % (base, img_orig.attr('src')) img_name = "%s-%d.jpg" % (name, i) if len(NewsMedia.objects.filter(news_article=news,name=img_name)) > 0: continue try: img = cStringIO.StringIO(urllib.urlopen(src).read()) img = Image.open(img) img_path = "%s/front_media/%s" % (settings.MEDIA_ROOT, img_name) portrait = float(img.size[0] / img.size[1]) < 1.0 img.save(img_path, "JPEG", quality=90, optimize=True) img = ImageFile(open(img_path)) media, created = NewsMedia.objects.get_or_create(news_article=news, full_res_image=img, portrait=portrait, name=img_name) img_orig.attr('src', "%s%s" % (settings.MEDIA_URL, media.full_res_image.name)) img.close() except: print "error on saving: %s" % src html = nq.find("body").html() if href.find("archive-december 13") >= 0: print html is_content = False lines = html.split("\n") news.text = "" for line in lines: if line.find("LINKS") >= 0: is_content = True continue if is_content: if line.find("</body>") >= 0: is_content = False continue news.text += line news.name = name news.is_old_news = True news.artists_info = artists_info news.name = name date = name.split('-') date = date[0] if date.endswith(" "): date = date.rstrip(" ") try: date.index(',') except: try: date = "%s, %s" % (date, name.split(", ")[1]) except: pass try: date = datetime.strptime(date, "%B %d, %Y") last_year = "%d" % date.year except: print 'didnt get a date 1' print date try: date = "%s, %s" % (date, last_year) date = datetime.strptime(date, "%B %d, %Y") last_year = "%d" % date.year except: print 'didnt get a date 2' date = datetime.strptime(last_year, "%Y") news.date = date news.save() count += 1 a = a_iter.next() #href = archive[0]['href'] #name = archive[0]['name'] #text = archive[0]['text'] #print text '''