def output(self, filename, file_out):
    """Store file_out's contents in the blobstore under *filename*.

    Picks a MIME type from the file extension, deletes stale blobs that
    share the same base name, and creates a new blob only when one with
    this exact filename does not already exist.
    """
    import time  # local import: only needed for the finalize-polling backoff

    content = file_out.read()
    base, ext = os.path.splitext(filename)
    # Map the extension to a content type; anything unknown is served
    # as a generic binary stream.
    if ext == ".css":
        mimetype = "text/css"
    elif ext == ".js":
        mimetype = "text/javascript"
    else:
        mimetype = "application/octet-stream"

    already_exists = False
    for info in BlobInfo.all().filter('content_type = ', mimetype):
        if info.filename == filename:
            already_exists = True
            continue
        # Clear out old blobs that share the same base name.
        if info.filename.split(".")[0] == filename.split(".")[0]:
            logging.debug("Deleting: %s", info.filename)
            info.delete()

    if not already_exists:
        logging.info("Creating: %s", filename)
        result = files.blobstore.create(
            mime_type=mimetype,
            _blobinfo_uploaded_filename=filename)
        with files.open(result, "a") as f:
            f.write(content)
        files.finalize(result)
        # get_blob_key() can return None briefly after finalize();
        # poll with a short sleep instead of busy-spinning the CPU.
        blob_key = files.blobstore.get_blob_key(result)
        while not blob_key:
            time.sleep(0.05)
            blob_key = files.blobstore.get_blob_key(result)
def returnAllJSON(self):
    """Emit JSON metadata for every blob in the blobstore.

    Each entry carries key, name, type, size and delete/serving URLs;
    image blobs additionally get 1024/240/120px serving URLs.  Honors
    an optional ?redirect= template, otherwise writes the JSON directly.
    """
    results = []
    for record in BlobInfo.all():
        blob_key = str(record.key())
        result = {
            'key': blob_key,
            'name': record.filename,
            'type': record.content_type,
            'size': record.size,
            'deleteType': 'DELETE',
            'deleteUrl': self.request.host_url + '/?key=' +
                         urllib.quote(blob_key, ''),
        }
        if IMAGE_TYPES.match(result['type']):
            try:
                result['url'] = images.get_serving_url(
                    blob_key, size=1024, crop=True, secure_url=None)
                result['thumbnailUrl'] = images.get_serving_url(
                    blob_key, size=240, crop=True, secure_url=None)
                result['thumbnailUrl2'] = images.get_serving_url(
                    blob_key, size=120, crop=True, secure_url=None)
            except Exception:
                # Could not get an image serving url; fall through to
                # the generic download URL below.
                pass
        if 'url' not in result:
            result['url'] = self.request.host_url + '/' + blob_key + '/' + \
                urllib.quote(result['name'].encode('utf-8'), '')
        results.append(result)

    s = json.dumps(results, separators=(',', ':'))
    redirect = self.request.get('redirect')
    if redirect:
        return self.redirect(str(
            redirect.replace('%s', urllib.quote(s, ''), 1)))
    # Default '' so a request without an Accept header cannot raise
    # "argument of type 'NoneType' is not iterable".
    if 'application/json' in self.request.headers.get('Accept', ''):
        self.response.headers['Content-Type'] = 'application/json'
    self.response.write(s)
def output(self, filename, file_out):
    """Store file_out's contents in the blobstore under *filename*.

    Picks a MIME type from the file extension, deletes stale blobs that
    share the same base name, and creates a new blob only when one with
    this exact filename does not already exist.
    """
    import time  # local import: only needed for the finalize-polling backoff

    content = file_out.read()
    base, ext = os.path.splitext(filename)
    # Map the extension to a content type; anything unknown is served
    # as a generic binary stream.
    if ext == ".css":
        mimetype = "text/css"
    elif ext == ".js":
        mimetype = "text/javascript"
    else:
        mimetype = "application/octet-stream"

    already_exists = False
    for info in BlobInfo.all().filter('content_type = ', mimetype):
        if info.filename == filename:
            already_exists = True
            continue
        # Clear out old blobs that share the same base name.
        if info.filename.split(".")[0] == filename.split(".")[0]:
            logging.debug("Deleting: %s", info.filename)
            info.delete()

    if not already_exists:
        logging.info("Creating: %s", filename)
        result = files.blobstore.create(
            mime_type=mimetype,
            _blobinfo_uploaded_filename=filename)
        with files.open(result, "a") as f:
            f.write(content)
        files.finalize(result)
        # get_blob_key() can return None briefly after finalize();
        # poll with a short sleep instead of busy-spinning the CPU.
        blob_key = files.blobstore.get_blob_key(result)
        while not blob_key:
            time.sleep(0.05)
            blob_key = files.blobstore.get_blob_key(result)
def deleteOldBlobs():
    """Purge cached Smarthistory blobs older than the expiration window."""
    for blob_info in BlobInfo.all().fetch(500):
        # Only blobs whose filename mentions the Smarthistory URL are ours.
        if SMARTHISTORY_URL not in blob_info.filename:
            continue
        age = datetime.now() - blob_info.creation
        age_seconds = age.days * 86400 + age.seconds
        if age_seconds >= SMARTHISTORY_IMAGE_CACHE_EXPIRATION:
            blob_info.delete()
def GarbageCollectBlobs(self):
    """Delete every blob no Responder or Product image still references.

    Returns a human-readable summary of what was deleted.
    """
    # Start from every blob, then drop the ones something still points at.
    orphans = {}
    for blob in BlobInfo.all():
        orphans[blob.key()] = blob

    def _mark_in_use(entities):
        # Remove blobs referenced via image_data from the orphan map.
        for entity in entities:
            image_blob = entity.image_data
            if image_blob:
                orphans.pop(image_blob.key(), None)

    _mark_in_use(Responder.all())
    _mark_in_use(Product.all())

    for key, blob_info in orphans.iteritems():
        logging.info('deleting %s' % key)
        blob_info.delete()

    if orphans:
        return 'Deleted blobs: \n%s' % '\n'.join(str(k) for k in orphans)
    return 'No blobs to delete'
def do_clean(cursor=None):
    # Walk the BlobInfo table one record at a time via a datastore cursor,
    # log whether any Thread still references the image, delete the blob,
    # then re-schedule itself on the deferred queue.
    bq = BlobInfo.all()
    if cursor:
        bq.with_cursor(cursor)
    blob = bq.get()
    if not blob:
        # No more blobs: stop the deferred chain.
        return
    key = str(blob.key())
    thq = Thread.all(keys_only=True)
    thq.filter("images", key)
    th = thq.get()
    if th:
        logging.info("thread: %r" % th)
    else:
        logging.info("no thread for image %r" % key)
    # NOTE(review): the blob is deleted even when a Thread still references
    # it — the lookup above only logs. Confirm this unconditional delete
    # is intentional.
    blob.delete()
    # Process the next blob in 30 seconds, resuming from this cursor.
    deferred.defer(do_clean, bq.cursor(), _countdown=30)
def serve(self, filename):
    """Serve the blob stored under *filename*, or 404 when none exists.

    TODO: Surely if this file is served from a hash-based URL then we
    can return HTTP caching headers, right?
    """
    blob_info = BlobInfo.all().filter('filename = ', filename).get()
    if not blob_info:
        return HttpResponseNotFound()
    payload = BlobReader(blob_info.key()).read()
    return HttpResponse(payload, mimetype=blob_info.content_type)
def exportBlob(self, cursor=None, backupkey=None):
    """Return one pickled, hex-encoded page of blobstore keys.

    The caller must present the shared backup key; pagination is driven
    by the opaque datastore *cursor* echoed back in each page.
    """
    global backupKey
    # Explicit check instead of `assert`: assertions are stripped when
    # Python runs with -O, which would silently disable this access
    # control. Raise the same exception type callers already expect.
    if not safeStringComparison(backupKey, backupkey):
        raise AssertionError("invalid backup key")
    q = BlobInfo.all()
    if cursor is not None:
        q.with_cursor(cursor)
    keys = [str(res.key()) for res in q.run(limit=5)]
    return pickle.dumps({
        "cursor": str(q.cursor()),
        "values": keys,
    }).encode("HEX")
def clean_blobs(self, blob_info):
    """Delete older blobs that share *blob_info*'s filename.

    Returns True on success; logs and re-raises on failure.
    """
    response = None
    try:
        # All blobs with the same filename created before this one.
        stale = BlobInfo.all() \
            .filter('filename = ', blob_info.filename) \
            .filter('creation < ', blob_info.creation) \
            .fetch(limit=None)
        for blob in stale:
            blob.delete()
        response = True
    except Exception as e:
        logging.error(e)
        # Bare `raise` preserves the original traceback (`raise e` resets it).
        raise
    # Bug fix: `response` was assigned but never returned, so callers
    # always received None.
    return response
def exportBlob(self, cursor=None, key=None):
    """Return a hex-encoded pickle with up to 16 blob keys and a cursor."""
    if not self._checkKey(key, export=True):
        raise errors.Forbidden()
    query = BlobInfo.all()
    if cursor is not None:
        query.with_cursor(cursor)
    values = [str(entry.key()) for entry in query.run(limit=16)]
    payload = {"cursor": str(query.cursor()), "values": values}
    return pickle.dumps(payload).encode("HEX")
def delete(self):
    """Handle DELETE: remove the blob named by ?key= and report as JSON."""
    key = self.request.get('key')
    blobstore.delete(key or '')
    result = {}
    if key:
        result[key] = True
    else:
        # NOTE(review): with no key, every blob is *reported* as deleted
        # but only the empty string was passed to blobstore.delete() —
        # confirm this is the intended behavior.
        for record in BlobInfo.all():
            result[str(record.key())] = True
    s = json.dumps(result, separators=(',', ':'))
    redirect = self.request.get('redirect')
    if redirect:
        return self.redirect(str(
            redirect.replace('%s', urllib.quote(s, ''), 1)))
    # Default '' so a missing Accept header cannot raise
    # "argument of type 'NoneType' is not iterable".
    if 'application/json' in self.request.headers.get('Accept', ''):
        self.response.headers['Content-Type'] = 'application/json'
    self.response.write(s)
def get(self):
    """Render a paginated gallery of every uploaded image."""
    if not users.get_current_user():
        # Bug fix: must return here — without it the handler kept
        # executing (and rendering the page) after scheduling the
        # redirect for anonymous users.
        self.redirect('/')
        return

    page_str = self.request.get('page', '1')
    page_num = int(page_str)  # int() replaces the deprecated string.atoi()

    # Collect every blob, newest first, and build serving URLs.
    # (Renamed from `images` to avoid shadowing the GAE images module.)
    blobs = []
    for b in BlobInfo.all():
        blobs.append(b)
    blobs.sort(key=lambda i: i.creation, reverse=True)
    image_urls = [get_serving_url(img.key()) for img in blobs]
    image_count = len(image_urls)

    # Ceiling division for the page count (explicit integer division).
    if image_count % pagesize == 0:
        total_page = image_count // pagesize
    else:
        total_page = image_count // pagesize + 1

    # Clamp the requested page into [1, total_page].
    if page_num > total_page:
        page_num = total_page
    if page_num < 1:
        page_num = 1

    prev_page_params = {'page': page_num - 1}
    next_page_params = {'page': page_num + 1}
    url = users.create_logout_url(self.request.uri)
    url_linktext = 'Logout'
    upload_url = blobstore.create_upload_url('/save_image')
    template_values = {
        'user': users.get_current_user(),
        'url': url,
        'url_linktext': url_linktext,
        'images': image_urls[(page_num - 1) * pagesize:page_num * pagesize],
        'imgcount': image_count,
        'total_page': total_page,
        'prev_page': page_num - 1,
        'next_page': page_num + 1,
        'prev_page_params': urllib.urlencode(prev_page_params),
        'next_page_params': urllib.urlencode(next_page_params),
        'upload_url': upload_url,
    }
    template = JINJA_ENVIRONMENT.get_template('/HTML/ViewImage.html')
    self.response.write(template.render(template_values))
def get(self):
    """Serve the file list as JSON for API clients, or the index page."""
    # Bug fix: use .get(..., '') — indexing headers["Accept"] raised a
    # KeyError for requests without an Accept header.
    accept = self.request.headers.get('Accept', '')
    if 'application/json' in accept or 'text/javascript' in accept:
        results = []
        fils = BlobInfo.all()
        if fils.count():
            for record in fils:
                blob_key = str(record.key())
                result = {
                    'name': record.filename,
                    'type': record.content_type,
                    'size': record.size,
                    'deleteType': 'DELETE',
                    'deleteUrl': self.request.host_url + '/?key=' +
                                 urllib.quote(blob_key, ''),
                }
                if IMAGE_TYPES.match(result['type']):
                    try:
                        result['url'] = images.get_serving_url(
                            blob_key, size=None, crop=False,
                            secure_url=None)
                        result['thumbnailUrl'] = (
                            result['url'] + THUMBNAIL_MODIFICATOR)
                    except Exception:
                        # Could not get an image serving url; fall back
                        # to the generic download URL below.
                        pass
                if 'url' not in result:
                    result['url'] = self.request.host_url + '/' + \
                        blob_key + '/' + urllib.quote(
                            result['name'].encode('utf-8'), '')
                results.append(result)
        s = json.dumps({'files': results}, separators=(',', ':'))
        redirect = self.request.get('redirect')
        if redirect:
            return self.redirect(str(
                redirect.replace('%s', urllib.quote(s, ''), 1)))
        if 'application/json' in accept:
            self.response.headers['Content-Type'] = 'application/json'
        self.response.write(s)
    else:
        template_values = {
            'WEBSITE': WEBSITE,
        }
        template = JINJA_ENVIRONMENT.get_template('index.html')
        self.response.write(template.render(template_values))
def get(self):
    """List all blobs, pairing each with the product or video that owns it."""
    from product import ProductModel
    from video import VideoModel
    blobs = BlobInfo.all().fetch(500)
    blob_products = {}
    blob_videos = {}
    for blob in blobs:
        # First try to match the blob to a product image ...
        products = ProductModel.query().filter(
            ProductModel.imgBlobKey == blob.key()).fetch(1)
        if products:
            blob_products[blob] = products[0]
        else:
            # ... otherwise see whether it backs a video.
            # (Removed stray debug `print` statements that polluted
            # server stdout.)
            videos = VideoModel.query().filter(
                VideoModel.videoBlobKey == blob.key()).fetch(1)
            if videos:
                blob_videos[blob] = videos[0]
    template_values = {
        "blobs": blobs,
        "blob_products": blob_products,
        "blob_videos": blob_videos,
        "session": self.session,
    }
    template = JINJA_ENVIRONMENT.get_template('/check/blob/list.html')
    self.response.write(template.render(template_values))
def file_up_to_date(self, filename):
    """Return True when a blob with this exact filename already exists."""
    matches = BlobInfo.all().filter('filename =', filename).count()
    return bool(matches)
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA # import os, glob import cgi import cgitb cgitb.enable() #from config import * from google.appengine.ext.blobstore import BlobInfo blobs = BlobInfo.all() vars={'checkboxes':"",'transplantws':"/transplantdata",'survivaldatasource':"/sampledata",'genedatasource':"/genedata","jsdir":"/js","loadergif":"/images/loader.gif"} files=[] tr="" lastfile = "" namedic = {} for blob in blobs.run(): namedic[blob.filename[:28]]=True for basename in sorted(namedic.keys()): #basename = blob.filename
def browse_images():
    """Render the browse page over every blob in the blobstore.

    NOTE: consider grabbing via GPL only certain file types.
    NOTE: This assumes that all blobs are images...
    """
    all_blobs = BlobInfo.all()
    return template('browse.html', blobstore_results=all_blobs)
# Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA # import os, glob import cgi import cgitb cgitb.enable() #from config import * from google.appengine.ext.blobstore import BlobInfo blobs = BlobInfo.all() vars = { 'checkboxes': "", 'transplantws': "/transplantdata", 'survivaldatasource': "/sampledata", 'genedatasource': "/genedata", "jsdir": "/js", "loadergif": "/images/loader.gif" } files = [] tr = "" lastfile = "" namedic = {} for blob in blobs.run():
def get(self):
    """Cron handler: delete every blob older than LIMIT_HOURS hours."""
    # Compute the cutoff with direct timedelta arithmetic instead of
    # round-tripping through a unix timestamp; also drops the Py2-only
    # long() call.
    cutoff = datetime.datetime.now() - datetime.timedelta(hours=LIMIT_HOURS)
    for blob in BlobInfo.all().filter("creation <", cutoff):
        blob.delete()
    self.response.out.write("done")