def initialize(*args):
    """Build the segmentation trie, using a bucket-hosted marshal cache.

    Reads the dictionary object from SAE storage; when the stored
    'jieba.cache' object is newer than the dictionary it is loaded
    directly, otherwise the trie is regenerated and re-uploaded.

    Args:
        args: optional single positional argument naming the dictionary
              object; defaults to the module-level DICTIONARY.
    """
    global trie, FREQ, total, min_freq, initialized
    if len(args) == 0:
        dictionary = DICTIONARY
    else:
        dictionary = args[0]
    with DICT_LOCK:
        if initialized:
            return
        if trie:
            del trie
            trie = None
        t1 = time.time()
        cache_file = 'jieba.cache'
        default_dict = dictionary
        default_bucket = getattr(settings, 'STORAGE_BUCKET_NAME')
        bucket = Bucket(default_bucket)
        # Contents of the *dictionary* object (the original misleadingly
        # called this cache_file_content).
        dict_file_content = bucket.get_object_contents(dictionary)
        dict_stamp = bucket.stat_object(default_dict)['timestamp']
        load_from_cache_fail = True
        # BUG FIX: cache_exists was only assigned in the except branch, so a
        # successful stat_object() left it undefined and the later
        # `if cache_exists:` raised NameError.
        cache_exists = True
        try:
            cache_stamp = bucket.stat_object(cache_file)['timestamp']
        except Exception:
            cache_exists = False
        else:
            if cache_stamp > dict_stamp:
                logger.debug("loading model from cache %s" % cache_file)
                try:
                    cache_content = bucket.get_object_contents(cache_file)
                    trie, FREQ, total, min_freq = marshal.loads(cache_content)
                    load_from_cache_fail = False
                except Exception:
                    load_from_cache_fail = True
        if load_from_cache_fail:
            trie, FREQ, total = gen_trie(dict_file_content)
            # Normalize raw counts to log-probabilities.
            FREQ = dict([(k, log(float(v) / total)) for k, v in FREQ.iteritems()])
            min_freq = min(FREQ.itervalues())
            logger.debug("dumping model to file cache %s" % cache_file)
            try:
                # Dump to a uniquely-named local temp file first, then upload.
                tmp_suffix = "." + str(random.random())
                cache_file = 'dict' + tmp_suffix + '.cache'
                cache_file = os.path.join(tempfile.gettempdir(), cache_file)
                with open(cache_file, 'wb') as temp_cache_file:
                    marshal.dump((trie, FREQ, total, min_freq), temp_cache_file)
                if cache_exists:
                    bucket.delete_object('jieba.cache')
                bucket.put_object('jieba.cache', open(cache_file, 'rb'))
            except Exception:
                logger.error("dump cache file failed.")
                logger.exception("")
        initialized = True
        logger.debug("loading model cost %s seconds." % (time.time() - t1))
        logger.debug("Trie has been built succesfully.")
def delThenSetToSAEStorage(filename, data):
    """Replace *filename* in the 'media' bucket and return its public URL.

    Routes the object into an image/video/music prefix by file extension,
    deletes any previous object at that path (best effort) and uploads
    *data*. Returns None when the filename cannot be parsed.
    """
    monkey.patch_all()
    bucket = Bucket('media')
    bucket.put()  # ensure the bucket exists
    bucket.post(
        acl='.r:.sinaapp.com,.r:sae.sina.com.cn,.r:.vipsinaapp.com,.r:.qq.com,.r:.wx.qq.com',
        metadata={'expires': '7d'})
    tmp = __method_get_namelist(filename)
    # BUG FIX: compare to None with `is`, not `==`.
    if tmp is None:
        return None
    filetype = tmp[1]
    ext = filetype.lower()
    if ext in ('jpg', 'jpeg', 'bmp', 'gif', 'png'):
        filepath = 'image/' + filename
    elif ext in ("swf", "wmv"):
        filepath = 'video/' + filename
    elif ext in ("wma", "mp3"):
        filepath = 'music/' + filename
    else:
        filepath = filename
    try:
        bucket.delete_object(filepath)
    except Exception:
        pass  # best effort: the object may not exist yet
    bucket.put_object(filepath, data)
    return bucket.generate_url(filepath)
def exportImport(filename, tp):
    """Import records from an uploaded Excel file.

    tp == 0 expects an 11-column student sheet, tp == 1 a 9-column sheet.
    Returns a dict with 'status'/'tip' keys ('usernames' on success) and
    removes the uploaded temp file afterwards.
    """
    result = {}
    if settings.DEBUG:
        # Client debug: read straight from MEDIA_ROOT.
        data = xlrd.open_workbook(settings.MEDIA_ROOT + filename)
    else:
        # On SAE: fetch the spreadsheet bytes from the resources bucket.
        bucket = Bucket('resources')
        obj = bucket.get_object_contents(filename)
        data = xlrd.open_workbook(file_contents=obj)
    table = data.sheets()[0]
    ncols = table.ncols
    nrows = table.nrows
    if (tp == 0 and ncols != 11) or (tp == 1 and ncols != 9):
        result['status'] = 'failured'
        result['tip'] = 'excel列数不对'
    elif nrows < 2:
        result['status'] = 'failured'
        result['tip'] = '至少需要一条记录'
    else:
        statistic = executeImport(table, tp)
        result['status'] = 'success'
        result['tip'] = '导入成功,共 %d 人,成功导入 %d 人,跳过 %d 人' \
            % (statistic['sum'], statistic['count'], statistic['existed'])
        result['usernames'] = statistic['usernames']
    # Drop the uploaded temp file from wherever it was read.
    if settings.DEBUG:
        os.remove(settings.MEDIA_ROOT + filename)
    else:
        bucket.delete_object(filename)
    return result
def get(self, request, *args, **kwargs):
    """Queue a MySQL export job and prune bucket backups older than 30 days."""
    from sae.deferredjob import MySQLExport, DeferredJob
    from sae.storage import Bucket as SBucket
    import time
    import datetime
    export_bucket = 'xkongbackup'
    bucket = SBucket(export_bucket)
    now = time.strftime('%Y_%m_%d_%H_%M_%S')
    filename = 'app_ninan_%s.zip' % now
    deferred_job = DeferredJob()
    job = MySQLExport(export_bucket, filename, 'note_note',
                      'backends/backupsuccess/')
    deferred_job.add(job)
    resp = {'touch': filename}
    # Prune: remove every object in this bucket created over a month ago.
    a_month_ago = datetime.datetime.now() - datetime.timedelta(days=30)
    for object_ in bucket.list():
        last_modified = object_['last_modified']
        if not last_modified:
            continue
        mtime = datetime.datetime.strptime(last_modified,
                                           '%Y-%m-%dT%H:%M:%S.%f')
        if object_['content_type'] is not None and mtime < a_month_ago:
            bucket.delete_object(object_['name'])
    return self.render_to_response(resp)
def delPic(request,id): if isUser(request): if request.method == "POST": try: pic_id = request.POST.get('del_pic_id','') pic = Pictures.objects.get(id=pic_id) except Exception, e: raise e if 'SERVER_SOFTWARE' in os.environ: try: from sae.storage import Bucket bucket = Bucket('media') pic_path = str(id) + '/' + pic.pic_src[pic.pic_src.rindex("/")+1:] bucket.delete_object(pic_path) pic.delete() except Exception, e: raise e else: try: os.remove(pic.pic_src[1:]) # print pic.pic_src[1:pic.pic_src.rindex("/")+1] if not os.listdir(pic.pic_src[1:pic.pic_src.rindex("/")+1]): os.rmdir(pic.pic_src[1:pic.pic_src.rindex("/")+1]) pic.delete() except Exception, e: raise e
def DeleteFileFromStorage(fileName): global domain_name try: from sae.storage import Bucket bucket = Bucket(domain_name) bucket.delete_object(fileName) Log("DeleteFileFromStorage fileName %s" % fileName) except Exception, e: Log("Error when DeleteFileFromStorage: %s" % e) return None
def delete_img(con, pid, title): if 'SERVER_SOFTWARE' in os.environ: p = get_product_detail(con, title) bucket = Bucket('domain1') for url in p.img_list: img_name = url.split("/")[-1] bucket.delete_object(img_name) print "delete bucket object", img_name sql = "delete from {0} where pid ={1}".format(IMG_TABLE, pid) execute_non_query(con, sql)
def edit0(request):
    """Edit view: rename a photo (and its bucket objects) or load its data.

    POST 'correct' applies the rename/comment/mood change; GET 'atitle'
    preloads the current values for the edit form.
    """
    pname = pcomment = pmood = newname = 0
    from sae.storage import Bucket
    bucket = Bucket('abc')
    if request.POST:
        if request.POST.has_key('correct'):
            if request.GET.has_key('atitle'):
                pname = request.GET['atitle']
            pn = t.objects.all()
            if (len(pn) != 0):
                pname = pn[0].title
                for i in pn:
                    i.delete()
            we = imagess.objects.filter(title=pname)
            if (len(we) != 0):
                # Re-encode the stored original as jpeg.
                img = bucket.get_object_contents('stati/' + pname)
                im = Image.open(StringIO.StringIO(img))
                imgout = StringIO.StringIO()
                im.save(imgout, "jpeg")
                img_data = imgout.getvalue()
                we[0].title = request.POST['cname'] + '.jpg'
                newname = we[0].title
                if (newname != pname):
                    # Renamed: write new objects, drop the old ones.
                    ne = t(title=newname)
                    ne.save()
                    bucket.put_object('stati/' + newname, img_data)
                    im = Image.open(StringIO.StringIO(img))
                    out = im.resize((128, 128))
                    imgout = StringIO.StringIO()
                    out.save(imgout, "jpeg")
                    img_data = imgout.getvalue()
                    bucket.put_object('manage/' + newname, img_data)
                    bucket.delete_object('manage/' + pname)
                    bucket.delete_object('stati/' + pname)
                    pname = newname
                we[0].comment = request.POST['ccomment']
                we[0].mood = request.POST['cmood']
                we[0].save()
                pname = request.POST['cname'] + '.jpg'
                pcomment = request.POST['ccomment']
                pmood = request.POST['cmood']
    elif request.GET.has_key('atitle'):
        if (pname == 0):
            pname = request.GET['atitle']
        p = t(title=pname)
        p.save()
        we = imagess.objects.filter(title=pname)
        if (len(we) != 0):
            pcomment = we[0].comment
            pmood = we[0].mood
    if (pname != 0):
        # Strip the '.jpg' extension for display.
        pname = pname[:-4]
    return render_to_response('editt.html',
                              {'pname': pname, 'newname': newname,
                               'pmood': pmood, 'pcomment': pcomment},
                              context_instance=RequestContext(request))
def initialize(dictionary=None): global pfdict, FREQ, total, min_freq, initialized, DICTIONARY, DICT_LOCK if not dictionary: dictionary = DICTIONARY with DICT_LOCK: if initialized: return logger.debug("Building prefix dict from %s ..." % X_CACHE_FILE) t1 = time.time() cache_file = X_CACHE_FILE if settings.DEBUG: bucket = Bucket('xkong1946') else: bucket = Bucket() dict_stamp = bucket.stat_object(dictionary)['timestamp'] cache_stamp = bucket.stat_object(cache_file)['timestamp'] load_from_cache_fail = True if cache_stamp > dict_stamp: logger.debug("Loading model from cache %s" % cache_file) try: cf = bucket.get_object_contents(cache_file) pfdict, FREQ, total = marshal.loads(cf) # prevent conflict with old version load_from_cache_fail = not isinstance(pfdict, set) except Exception, e: print e load_from_cache_fail = True if load_from_cache_fail: dict_file_content = bucket.get_object_contents(dictionary) pfdict, FREQ, total = gen_pfdict(dict_file_content) logger.debug("Dumping model to file cache %s" % cache_file) try: import StringIO fd = StringIO.StringIO() fd.write(marshal.dumps((pfdict, FREQ, total))) if bucket.stat_object(X_CACHE_FILE): bucket.delete_object(X_CACHE_FILE) bucket.put_object(X_CACHE_FILE, fd.getvalue()) except Exception: logger.exception("Dump cache file failed.") initialized = True logger.debug("Loading model cost %s seconds." % (time.time() - t1)) logger.debug("Prefix dict has been built succesfully.")
def delete_file(file_path):
    """Delete the storage object that the public URL *file_path* points at.

    The bucket name is taken from the last dash-separated token of the
    hostname's first label; the object name is the URL path minus its
    leading segment.
    """
    from urlparse import urlsplit
    from sae.storage import Bucket
    parts = urlsplit(file_path)
    bucket_name = parts.hostname.split(".", 1)[0].split("-")[-1]
    _, object_name = parts.path.split("/", 1)
    Bucket(bucket_name).delete_object(object_name)
def MakeBackup():
    u"""Periodic backup task.

    Skips work when the kvdb change counter is zero; otherwise resets the
    counter, prunes stale archives in the 'backup' bucket (keeps the newest
    3 daily and 4 weekly archives) and writes a fresh archive.
    """
    dbchgcounter = kv.get("kvdbchg")
    if dbchgcounter is None:
        dbchgcounter = 0
    if dbchgcounter == 0:
        return u"数据未改变"
    kv.set("kvdbchg", 0)
    bucket = Bucket('backup')
    tm = datetime.now()
    # Collect (name, age) pairs for daily (d.zip.data) vs weekly archives.
    dellist = []
    fdlist = []
    fwlist = []
    for finf in bucket.list():
        last_modified = str(finf[u'last_modified'])
        # 2013-05-22T05:09:32.259140 -> 2013-05-22T05:09:32
        last_modified = last_modified[:last_modified.index(".")]
        filetime = datetime.strptime(last_modified, "%Y-%m-%dT%H:%M:%S")
        fname = str(finf[u"name"])
        if "d.zip.data" in fname:
            fdlist.append((fname, tm - filetime))
        else:
            fwlist.append((fname, tm - filetime))
    # BUG FIX: sorted() returns a new list but its result was discarded, so
    # the lists were pruned in arbitrary order; the weekly branch also
    # sliced fdlist instead of fwlist (copy-paste error).
    if len(fdlist) > 3:
        fdlist.sort(key=lambda x: x[1])
        dellist = fdlist[3:]
    if len(fwlist) > 4:
        fwlist.sort(key=lambda x: x[1])
        dellist += fwlist[4:]
    for fname in dellist:
        bucket.delete_object(fname[0])
    # Write the new archive; Saturdays get the weekly suffix.
    filename = tm.strftime(u"%Y-%m-%d_%H_%M_%S")
    if tm.weekday() == 5:
        filename += "w.zip.data"
    else:
        filename += "d.zip.data"
    WriteZipFile(filename)
    return u"已备份"
class SaeStorageSaver:
    """Thin convenience wrapper around an SAE storage Bucket."""

    def __init__(self, key):
        self.bucket = Bucket(key)

    def StoreTxtFile(self, path, content):
        """Store a text object at *path*."""
        self.bucket.put_object(path, content)

    def StoreBinFile(self, path, content):
        """Store a binary object at *path*."""
        self.bucket.put_object(path, content)

    def GetObjectByPath(self, path):
        """Return the raw contents of the object at *path*."""
        return self.bucket.get_object_contents(path)

    def GetItemUnder(self, path):
        """Return a list of every object stored under *path*."""
        return list(self.bucket.list(path))

    def GetBackupList(self):
        """Return the objects under the global backup path."""
        return self.GetItemUnder(g_backup_path)

    def DeleteObject(self, obj):
        """Remove *obj* from the bucket."""
        self.bucket.delete_object(obj)
def delete_blog(request):
    """AJAX endpoint: delete a blog together with its non-default cover image."""
    if request.is_ajax() and request.method == 'POST':
        blog_id = request.POST.get('blog_id')
        if blog_id is None:
            result = {'status': '0', 'info': '出现错误:待删除的博客ID不能为空!'}
        else:
            try:
                b = Blog.objects.get(id=blog_id)
                # Drop the blog's image unless it is the shared default.
                import sae.storage
                from sae.storage import Bucket
                bucket = Bucket('img')
                img_file = b.image.url
                file_name = img_file[-14:]
                if file_name != 'default000.jpg':
                    bucket.delete_object(file_name)
                b.delete()
                result = {'status': '1', 'info': '已成功删除该博客!'}
            except ObjectDoesNotExist:
                result = {'status': '1', 'info': '待删除的博客不存在!'}
        return HttpResponse(json.dumps(result), content_type='application/json')
    return render(request, 'blogs/index.html')
def delete_object(bucket_name, folder_path, object_name): """delete object Args: bucket_name: name of bucket folder_path: folder path object_name: name of object Returns: True if delete successfully False if object unexisted """ bucket = Bucket(bucket_name) folder_path = complete_folder(folder_path) object_path = "%s%s" % (folder_path, object_name) virtual_object_path = "/s/%s/%s" % (bucket_name, object_path) if not is_file_existed(virtual_object_path): print 'class StorageInterface delete_object() file unexisted! object_path: %s' % object_path return False bucket.delete_object(object_path) print 'class StorageInterface delete_object delete successfully!' return True
class SaeStorage(Storage):
    """Whoosh-style Storage implementation backed by an SAE storage bucket."""

    def __init__(self, bucket_name, path):
        self.bucket_name = bucket_name
        self.folder = path
        self._bucket = Bucket(bucket_name)
        self.locks = {}

    def __repr__(self):
        return "%s(%r)(%r)" % (self.__class__.__name__,
                               self.bucket_name, self.folder)

    def create(self):
        return self

    def destroy(self):
        # Remove every stored file, then drop the lock table.
        self.clean()
        del self.locks

    def create_file(self, name, **kwargs):
        """Return a StructFile that uploads its buffer on close."""
        def onclose_fn(sfile):
            self._bucket.put_object(self._fpath(name), sfile.file.getvalue())
        return StructFile(BytesIO(), name=name, onclose=onclose_fn)

    def open_file(self, name, **kwargs):
        """Open an existing file; raises NameError when it is missing."""
        if self._bucket.stat_object(self._fpath(name)) is None:
            raise NameError(name)
        content = self._bucket.get_object_contents(self._fpath(name))

        def onclose_fn(sfile):
            self._bucket.put_object(self._fpath(name), sfile.file.getvalue())
        return StructFile(BytesIO(content), name=name, onclose=onclose_fn)

    def _fpath(self, fname):
        return os.path.join(self.folder, fname)

    def clean(self):
        for fname in self.list():
            self._bucket.delete_object(self._fpath(fname))

    def list(self):
        """Return file names under the folder, with the prefix stripped."""
        names = []
        for obj in self._bucket.list(path=self._fpath("")):
            names.append(obj['name'][len(self.folder) + 1:])
        return names

    def file_exists(self, name):
        return name in self.list()

    def file_modified(self, name):
        return self._bucket.stat_object(self._fpath(name))\
            .get('last_modified', '')

    def file_length(self, name):
        return int(self._bucket.stat_object(self._fpath(name))['bytes'])

    def delete_file(self, name):
        self._bucket.delete_object(self._fpath(name))

    def rename_file(self, name, newname, safe=False):
        """Rename via copy+delete; honours the `safe` overwrite guard."""
        if name not in self.list():
            raise NameError(name)
        if safe and newname in self.list():
            raise NameError("File %r exists" % newname)
        content = self._bucket.get_object_contents(self._fpath(name))
        self._bucket.delete_object(self._fpath(name))
        self._bucket.put_object(self._fpath(newname), content)

    def lock(self, name):
        # One in-process Lock per file name, created lazily.
        if name not in self.locks:
            self.locks[name] = Lock()
        return self.locks[name]

    def temp_storage(self, name=None):
        name = name or "%s.tmp" % random_name()
        path = os.path.join(self.folder, name)
        return SaeStorage(self.bucket_name, path).create()
class SAEStorageKVDB(object):
    '''
    A bridge for pyutils kvdb wrapper to access sae storage like a kvdb.

    e.g.
        from pyutils import KVDBWrapper
        from sae_storage import SAEStorageKVDB
        kvclient = KVDBWrapper(SAEStorageKVDB, bucket='mybucket',
                               accesskey='xxx', secretkey='yyy',
                               account='myapp', prefix='PRE:', ...)
        kvclient.set(key, value)
        kvclient.get(key)
    '''

    def __init__(self, **kwargs):
        bucket = kwargs.get('bucket', '')
        accesskey = kwargs.get('accesskey', '')
        secretkey = kwargs.get('secretkey', '')
        account = kwargs.get('account', '')  # app name
        retries = long(kwargs['retries']) if 'retries' in kwargs else 3
        self.prefix = kwargs.get('prefix', '')
        if accesskey and secretkey and account:
            # Explicit credentials: connect to the named app's bucket.
            conn = Connection(accesskey, secretkey, account, retries)
            self.kv = conn.get_bucket(bucket)
        else:
            # Inside SAE: the environment supplies credentials.
            self.kv = Bucket(bucket)

    def info(self):
        return self.kv.stat()

    def get(self, key, **kwargs):
        return self.kv.get_object(self.prefix + key, **kwargs)

    def set(self, key, value, **kwargs):
        return self.kv.put_object(self.prefix + key, value, **kwargs)

    def delete(self, key, **kwargs):
        return self.kv.delete_object(self.prefix + key, **kwargs)

    def exist(self, key, **kwargs):
        """Return True when a stat record exists for the prefixed key."""
        return bool(self.kv.stat_object(self.prefix + key))

    def scan(self, cursor=None, count=100, **kwargs):
        '''
        Retrieve keys by given arguments
        :param kwargs: 'count' for retrieve count; 'cursor' is the key for next time retrieve
        :return:
        '''
        # NOTE(review): `self.kv.c` looks like a truncated attribute name --
        # Bucket exposes no such method; probably meant `list` (as in
        # scanv). Left unchanged pending confirmation against the SAE API.
        return self.kv.c(prefix=self.prefix, marker=cursor, limit=count,
                         **kwargs)

    def scanv(self, cursor=None, count=100, **kwargs):
        '''
        Retrieve key-values by given arguments
        :param kwargs: 'count' for retrieve count; 'cursor' is the key for next time retrieve
        :return:
        '''
        return self.kv.list(prefix=self.prefix, marker=cursor, limit=count,
                            **kwargs)

    def mget(self, keys, **kwargs):
        # BUG FIX: the original prefixed each key here and then passed it to
        # self.get(), which prefixes again -- every lookup was
        # double-prefixed and missed.
        for key in keys:
            yield self.get(key, **kwargs)
def delFile(filename):
    """Delete *filename* from the 'images' bucket; always returns True."""
    Bucket('images').delete_object(filename)
    return True
def saveChange(request, ID):
    """Persist edits to passage *ID* and reconcile its images.

    Updates title/summary/body, deletes stored pictures dropped from the
    text, promotes newly-referenced cache pictures to Picture rows (with a
    generated thumbnail) and purges the user's picture cache.
    """
    userid = request.session.get('userid', '')
    if userid == '':
        return HttpResponseRedirect('/index')
    title = request.POST['title']
    text = request.POST['text']
    textNoHtml = re.sub('<[^>]*?>', '', text)
    if len(textNoHtml) < 120:
        shortContent = textNoHtml + '......'
    else:
        shortContent = textNoHtml[0:120] + '......'
    # Save the updated title, summary and body.
    passageObj = Passage.objects.get(id=int(ID))
    passageObj.Title = title
    passageObj.ShortContent = shortContent
    passageObj.LongContent = text
    passageObj.save()
    # Image file names referenced by the new text.
    picSrcLs = re.findall('<img src="(.*?)">', text)
    picNameLs = []
    for pss in picSrcLs:
        if 'pictures' in pss:
            picNameLs.append(pss[49:])
    # Delete stored pictures that were removed during this edit.
    picSavedObjLs = Picture.objects.filter(PassageID=passageObj)
    picStayLs = []
    for picObj in picSavedObjLs:
        if picObj.OriginalImageName in picNameLs:
            picStayLs.append(picObj.OriginalImageName)
        else:
            bucket = Bucket('media')
            bucket.delete_object(picObj.OriginalImagePath.name)
            bucket.delete_object(picObj.CompressedImagePath.name)
            picObj.delete()
    # What remains in picNameLs are new uploads still in the cache table.
    for pic in picStayLs:
        picNameLs.remove(pic)
    for pn in picNameLs:
        cpobj = CachePicture.objects.get(ImageName=pn)
        bucket = Bucket('media')
        im = Image.open(cStringIO.StringIO(
            bucket.get_object_contents(cpobj.ImagePath.name)))
        w, h = im.size
        # Thumbnail bounded by width 66 (landscape) or height 74 (portrait).
        if w > h:
            im.thumbnail((66, (66 * h) // w))
        else:
            im.thumbnail(((w * 74) // h, 74))
        savepath = os.path.join('compressedpictures',
                                'thumnail' + cpobj.ImageName)
        fm = cpobj.ImageName.split('.')[1]
        if fm.lower() == 'jpg':
            fm = 'jpeg'
        buf = cStringIO.StringIO()
        im.save(buf, fm)
        img_data = buf.getvalue()
        bucket.put_object(savepath, img_data)
        picObj = Picture()
        picObj.PassageID = passageObj
        picObj.OriginalImageName = pn
        picObj.OriginalImagePath = cpobj.ImagePath
        picObj.CompressedImageName = 'thumnail' + cpobj.ImageName
        picObj.CompressedImagePath.name = os.path.join(
            'compressedpictures', 'thumnail' + cpobj.ImageName)
        picObj.save()
        cpobj.delete()
        buf.close()
    # Purge the current user's picture cache rows and their blobs.
    username = User.objects.get(id=userid).UserName
    deleteCachePicLs = CachePicture.objects.filter(UserName=username)
    if len(deleteCachePicLs) > 0:
        bucket = Bucket('media')
        for pic in deleteCachePicLs:
            bucket.delete_object(pic.ImagePath.name)
            pic.delete()
    return HttpResponseRedirect('/passage/' + ID)
def saveWritting(request): #blog 应用中最重要的试图函数。 #包括以下主要功能:1,保存博文;2,生成缩略图;3,把缓存表的图片信息移到源图表,然后清空缓存表。 userid = request.session.get('userid', '') if userid == '': return HttpResponseRedirect('/index') title = request.POST['title'] text = request.POST['text'] textNoHtml = re.sub('<[^>]*?>','',text) #print len(textNoHtml) #print text #print textNoHtml if len(textNoHtml) < 120: shortContent = textNoHtml + '......' else: shortContent = textNoHtml[0:120] + '......' nt = datetime.now() #以下是保存博文数据到数据表中。 passageObj = Passage() writerObj = User.objects.get(id = userid) passageObj.UserID = writerObj passageObj.Title = title passageObj.Time = nt passageObj.ShortContent = shortContent passageObj.LongContent = text passageObj.save() #以下是把所有缓存表的图片去处移到源图表,并生成压缩图。 picSrcLs = re.findall('<img src="(.*?)">',text) picNameLs = [] for pss in picSrcLs: if 'pictures'in pss: picNameLs.append(pss[49:]) else: continue #此变量用于保存文章的ID ID = 0 passageObj = Passage.objects.get(UserID = writerObj, Time = nt) ID = passageObj.id for pn in picNameLs: cpobj = CachePicture.objects.get(ImageName = pn) #print 'sss',cpobj.ImagePath.name bucket = Bucket('media') im = Image.open(cStringIO.StringIO(bucket.get_object_contents(cpobj.ImagePath.name))) #im = Image.open(os.path.join(settings.MEDIA_ROOT, cpobj.ImagePath.name)) w, h = im.size if w > h: im.thumbnail((66, (66*h)//w)) else: im.thumbnail(((w*74)//h, 74)) #savepath = os.path.join(settings.MEDIA_ROOT, 'compressedpictures' ,'thumnail'+cpobj.ImageName) savepath = os.path.join('compressedpictures' ,'thumnail'+cpobj.ImageName) fm = cpobj.ImageName.split('.')[1] if fm.lower() == 'jpg': fm = 'jpeg' buf = cStringIO.StringIO() im.save(buf, fm) img_data = buf.getvalue() bucket.put_object(savepath, img_data) #im.save(savepath, fm) picObj = Picture() picObj.PassageID = passageObj picObj.OriginalImageName = pn picObj.OriginalImagePath = cpobj.ImagePath picObj.CompressedImageName = 'thumnail'+cpobj.ImageName picObj.CompressedImagePath.name = 
os.path.join('compressedpictures' ,'thumnail'+cpobj.ImageName) picObj.save() cpobj.delete() buf.close() #删除缓存表中的数据以及对应的图片。 deleteCachePicLs = CachePicture.objects.filter(UserName = writerObj.UserName) if len(deleteCachePicLs) > 0: bucket = Bucket('media') for pic in deleteCachePicLs: #os.remove(os.path.join(settings.MEDIA_ROOT, pic.ImagePath.name)) bucket.delete_object(pic.ImagePath.name) pic.delete() #以下到return代码前的代码用于文章数量增加1 dataCountObjLs = DataCount.objects.all() if len(dataCountObjLs) == 0: dataCountObj = DataCount() dataCountObj.PassageCount = 1 dataCountObj.save() else: dataCountObj = dataCountObjLs[0] dataCountObj.PassageCount += 1 print dataCountObj.PassageCount dataCountObj.save() #return HttpResponseRedirect('/index') return HttpResponseRedirect('/passage/'+ str(ID))
def manage(request):
    """Photo management view: delete, promote beautified images, list, search."""
    from sae.storage import Bucket
    bucket = Bucket('abc')
    # Clear the transient title table.
    sa = t.objects.all()
    for x in sa:
        x.delete()
    if request.GET:
        if request.GET.has_key('ctitle'):
            # Delete: remove both stored variants and the DB rows.
            name = request.GET["ctitle"]
            bucket.delete_object('manage/' + name)
            bucket.delete_object('stati/' + name)
            ta = imagess.objects.filter(title=name)
            if (len(ta) != 0):
                for i in ta:
                    i.delete()
        if request.GET.has_key('stitle'):
            # Save a beautified image back as a regular photo.
            name = request.GET["stitle"]
            if (name != '0'):
                new_name = name[2:]
                new_comment = '..'
                new_mood = '..'
                # NOTE(review): new_lat/new_lon are only assigned inside
                # this branch -- imagess() below raises NameError when no
                # source record exists. Preserved as-is.
                ta = imagess.objects.filter(title=new_name)
                if (len(ta) != 0):
                    new_comment = ta[0].comment
                    new_mood = ta[0].mood
                    new_lat = ta[0].lat
                    new_lon = ta[0].lon
                new_photo = imagess(picture=0, comment=new_comment,
                                    mood=new_mood, title=name,
                                    lat=new_lat, lon=new_lon)
                new_photo.save()
                obj = bucket.get_object_contents('meihua/' + name)
                im = Image.open(StringIO.StringIO(obj))
                imgout = StringIO.StringIO()
                im.save(imgout, "jpeg")
                img_data = imgout.getvalue()
                bucket.put_object('stati/' + name, img_data)
                im = Image.open(StringIO.StringIO(obj))
                out = im.resize((128, 128))
                imgout = StringIO.StringIO()
                out.save(imgout, "jpeg")
                img_data = imgout.getvalue()
                bucket.put_object('manage/' + name, img_data)
    # Build the gallery listing from the manage/ thumbnails.
    A = []
    a = bucket.list(path='manage/')
    for i in a:
        dic = []
        s = i.name.split('/')[-1]
        dic.append(s)
        dic.append(i.last_modified)
        ta = imagess.objects.filter(title=s)
        if (len(ta) != 0):
            dic.append(ta[0].mood)
            dic.append(ta[0].comment)
        A.append(dic)
    if request.GET:
        if request.GET.has_key('search'):
            # Search replaces the listing with comment matches.
            if request.GET['writesearch'] != '':
                A = []
                wcomment = request.GET['writesearch']
                result = imagess.objects.filter(comment=wcomment)
                for i in range(0, len(result)):
                    a = bucket.stat_object('manage/' + result[i].title)
                    dic = []
                    dic.append(result[i].title)
                    dic.append(a.last_modified)
                    dic.append(result[i].mood)
                    dic.append(result[i].comment)
                    A.append(dic)
    return render_to_response('manage.html', {'A': A},
                              context_instance=RequestContext(request))
class SAEStorage(LocalStorage):
    """Article storage backed by an SAE bucket of markdown files."""

    def __init__(self, bucket):
        from sae.storage import Bucket
        self.bucket = Bucket(bucket)
        bucket_stat = self.bucket.stat()
        # Change marker; has_last() compares against objects+bytes totals.
        self.last_mark = 0

    def list(self):
        """Return metadata dicts for every article, newest first."""
        articles = [a for a in self.bucket.list() if self.is_article(a.name)]
        articles = self._sorted_files(articles)
        rst = []
        for article in articles:
            article_name = article.name
            content = self.bucket.get_object_contents(article_name)
            content = content.decode('utf-8')
            art_meta = self._get_metadatas(content)
            art_meta['filename'] = article_name
            if type(article.name) == unicode:
                adjust_name = article_name.encode('utf-8')
            else:
                adjust_name = article_name
            art_meta['filename_url_encode'] = \
                base64.urlsafe_b64encode(adjust_name)
            if not art_meta['date']:
                art_meta['date'] = article.last_modified
            if not art_meta["slug"]:
                # Fall back to the filename (sans extension, _ -> space).
                art_meta['slug'] = article_name.rpartition(".")[0]
                art_meta['slug'] = art_meta['slug'].replace("_", " ")
            rst.append(art_meta)
        return rst

    def get(self, article, cut=-1):
        """Return the parsed article body, optionally truncated at *cut*."""
        content = self.bucket.get_object_contents(article)
        content = content.decode('utf-8')
        content = unicode(content)
        mdparse = MDParse()
        if cut != -1:
            content = content[:cut]
            content += "\n....."
        content = self._clean_metadatas(content)
        return mdparse.parse(content)

    def save(self, name, content):
        self.bucket.put_object(name, content)

    def delete(self, name):
        self.bucket.delete_object(name)

    def update_time(self, article):
        """Return the article's storage timestamp as 'Y-m-d H:M:S'."""
        stat = self.bucket.stat_object(article)
        stamp = float(stat.timestamp)
        return datetime.datetime.fromtimestamp(stamp)\
            .strftime("%Y-%m-%d %H:%M:%S")

    def has_last(self):
        """True when the bucket changed since the previous call."""
        bucket_stat = self.bucket.stat()
        curr_mark = bucket_stat.objects + bucket_stat.bytes
        changed = self.last_mark != curr_mark
        self.last_mark = curr_mark
        return changed

    def _sorted_files(self, articles):
        # Newest first, by per-object storage timestamp.
        def key_func(x):
            return float(self.bucket.stat_object(x.name).timestamp)
        return sorted(articles, key=key_func, reverse=True)
def beauti(request):
    """Image-beautify view.

    Applies one of several PIL operations (channel filter, 128x128 resize,
    rotate, three grayscale-channel variants) to an uploaded file or the
    GET-selected stored image, saves the result under meihua/ and renders
    the preview page.
    """
    r = g = b = s = 0
    from sae.storage import Bucket
    bucket = Bucket("abc")
    # Clear previous beautify outputs.
    aa = bucket.list(path="meihua/")
    for ii in aa:
        bucket.delete_object(ii.name)
    if request.GET:
        if request.GET.has_key("btitle"):
            b = request.GET["btitle"]
        if request.GET.has_key("ptitle"):
            g = request.GET["ptitle"]
    if request.POST:
        if request.POST.has_key("lvjing"):
            # Filter: keep only the first channel.
            if request.FILES or request.GET.has_key("btitle"):
                if request.FILES:
                    f = request.FILES["file"]
                    bucket.put_object("lvjing/" + f.name, f)
                    img = bucket.get_object_contents("lvjing/" + f.name)
                    s = "lv" + str(f)
                elif request.GET.has_key("btitle"):
                    name = request.GET["btitle"]
                    img = bucket.get_object_contents("stati/" + name)
                    s = "lv" + name
                im = Image.open(StringIO.StringIO(img))
                im.getdata()
                out = im.split()
                imgout = StringIO.StringIO()
                out[0].save(imgout, "jpeg")
                img_data = imgout.getvalue()
                bucket.put_object("meihua/" + s, img_data)
        if request.POST.has_key("suotu"):
            # Thumbnail: fixed 128x128 resize.
            if request.FILES or request.GET.has_key("btitle"):
                if request.FILES:
                    f = request.FILES["file"]
                    bucket.put_object("suotu/" + f.name, f)
                    img = bucket.get_object_contents("suotu/" + f.name)
                    s = "su" + str(f)
                elif request.GET.has_key("btitle"):
                    name = request.GET["btitle"]
                    img = bucket.get_object_contents("stati/" + name)
                    s = "su" + name
                im = Image.open(StringIO.StringIO(img))
                out = im.resize((128, 128))
                imgout = StringIO.StringIO()
                out.save(imgout, "jpeg")
                img_data = imgout.getvalue()
                bucket.put_object("meihua/" + s, img_data)
        if request.POST.has_key("xuanzhuan"):
            # Rotate by the requested number of degrees.
            if request.FILES or request.GET.has_key("btitle"):
                if request.POST["dushu"] != "":
                    dushu = request.POST["dushu"]
                    i = string.atoi(dushu)
                else:
                    i = 0
                if request.FILES:
                    f = request.FILES["file"]
                    bucket.put_object("xuanzhuan/" + f.name, f)
                    img = bucket.get_object_contents("xuanzhuan/" + f.name)
                    s = "xu" + str(f)
                else:
                    name = request.GET["btitle"]
                    img = bucket.get_object_contents("stati/" + name)
                    s = "xu" + name
                im = Image.open(StringIO.StringIO(img))
                out = im.rotate(i)
                imgout = StringIO.StringIO()
                out.save(imgout, "jpeg")
                img_data = imgout.getvalue()
                bucket.put_object("meihua/" + s, img_data)
        if request.POST.has_key("huidu1"):
            # Grayscale from channel 0 (falls back to the full image).
            if request.FILES or request.GET.has_key("btitle"):
                if request.FILES:
                    f = request.FILES["file"]
                    bucket.put_object("huidu1/" + f.name, f)
                    img = bucket.get_object_contents("huidu1/" + f.name)
                    s = "h1" + str(f)
                else:
                    name = request.GET["btitle"]
                    img = bucket.get_object_contents("stati/" + name)
                    s = "h1" + name
                im = Image.open(StringIO.StringIO(img))
                im.getdata()
                out = im.split()
                # BUG FIX: imgout was created only inside the len(out) > 0
                # branch, so the fallback im.save(imgout, ...) raised
                # NameError (huidu2/huidu3 create it up front).
                imgout = StringIO.StringIO()
                if len(out) > 0:
                    out[0].save(imgout, "jpeg")
                else:
                    im.save(imgout, "jpeg")
                img_data = imgout.getvalue()
                bucket.put_object("meihua/" + s, img_data)
        if request.POST.has_key("huidu2"):
            # Grayscale from channel 1 (falls back to the full image).
            if request.FILES or request.GET.has_key("btitle"):
                if request.FILES:
                    f = request.FILES["file"]
                    bucket.put_object("huidu2/" + f.name, f)
                    img = bucket.get_object_contents("huidu2/" + f.name)
                    s = "h2" + str(f)
                elif request.GET.has_key("btitle"):
                    name = request.GET["btitle"]
                    img = bucket.get_object_contents("stati/" + name)
                    s = "h2" + name
                im = Image.open(StringIO.StringIO(img))
                im.getdata()
                out = im.split()
                imgout = StringIO.StringIO()
                if len(out) > 1:
                    out[1].save(imgout, "jpeg")
                else:
                    im.save(imgout, "jpeg")
                img_data = imgout.getvalue()
                bucket.put_object("meihua/" + s, img_data)
        if request.POST.has_key("huidu3"):
            # Grayscale from channel 2 (falls back to the full image).
            if request.FILES or request.GET.has_key("btitle"):
                if request.FILES:
                    f = request.FILES["file"]
                    bucket.put_object("huidu3/" + f.name, f)
                    img = bucket.get_object_contents("huidu3/" + f.name)
                    s = "h3" + str(f)
                elif request.GET.has_key("btitle"):
                    name = request.GET["btitle"]
                    img = bucket.get_object_contents("stati/" + name)
                    s = "h3" + name
                im = Image.open(StringIO.StringIO(img))
                im.getdata()
                out = im.split()
                imgout = StringIO.StringIO()
                if len(out) > 2:
                    out[2].save(imgout, "jpeg")
                else:
                    im.save(imgout, "jpeg")
                img_data = imgout.getvalue()
                bucket.put_object("meihua/" + s, img_data)
    return render_to_response("pilbeau.html", {"r": s, "s": b, "g": g},
                              context_instance=RequestContext(request))
def saveChange(request, ID):
    """Persist edits to passage *ID* and reconcile its images.

    Updates title/summary/body, deletes stored pictures dropped from the
    text, promotes newly-referenced cache pictures to Picture rows (with a
    generated thumbnail) and purges the user's picture cache.
    """
    userid = request.session.get('userid', '')
    if userid == '':
        return HttpResponseRedirect('/index')
    title = request.POST['title']
    text = request.POST['text']
    textNoHtml = re.sub('<[^>]*?>', '', text)
    if len(textNoHtml) < 120:
        shortContent = textNoHtml + '......'
    else:
        shortContent = textNoHtml[0:120] + '......'
    # Save the updated title, summary and body.
    passageObj = Passage.objects.get(id=int(ID))
    passageObj.Title = title
    passageObj.ShortContent = shortContent
    passageObj.LongContent = text
    passageObj.save()
    # Image file names referenced by the new text.
    picSrcLs = re.findall('<img src="(.*?)">', text)
    picNameLs = []
    for pss in picSrcLs:
        if 'pictures' in pss:
            picNameLs.append(pss[49:])
    # Delete stored pictures that were removed during this edit.
    picSavedObjLs = Picture.objects.filter(PassageID=passageObj)
    picStayLs = []
    for picObj in picSavedObjLs:
        if picObj.OriginalImageName in picNameLs:
            picStayLs.append(picObj.OriginalImageName)
        else:
            bucket = Bucket('media')
            bucket.delete_object(picObj.OriginalImagePath.name)
            bucket.delete_object(picObj.CompressedImagePath.name)
            picObj.delete()
    # What remains in picNameLs are new uploads still in the cache table.
    for pic in picStayLs:
        picNameLs.remove(pic)
    for pn in picNameLs:
        cpobj = CachePicture.objects.get(ImageName=pn)
        bucket = Bucket('media')
        im = Image.open(cStringIO.StringIO(
            bucket.get_object_contents(cpobj.ImagePath.name)))
        w, h = im.size
        # Thumbnail bounded by width 66 (landscape) or height 74 (portrait).
        if w > h:
            im.thumbnail((66, (66 * h) // w))
        else:
            im.thumbnail(((w * 74) // h, 74))
        savepath = os.path.join('compressedpictures',
                                'thumnail' + cpobj.ImageName)
        fm = cpobj.ImageName.split('.')[1]
        if fm.lower() == 'jpg':
            fm = 'jpeg'
        buf = cStringIO.StringIO()
        im.save(buf, fm)
        img_data = buf.getvalue()
        bucket.put_object(savepath, img_data)
        picObj = Picture()
        picObj.PassageID = passageObj
        picObj.OriginalImageName = pn
        picObj.OriginalImagePath = cpobj.ImagePath
        picObj.CompressedImageName = 'thumnail' + cpobj.ImageName
        picObj.CompressedImagePath.name = os.path.join(
            'compressedpictures', 'thumnail' + cpobj.ImageName)
        picObj.save()
        cpobj.delete()
        buf.close()
    # Purge the current user's picture cache rows and their blobs.
    username = User.objects.get(id=userid).UserName
    deleteCachePicLs = CachePicture.objects.filter(UserName=username)
    if len(deleteCachePicLs) > 0:
        bucket = Bucket('media')
        for pic in deleteCachePicLs:
            bucket.delete_object(pic.ImagePath.name)
            pic.delete()
    return HttpResponseRedirect('/passage/' + ID)
def saveWritting(request):
    """Save a newly written passage.

    Main responsibilities: 1) persist the passage; 2) generate thumbnails
    for images used in the text; 3) move those images' records from the
    CachePicture table to the Picture table and clear the user's cache;
    4) bump the global passage counter.

    Redirects anonymous sessions to /index; otherwise redirects to the
    new passage's page.
    """
    userid = request.session.get('userid', '')
    if userid == '':
        return HttpResponseRedirect('/index')

    title = request.POST['title']
    text = request.POST['text']
    # Strip HTML tags, then build the 120-char summary. Slicing already
    # yields the whole string when it is shorter than 120, so the original
    # if/else was redundant.
    textNoHtml = re.sub('<[^>]*?>', '', text)
    shortContent = textNoHtml[0:120] + '......'

    nt = datetime.now()
    passageObj = Passage()
    writerObj = User.objects.get(id=userid)
    passageObj.UserID = writerObj
    passageObj.Title = title
    passageObj.Time = nt
    passageObj.ShortContent = shortContent
    passageObj.LongContent = text
    passageObj.save()
    # FIX: Django populates the primary key on save(), so use it directly
    # instead of re-querying by (UserID, Time) -- the re-fetch was fragile
    # if the same user saved two passages with the same timestamp.
    ID = passageObj.id

    # Image file names referenced by the text.
    # NOTE(review): the [49:] slice assumes a fixed-length URL prefix in
    # front of the file name -- confirm against the upload URL format.
    picSrcLs = re.findall('<img src="(.*?)">', text)
    picNameLs = [pss[49:] for pss in picSrcLs if 'pictures' in pss]

    # One storage client for every call below (was re-created per loop).
    bucket = Bucket('media')

    for pn in picNameLs:
        cpobj = CachePicture.objects.get(ImageName=pn)
        im = Image.open(cStringIO.StringIO(
            bucket.get_object_contents(cpobj.ImagePath.name)))
        # Fit the thumbnail into a 66-wide (landscape) or 74-high
        # (portrait) box, preserving aspect ratio.
        w, h = im.size
        if w > h:
            im.thumbnail((66, (66 * h) // w))
        else:
            im.thumbnail(((w * 74) // h, 74))
        savepath = os.path.join('compressedpictures',
                                'thumnail' + cpobj.ImageName)
        # BUG FIX: take the *last* dot-separated token as the image format
        # so names like "a.b.jpg" resolve to "jpg" (split('.')[1] gave "b").
        fm = cpobj.ImageName.rsplit('.', 1)[-1]
        if fm.lower() == 'jpg':
            fm = 'jpeg'
        buf = cStringIO.StringIO()
        try:
            im.save(buf, fm)
            bucket.put_object(savepath, buf.getvalue())
        finally:
            # Always release the buffer, even if the save/upload fails.
            buf.close()
        picObj = Picture()
        picObj.PassageID = passageObj
        picObj.OriginalImageName = pn
        picObj.OriginalImagePath = cpobj.ImagePath
        picObj.CompressedImageName = 'thumnail' + cpobj.ImageName
        picObj.CompressedImagePath.name = savepath
        picObj.save()
        cpobj.delete()

    # Purge the writer's picture cache (rows + stored files).
    # Iterating an empty queryset is a no-op, so no len() guard needed.
    for pic in CachePicture.objects.filter(UserName=writerObj.UserName):
        bucket.delete_object(pic.ImagePath.name)
        pic.delete()

    # Bump the global passage counter, creating the row on first use.
    # (Leftover debug print removed; save() merged out of both branches.)
    dataCountObjLs = DataCount.objects.all()
    if len(dataCountObjLs) == 0:
        dataCountObj = DataCount()
        dataCountObj.PassageCount = 1
    else:
        dataCountObj = dataCountObjLs[0]
        dataCountObj.PassageCount += 1
    dataCountObj.save()

    return HttpResponseRedirect('/passage/' + str(ID))
class SaeStorage(Storage):
    """Whoosh-style Storage backed by an SAE storage bucket.

    Files live under ``folder`` inside the bucket named ``bucket_name``;
    every operation round-trips through the bucket API (no local cache).
    """

    def __init__(self, bucket_name, path):
        self.bucket_name = bucket_name
        self.folder = path
        self._bucket = Bucket(bucket_name)
        # Per-file in-process locks handed out by lock().
        self.locks = {}

    def __repr__(self):
        return "%s(%r)(%r)" % (self.__class__.__name__,
                               self.bucket_name,
                               self.folder)

    def create(self):
        return self

    def destroy(self):
        # Remove all stored files, then drop the lock table.
        self.clean()
        del self.locks

    def create_file(self, name, **kwargs):
        """Return a writable StructFile that uploads its bytes on close."""
        def onclose_fn(sfile):
            self._bucket.put_object(self._fpath(name),
                                    sfile.file.getvalue())
        return StructFile(BytesIO(), name=name, onclose=onclose_fn)

    def open_file(self, name, **kwargs):
        """Return a StructFile over the object; re-uploads on close if the
        content changed.

        NOTE(review): this assumes stat_object() returns None for a
        missing object, but elsewhere in this file stat_object is wrapped
        in try/except as if it raises -- confirm against the SAE API.
        """
        if self._bucket.stat_object(self._fpath(name)) is None:
            raise NameError(name)
        content = self._bucket.get_object_contents(self._fpath(name))

        def onclose_fn(sfile):
            new_content = sfile.file.getvalue()
            if new_content != content:
                self._bucket.put_object(self._fpath(name), new_content)
        return StructFile(BytesIO(content), name=name, onclose=onclose_fn)

    def _fpath(self, fname):
        # Object key = folder prefix + file name.
        return os.path.join(self.folder, fname)

    def clean(self):
        for fname in self.list():
            self._bucket.delete_object(self._fpath(fname))

    def list(self):
        """List file names under the folder with the prefix stripped."""
        file_names = []
        for f in self._bucket.list(path=self._fpath("")):
            # +1 skips the path separator after the folder prefix.
            file_names.append(f['name'][len(self.folder) + 1:])
        return file_names

    def file_exists(self, name):
        return name in self.list()

    def file_modified(self, name):
        # Returns the backend's last-modified value ('' if absent).
        return self._bucket.stat_object(self._fpath(name))\
            .get('last_modified', '')

    def file_length(self, name):
        return int(self._bucket.stat_object(self._fpath(name))['bytes'])

    def delete_file(self, name):
        self._bucket.delete_object(self._fpath(name))

    def rename_file(self, name, newname, safe=False):
        """Rename by copy: upload under the new name, then delete the old.

        BUG FIX: the original deleted the source object *before* uploading
        the copy, so a failed put_object lost the data. Uploading first
        leaves, at worst, a duplicate behind on failure.
        """
        name_list = self.list()
        if name not in name_list:
            raise NameError(name)
        if safe and newname in name_list:
            raise NameError("File %r exists" % newname)
        content = self._bucket.get_object_contents(self._fpath(name))
        self._bucket.put_object(self._fpath(newname), content)
        self._bucket.delete_object(self._fpath(name))

    def lock(self, name):
        # Lazily create one Lock per file name and reuse it.
        if name not in self.locks:
            self.locks[name] = Lock()
        return self.locks[name]

    def temp_storage(self, name=None):
        # Scratch space is always in-memory, regardless of `name`.
        return RamStorage().create()
def write_storage(content):
    """Replace TARGET_FILE in the BUCKET_NAME bucket with *content*.

    The delete is best-effort, matching delThenSetToSAEStorage elsewhere
    in this file: a missing object must not abort the upload.
    """
    bucket = Bucket(BUCKET_NAME)
    try:
        bucket.delete_object(TARGET_FILE)
    except Exception:
        # Object did not exist (or delete failed); put_object overwrites
        # anyway, so proceed with the upload.
        pass
    bucket.put_object(TARGET_FILE, content)