def initialize(*args):
    """Build (or load from cache) the jieba trie model from SAE storage.

    args: optional single positional argument naming the dictionary object
    in the bucket; defaults to the module-level DICTIONARY.

    Side effects: populates the module globals trie, FREQ, total, min_freq
    and sets initialized. Serialized via DICT_LOCK.
    """
    global trie, FREQ, total, min_freq, initialized
    dictionary = DICTIONARY if len(args) == 0 else args[0]
    with DICT_LOCK:
        if initialized:
            return
        if trie:
            del trie
            trie = None
        t1 = time.time()
        cache_file = 'jieba.cache'
        default_dict = dictionary
        default_bucket = getattr(settings, 'STORAGE_BUCKET_NAME')
        bucket = Bucket(default_bucket)
        # Raw dictionary text, needed below if the cache cannot be used.
        cache_file_content = bucket.get_object_contents(dictionary)
        dict_stamp = bucket.stat_object(default_dict)['timestamp']
        load_from_cache_fail = True
        # BUG FIX: cache_exists was only ever assigned in the except branch,
        # so when the cache object existed but was stale (or unmarshalable),
        # the dump step below raised NameError on 'cache_exists'.
        cache_exists = True
        try:
            cache_stamp = bucket.stat_object(cache_file)['timestamp']
        except Exception:
            cache_exists = False
        else:
            # Only trust the cache if it is newer than the dictionary.
            if cache_stamp > dict_stamp:
                logger.debug("loading model from cache %s" % cache_file)
                try:
                    cache_content = bucket.get_object_contents(cache_file)
                    trie, FREQ, total, min_freq = marshal.loads(cache_content)
                    load_from_cache_fail = False
                except Exception:
                    load_from_cache_fail = True
        if load_from_cache_fail:
            # Rebuild from the raw dictionary and normalize counts to log-probs.
            trie, FREQ, total = gen_trie(cache_file_content)
            FREQ = dict([(k, log(float(v) / total)) for k, v in FREQ.iteritems()])  # normalize
            min_freq = min(FREQ.itervalues())
            logger.debug("dumping model to file cache %s" % cache_file)
            try:
                # Marshal into a uniquely named temp file, then upload.
                tmp_suffix = "." + str(random.random())
                cache_file = 'dict' + tmp_suffix + '.cache'
                cache_file = os.path.join(tempfile.gettempdir(), cache_file)
                with open(cache_file, 'wb') as temp_cache_file:
                    marshal.dump((trie, FREQ, total, min_freq), temp_cache_file)
                if cache_exists:
                    bucket.delete_object('jieba.cache')
                bucket.put_object('jieba.cache', open(cache_file, 'rb'))
            except Exception:
                logger.error("dump cache file failed.")
                logger.exception("")
        initialized = True
        logger.debug("loading model cost %s seconds." % (time.time() - t1))
        logger.debug("Trie has been built succesfully.")
def initialize(dictionary=None):
    """Build (or load from cache) the jieba prefix dict from SAE storage.

    dictionary: name of the dictionary object in the bucket; defaults to
    the module-level DICTIONARY.

    Side effects: populates the globals pfdict, FREQ, total and sets
    initialized. Serialized via DICT_LOCK.
    """
    global pfdict, FREQ, total, min_freq, initialized, DICTIONARY, DICT_LOCK
    if not dictionary:
        dictionary = DICTIONARY
    with DICT_LOCK:
        if initialized:
            return
        logger.debug("Building prefix dict from %s ..." % X_CACHE_FILE)
        t1 = time.time()
        cache_file = X_CACHE_FILE
        if settings.DEBUG:
            bucket = Bucket('xkong1946')
        else:
            bucket = Bucket()
        dict_stamp = bucket.stat_object(dictionary)['timestamp']
        # BUG FIX: stat_object returns a falsy value (None) when the object is
        # missing -- subscripting it unguarded crashed on the very first run,
        # before any cache had ever been written. Treat "no cache" as stamp 0.
        cache_stat = bucket.stat_object(cache_file)
        cache_stamp = cache_stat['timestamp'] if cache_stat else 0
        load_from_cache_fail = True
        if cache_stamp > dict_stamp:
            logger.debug("Loading model from cache %s" % cache_file)
            try:
                cf = bucket.get_object_contents(cache_file)
                pfdict, FREQ, total = marshal.loads(cf)
                # prevent conflict with old version (trie-based cache format)
                load_from_cache_fail = not isinstance(pfdict, set)
            except Exception:
                # FIX: was a bare 'print e'; route through the module logger.
                logger.exception("Loading cache failed.")
                load_from_cache_fail = True
        if load_from_cache_fail:
            dict_file_content = bucket.get_object_contents(dictionary)
            pfdict, FREQ, total = gen_pfdict(dict_file_content)
            logger.debug("Dumping model to file cache %s" % cache_file)
            try:
                import StringIO
                fd = StringIO.StringIO()
                fd.write(marshal.dumps((pfdict, FREQ, total)))
                if bucket.stat_object(X_CACHE_FILE):
                    bucket.delete_object(X_CACHE_FILE)
                bucket.put_object(X_CACHE_FILE, fd.getvalue())
            except Exception:
                logger.exception("Dump cache file failed.")
        initialized = True
        logger.debug("Loading model cost %s seconds." % (time.time() - t1))
        logger.debug("Prefix dict has been built succesfully.")
def get(self, sub_path):
    """Tornado handler: serve an object from the 'oerp' bucket as XML."""
    from sae.storage import Bucket
    bucket = Bucket('oerp')  # obtain a Bucket container from the cloud platform
    #imagebinary = meta['body']
    response = bucket.get_object_contents(sub_path, chunk_size=10)  # fetch the file as a chunk iterator
    # NOTE(review): the result of this second fetch is discarded, and its
    # argument list does not match the call above -- looks like leftover
    # code; verify whether it can be removed.
    bucket.get_object_contents(u'oerp', r'/uploadimg/' + sub_path)
    self.set_header('Content-Type', 'text/xml; charset=utf-8')
    # NOTE(review): .next() emits only the FIRST 10-byte chunk of the object;
    # confirm whether the whole body should be streamed instead.
    self.write(response.next())
def inPage(self, site, filename): try: conn = Connection(accesskey='ym51nzx10z', secretkey='h0kxmzj2ly13jjj1m0jjly41li1wimizzz2w2m32', retries=3) bucket = Bucket(site, conn) page = bucket.get_object_contents(filename) except Exception, e: print e
def inPageAmazon(self, params): try: conn = Connection(accesskey='ym51nzx10z', secretkey='h0kxmzj2ly13jjj1m0jjly41li1wimizzz2w2m32', retries=3) bucket = Bucket('amazon', conn) page = bucket.get_object_contents(params['category'] + '/' + params['in_page']) except Exception, e: print e
def exportImport(filename,tp): result={} # for client debug if settings.DEBUG: data = xlrd.open_workbook(settings.MEDIA_ROOT+filename) # for sae else: bucket = Bucket('resources') obj = bucket.get_object_contents(filename) data=xlrd.open_workbook(file_contents=obj) table = data.sheets()[0] # check the column ncols=table.ncols nrows=table.nrows # for student if (tp==0 and (not ncols==11)) or (tp==1 and (not ncols==9)): result['status']='failured' result['tip']='excel列数不对' elif nrows<2: result['status']='failured' result['tip']='至少需要一条记录' else: statistic=executeImport(table,tp) result['status']='success' result['tip']='导入成功,共 %d 人,成功导入 %d 人,跳过 %d 人' \ % (statistic['sum'],statistic['count'],statistic['existed']) result['usernames']=statistic['usernames'] # delete the uploaded temp file # for client debug if settings.DEBUG: os.remove(settings.MEDIA_ROOT+filename) # for sae else: bucket.delete_object(filename) return result
def ReadZipFile(filename):
    """Restore backed-up data from storage into kvdb.

    filename: name of the encrypted backup object in the 'backup' bucket.

    Object layout: 16 bytes holding the AES-CBC IV (itself ECB-encrypted
    with the same key), followed by the CBC-encrypted, padded zip archive.
    Every archive member is written into kvdb under its member name.
    """
    bucket = Bucket('backup')
    # print(filename)
    CryptData = bucket.get_object_contents(filename)
    # print(CryptData)
    # -FileBuffer.close()
    key = config.keyDataBackUp
    cipher = AES.new(key, AES.MODE_ECB)
    # First 16 bytes: the CBC IV, decrypted with ECB mode.
    iv = cipher.decrypt(CryptData[:16])
    # print(str(iv))
    cipher = AES.new(key, AES.MODE_CBC, iv)
    bytebuffer = cipher.decrypt(CryptData[16:])
    # The last byte stores the padding length (PKCS#7-style); strip it.
    lendata = ord(bytebuffer[-1])
    FileBuffer = io.BytesIO(bytebuffer[:-lendata])
    zfile = zipfile.ZipFile(FileBuffer,mode='r')
    namelist = zfile.namelist()
    for name in namelist:
        bytedata = zfile.read(name)
        # Members are assumed to be utf-8 text -- TODO confirm.
        kv.set(name,bytedata.decode("utf-8"))
    return u"数据已还原"
def get(self, request, *args, **kwargs):
    """Serve the stored object named by the 'filekey' URL kwarg as a
    file-download response."""
    from utils.kvdb.bucket import Bucket as SBucket
    key = kwargs.get('filekey')
    store = SBucket()
    payload = store.get_object_contents(key)
    response = HttpResponse(payload)
    response['Content-Disposition'] = 'attachment; filename={}'.format(key)
    return response
def edit0(request):
    """Django view: edit a photo's name/comment/mood.

    POST with 'correct' applies the edit (renaming the stored objects under
    'stati/' and 'manage/' when the title changed); GET with 'atitle' loads
    the current values into the edit form. Renders editt.html.
    """
    pname = pcomment = pmood = newname = 0
    from sae.storage import Bucket
    bucket = Bucket('abc')
    if request.POST:
        if request.POST.has_key('correct'):
            if request.GET.has_key('atitle'):
                pname = request.GET['atitle']
            # The 't' table is used as a one-row scratch store for the title
            # being edited -- drain it to recover pname, then clear it.
            pn = t.objects.all()
            if (len(pn)!= 0):
                pname = pn[0].title
                for i in pn:
                    i.delete()
            we = imagess.objects.filter(title = pname)
            if (len(we)!= 0):
                # Re-encode the full-size image as jpeg.
                img = bucket.get_object_contents('stati/'+pname)
                im = Image.open(StringIO.StringIO(img))
                imgout = StringIO.StringIO()
                im.save(imgout,"jpeg")
                img_data = imgout.getvalue()
                we[0].title = request.POST['cname']+'.jpg'
                newname = we[0].title
                if (newname != pname):
                    # Title changed: write the image and its 128x128 preview
                    # under the new name, then delete the old objects.
                    ne = t(title = newname)
                    ne.save()
                    bucket.put_object('stati/'+newname, img_data)
                    im = Image.open(StringIO.StringIO(img))
                    out = im.resize((128, 128))
                    imgout = StringIO.StringIO()
                    out.save(imgout,"jpeg")
                    img_data = imgout.getvalue()
                    bucket.put_object('manage/'+newname, img_data)
                    bucket.delete_object('manage/'+pname)
                    bucket.delete_object('stati/'+pname)
                    pname = newname
                we[0].comment = request.POST['ccomment']
                we[0].mood = request.POST['cmood']
                we[0].save()
                pname = request.POST['cname']+'.jpg'
                pcomment = request.POST['ccomment']
                pmood = request.POST['cmood']
        elif request.GET.has_key('atitle'):
            # Initial load of the edit form: remember the title and read the
            # current comment/mood for display.
            if (pname == 0):
                pname = request.GET['atitle']
                p = t(title = pname)
                p.save()
            we = imagess.objects.filter(title = pname)
            if (len(we)!= 0):
                pcomment = we[0].comment
                pmood = we[0].mood
    # Strip the '.jpg' extension before rendering.
    if (pname!=0):
        pname = pname[:-4]
    return render_to_response('editt.html',{'pname':pname,'newname':newname, \
        'pmood': pmood, 'pcomment':pcomment},context_instance=RequestContext(request))
def STT(self, filePath):
    """Speech-to-text: read an audio file (amr format) from the 'wechat'
    bucket and return the recognized text, or '' on any failure."""
    try:
        bucket = Bucket('wechat')
        result = self.__client.asr(bucket.get_object_contents(filePath), self.__format, self.__rate, {'cuid': self.__cuid, 'dev_pid': self.__dev_pid})
        if 'error_code' in result:
            # recognition failed
            raise Exception("error_code: %s, error_msg: %s" % (result['error_code'], result['error_msg']))
        # The API provides 1-5 candidate results (utf-8); take the first.
        return result['result'][0]
    except Exception, ex:
        print 'BaiduSTT.STT error:%s' % (ex)
        return ''
def thumbnail(request,filename):
    """Store the uploaded image under image/<filename> and write a
    160x120 thumbnail under thumbnail/<filename>."""
    store = Bucket('upload')
    store.put()  # ensure the bucket exists
    source_key = "image/"+filename
    store.put_object(source_key, request.FILES['file'])
    raw = store.get_object_contents(source_key)
    img = Image.open(StringIO(raw))
    img.thumbnail((160,120),Image.ANTIALIAS)
    buf = StringIO()
    img.save(buf, 'jpeg')
    store.put_object('thumbnail/'+filename, buf.getvalue())
    buf.close()
def thumbnail(request, filename):
    """Persist the uploaded image to image/<filename>, then generate and
    store a 160x120 thumbnail at thumbnail/<filename>."""
    bucket = Bucket('upload')
    bucket.put()  # make sure the bucket itself exists
    bucket.put_object("image/" + filename, request.FILES['file'])
    data = bucket.get_object_contents("image/" + filename)
    picture = Image.open(StringIO(data))
    picture.thumbnail((160, 120), Image.ANTIALIAS)
    out = StringIO()
    picture.save(out, 'jpeg')
    bucket.put_object('thumbnail/' + filename, out.getvalue())
    out.close()
def get(self, request, *args, **kwargs):
    """Copy the jieba dictionary files from SAE storage into the KV store."""
    name = getattr(settings, "SAE_STORAGE_BUCKET_NAME", 'xkong1946')
    source = SaeBucket(name)
    target = Bucket()
    copied = []
    for fname in ['dict.txt', 'jieba.cache']:
        payload = source.get_object_contents(fname)
        target.save(fname, payload)
        copied.append(fname)
    # or return json
    return HttpResponse("done")
def cron_task():
    """Cron entry point: pull the latest AQI reading for every city listed
    in cities.json and insert it into MySQL."""
    # Load the city list from the 'citylist' bucket.
    listing = Bucket('citylist').get_object_contents('cities.json')
    city_defs = json.loads(listing)
    puller = DataPuller()
    db = Mysql()
    for entry in city_defs:
        reading = puller.pull_data(int(entry['cid']))
        # Skip cities with no data (falsy reading).
        if reading:
            db.insert_data(entry['city'], int(reading))
def generate_city_list():
    """Render the city list stored in cities.json as an HTML <ul> of links.

    Returns the assembled HTML string.
    """
    values = []
    #read citylist
    BUCKET = 'citylist'
    # read citylist
    bucket = Bucket(BUCKET)
    citylist_content = bucket.get_object_contents('cities.json')
    citylist = json.loads(citylist_content)
    values.append('<ul>')
    for city in citylist:
        # BUG FIX: the anchor tag was never closed (missing </a>), producing
        # invalid, badly-nested HTML.
        line = '''<li><a href="%s/%s">%s</a></li>''' % ('http://aqidatapuller.applinzi.com', city['city'].lower(), city['city'])
        values.append(line)
    values.append('</ul>')
    return ''.join(values)
def get(self, request, *args, **kwargs):
    """Copy the jieba dictionary files from SAE storage into the KV store
    and report which files were transferred."""
    # Uncomment the following two lines to allow initial.
    done = {'done': '0', 'ret': '0'}
    # return self.render_to_response(done)
    name = getattr(settings, "SAE_STORAGE_BUCKET_NAME", 'xkong1946')
    source = SaeBucket(name)
    target = Bucket()
    copied = []
    for fname in ['dict.txt', 'jieba.cache']:
        payload = source.get_object_contents(fname)
        target.save(fname, payload)
        copied.append(fname)
    done = {'done': '0', 'ret': copied}
    return self.render_to_response(done)
def ImageClassify(self, filePath): try: """ 读取图片 """ bucket = Bucket('wechat') image = bucket.get_object_contents(filePath) """ 调用通用物体识别 """ self.__client.advancedGeneral(image) """ 如果有可选参数 """ options = {} options["baike_num"] = 0 """ 带参数调用通用物体识别 """ info = self.__client.advancedGeneral(image, options) #print info if 'error_code' in info: # 识别失败 raise Exception("error_code: %s, error_msg: %s" % (info['error_code'], info['error_msg'])) print "image:%s" % (filePath) for item in info['result']: print 'root:%s,keyword:%s' % (item['root'], item['keyword']) except Exception, ex: print 'BaiduImageClassify.ImageClassify error:%s' % (ex)
class SaeStorageSaver:
    """Thin convenience wrapper around an SAE storage Bucket: store, fetch,
    list and delete objects, plus a shortcut for listing backups."""

    def __init__(self, key):
        # One Bucket instance per saver, named by *key*.
        self.bucket = Bucket(key)

    def StoreTxtFile(self, path, content):
        """Store text content at *path*."""
        self.bucket.put_object(path, content)

    def StoreBinFile(self, path, content):
        """Store binary content at *path* (same mechanism as text)."""
        self.bucket.put_object(path, content)

    def GetObjectByPath(self, path):
        """Return the contents of the object at *path*."""
        return self.bucket.get_object_contents(path)

    def GetItemUnder(self, path):
        """Return the bucket listing under *path* as a list."""
        return list(self.bucket.list(path))

    def GetBackupList(self):
        """Return all items under the global backup path."""
        return self.GetItemUnder(g_backup_path)

    def DeleteObject(self, obj):
        """Delete the named object from the bucket."""
        self.bucket.delete_object(obj)
def start(request):
    """Django view: handle a photo upload.

    On POST with 'save' and an uploaded file: create the image record,
    generate a 128x128 preview under 'manage/', and, when EXIF GPS data is
    present, store latitude/longitude on the record. Renders start.html.
    """
    url = 0
    from sae.storage import Bucket
    bucket = Bucket('abc')
    if request.POST:
        if request.POST.has_key('save'):
            post = request.POST
            if request.FILES:
                f = request.FILES['file']
                new_img = imagess(picture = f, comment = post['writecomment'], mood = post['writemood'],\
                    title = str(f),lat = 0,lon = 0)
                new_img.save()
                # Read back the stored original and build a 128x128 preview.
                # NOTE(review): assumes the model's storage wrote the upload
                # under 'stati/<name>' -- confirm against the model config.
                img = bucket.get_object_contents('stati/'+f.name)
                im = Image.open(StringIO.StringIO(img))
                out = im.resize((128, 128))
                imgout = StringIO.StringIO()
                out.save(imgout,"jpeg")
                img_data = imgout.getvalue()
                bucket.put_object('manage/'+f.name, img_data)
                exif = get_exif_data(img)
                if exif.has_key('GPSInfo'):
                    # Convert EXIF degrees/minutes/seconds to decimal degrees.
                    # NOTE(review): seconds are divided by 100 -- presumably the
                    # camera stores them scaled; verify against real EXIF data.
                    w1 = exif['GPSInfo'][2][0][0]
                    w2 = exif['GPSInfo'][2][1][0]
                    w3 = exif['GPSInfo'][2][2][0]*1.0/100
                    lat = w1+w2*1.0/60 + w3*1.0/60*1.0/60
                    j1 = exif['GPSInfo'][4][0][0]
                    j2 = exif['GPSInfo'][4][1][0]
                    j3 = exif['GPSInfo'][4][2][0]*1.0/100
                    lon = j1+j2*1.0/60 + j3*1.0/60*1.0/60
                    new = imagess.objects.filter(title = f.name)
                    if (len(new) != 0):
                        new[0].lat = lat
                        new[0].lon = lon
                        new[0].save()
    return render_to_response('start.html',\
        context_instance=RequestContext(request))
def lottery_confirmed(request):
    """Django view: confirm a lottery win and register a claim phone number.

    POST: micro_id (base64), phone_number. Looks up the winner by the sha1
    of the decoded id, records the phone number (one per prize), decrements
    the matching prize counter in the shared inventory (local JSON file in
    debug, 'nkumstc' bucket object on SAE), and renders the confirmation
    page. Any failure falls into a broad catch-all response.
    """
    try:
        micro_id64 = request.POST['micro_id']
        micro_id = int(base64.b64decode(micro_id64))
        hash_id = hashlib.sha1(str(micro_id)).hexdigest()
        try:
            winner = Lottery.objects.get(hash_id=hash_id)
            if request.POST['phone_number']:
                #check phone number format
                phone_number = request.POST['phone_number']
                if check_input(str(phone_number), 'phoneNumber'):
                    # Each prize may only be claimed once.
                    if winner.phone_number != None:
                        return HttpResponse("该奖品已经有申奖号码了")
                    winner.phone_number = phone_number
                    winner.save()
                    ##update lottery_remain_information
                    # Debug mode == not running on SAE (no APP_NAME env var).
                    debug = not environ.get("APP_NAME", "")
                    # #read present file
                    if debug:
                        jsonfile = open(JSON_FILE_PATH)
                        data = json.load(jsonfile)
                    else:
                        bucket = Bucket("nkumstc")
                        data = json.loads(
                            bucket.get_object_contents(
                                JSON_SERVER_OBJECT_NAME))
                    special_read = data["special"]
                    first_read = data["first"]
                    second_read = data["second"]
                    third_read = data["third"]
                    fourth_read = data["fourth"]
                    fifth_read = data["fifth"]
                    rate = data["rate"]
                    #update: decrement the counter for the won prize level
                    if winner.present_level == "special":
                        special_read = special_read - 1
                    elif winner.present_level == "first":
                        first_read = first_read - 1
                    elif winner.present_level == "second":
                        second_read = second_read - 1
                    elif winner.present_level == "third":
                        third_read = third_read - 1
                    elif winner.present_level == "fourth":
                        fourth_read = fourth_read - 1
                    elif winner.present_level == "fifth":
                        fifth_read = fifth_read - 1
                    else:
                        return HttpResponse("bug!")
                    if debug:
                        # write to present file
                        jsonfile = open(JSON_FILE_PATH, "w")
                        jsonfile.write(
                            json.dumps({
                                "special": special_read,
                                "first": first_read,
                                "second": second_read,
                                "third": third_read,
                                "rate": rate,
                                "fourth": fourth_read,
                                "fifth": fifth_read
                            }))
                    else:
                        # NOTE(review): read-modify-write on a shared object
                        # with no locking -- concurrent claims can lose updates.
                        bucket = Bucket("nkumstc")
                        content = {
                            "special": special_read,
                            "first": first_read,
                            "second": second_read,
                            "third": third_read,
                            "rate": rate,
                            "fourth": fourth_read,
                            "fifth": fifth_read
                        }
                        bucket.put_object(JSON_SERVER_OBJECT_NAME, json.dumps(content))
                else:
                    # Invalid phone number format: re-render the confirm form.
                    award_notice = get_template(
                        'lottery_beautyOfCoding/award_confirm.html')
                    information = {
                        'level': winner.present_level,
                        'micro_id': micro_id64,
                        'no_phoneNumber': True
                    }
                    award_noticeHtml = award_notice.render(
                        Context(information))
                    return HttpResponse(award_noticeHtml)
            else:
                # No phone number supplied: re-render the confirm form.
                award_notice = get_template(
                    'lottery_beautyOfCoding/award_confirm.html')
                information = {
                    'level': winner.present_level,
                    'micro_id': micro_id64,
                    'no_phoneNumber': True
                }
                award_noticeHtml = award_notice.render(Context(information))
                return HttpResponse(award_noticeHtml)
        except:
            return HttpResponse("同学别刷票了 (╯▔皿▔)╯")
    except:
        return HttpResponse("您可能使用的是windows手机,请换成其他手机或使用电脑浏览器抽奖,如有问题请于俱乐部联系")
    #final result return
    # Reached when the claim succeeded and control fell out of both try
    # blocks without returning: render the final "confirmed" page.
    award_confirmed = get_template(
        'lottery_beautyOfCoding/award_confirmed.html')
    award_confirmedHtml = award_confirmed.render(Context())
    return HttpResponse(award_confirmedHtml)
class SaeStorage(Storage):
    """Storage backend persisted on SAE (bucket-backed file store).

    Files live in one bucket under a folder prefix; file content is kept
    whole per object (no partial writes). Locks are in-process only.
    """
    def __init__(self, bucket_name, path):
        self.bucket_name = bucket_name
        # Folder prefix inside the bucket under which all files are kept.
        self.folder = path
        self._bucket = Bucket(bucket_name)
        # In-process lock registry, keyed by file name.
        self.locks = {}

    def __repr__(self):
        return "%s(%r)(%r)" % (self.__class__.__name__, self.bucket_name, self.folder)

    def create(self):
        """No-op for bucket storage; returns self for chaining."""
        return self

    def destroy(self):
        # Remove all files
        self.clean()
        # REMOVE locks
        del self.locks

    def create_file(self, name, **kwargs):
        """Open a new file for writing; content is uploaded on close."""
        def onclose_fn(sfile):
            self._bucket.put_object(self._fpath(name), sfile.file.getvalue())
        f = StructFile(BytesIO(), name=name, onclose=onclose_fn)
        return f

    def open_file(self, name, **kwargs):
        """Open an existing file; raises NameError if it does not exist.
        Changed content is uploaded back on close (unchanged content is not
        re-uploaded)."""
        # stat_object returns None for a missing object.
        if self._bucket.stat_object(self._fpath(name)) is None:
            raise NameError(name)
        content = self._bucket.get_object_contents(self._fpath(name))
        def onclose_fn(sfile):
            new_content = sfile.file.getvalue()
            # Only write back if the buffer actually changed.
            if new_content != content:
                self._bucket.put_object(self._fpath(name), new_content)
        return StructFile(BytesIO(content), name=name, onclose=onclose_fn)

    def _fpath(self, fname):
        # Full object path inside the bucket.
        return os.path.join(self.folder, fname)

    def clean(self):
        """Delete every file under this storage's folder."""
        files = self.list()
        for fname in files:
            self._bucket.delete_object(self._fpath(fname))

    def list(self):
        """Return the file names under the folder (prefix stripped)."""
        file_generate = self._bucket.list(path=self._fpath(""))
        file_names = []
        for f in file_generate:
            # Strip '<folder>/' from the returned object names.
            file_names.append(f['name'][len(self.folder)+1:])
        return file_names

    def file_exists(self, name):
        return name in self.list()

    def file_modified(self, name):
        """Return the object's last-modified value ('' when absent)."""
        return self._bucket.stat_object(self._fpath(name))\
            .get('last_modified', '')

    def file_length(self, name):
        """Return the object's size in bytes."""
        return int(self._bucket.stat_object(self._fpath(name))['bytes'])

    def delete_file(self, name):
        self._bucket.delete_object(self._fpath(name))

    def rename_file(self, name, newname, safe=False):
        """Rename by copy+delete; with safe=True refuses to overwrite."""
        name_list = self.list()
        if name not in name_list:
            raise NameError(name)
        if safe and newname in name_list:
            raise NameError("File %r exists" % newname)
        content = self._bucket.get_object_contents(self._fpath(name))
        self._bucket.delete_object(self._fpath(name))
        self._bucket.put_object(self._fpath(newname), content)

    def lock(self, name):
        """Return a per-name in-process Lock (not shared across processes)."""
        if name not in self.locks:
            self.locks[name] = Lock()
        return self.locks[name]

    def temp_storage(self, name=None):
        """Temporary storage is backed by RAM, not the bucket."""
        temp_store = RamStorage()
        return temp_store.create()
class SAEStorage(LocalStorage):
    """Article storage backed by an SAE bucket: lists, loads, saves and
    deletes markdown articles stored as bucket objects."""

    def __init__(self, bucket):
        from sae.storage import Bucket
        self.bucket = Bucket(bucket)
        bucket_stat = self.bucket.stat()
        #self.last_mark = bucket_stat.objects + \
        #    bucket_stat.bytes
        # Change marker used by has_last(); starts at 0 so the first
        # has_last() call reports a change.
        self.last_mark = 0

    def list(self):
        """Return metadata dicts for all articles, newest first."""
        articles = self.bucket.list()
        # Keep only objects whose names look like articles.
        filter_func = lambda x : self.is_article(x.name)
        articles = filter(filter_func, articles)
        articles = self._sorted_files(articles)
        rst = []
        for article in articles:
            article_name = article.name
            content = self.bucket.get_object_contents(article_name)
            content = content.decode('utf-8')
            art_meta = self._get_metadatas(content)
            art_meta['filename'] = article_name
            # base64 needs bytes: encode unicode names before urlsafe encoding.
            if type(article.name) == unicode:
                adjust_name = article_name.encode('utf-8')
            else :
                adjust_name = article_name
            art_meta['filename_url_encode'] = \
                base64.urlsafe_b64encode(adjust_name)
            # Fallbacks: object mtime for a missing date, and a slug derived
            # from the file name (extension stripped, '_' -> ' ').
            if not art_meta['date']:
                art_meta['date'] = article.last_modified
            if not art_meta["slug"]:
                art_meta['slug'] = article_name.rpartition(".")[0]
                art_meta['slug'] = art_meta['slug'].replace("_", " ")
            rst.append(art_meta)
        return rst

    def get(self, article, cut = -1):
        """Load an article and render it from markdown; cut >= 0 truncates
        the source to that many characters and appends an ellipsis."""
        content = self.bucket.get_object_contents(article)
        content = content.decode('utf-8')
        content = unicode(content)
        mdparse = MDParse()
        if cut != -1:
            content = content[:cut]
            content += "\n....."
        content = self._clean_metadatas(content)
        return mdparse.parse(content)

    def save(self, name, content):
        self.bucket.put_object(name, content)

    def delete(self, name):
        self.bucket.delete_object(name)

    def update_time(self, article):
        """Return the article's modification time as 'YYYY-MM-DD HH:MM:SS'."""
        stat = self.bucket.stat_object(article)
        tmp = float(stat.timestamp)
        d = datetime.datetime.fromtimestamp(tmp)
        return d.strftime("%Y-%m-%d %H:%M:%S")

    def has_last(self):
        """Return True when the bucket changed since the previous call
        (compared via object count + total bytes); updates the marker."""
        bucket_stat = self.bucket.stat()
        curr_mark = bucket_stat.objects + bucket_stat.bytes
        res = self.last_mark == curr_mark
        self.last_mark = curr_mark
        return not res

    def _sorted_files(self, articles):
        # Sort articles newest-first by object timestamp.
        # NOTE(review): one stat_object call per article per sort -- O(n)
        # network round-trips; consider using list() metadata instead.
        def key_func(x):
            stat = self.bucket.stat_object(x.name)
            return float(stat.timestamp)
        return sorted(articles, key=key_func, reverse=True)
def saveWritting(request):
    # The most important view function of the blog app. Main duties:
    # 1) save the post; 2) generate thumbnails; 3) move image records from
    # the cache table to the source-image table, then clear the cache table.
    userid = request.session.get('userid', '')
    if userid == '':
        return HttpResponseRedirect('/index')
    title = request.POST['title']
    text = request.POST['text']
    # Strip HTML tags to build the plain-text summary.
    textNoHtml = re.sub('<[^>]*?>','',text)
    #print len(textNoHtml)
    #print text
    #print textNoHtml
    if len(textNoHtml) < 120:
        shortContent = textNoHtml + '......'
    else:
        shortContent = textNoHtml[0:120] + '......'
    nt = datetime.now()
    # Save the post data into the Passage table.
    passageObj = Passage()
    writerObj = User.objects.get(id = userid)
    passageObj.UserID = writerObj
    passageObj.Title = title
    passageObj.Time = nt
    passageObj.ShortContent = shortContent
    passageObj.LongContent = text
    passageObj.save()
    # Move all cached image records to the source-image table and build
    # thumbnails for them.
    picSrcLs = re.findall('<img src="(.*?)">',text)
    picNameLs = []
    for pss in picSrcLs:
        # NOTE(review): the magic slice [49:] assumes a fixed URL prefix
        # length before the file name -- verify against the media URL.
        if 'pictures'in pss:
            picNameLs.append(pss[49:])
        else:
            continue
    # Holds the new article's ID for the final redirect.
    ID = 0
    passageObj = Passage.objects.get(UserID = writerObj, Time = nt)
    ID = passageObj.id
    for pn in picNameLs:
        cpobj = CachePicture.objects.get(ImageName = pn)
        #print 'sss',cpobj.ImagePath.name
        bucket = Bucket('media')
        im = Image.open(cStringIO.StringIO(bucket.get_object_contents(cpobj.ImagePath.name)))
        #im = Image.open(os.path.join(settings.MEDIA_ROOT, cpobj.ImagePath.name))
        w, h = im.size
        # Thumbnail: cap the longer side (66 wide for landscape, 74 tall
        # for portrait), preserving aspect ratio.
        if w > h:
            im.thumbnail((66, (66*h)//w))
        else:
            im.thumbnail(((w*74)//h, 74))
        #savepath = os.path.join(settings.MEDIA_ROOT, 'compressedpictures' ,'thumnail'+cpobj.ImageName)
        savepath = os.path.join('compressedpictures' ,'thumnail'+cpobj.ImageName)
        # PIL expects 'jpeg', not the 'jpg' extension.
        fm = cpobj.ImageName.split('.')[1]
        if fm.lower() == 'jpg':
            fm = 'jpeg'
        buf = cStringIO.StringIO()
        im.save(buf, fm)
        img_data = buf.getvalue()
        bucket.put_object(savepath, img_data)
        #im.save(savepath, fm)
        picObj = Picture()
        picObj.PassageID = passageObj
        picObj.OriginalImageName = pn
        picObj.OriginalImagePath = cpobj.ImagePath
        picObj.CompressedImageName = 'thumnail'+cpobj.ImageName
        picObj.CompressedImagePath.name = os.path.join('compressedpictures' ,'thumnail'+cpobj.ImageName)
        picObj.save()
        cpobj.delete()
        buf.close()
    # Delete the cache-table rows and their corresponding images.
    deleteCachePicLs = CachePicture.objects.filter(UserName = writerObj.UserName)
    if len(deleteCachePicLs) > 0:
        bucket = Bucket('media')
        for pic in deleteCachePicLs:
            #os.remove(os.path.join(settings.MEDIA_ROOT, pic.ImagePath.name))
            bucket.delete_object(pic.ImagePath.name)
            pic.delete()
    # Increment the global article counter by 1.
    dataCountObjLs = DataCount.objects.all()
    if len(dataCountObjLs) == 0:
        dataCountObj = DataCount()
        dataCountObj.PassageCount = 1
        dataCountObj.save()
    else:
        dataCountObj = dataCountObjLs[0]
        dataCountObj.PassageCount += 1
        print dataCountObj.PassageCount
        dataCountObj.save()
    #return HttpResponseRedirect('/index')
    return HttpResponseRedirect('/passage/'+ str(ID))
def saveChange(request, ID):
    """Django view: save an edited post.

    Updates title/summary/content of passage *ID*, deletes images removed
    during editing, promotes newly added images from the cache table (with
    thumbnails), clears the user's cache rows, then redirects to the post.
    """
    userid = request.session.get('userid', '')
    if userid == '':
        return HttpResponseRedirect('/index')
    title = request.POST['title']
    text = request.POST['text']
    # Strip HTML tags to build the plain-text summary.
    textNoHtml = re.sub('<[^>]*?>','',text)
    if len(textNoHtml) < 120:
        shortContent = textNoHtml + '......'
    else:
        shortContent = textNoHtml[0:120] + '......'
    # Save the updated title, content and summary.
    passageObj = Passage.objects.get(id = int(ID))
    passageObj.Title = title
    passageObj.ShortContent = shortContent
    passageObj.LongContent = text
    passageObj.save()
    # picSrcLs: paths of all images referenced in the text.
    picSrcLs = re.findall('<img src="(.*?)">',text)
    # picNameLs: file names of all images referenced in the text.
    picNameLs = []
    for pss in picSrcLs:
        # NOTE(review): the magic slice [49:] assumes a fixed URL prefix
        # length -- verify against the media URL.
        if 'pictures'in pss:
            picNameLs.append(pss[49:])
        else:
            continue
    # picSavedObjLs: image records already stored for this passage.
    picSavedObjLs = Picture.objects.filter(PassageID = passageObj)
    # picStayLs: names of images that are still referenced by the text.
    picStayLs = []
    # Detect images that were deleted from the text during editing and
    # remove their stored objects and records.
    for picObj in picSavedObjLs:
        if picObj.OriginalImageName in picNameLs:
            picStayLs.append(picObj.OriginalImageName)
            continue
        else:
            bucket = Bucket('media')
            bucket.delete_object(picObj.OriginalImagePath.name)
            bucket.delete_object(picObj.CompressedImagePath.name)
            #os.remove(os.path.join(settings.MEDIA_ROOT, picObj.OriginalImagePath.name))
            #os.remove(os.path.join(settings.MEDIA_ROOT, picObj.CompressedImagePath.name))
            picObj.delete()
    # Remove the names already stored in Picture; what remains must still
    # be sitting in the CachePicture table.
    for pic in picStayLs:
        picNameLs.remove(pic)
    for pn in picNameLs:
        cpobj = CachePicture.objects.get(ImageName = pn)
        #print 'sss',cpobj.ImagePath.name
        bucket = Bucket('media')
        im = Image.open(cStringIO.StringIO(bucket.get_object_contents(cpobj.ImagePath.name)))
        #im = Image.open(os.path.join(settings.MEDIA_ROOT, cpobj.ImagePath.name))
        w, h = im.size
        # Thumbnail: cap the longer side, preserving aspect ratio.
        if w > h:
            im.thumbnail((66, (66*h)//w))
        else:
            im.thumbnail(((w*74)//h, 74))
        #savepath = os.path.join(settings.MEDIA_ROOT, 'compressedpictures' ,'thumnail'+cpobj.ImageName)
        savepath = os.path.join('compressedpictures' ,'thumnail'+cpobj.ImageName)
        # PIL expects 'jpeg', not the 'jpg' extension.
        fm = cpobj.ImageName.split('.')[1]
        if fm.lower() == 'jpg':
            fm = 'jpeg'
        buf = cStringIO.StringIO()
        im.save(buf, fm)
        img_data = buf.getvalue()
        bucket.put_object(savepath, img_data)
        #im.save(savepath, fm)
        picObj = Picture()
        picObj.PassageID = passageObj
        picObj.OriginalImageName = pn
        picObj.OriginalImagePath = cpobj.ImagePath
        picObj.CompressedImageName = 'thumnail'+cpobj.ImageName
        picObj.CompressedImagePath.name = os.path.join('compressedpictures' ,'thumnail'+cpobj.ImageName)
        picObj.save()
        cpobj.delete()
        buf.close()
    # Delete the cache-table rows and their corresponding images.
    username = User.objects.get(id = userid).UserName
    deleteCachePicLs = CachePicture.objects.filter(UserName = username)
    if len(deleteCachePicLs) > 0:
        bucket = Bucket('media')
        for pic in deleteCachePicLs:
            #os.remove(os.path.join(settings.MEDIA_ROOT, pic.ImagePath.name))
            bucket.delete_object(pic.ImagePath.name)
            pic.delete()
    return HttpResponseRedirect('/passage/'+ID)
import jieba
import os
from sae.storage import Bucket
from django.conf import settings
try:
    from analyzer import ChineseAnalyzer
except ImportError:
    pass

# SAE storage: load the IDF table shipped as 'idf.txt' in the configured bucket.
default_bucket = getattr(settings, 'STORAGE_BUCKET_NAME')
bucket = Bucket(default_bucket)
content = bucket.get_object_contents('idf.txt')

# Parse "word freq" lines into an in-memory IDF dictionary.
# NOTE(review): a trailing empty line in idf.txt would break the two-way
# unpack below -- verify the file format.
idf_freq = {}
lines = content.split('\n')
for line in lines:
    word,freq = line.split(' ')
    idf_freq[word] = float(freq)
# Median of all IDF values (Python 2 integer division picks the middle index).
median_idf = sorted(idf_freq.values())[len(idf_freq)/2]
# English stop words excluded from keyword extraction.
stop_words= set([
    "the","of","is","and","to","in","that","we","for","an","are","by","be","as","on","with","can","if","from","which","you","it","this","then","at","have","all","not","one","has","or","that"
])

def extract_tags(sentence,topK=20):
    # Cut the sentence and collect candidate keywords, skipping tokens
    # shorter than 2 characters and stop words.
    # NOTE(review): this function appears truncated at the chunk boundary --
    # the frequency counting / topK selection that should follow the loop
    # is not visible here.
    words = jieba.cut(sentence)
    freq = {}
    for w in words:
        if len(w.strip())<2:
            continue
        if w.lower() in stop_words:
            continue
def manage(request):
    """Django view: photo management page.

    GET 'ctitle' deletes a photo; GET 'stitle' promotes a beautified image
    from 'meihua/' into 'stati/' plus a 128x128 preview in 'manage/';
    GET 'search' filters the gallery by comment. Renders manage.html.
    """
    from sae.storage import Bucket
    bucket = Bucket('abc')
    # Clear the scratch title table used by the edit flow.
    sa = t.objects.all()
    for x in sa:
        x.delete()
    if request.GET:
        #delete
        if request.GET.has_key('ctitle'):
            name = request.GET["ctitle"]
            bucket.delete_object('manage/'+name)
            bucket.delete_object('stati/'+name)
            ta = imagess.objects.filter(title = name)
            if (len(ta)!= 0):
                for i in ta:
                    i.delete()
        if request.GET.has_key('stitle'):
            #save beautify
            name = request.GET["stitle"]
            if (name != '0'):
                # Beautified names carry a 2-character prefix over the
                # original title -- strip it to find the source record.
                new_name = name[2:]
                new_comment = '..'
                new_mood = '..'
                ta = imagess.objects.filter(title = new_name)
                if (len(ta)!= 0):
                    new_comment = ta[0].comment
                    new_mood = ta[0].mood
                    new_lat = ta[0].lat
                    new_lon = ta[0].lon
                # NOTE(review): new_lat/new_lon are unbound when no source
                # record matched -- this would raise NameError; verify.
                new_photo = imagess(picture = 0, comment = new_comment, mood = new_mood, \
                    title = name, lat = new_lat, lon = new_lon)
                new_photo.save()
                # Re-encode the beautified image as jpeg into 'stati/' and
                # store a 128x128 preview under 'manage/'.
                obj = bucket.get_object_contents('meihua/'+name)
                im = Image.open(StringIO.StringIO(obj))
                imgout = StringIO.StringIO()
                im.save(imgout,"jpeg")
                img_data = imgout.getvalue()
                bucket.put_object('stati/'+name, img_data)
                im = Image.open(StringIO.StringIO(obj))
                out = im.resize((128, 128))
                imgout = StringIO.StringIO()
                out.save(imgout,"jpeg")
                img_data = imgout.getvalue()
                bucket.put_object('manage/'+name, img_data)
    # Display section: build [title, last_modified, mood, comment] rows
    # from the preview listing.
    A = []
    a = bucket.list(path='manage/')
    for i in a:
        dic = []
        s = i.name.split('/')[-1]
        dic.append(s)
        dic.append(i.last_modified)
        ta = imagess.objects.filter(title = s)
        if (len(ta)!= 0):
            dic.append(ta[0].mood)
            dic.append(ta[0].comment)
        A.append(dic)
    if request.GET:
        if request.GET.has_key('search'):#search
            if request.GET['writesearch'] != '':
                # Replace the gallery with only the matching photos.
                A=[]
                wcomment = request.GET['writesearch']
                result = imagess.objects.filter(comment = wcomment)
                for i in range(0, len(result)):
                    a = bucket.stat_object('manage/'+result[i].title)
                    dic = []
                    dic.append(result[i].title)
                    dic.append(a.last_modified)
                    dic.append(result[i].mood)
                    dic.append(result[i].comment)
                    A.append(dic)
    return render_to_response('manage.html',{'A':A },\
        context_instance=RequestContext(request))
def award_front(request):
    """Django view: run the lottery draw for a validated visitor id.

    The GET 'id' must carry a fixed 13-char prefix and (reversed) 19-char
    suffix; the middle, XOR-obfuscated number is the micro_id. A prize
    level is drawn from the remaining inventory (local JSON file in debug,
    'nkumstc' bucket object on SAE), a Lottery record is created, and the
    draw-result page is rendered. Any failure returns an error response.
    """
    try:
        # Validate that the id carries both checksum affixes.
        assume_micro_id = request.GET['id']
        check_1 = assume_micro_id[0:13]
        check_2 = assume_micro_id[::-1][0:19]
        length = len(assume_micro_id)
        if check_1 == "2b80b60a10d3a" and check_2 == "05ab236e2f139bbcbd3":
            pass
        else:
            return HttpResponse("同学别刷票了 (╯▔皿▔)╯")
        # Strip the affixes and de-obfuscate the id (XOR with 65535).
        moreLikely_id = assume_micro_id[len("2b80b60a10d3a"):length - len("05ab236e2f139bbcbd3")]
        micro_id = int(moreLikely_id) ^ 65535
        if micro_id > 20000:
            return HttpResponse("同学别刷票了 (╯▔皿▔)╯")
        ##update lottery_remain_information
        # Debug mode == not running on SAE (no APP_NAME env var).
        debug = not environ.get("APP_NAME", "")
        #read present file
        if debug:
            jsonfile = open(JSON_FILE_PATH)
            data = json.load(jsonfile)
        else:
            bucket = Bucket("nkumstc")
            data = json.loads(
                bucket.get_object_contents(JSON_SERVER_OBJECT_NAME))
        # Build cumulative thresholds over the remaining prize counts.
        special = data["special"]
        first = data["first"] + special
        second = data["second"] + first
        third = data["third"] + second
        fourth = data["fourth"] + third
        fifth = data["fifth"] + fourth
        rate = data["rate"]
        # Total draw range; prizes occupy rate% of it, the rest is "None".
        total = (fifth * 100) / rate
        present_level = ""
        ##get lottery
        #remain no present
        if total == 0:
            present_level = "None"
        # Draw. Per the original note, the special prize is deliberately
        # near-impossible early on: only the single value ram == special
        # wins it, not the whole 1..special band.
        else:
            #while (True):
            ram = random.randint(1, total)
            if ram == special:
                present_level = "special"
            elif ram <= first:
                present_level = "first"
                #break
            elif ram <= second:
                present_level = "second"
                #break
            elif ram <= third:
                present_level = "third"
                #break
            elif ram <= fourth:
                present_level = "fourth"
                #break
            elif ram <= fifth:
                present_level = "fifth"
                #break
            else:
                present_level = "None"
                #break
        #make database record
        user = Lottery()
        user.present_level = present_level
        user.micro_id = micro_id
        user.status = False
        user.hash_id = hashlib.sha1(str(micro_id)).hexdigest()
        try:
            # A duplicate hash_id (repeat draw) fails the save.
            user.save()
        except:
            return HttpResponse("同学别刷票了 (╯▔皿▔)╯")
        micro_id64 = base64.b64encode(str(micro_id))
        award_front = get_template('lottery_beautyOfCoding/award_front.html')
        award_frontHtml = award_front.render(Context({"micro_id": micro_id64}))
        return HttpResponse(award_frontHtml)
    except:
        return HttpResponse("非法访问!")
class SaeStorage(Storage):
    """Whoosh ``Storage`` implementation persisting index files in an SAE Bucket.

    All files live under the ``folder`` prefix inside ``bucket_name``.
    Per-file locks are kept purely in-process in ``self.locks``.
    """

    def __init__(self, bucket_name, path):
        self.bucket_name = bucket_name
        self.folder = path
        self._bucket = Bucket(bucket_name)
        self.locks = {}

    def __repr__(self):
        return "%s(%r)(%r)" % (self.__class__.__name__,
                               self.bucket_name, self.folder)

    def create(self):
        # Nothing needs pre-creating in the bucket; storage is usable as-is.
        return self

    def destroy(self):
        # Drop every stored file, then the in-process lock registry.
        self.clean()
        del self.locks

    def create_file(self, name, **kwargs):
        # Buffer writes in memory and flush to the bucket on close.
        def _flush(sfile):
            self._bucket.put_object(self._fpath(name), sfile.file.getvalue())

        return StructFile(BytesIO(), name=name, onclose=_flush)

    def open_file(self, name, **kwargs):
        if self._bucket.stat_object(self._fpath(name)) is None:
            raise NameError(name)
        data = self._bucket.get_object_contents(self._fpath(name))

        def _flush(sfile):
            self._bucket.put_object(self._fpath(name), sfile.file.getvalue())

        return StructFile(BytesIO(data), name=name, onclose=_flush)

    def _fpath(self, fname):
        # Object key = folder prefix joined with the bare file name.
        return os.path.join(self.folder, fname)

    def clean(self):
        for fname in self.list():
            self._bucket.delete_object(self._fpath(fname))

    def list(self):
        # Strip "<folder>/" from each object key to recover bare file names.
        prefix_len = len(self.folder) + 1
        return [entry['name'][prefix_len:]
                for entry in self._bucket.list(path=self._fpath(""))]

    def file_exists(self, name):
        return name in self.list()

    def file_modified(self, name):
        meta = self._bucket.stat_object(self._fpath(name))
        return meta.get('last_modified', '')

    def file_length(self, name):
        return int(self._bucket.stat_object(self._fpath(name))['bytes'])

    def delete_file(self, name):
        self._bucket.delete_object(self._fpath(name))

    def rename_file(self, name, newname, safe=False):
        if name not in self.list():
            raise NameError(name)
        if safe and newname in self.list():
            raise NameError("File %r exists" % newname)
        # No server-side rename: copy the contents, then drop the old object.
        data = self._bucket.get_object_contents(self._fpath(name))
        self._bucket.delete_object(self._fpath(name))
        self._bucket.put_object(self._fpath(newname), data)

    def lock(self, name):
        # Lazily create one Lock per file name.
        if name not in self.locks:
            self.locks[name] = Lock()
        return self.locks[name]

    def temp_storage(self, name=None):
        name = name or "%s.tmp" % random_name()
        subfolder = os.path.join(self.folder, name)
        return SaeStorage(self.bucket_name, subfolder).create()
def get_content(self, id): ''' id : file id (file name using) ''' b = Bucket(self.bucket_name) return b.get_object_contents(id)
def saveWritting(request):
    # Core blog view. Three jobs:
    #   1) save the post;
    #   2) generate thumbnails for its pictures;
    #   3) move picture records from the cache table to the real table,
    #      then clear the cache table.
    userid = request.session.get('userid', '')
    if userid == '':
        return HttpResponseRedirect('/index')
    title = request.POST['title']
    text = request.POST['text']
    # Strip HTML tags to build the plain-text summary.
    textNoHtml = re.sub('<[^>]*?>', '', text)
    #print len(textNoHtml)
    #print text
    #print textNoHtml
    if len(textNoHtml) < 120:
        shortContent = textNoHtml + '......'
    else:
        shortContent = textNoHtml[0:120] + '......'
    nt = datetime.now()
    # Save the post itself.
    passageObj = Passage()
    writerObj = User.objects.get(id=userid)
    passageObj.UserID = writerObj
    passageObj.Title = title
    passageObj.Time = nt
    passageObj.ShortContent = shortContent
    passageObj.LongContent = text
    passageObj.save()
    # Move every cached picture referenced by the post to the real Picture
    # table and build a compressed thumbnail for each.
    picSrcLs = re.findall('<img src="(.*?)">', text)
    picNameLs = []
    for pss in picSrcLs:
        if 'pictures' in pss:
            # NOTE(review): [49:] assumes a fixed-length URL prefix before the
            # file name — confirm against the upload URL format.
            picNameLs.append(pss[49:])
        else:
            continue
    # Holds the id of the freshly saved post (re-fetched to obtain the pk).
    ID = 0
    passageObj = Passage.objects.get(UserID=writerObj, Time=nt)
    ID = passageObj.id
    for pn in picNameLs:
        cpobj = CachePicture.objects.get(ImageName=pn)
        #print 'sss',cpobj.ImagePath.name
        bucket = Bucket('media')
        im = Image.open(
            cStringIO.StringIO(bucket.get_object_contents(
                cpobj.ImagePath.name)))
        #im = Image.open(os.path.join(settings.MEDIA_ROOT, cpobj.ImagePath.name))
        # Thumbnail: cap the long side while keeping the aspect ratio.
        w, h = im.size
        if w > h:
            im.thumbnail((66, (66 * h) // w))
        else:
            im.thumbnail(((w * 74) // h, 74))
        #savepath = os.path.join(settings.MEDIA_ROOT, 'compressedpictures' ,'thumnail'+cpobj.ImageName)
        savepath = os.path.join('compressedpictures',
                                'thumnail' + cpobj.ImageName)
        # NOTE(review): split('.')[1] picks the SECOND dot-component, not the
        # last — a name like "a.b.jpg" yields "b"; [-1] would be safer.
        fm = cpobj.ImageName.split('.')[1]
        if fm.lower() == 'jpg':
            fm = 'jpeg'
        buf = cStringIO.StringIO()
        im.save(buf, fm)
        img_data = buf.getvalue()
        bucket.put_object(savepath, img_data)
        #im.save(savepath, fm)
        picObj = Picture()
        picObj.PassageID = passageObj
        picObj.OriginalImageName = pn
        picObj.OriginalImagePath = cpobj.ImagePath
        picObj.CompressedImageName = 'thumnail' + cpobj.ImageName
        picObj.CompressedImagePath.name = os.path.join(
            'compressedpictures', 'thumnail' + cpobj.ImageName)
        picObj.save()
        cpobj.delete()
        buf.close()
    # Delete the remaining cache-table rows and their bucket objects.
    deleteCachePicLs = CachePicture.objects.filter(UserName=writerObj.UserName)
    if len(deleteCachePicLs) > 0:
        bucket = Bucket('media')
        for pic in deleteCachePicLs:
            #os.remove(os.path.join(settings.MEDIA_ROOT, pic.ImagePath.name))
            bucket.delete_object(pic.ImagePath.name)
            pic.delete()
    # Increment the global passage counter (create the row on first use).
    dataCountObjLs = DataCount.objects.all()
    if len(dataCountObjLs) == 0:
        dataCountObj = DataCount()
        dataCountObj.PassageCount = 1
        dataCountObj.save()
    else:
        dataCountObj = dataCountObjLs[0]
        dataCountObj.PassageCount += 1
        print dataCountObj.PassageCount
        dataCountObj.save()
    #return HttpResponseRedirect('/index')
    return HttpResponseRedirect('/passage/' + str(ID))
def saveChange(request, ID):
    # Edit-post view: update an existing Passage, reconcile its pictures
    # (delete removed ones, promote newly cached ones), then clear the cache.
    userid = request.session.get('userid', '')
    if userid == '':
        return HttpResponseRedirect('/index')
    title = request.POST['title']
    text = request.POST['text']
    textNoHtml = re.sub('<[^>]*?>', '', text)
    if len(textNoHtml) < 120:
        shortContent = textNoHtml + '......'
    else:
        shortContent = textNoHtml[0:120] + '......'
    # Save the updated title, body and summary.
    passageObj = Passage.objects.get(id=int(ID))
    passageObj.Title = title
    passageObj.ShortContent = shortContent
    passageObj.LongContent = text
    passageObj.save()
    # picSrcLs: all picture URLs referenced in the edited body.
    picSrcLs = re.findall('<img src="(.*?)">', text)
    # picNameLs: bare file names extracted from those URLs.
    picNameLs = []
    for pss in picSrcLs:
        if 'pictures' in pss:
            # NOTE(review): [49:] assumes a fixed-length URL prefix — confirm.
            picNameLs.append(pss[49:])
        else:
            continue
    # picSavedObjLs: pictures already saved for this post.
    picSavedObjLs = Picture.objects.filter(PassageID=passageObj)
    # picStayLs: names of saved pictures still referenced after the edit.
    picStayLs = []
    # Delete saved pictures the edit removed from the body.
    for picObj in picSavedObjLs:
        if picObj.OriginalImageName in picNameLs:
            picStayLs.append(picObj.OriginalImageName)
            continue
        else:
            bucket = Bucket('media')
            bucket.delete_object(picObj.OriginalImagePath.name)
            bucket.delete_object(picObj.CompressedImagePath.name)
            #os.remove(os.path.join(settings.MEDIA_ROOT, picObj.OriginalImagePath.name))
            #os.remove(os.path.join(settings.MEDIA_ROOT, picObj.CompressedImagePath.name))
            picObj.delete()
    # Remove names already in the Picture table; the remainder are new
    # uploads still sitting in the CachePicture table.
    for pic in picStayLs:
        picNameLs.remove(pic)
    for pn in picNameLs:
        cpobj = CachePicture.objects.get(ImageName=pn)
        #print 'sss',cpobj.ImagePath.name
        bucket = Bucket('media')
        im = Image.open(
            cStringIO.StringIO(bucket.get_object_contents(
                cpobj.ImagePath.name)))
        #im = Image.open(os.path.join(settings.MEDIA_ROOT, cpobj.ImagePath.name))
        # Thumbnail: cap the long side while keeping the aspect ratio.
        w, h = im.size
        if w > h:
            im.thumbnail((66, (66 * h) // w))
        else:
            im.thumbnail(((w * 74) // h, 74))
        #savepath = os.path.join(settings.MEDIA_ROOT, 'compressedpictures' ,'thumnail'+cpobj.ImageName)
        savepath = os.path.join('compressedpictures',
                                'thumnail' + cpobj.ImageName)
        # NOTE(review): split('.')[1] breaks for names with multiple dots.
        fm = cpobj.ImageName.split('.')[1]
        if fm.lower() == 'jpg':
            fm = 'jpeg'
        buf = cStringIO.StringIO()
        im.save(buf, fm)
        img_data = buf.getvalue()
        bucket.put_object(savepath, img_data)
        #im.save(savepath, fm)
        picObj = Picture()
        picObj.PassageID = passageObj
        picObj.OriginalImageName = pn
        picObj.OriginalImagePath = cpobj.ImagePath
        picObj.CompressedImageName = 'thumnail' + cpobj.ImageName
        picObj.CompressedImagePath.name = os.path.join(
            'compressedpictures', 'thumnail' + cpobj.ImageName)
        picObj.save()
        cpobj.delete()
        buf.close()
    # Delete the remaining cache-table rows and their bucket objects.
    username = User.objects.get(id=userid).UserName
    deleteCachePicLs = CachePicture.objects.filter(UserName=username)
    if len(deleteCachePicLs) > 0:
        bucket = Bucket('media')
        for pic in deleteCachePicLs:
            #os.remove(os.path.join(settings.MEDIA_ROOT, pic.ImagePath.name))
            bucket.delete_object(pic.ImagePath.name)
            pic.delete()
    return HttpResponseRedirect('/passage/' + ID)
def beauti(request):
    """Image-beautify view: apply one PIL filter and stage the result.

    The source image is either an uploaded file (request.FILES['file']) or an
    existing object 'stati/<btitle>'. The filtered result is written to
    'meihua/<prefix><name>' where the 2-char prefix encodes the filter
    (lv/su/xu/h1/h2/h3). Everything previously under 'meihua/' is purged at
    the start of each request.

    Fix vs. previous version: in the "huidu1" branch `imgout` was only
    created inside `if len(out) > 0:`, so the `else` path raised
    UnboundLocalError; `imgout` is now created before the branch, matching
    the huidu2/huidu3 branches.
    """
    r = g = b = s = 0
    from sae.storage import Bucket
    bucket = Bucket("abc")
    # Clear previously staged results.
    aa = bucket.list(path="meihua/")
    for ii in aa:
        bucket.delete_object(ii.name)
    if request.GET:
        if request.GET.has_key("btitle"):
            b = request.GET["btitle"]
        if request.GET.has_key("ptitle"):
            g = request.GET["ptitle"]
    if request.POST:
        # "lvjing" (filter): keep only the red channel.
        if request.POST.has_key("lvjing"):
            if request.FILES or request.GET.has_key("btitle"):
                if request.FILES:
                    f = request.FILES["file"]
                    bucket.put_object("lvjing/" + f.name, f)
                    img = bucket.get_object_contents("lvjing/" + f.name)
                    s = "lv" + str(f)
                elif request.GET.has_key("btitle"):
                    name = request.GET["btitle"]
                    img = bucket.get_object_contents("stati/" + name)
                    s = "lv" + name
                im = Image.open(StringIO.StringIO(img))
                im.getdata()
                out = im.split()
                imgout = StringIO.StringIO()
                out[0].save(imgout, "jpeg")
                img_data = imgout.getvalue()
                bucket.put_object("meihua/" + s, img_data)
        # "suotu": 128x128 thumbnail.
        if request.POST.has_key("suotu"):
            if request.FILES or request.GET.has_key("btitle"):
                if request.FILES:
                    f = request.FILES["file"]
                    bucket.put_object("suotu/" + f.name, f)
                    img = bucket.get_object_contents("suotu/" + f.name)
                    s = "su" + str(f)
                elif request.GET.has_key("btitle"):
                    name = request.GET["btitle"]
                    img = bucket.get_object_contents("stati/" + name)
                    s = "su" + name
                im = Image.open(StringIO.StringIO(img))
                out = im.resize((128, 128))
                imgout = StringIO.StringIO()
                out.save(imgout, "jpeg")
                img_data = imgout.getvalue()
                bucket.put_object("meihua/" + s, img_data)
        # "xuanzhuan": rotate by `dushu` degrees (0 if blank).
        if request.POST.has_key("xuanzhuan"):
            if request.FILES or request.GET.has_key("btitle"):
                if request.POST["dushu"] != "":
                    dushu = request.POST["dushu"]
                    i = string.atoi(dushu)
                else:
                    i = 0
                if request.FILES:
                    f = request.FILES["file"]
                    bucket.put_object("xuanzhuan/" + f.name, f)
                    img = bucket.get_object_contents("xuanzhuan/" + f.name)
                    s = "xu" + str(f)
                else:
                    name = request.GET["btitle"]
                    img = bucket.get_object_contents("stati/" + name)
                    s = "xu" + name
                im = Image.open(StringIO.StringIO(img))
                out = im.rotate(i)
                imgout = StringIO.StringIO()
                out.save(imgout, "jpeg")
                img_data = imgout.getvalue()
                bucket.put_object("meihua/" + s, img_data)
        # "huidu1": band 0 (red) as greyscale, falling back to the original.
        if request.POST.has_key("huidu1"):
            if request.FILES or request.GET.has_key("btitle"):
                if request.FILES:
                    f = request.FILES["file"]
                    bucket.put_object("huidu1/" + f.name, f)
                    img = bucket.get_object_contents("huidu1/" + f.name)
                    s = "h1" + str(f)
                else:
                    name = request.GET["btitle"]
                    img = bucket.get_object_contents("stati/" + name)
                    s = "h1" + name
                im = Image.open(StringIO.StringIO(img))
                im.getdata()
                out = im.split()
                # BUGFIX: create the output buffer unconditionally so the
                # fallback branch can write into it too.
                imgout = StringIO.StringIO()
                if len(out) > 0:
                    out[0].save(imgout, "jpeg")
                else:
                    im.save(imgout, "jpeg")
                img_data = imgout.getvalue()
                bucket.put_object("meihua/" + s, img_data)
        # "huidu2": band 1 (green) as greyscale, falling back to the original.
        if request.POST.has_key("huidu2"):
            if request.FILES or request.GET.has_key("btitle"):
                if request.FILES:
                    f = request.FILES["file"]
                    bucket.put_object("huidu2/" + f.name, f)
                    img = bucket.get_object_contents("huidu2/" + f.name)
                    s = "h2" + str(f)
                elif request.GET.has_key("btitle"):
                    name = request.GET["btitle"]
                    img = bucket.get_object_contents("stati/" + name)
                    s = "h2" + name
                im = Image.open(StringIO.StringIO(img))
                im.getdata()
                out = im.split()
                imgout = StringIO.StringIO()
                if len(out) > 1:
                    out[1].save(imgout, "jpeg")
                else:
                    im.save(imgout, "jpeg")
                img_data = imgout.getvalue()
                bucket.put_object("meihua/" + s, img_data)
        # "huidu3": band 2 (blue) as greyscale, falling back to the original.
        if request.POST.has_key("huidu3"):
            if request.FILES or request.GET.has_key("btitle"):
                if request.FILES:
                    f = request.FILES["file"]
                    bucket.put_object("huidu3/" + f.name, f)
                    img = bucket.get_object_contents("huidu3/" + f.name)
                    s = "h3" + str(f)
                elif request.GET.has_key("btitle"):
                    name = request.GET["btitle"]
                    img = bucket.get_object_contents("stati/" + name)
                    s = "h3" + name
                im = Image.open(StringIO.StringIO(img))
                im.getdata()
                out = im.split()
                imgout = StringIO.StringIO()
                if len(out) > 2:
                    out[2].save(imgout, "jpeg")
                else:
                    im.save(imgout, "jpeg")
                img_data = imgout.getvalue()
                bucket.put_object("meihua/" + s, img_data)
    return render_to_response("pilbeau.html", {"r": s, "s": b, "g": g},
        context_instance=RequestContext(request))
class DiaryService(object):
    """Service layer for diary entries: DB access via DiaryDao plus
    JSON dumps to an SAE 'diary' bucket, and XML rendering helpers."""

    def __init__(self):
        self.dda = DiaryDao()
        self.diary_bucket = Bucket("diary")

    def dump_users(self):
        """Dump every user's diaries as JSON objects into the bucket."""
        users = self.dda.get_users()
        if users != None:
            for u in users:
                # obj_id = '/user/{0}.dat'.format(u['girl'])
                # obj_data = {'password': u['password'], 'diaryId': u['diaryId'], 'boy': u['boy'], 'girl': u['girl']}
                # self.diary_bucket.put_object(obj_id, json.dumps(obj_data), content_type='application/json', content_encoding='utf-8')
                diarys = self.dda.get_user_diarys(u["diaryId"])
                obj_d_id = "/diary/{0}.dat".format(u["diaryId"])
                self.diary_bucket.put_object(
                    obj_d_id, json.dumps(diarys), content_type="application/json", content_encoding="utf-8"
                )
        else:
            pass

    def get_recent_diarys(self, username, limit):
        # NOTE(review): this method is broken as written — `u` and `since`
        # are not defined in this scope, so it raises NameError when called.
        # `user` is fetched but never used. Intended behaviour unclear; fix
        # requires knowing the user-object schema.
        obj_id = "/user/{0}.dat".format(username)
        user = self.diary_bucket.get_object_contents(obj_id)
        obj_id = "/diary/{0}.dat".format(u["diaryId"])
        return self.dda.get_diarys_by_user_since_date(username, since, limit)

    def get_diary_by_id_xml(self, username, diaryid):
        """Return one diary as XML; hides it unless *username* may view it."""
        if username == None:
            return None
        diarys = self.dda.get_diary_by_id(diaryid)
        if len(diarys) > 0:
            diary = diarys[0]
            # Viewable by either owner, or by anyone when open level is 0.
            if diary["FDUName"] == username or diary["FDULoverName"] == username or diary["FDOpenLevel"] == 0:
                self.dda.hit_diary(diaryid)
            else:
                diarys = None
        return self.strf_diarys("data", diarys)

    def get_diary_by_id_dict(self, username, diaryid):
        """Same access rules as get_diary_by_id_xml, but returns the raw dict."""
        if username == None:
            return None
        diarys = self.dda.get_diary_by_id(diaryid)
        diary = None
        if len(diarys) > 0:
            diary = diarys[0]
            if diary["FDUName"] == username or diary["FDULoverName"] == username or diary["FDOpenLevel"] == 0:
                self.dda.hit_diary(diaryid)
            else:
                diary = None
        return diary

    def get_diarys_by_date(self, username, year, month, day):
        """Return the user's diaries on an exact date as XML (bumps hit counts)."""
        if username == None:
            return None
        date = datetime.date(year, month, day)
        diarys = self.dda.get_diarys_by_user_date(username, date)
        for diary in diarys:
            self.dda.hit_diary(diary["FDID"])
        diarys_str = self.strf_diarys("data", diarys)
        return diarys_str

    def _get_diarys_by_month(self, username, year, month):
        # Internal helper: diary dates within [first of month, first of next month).
        date_start = datetime.date(year, month, 1)
        date_end = date_start + relativedelta(months=1)
        diarys = self.dda.get_diarysdate_by_user_datescope(username, date_start, date_end)
        return diarys

    def get_diarysdate_by_month(self, username, year, month):
        """Return a comma-separated list of day numbers with diaries that month."""
        strArray = []
        diarys = self._get_diarys_by_month(username, year, month)
        if diarys != None:
            for diary in diarys:
                strArray.append(diary["FDTime"].strftime("%d"))
        return ",".join(strArray)

    def get_diarysdate_by_scope(self, year, month):
        """Return "dates|counts" grouped by year, month or day depending on
        which of *year*/*month* are given; "|" when there is no data."""
        dateformat = "{:0>4d}"
        if year == None:
            date_group_count = self.dda.year_group_count()
        elif month == None:
            date_s = datetime.datetime(year, 1, 1, 0, 0, 0)
            date_e = date_s + relativedelta(years=1)
            dateformat = "{:0>2d}"
            date_group_count = self.dda.month_group_count(date_s, date_e)
        else:
            date_s = datetime.datetime(year, month, 1, 0, 0, 0)
            date_e = date_s + relativedelta(months=1)
            dateformat = "{:0>2d}"
            date_group_count = self.dda.date_group_count(date_s, date_e)
        strdate = []
        strcount = []
        if date_group_count == None or len(date_group_count) == 0:
            return "|"
        for row in date_group_count:
            strdate.append(dateformat.format(row["d"]))
            strcount.append(str(row["c"]))
        return ",".join(strdate) + "|" + ",".join(strcount)

    def get_diarysdate_by_user_scope(self, username, year, month):
        """Like get_diarysdate_by_scope, restricted to one user's diaries."""
        if username == None or len(username) == 0:
            return "|"
        dateformat = "{:0>4d}"
        if year == None:
            date_group_count = self.dda.year_group_count(username=username)
        elif month == None:
            date_s = datetime.datetime(year, 1, 1, 0, 0, 0)
            date_e = date_s + relativedelta(years=1)
            dateformat = "{:0>2d}"
            date_group_count = self.dda.month_group_count(date_s, date_e, username=username)
        else:
            date_s = datetime.datetime(year, month, 1, 0, 0, 0)
            date_e = date_s + relativedelta(months=1)
            dateformat = "{:0>2d}"
            date_group_count = self.dda.date_group_count(date_s, date_e, username=username)
        strdate = []
        strcount = []
        if date_group_count == None or len(date_group_count) == 0:
            return "|"
        for row in date_group_count:
            strdate.append(dateformat.format(row["d"]))
            strcount.append(str(row["c"]))
        return ",".join(strdate) + "|" + ",".join(strcount)

    def get_diarys_by_user_scope(self, username, year, month):
        """Return the user's diaries within the year/month scope as info-XML;
        an unbounded scope when *year* is None."""
        if year == None:
            date_s = None
            date_e = None
        elif month == None:
            date_s = datetime.datetime(year, 1, 1, 0, 0, 0)
            date_e = date_s + relativedelta(years=1)
        else:
            date_s = datetime.datetime(year, month, 1, 0, 0, 0)
            date_e = date_s + relativedelta(months=1)
        diarys = self.dda.get_diarys_by_user_datescope(date_s, date_e, username)
        return self.strf_diarys("info", diarys)

    def strf_diarys(self, outputtype, diarys):
        """Wrap diaries into a <diarys> XML document; *outputtype* "data"
        includes full content, anything else yields the short info form."""
        strArray = []
        if diarys != None:
            if outputtype == "data":
                for diary in diarys:
                    strArray.append(self.strf_diary_data(diary))
            else:
                for diary in diarys:
                    strArray.append(self.strf_diary_info(diary))
        formatter = """<?xml version=\"1.0\" encoding=\"utf-8\"?>
<diarys>
{0}
</diarys>"""
        return formatter.format("".join(strArray))

    def strf_diary_info(self, diary):
        """Render one diary row as the short <diary> info fragment."""
        if diary == None:
            return None
        formatter = """<diary>
<id>{0[FDID]}</id>
<title><![CDATA[{0[FDTitle]}]]></title>
<date>{0[FDTime]:%Y-%m-%d}</date>
<author>{0[FDUName]}</author>
<visit>{0[FDVisitCount]}</visit>
<comment>{0[FDCommCount]}</comment>
</diary>"""
        return formatter.format(diary)

    def strf_diary_data(self, diary):
        """Render one diary row as the full <diary> fragment (with content)."""
        if diary == None:
            return None
        formatter = """<diary>
<id>{0[FDID]}</id>
<title><![CDATA[{0[FDTitle]}]]></title>
<date>{0[FDTime]:%Y-%m-%d}</date>
<weather>{0[FDWeather]}</weather>
<author>{0[FDUName]}</author>
<visit>{0[FDVisitCount]}</visit>
<comment>{0[FDCommCount]}</comment>
<content><![CDATA[{0[FDContent]}]]></content>
<openlevel>{0[FDOpenLevel]}</openlevel>
<allowcomment>{0[FDAllowComment]}</allowcomment>
</diary>"""
        return formatter.format(diary)

    def is_diary_owner(self, diaryid, username):
        """True if *username* is one of the two owners of the diary."""
        owners = self.dda.get_diary_owners(diaryid)
        if owners == None:
            return False
        if username == owners[0] or username == owners[1]:
            return True
        else:
            return False

    def can_comment(self, diaryid):
        """True if the diary exists and its FDAllowComment flag is set."""
        diary_info = self.dda.get_diary_fields(["FDAllowComment"], diaryid)
        if diary_info == None or len(diary_info) == 0:
            return False
        if diary_info[0]["FDAllowComment"]:
            return True
        else:
            return False