def send_email_by_mailgun(from_address, to_address, subject, html_content, api_host, api_key):
    # Send an email through the Mailgun HTTP API.
    # Returns the Mailgun message id (angle brackets stripped) on success,
    # the raw response object when the reply can't be parsed as expected,
    # or None when there are no valid recipient addresses.
    if not isinstance(to_address, (list, tuple)):
        to_address = [to_address]
    addresses = get_valid_addresses(to_address)
    if not addresses:
        return
    response = requests.post(
        api_host,
        auth=("api", api_key),
        data={"from": from_address,
              "to": addresses,
              "subject": smart_str(subject),
              "html": smart_str(html_content)}
    )
    # A successful reply looks like:
    # {u'id': u'<*****@*****.**>',
    #  u'message': u'Queued. Thank you.'}
    try:
        result = response.json()
        message_id = result['id']
        # strip the surrounding angle brackets from the message id
        message_id = message_id.lstrip('<').rstrip('>')
        return message_id
    except Exception:  # BUG FIX: was a bare `except:`; only shield parse errors
        return response
def write_logs(logs, app_name, filepath=None, root=None):
    # Append timestamped log lines to the sync log file for `app_name`.
    # filepath: the file the logs are about; root: the site root the
    # log file lives under.
    if isinstance(logs, string_types):
        logs = [logs]
    if not logs:
        return  # ignore
    if not root:
        return  # ignore
    now = datetime.datetime.now()
    now_str = now.strftime('%Y-%m-%d %H:%M:%S')
    log_path = make_sure_sync_log_path(root, app_name)
    with open(log_path, 'a') as f:
        for log in logs:
            # FIX: removed the unused `raw_log` alias and the redundant
            # second smart_str() call on an already-encoded value.
            log = smart_str(log)
            if filepath:
                filepath = smart_str(filepath)
                log = '%s: %s %s\n' % (now_str, filepath, log)
            else:
                log = '%s %s\n' % (now_str, log)
            write_file(f, log)
def set_more_headers_for_response(response):
    # Copy headers collected on the request (request.more_headers)
    # onto the outgoing response, never overwriting existing ones.
    extra = getattr(request, 'more_headers', None) or {}
    if not isinstance(extra, dict):
        return
    for name, value in extra.items():
        if not isinstance(name, string_types) or not isinstance(value, string_types):
            continue
        if name in response.headers:
            continue
        response.headers[smart_str(name)] = smart_str(value)
def refund(self, trade_no, amount, reason=''):
    # Request an Alipay refund for the given trade number and amount.
    # `reason` is truncated to Alipay's 80-character limit for
    # refund_reason.
    data = {
        'trade_no': smart_str(trade_no),
        'refund_amount': smart_str(amount),
    }
    if reason:
        # BUG FIX: was `smart_unicode(reason)[80]`, which picked the
        # single character at index 80 (IndexError for shorter reasons)
        # instead of truncating to the first 80 characters.
        data['refund_reason'] = smart_unicode(reason)[:80]
    return self._send_request('alipay.trade.refund', data)
def set_user_headers_for_response(response):
    # Apply headers the user set through the template API
    # (request.user_response_headers) to the response; headers already
    # present on the response are left untouched.
    user_headers = getattr(request, 'user_response_headers', {})
    if not isinstance(user_headers, dict):
        return
    for name, value in user_headers.items():
        both_strings = isinstance(name, string_types) and isinstance(value, string_types)
        if both_strings and name not in response.headers:
            response.headers[smart_str(name)] = smart_str(value)
def dump_csv(list_obj, lines=True):
    # Serialize list_obj to CSV bytes (all fields quoted).
    # With lines=True each element of list_obj is itself one row;
    # otherwise list_obj is written as a single row.
    buf = BytesIO()
    writer = csv.writer(buf, quoting=csv.QUOTE_ALL)
    if lines:
        for row in list_obj:
            writer.writerow([smart_str(cell) for cell in row])
    else:
        writer.writerow([smart_str(cell) for cell in list_obj])
    return buf.getvalue()
def is_doc_modified(doc, date_field='date'):
    # Decide whether `doc` counts as modified for conditional-GET (304)
    # purposes by comparing its date field to If-Modified-Since.
    if not doc:
        return True
    #if request.environ.get('HTTP_PRAGMA') in ['no-cache']:
    #    return True
    if request.environ.get('HTTP_CACHE_CONTROL') in ['no-cache']:
        return True
    if date_field in doc:
        doc_date = smart_str(doc[date_field])
        client_date = request.environ.get('HTTP_IF_MODIFIED_SINCE')
        # and request.cache_control.max_age
        if client_date and smart_str(client_date) == doc_date:
            return False
    return True
def _get_sign_s(self, post_data, excludes=None):
    # Build the string Alipay signs: drop excluded and empty fields,
    # then join "k=v" pairs with keys in ascending order.
    #post_data = post_data.copy()
    #post_data.pop('sign_type', None)
    excludes = excludes or []  # e.g. ['sign', 'sign_type']
    kept = {}
    for key, value in post_data.items():
        if key in excludes:
            continue
        if not len(smart_str(value)):
            continue
        kept[key] = value
    pairs = ['%s=%s' % (key, value) for key, value in sorted(kept.items())]
    return smart_str('&'.join(pairs))
def csv_to_list(raw_content, max_rows=None, max_columns=None, return_max_length=False, auto_fill=False):
    # Parse raw CSV content into a list of rows (lists of unicode
    # cells). max_rows / max_columns cap the output size; auto_fill
    # pads short rows with '' so every row has max_length cells; with
    # return_max_length a (result, max_length) tuple is returned.
    file_io = BytesIO(smart_str(raw_content))
    csv_reader = csv.reader(file_io)
    result = []
    max_length = 0
    if max_rows is None:
        max_rows = 9999
    for row in csv_reader:
        row = [smart_unicode(s).strip().replace('\\n', '\n') for s in row]
        if max_columns:  # maximum number of columns allowed per row
            row = row[:max_columns]
        if len(row) > max_length:
            max_length = len(row)
        result.append(row)
        # BUG FIX: the old i-counter broke one row too late, returning
        # max_rows + 1 rows; stop as soon as the limit is reached.
        if len(result) >= max_rows:
            break
    if auto_fill:  # pad short rows up to the longest row's length
        result = [row + [''] * (max_length - len(row)) for row in result]
    if return_max_length:
        return result, max_length
    else:
        return result
def join_url(url, **params):
    # Build a url with `params` appended as query-string arguments.
    # GET parameters already on the url are preserved unless `params`
    # supplies the same key.
    if not params:
        return url
    for key, value in params.items():
        if isinstance(value, dict):
            encoded = json.dumps(value)
        elif isinstance(value, unicode):
            encoded = value.encode('utf8')
        elif value is True:
            encoded = 'true'
        else:
            encoded = smart_str(value)
        params[key] = encoded
    parts = url.split('?', 1)
    if len(parts) == 2:  # merge the query string already on the url
        url, query_s = parts
        for k, v in get_GET_dict_data(query_s).items():
            params.setdefault(k, v)
    return url + '?' + urllib.urlencode(params)
def post_url_path(self):
    # Compute the canonical url path for this post, preferring an
    # explicit `url` / `url_path` meta value over one derived from the
    # file's relative path.
    url_path = self.get_meta_value('url', '') or self.get_meta_value(
        'url_path', '')
    if url_path and not isinstance(url_path, basestring):
        url_path = smart_unicode(url_path)
    if not url_path:
        # No user-specified url: derive one from relative_path.
        # (A user-specified url is kept as-is, right or wrong.)
        url_path = self.path.rsplit('.', 1)[0]
        # must not start with a slash, and is forced to lowercase
        url_path = slugify(url_path, auto=True).lower()
    else:
        # User-declared url: keep the original case here, only replace
        # characters unsuitable for a url.
        url_path = slugify(url_path, must_lower=False)
    # at this point an auto-derived url_path is fully lowercase
    if '%' in url_path:
        # percent-encoded url, typically imported from WordPress
        _url_path = urllib.unquote(smart_str(url_path))
        if url_path != _url_path:
            url_path = smart_unicode(_url_path)
    url_path = url_path.lstrip('/')
    url_path = re.sub(r'/{2,}', '/', url_path)
    url_path = url_path or '--'  # could still end up empty...
    # NOTE(review): this final .lower() also lowercases user-declared
    # urls, contradicting the "keep original case" branch above —
    # confirm this is intended.
    url_path = url_path.strip().lower()
    return url_path
def set_304_response_for_doc(doc, response, date_field='date', etag=None):
    # Attach a Last-Modified header (taken from doc[date_field]) and an
    # optional etag so clients can issue conditional requests.
    if date_field in doc:
        response.headers['Last-Modified'] = smart_str(doc[date_field])
    if etag:
        response.set_etag(etag)
    return response
def format(self, format_str='%H:%M / %d-%m-%Y'):
    # Render self.core (a datetime) with `format_str`, shifted by the
    # site-configured utc_offset (in hours). Any invalid or
    # out-of-range offset falls back to 8.
    if not isinstance(self.core, datetime.datetime):
        return self.core or '-'
    utc_offset = just_get_site_config('utc_offset')
    try:
        utc_offset = float(utc_offset)
    except:
        utc_offset = 8
    if utc_offset > 12 or utc_offset < -12:
        utc_offset = 8
    if utc_offset is None:
        # NOTE(review): unreachable — float() above either succeeds or
        # the except already set 8, so utc_offset can't be None here.
        utc_offset = 8  # default value
    if not isinstance(utc_offset, (int, float)):
        utc_offset = 8
    # smart_str so unicode characters (e.g. Chinese) in the format
    # survive strftime; cap the format length defensively
    format_str = smart_str(format_str[:200])
    date = self.core + datetime.timedelta(0, utc_offset * 3600)
    date_year = date.year
    if date_year <= 1900:
        # strftime can't handle years <= 1900 on some platforms:
        # format a 1900 stand-in, then splice the real year back in.
        date = date.replace(year=1900)
        result = date.strftime(format_str)
        result = result.replace('1900', str(date_year), 1)
    else:
        result = date.strftime(format_str)
    return smart_unicode(result)
def default_response_handler(response):
    # Final pass over every outgoing response: apply overrides stored
    # on the request, guarantee the visitor-id cookie, and attach
    # diagnostic headers.
    # request.response has the highest priority
    response_in_request = get_response_in_request()
    response = response_in_request or response
    # make sure the 'vid' (visitor id) cookie exists
    visitor_id = get_cookie('vid')
    if not visitor_id:
        visitor_id = uuid.uuid1().hex
        set_cookie('vid', visitor_id, max_age=5 * one_year_seconds)
    # headers produced by internal calls during this request
    set_more_headers_for_response(response)
    # headers the user set through the template API
    set_user_headers_for_response(response)
    r_code = get_response_code_in_request()
    if r_code:
        response.status_code = r_code
    r_type = get_response_content_type_in_request()
    if r_type:
        response.content_type = r_type
    cache_key = get_page_cache_key_in_request()
    if cache_key:
        response.headers['x-cache-key'] = smart_str(cache_key)
    emails_sent_info = getattr(request, "emails_sent_info", None)
    if emails_sent_info:
        response.headers['x-emails-sent'] = smart_str(emails_sent_info)
    if response.status_code > 400:
        return response
    ## try to cache the response
    #cache_response_into_memcache(response)
    # handle cookies last: earlier behavior (e.g. statistics) may have
    # produced new cookies
    set_cookies(response)
    return response
def store_sync_from_log(root, log):
    # Best-effort append of a timestamped entry to the site's
    # .sync/farbox_sync_from.log; all failures are silently ignored.
    stamp = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    entry = smart_str('%s %s\n\n' % (stamp, log))
    sync_log_filepath = join(root, '.sync/farbox_sync_from.log')
    try:
        make_sure_path(sync_log_filepath)
        with open(sync_log_filepath, 'a') as log_file:
            log_file.write(entry)
    except:
        pass
def unqote_url_path_to_unicode(url_path):
    # Return the percent-decoded, unicode form of url_path; non-string
    # input is passed through unchanged.
    if not isinstance(url_path, string_types):
        return url_path
    url_path = smart_unicode(url_path)
    if '%' not in url_path:
        return url_path
    # percent-encoded url, typically imported from WordPress
    decoded = smart_unicode(urllib.unquote(smart_str(url_path)))
    if decoded != url_path:
        return decoded
    return url_path
def to_sort_str(sort):
    # Normalize a sort flag to 'desc' or 'asc'.
    # Strings must literally be 'desc'/'asc'; negative ints mean
    # 'desc', other ints 'asc'; anything else falls back to 'desc'.
    if isinstance(sort, string_types):
        normalized = smart_str(sort)
        if normalized in ('desc', 'asc'):
            return normalized
        return 'desc'
    if isinstance(sort, int):
        return 'desc' if sort < 0 else 'asc'
    return 'desc'
def split_path_by_name(name, length=7, hash_name=True):
    # Spread a name into a nested path like 'a/b/c/.../rest' so files
    # don't pile up in one directory. By default the name's md5 hex is
    # used; the first (length-1) characters each become one directory
    # level and the remainder becomes the final component.
    name = name.strip()
    if hash_name:
        key = hashlib.md5(smart_str(name)).hexdigest()
    else:
        key = name
    parts = list(key[:length - 1])
    parts.append(key[length - 1:])
    return '/'.join(parts)
def image_to_base64(im, im_format='PNG', as_url=False):
    # Encode a PIL image as base64; with as_url=True a data-uri string
    # is returned. Any format other than png collapses to JPEG.
    buf = BytesIO()
    if im_format.lower() == 'png':
        im_format, url_prefix = 'PNG', 'data:image/png'
    else:
        im_format, url_prefix = 'JPEG', 'data:image/jpeg'
    im.save(buf, format=im_format)
    b64_content = base64.b64encode(buf.getvalue())
    if as_url:
        b64_content = '%s;base64,%s' % (url_prefix, b64_content)
    return smart_str(b64_content)
def set(self, key, data, expiration=0, zipped=False, hash_key=False, *args, **kwargs):
    # Store `data` under `key` in the cache backend, optionally
    # zlib-compressed and/or with the key md5-hashed.
    if not isinstance(key, str):
        key = smart_str(key)
    if not isinstance(data, str):
        data = smart_str(data)
    if zipped:
        # zlib.compress is fast (~100M bytes in about 5s) with a
        # compression ratio close to 50%; fall back to the raw data
        # if compression fails for any reason.
        try:
            data = zlib.compress(data)
        except:
            pass
    if hash_key:
        key = hashlib.md5(key).hexdigest()
    expiration = int(expiration)
    self.safe_try(self.current_client, 'set', key, data, expiration, *args, **kwargs)
def send_file_with_304(filepath, mimetype=None):
    # Serve a file, answering 304 when the client's cached copy is
    # still fresh; missing files yield None.
    if not os.path.isfile(filepath):
        return  # ignore
    if not is_filepath_modified(filepath):
        return get_304_response()
    mimetype = mimetype or guess_type(filepath, 'application/octet-stream')
    response = send_file(filepath, mimetype=mimetype)
    try:
        # advertise the file's mtime so the next request can be conditional
        response.headers['Last-Modified'] = smart_str(int(os.path.getmtime(filepath)))
    except:
        pass
    return response
def get_file_timestamp(relative_path=None, metadata=None, abs_filepath=None, utc_offset=None):
    # Mainly used to get a post's date information as a timestamp.
    # relative_path is relative to the root.
    # Preference order: file mtime (when abs_filepath is given and no
    # metadata) -> metadata['date'] -> date patterns in the file
    # name/path -> current time.
    if abs_filepath and not metadata:
        if os.path.isfile(abs_filepath):
            return os.path.getmtime(abs_filepath)
        else:
            return time.time()
    name_from_path = get_just_name(relative_path)
    try:
        metadata_date = metadata.get('date')
        if isinstance(metadata_date, datetime.datetime):
            # convert to string form first, so we finally obtain a UTC
            # timestamp after re-parsing with the offset applied
            date_s = metadata_date.strftime('%Y-%m-%d %H:%M:%S')
        else:
            # taken from the meta first; after normalization the time
            # zone difference can be adjusted automatically
            date_s = smart_str(
                get_meta_value(metadata=metadata, key='date', default=''))
        if date_s:
            date_s = date_s.strip()
        if date_s and re.match('\d{4}\.\d+\.\d+$', date_s):
            # dates like 2018.3.19 are rewritten as xxxx-xx-xx
            date_s = date_s.replace('.', '-')
        if not date_s:
            # compatible with '2012-12-12 12-12' or '2012-12-12 12-12-12'
            if re.match(r'^\d+-\d+-\d+ \d+-\d+(-\d+)?$', name_from_path):
                part1, part2 = name_from_path.split(' ', 1)
                try:
                    s = '%s %s' % (part1, part2.replace('-', ':'))
                    date = utc_date_parse(s, utc_offset=utc_offset)
                    # NOTE(review): returns a parsed date object here,
                    # while the other paths return a float timestamp —
                    # confirm callers accept both.
                    return date
                except:
                    pass
            # extract a 2012-1?2-1?2 date pattern from the file name
            date_search = re.search('/?([123]\d{3}-\d{1,2}-\d{1,2})[^/]*', relative_path)
            if date_search:
                # the date can come from the file's path (jekyll-compatible)
                date_s = date_search.groups()[0]
        # NOTE(review): when no date was found anywhere, date_s is empty
        # and utc_date_parse is expected to raise (caught below) —
        # confirm against utc_date_parse's contract.
        date = utc_date_parse(date_s, utc_offset=utc_offset)
    except (ValueError, TypeError):
        return time.time()
    timestamp = date_to_timestamp(date)
    if not timestamp:
        timestamp = time.time()
    return timestamp
def do_download_from_internet_and_sync(bucket, path, url, timeout=10):
    # Download `url` and sync its body into `bucket` at `path`. The
    # bucket is passed explicitly because g.site is unavailable when
    # this runs asynchronously. Returns True on success, None
    # otherwise; all errors are swallowed.
    if not has_bucket(bucket):
        return
    try:
        response = requests.get(url, timeout=timeout, verify=False)
        if response.status_code > 300:
            return  # ignore
        response_content = smart_str(response.content)
        if not response_content:
            return
        # push the downloaded content through server-side sync
        sync_file_by_server_side(bucket=bucket, relative_path=path, content=response_content)
        return True
    except:
        pass
def add_filepath_to_ipfs(filepath, only_hash=False):
    # Add a file to ipfs and return its hash ('Qm...'); with
    # only_hash=True the hash is computed without storing the file.
    # Returns None for missing files or unexpected command output.
    if not os.path.isfile(filepath):
        return
    # FIX: removed the no-op `only_hash = only_hash` assignment.
    # NOTE(review): filepath is interpolated into a shell command; a
    # path containing '"' could break out of the quoting — confirm
    # run_ipfs_cmd escapes its input or that paths are trusted.
    if only_hash:
        cmd = 'ipfs add --quieter --only-hash "%s"' % filepath
    else:
        cmd = 'ipfs add --quieter "%s"' % filepath
    cmd = smart_str(cmd)
    result = run_ipfs_cmd(cmd)
    if result:
        result = result.strip()
        if not result.startswith('Qm'):
            # not a valid ipfs hash value
            return
        return result
def accept_upload_file_from_client(self, bucket, record_data, get_raw_content_func=None):
    # Store a client-uploaded file into qcloud COS for `bucket` and
    # update its record. Returns "ok", "failed" or "existed".
    if not self.should_upload_file_by_client(bucket, record_data):
        return "failed"
    filepath = self.get_filepath_from_record(bucket=bucket, record_data=record_data)
    if not filepath:
        return "failed"
    if not get_raw_content_func:
        return "failed"
    # get_raw_content_func may be a callable producing the bytes, or
    # the raw content itself
    if hasattr(get_raw_content_func, "__call__"):
        raw_content = get_raw_content_func()
    else:
        raw_content = get_raw_content_func
    if len(raw_content) > MAX_FILE_SIZE:
        return "failed"
    if not self.exists(bucket, record_data):
        if not raw_content or not isinstance(raw_content, string_types):
            return "failed"
        # qcloud_cos.cos_common.maplist
        content_type = guess_type(record_data.get("path"))
        relative_path = record_data.get("path") or ""
        filename = os.path.split(relative_path)[-1]
        headers = dict()
        if filename:
            # make browsers download the object under its original name
            headers["ContentDisposition"] = smart_str('attachment;filename="%s"' % relative_path)
        if DEBUG:
            print("upload %s to qcloud" % relative_path)
        uploaded = upload_file_to_qcloud_for_bucket(bucket, filepath, raw_content, content_type=content_type, **headers)
        if uploaded:
            file_size = len(raw_content)
            image_info = {}
            if guess_type(relative_path).startswith("image/"):
                # record width/height etc. for images
                image_info = get_image_info_from_raw_content(raw_content)
            self.update_record_when_file_stored(bucket, record_data, file_size=file_size, image_info=image_info)
            return "ok"
        else:
            return "failed"
    else:
        if raw_content and isinstance(raw_content, string_types):
            file_size = len(raw_content)
        else:
            file_size = 0
        self.update_record_when_file_stored(bucket, record_data, file_size=file_size)  # try to update the record
        return "existed"
def get(self, key, default=None, zipped=False, hash_key=False):
    # Fetch `key` from the cache backend. `zipped` transparently
    # zlib-decompresses the value; `hash_key` looks up the key's md5.
    if not isinstance(key, str):
        key = smart_str(key)
    if hash_key:
        key = hashlib.md5(key).hexdigest()
    if not key:
        return None
    data = self.safe_try(self.current_client, 'get', key)
    if data is None:
        return default
    if isinstance(data, (tuple, list)):
        data = data[0]
    if not zipped:
        return data
    try:
        return zlib.decompress(data)
    except:
        return None
def _send_request(self, data):
    # Sign and POST a request to the (legacy) Alipay gateway; returns
    # the compiled response, or None when no cert is configured.
    # `data` is a dict of request fields.
    if not self.cert:
        return  # ignore
    post_data = dict(self.default_request)
    post_data.update(data)
    # every value is encoded to utf8 first
    post_data = dict((key, smart_str(value)) for key, value in post_data.items())
    post_data['sign'] = self._sign(post_data, excludes=['sign', 'sign_type'])  # sign the payload
    params_s = urlencode(post_data)
    # POSTing the body directly mangles Chinese text, so the params
    # travel on the query string instead
    r = requests.post(self.api_url + "?" + params_s, allow_redirects=False)
    return self.compile_response(r)
def _send_request(self, method, data):
    # Sign and POST an OpenAPI request (`method`) to Alipay, with
    # `data` serialized as the biz_content JSON payload.
    if not self.cert:
        return  # ignore
    # every value is encoded to utf8 first
    encoded = dict((key, smart_str(value)) for key, value in data.items())
    post_data = dict(
        self.default_request,
        method=method,
        timestamp=get_cn_timestamp(),
        biz_content=json.dumps(encoded),
    )
    post_data['sign'] = self._sign(post_data)  # sign the payload
    r = requests.post(self.api_url, data=post_data, allow_redirects=False)
    return self.compile_response(r)
def __init__(
        self,
        relative_path,
        abs_filepath=None,
        private_key=None,
        should_encrypt_file=False,
        is_deleted=False,
        is_dir=None,
        ipfs_key=None,
        doc_type=None,
        version=None,
        raw_content=None,
        files_info=None,
        real_relative_path=None,
        utc_offset=None,
):
    # One file (or directory) entry to be synced.
    if raw_content:
        # content was passed in directly, so abs_filepath is moot
        abs_filepath = None
    # externally-provided data object describing all files
    self.files_info_is_updated = False
    self.files_info = files_info
    self.real_relative_path = real_relative_path
    # normalized path, never starting with '/'
    self.relative_path = same_slash(relative_path).lstrip('/')
    self.path = self.relative_path
    self.abs_filepath = abs_filepath
    self.is_deleted = is_deleted
    self._is_dir = is_dir
    self._ipfs_key = ipfs_key
    self._doc_type = doc_type
    # besides submitting data to the server, this key is also used to
    # encrypt the file itself
    self.private_key = private_key
    self.should_encrypt_file = should_encrypt_file
    self.should_ignore_current_file = False
    self._raw_content = raw_content
    # byte form of the content, empty string when none was given
    self._raw_byte_content = smart_str(raw_content or '')
    self.version = version
    self.utc_offset = utc_offset
def __str__(self):
    # str() form of this object: its default_value, byte-encoded.
    value = self.default_value
    return smart_str(value)