def txtaudit(cont):
    # Text review: returns the Baidu censor's conclusion string (e.g. '合规' for compliant)
    from aip import AipContentCensor  # credentials can be requested from the Baidu AI console
    APP_ID = '23489175'
    API_KEY = 'ur1buDW12v3KvxUCZoFnWQNm'
    SECRET_KEY = 'iNIGdhkmlZka7ZgVwoZKOGmkS26umYpA'
    client = AipContentCensor(APP_ID, API_KEY, SECRET_KEY)
    result = client.textCensorUserDefined(cont)
    return result['conclusion']
def imgaudit(img):
    # Image review: reads the uploaded file from MEDIA_ROOT/images and returns the conclusion string
    import os
    from aip import AipContentCensor  # credentials can be requested from the Baidu AI console
    from django.conf import settings
    APP_ID = '23489175'
    API_KEY = 'ur1buDW12v3KvxUCZoFnWQNm'
    SECRET_KEY = 'iNIGdhkmlZka7ZgVwoZKOGmkS26umYpA'
    client = AipContentCensor(APP_ID, API_KEY, SECRET_KEY)
    imgpath = os.path.join(settings.MEDIA_ROOT, "images", img)
    with open(imgpath, "rb") as fp:
        img_bytes = fp.read()
    resultimg = client.imageCensorUserDefined(img_bytes)
    return resultimg['conclusion']
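# Usage sketch (not part of the original snippets): one way the two helpers above could be
# wired into a Django view before persisting user input. `create_post`, the `Post` model and
# the request field names are hypothetical; '合规' ("compliant") is the conclusion value the
# snippets themselves compare against.
from django.http import JsonResponse

def create_post(request):
    if txtaudit(request.POST.get('content', '')) != '合规':
        return JsonResponse({'status': 'rejected', 'reason': 'text'}, status=400)
    image_name = request.POST.get('image_name')  # file assumed already saved under MEDIA_ROOT/images/
    if image_name and imgaudit(image_name) != '合规':
        return JsonResponse({'status': 'rejected', 'reason': 'image'}, status=400)
    # Post is a hypothetical model standing in for whatever the project persists
    Post.objects.create(content=request.POST['content'], image=image_name)
    return JsonResponse({'status': 'ok'})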
from aip import AipContentCensor


class Monitoring():
    # `models` below refers to the Django models module of the enclosing app (imported elsewhere)
    APP_ID = '16206995'
    API_KEY = 'D4GGTm9oiDePu3GG9mMYszWu'
    SECRET_KEY = 'Zz46qd8P1eIdXwksCr3ZSMpILlnPE9EG'

    def __init__(self):
        self.client = AipContentCensor(self.APP_ID, self.API_KEY, self.SECRET_KEY)

    def get_file_content(self, filePath):
        with open(filePath, 'rb') as fp:
            return fp.read()

    def AipContentCensoR(self, strContent):
        # Text review via a raw POST to the user-defined text-censoring endpoint
        data = {'text': str(strContent)}
        url = 'https://aip.baidubce.com/rest/2.0/solution/v1/text_censor/v2/user_defined'
        res = self.client.post(url=url, data=data)
        return res['conclusionType']

    def AipImageCensoR(self, filePath):
        # Image review on the raw file bytes
        result = self.client.imageCensorUserDefined(
            self.get_file_content(filePath))
        return result['conclusionType']

    def testUntestedImages(self):
        images = models.Image.objects.filter(tested=False)
        for image in images:
            image.conclusionType = self.AipImageCensoR(image.url)
            image.tested = True
            image.save()

    def testUntestedMessages(self):
        # Messages carry text, so they go through the text censor rather than the image censor
        messages = models.Message.objects.filter(tested=False)
        for message in messages:
            message.conclusionType = self.AipContentCensoR(message.content)
            message.tested = True
            if message.conclusionType == 2:  # 2 = non-compliant
                message.deleted = 1
            message.save()

    def testUntestedComments(self):
        comments = models.Comment.objects.filter(tested=False)
        for comment in comments:
            comment.conclusionType = self.AipContentCensoR(comment.content)
            comment.tested = True
            if comment.conclusionType == 2:  # 2 = non-compliant
                comment.deleted = 1
            comment.save()
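# Usage sketch (an assumption, not taken from the original code): Monitoring is presumably
# driven by some scheduler. A minimal periodic loop with threading.Timer, mirroring the Timer
# import that appears further below, might look like this; the 60-second interval is arbitrary.
from threading import Timer

def run_monitoring(interval=60):
    monitor = Monitoring()
    monitor.testUntestedImages()
    monitor.testUntestedMessages()
    monitor.testUntestedComments()
    # re-arm the timer so the moderation pass keeps running periodically
    Timer(interval, run_monitoring, args=(interval,)).start()

# run_monitoring()  # start the periodic moderation pass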
def detectPicture(self, file_name):
    print("Detect Picture")
    APP_ID = '23065324'
    API_KEY = 'ZiDIBN37iBIrUm06Llxrpvcj'
    SECRET_KEY = 'Kw6sQs80MDrBFOrqICghUGG2SIvqywUv'
    # Look up a cached result by the file's MD5 before calling the API
    md5 = get_md5_01(file_name)
    pg.sel_statement(md5)
    res = pg.get_result_set()
    before = time.time()
    if res is not None:
        result = res
    else:
        client = AipContentCensor(APP_ID, API_KEY, SECRET_KEY)
        result = client.imageCensorUserDefined(get_file_content(file_name))
    after = time.time()
    print(after - before)
    # self.cnt += 1
    # self.pictureAverageTime += after - before
    print(result)
    # Write the verdict to ./PictureResult/<name>_detection_result.json
    isExists = os.path.exists('./PictureResult')
    if not isExists:
        os.makedirs('./PictureResult')
    os.chdir('./PictureResult')
    with open('{}_detection_result.json'.format(file_name.split('.')[0]), 'w') as json_file:
        json_file.write('{\n')
        json_file.write('"log_id": "{}",\n'.format(result['log_id']))
        json_file.write('"md5": "{}",'.format(md5))
        if result['conclusion'] == '合规':  # '合规' == compliant
            json_file.write('\n')
            json_file.write('"conclusion" : "合规"}')
        else:
            json_file.write('\n')
            json_file.write('"conclusion": "{}"\n'.format(
                result['data'][0]['msg']))
            json_file.write('}')
    os.chdir('..')
def Check_Baidu(imgurl, imgname):
    imgContent = downloadImg(imgurl)
    # Skip images smaller than ~20 KB or larger than ~10 MB
    if len(imgContent) < 2e4 or len(imgContent) > 1e7:
        return
    censor_APP_ID = '22842022'
    censor_API_KEY = 'SEBH4QACKkEpGX7NRr7f4tYY'
    censor_SECRET_KEY = '0oI6FfOHbCuWSFlbgIpnlsBUGkKfOgxt'
    #classify_APP_ID = '17981247'
    #classify_API_KEY = '3HuleW8fwIPymQcRM1DNhigp'
    #classify_SECRET_KEY = 'LcClAOmKwGSIXR2st8ishMXUPXkiLaaI'
    censor_client = AipContentCensor(censor_APP_ID, censor_API_KEY, censor_SECRET_KEY)
    censor_result = censor_client.imageCensorUserDefined(imgurl)  # the image URL is passed straight to the censor
    if 'data' in censor_result:
        # Log every hit together with its probability
        s = ''
        for each in censor_result['data']:
            s = s + each['msg'] + str(each['probability']) + ' '
        nonebot.log.logger.debug(s)
        for each in censor_result['data']:
            #print('type', each['type'], 'prob', each['probability'])
            if each['msg'] == '存在卡通色情不合规' and each['probability'] > 0.25:  # cartoon porn
                nonebot.log.logger.debug('卡通色情%.6f' % each['probability'])
                saveImg(imgurl, imgname)
                return 1
            elif each['msg'] == '存在卡通女性性感不合规' and each['probability'] > 0.25:  # cartoon sexually suggestive (female)
                nonebot.log.logger.debug('卡通女性性感%.6f' % each['probability'])
                saveImg(imgurl, imgname)
                return 1
            elif each['msg'] == '存在卡通亲密行为不合规' and each['probability'] > 0.25:  # cartoon intimate behaviour
                nonebot.log.logger.debug('卡通亲密行为%.6f' % each['probability'])
                saveImg(imgurl, imgname)
                return 1
    return 0
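# Check_Baidu relies on downloadImg/saveImg helpers defined elsewhere in the bot. A minimal
# sketch of what they might look like, using requests and a local save directory; both the
# use of requests and the directory name are assumptions, not taken from the original code.
import os
import requests

def downloadImg(imgurl):
    # return the raw bytes of the remote image (empty bytes on failure)
    try:
        resp = requests.get(imgurl, timeout=10)
        return resp.content if resp.status_code == 200 else b''
    except requests.RequestException:
        return b''

def saveImg(imgurl, imgname, save_dir='saved_images'):
    # keep a local copy of an image that the censor flagged
    os.makedirs(save_dir, exist_ok=True)
    with open(os.path.join(save_dir, imgname), 'wb') as fp:
        fp.write(downloadImg(imgurl))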
import time, jieba, pymysql
import pandas as pd
from threading import Timer  # scheduled execution
from sklearn.feature_extraction.text import CountVectorizer  # term-frequency counting

# Baidu text-censoring API client
from aip import AipContentCensor

APP_ID = "22982291"
API_KEY = "mnnvDxGFRVENkCdANdqYLqVl"
SECRET_KEY = "k2GUNHGt8GnbTxemGrtb3RsucBY6wgNs"
client = AipContentCensor(APP_ID, API_KEY, SECRET_KEY)


# Shared helper for connecting to the database
def con_sql():
    config = {
        "host": "127.0.0.1",
        "port": 3306,
        "user": "******",
        "password": '******',
        "charset": 'utf8mb4',
        "database": "hao1",
    }
    conn = pymysql.connect(**config)
    cursor = conn.cursor()  # query results are returned as tuples by default
    return conn, cursor


def get_time():
    # Current time formatted as "YYYY年MM月DD日 HH:MM:SS"
    time_str = time.strftime("%Y{}%m{}%d{} %X")
    return time_str.format("年", "月", "日")
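# Sketch of how the pieces above are typically combined (the table and column names are
# placeholders, not taken from the original code): pull unreviewed rows, run them through the
# Baidu text censor, store the conclusion, and re-schedule the job with Timer.
def review_pending_comments():
    conn, cursor = con_sql()
    cursor.execute("SELECT id, content FROM comments WHERE reviewed = 0")
    for row_id, content in cursor.fetchall():
        verdict = client.textCensorUserDefined(content)  # dict containing a 'conclusion' field
        cursor.execute(
            "UPDATE comments SET reviewed = 1, conclusion = %s WHERE id = %s",
            (verdict.get('conclusion', ''), row_id),
        )
    conn.commit()
    cursor.close()
    conn.close()
    Timer(300, review_pending_comments).start()  # run again in five minutes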
def AipContentCensoR(strContent):
    client = AipContentCensor(APP_ID, API_KEY, SECRET_KEY)
    data = {'text': str(strContent)}
    url = 'https://aip.baidubce.com/rest/2.0/solution/v1/text_censor/v2/user_defined'
    res = client.post(url=url, data=data)
    return res['conclusionType']
def __init__(self):
    self.client = AipContentCensor(self.APP_ID, self.API_KEY, self.SECRET_KEY)