Code example #1
File: views.py Project: Hans210302/web1
def openpic(request):
    import random
    from django.http import HttpResponse
    from qiniu import Auth, put_file, etag
    import qiniu.config
    from PIL import Image
    from aip import AipImageCensor
    a = request.FILES.get("imageData")  # the uploaded image
    b = random.randint(111111, 999999)
    key = 's2' + str(b) + '.jpg'
    pil_img = Image.open(a)  # avoid shadowing the PIL Image class
    pil_img.save('s1.jpg')  # save a local copy
    APP_ID = '10027473'
    API_KEY = 'vuVlmlKsXULFfo438jiWfxb0'
    SECRET_KEY = '33Iew8K4zT3hBBI1YqGPaK9vcHD5dAxG'
    client = AipImageCensor(APP_ID, API_KEY, SECRET_KEY)

    def get_file_content(filePath):
        with open(filePath, 'rb') as fp:
            return fp.read()

    img = get_file_content('s1.jpg')
    result = client.imageCensorUserDefined(img)
    if result['conclusion'] == '合规':
        # path = default_storage.save('s1.jpg', ContentFile(Image.read()))

        access_key = 'EjIqvG2iluFxOx-PzeIyCNUKqUL2je9Q5bfunOyg'
        secret_key = 'pqt97zqOt7cw07a9AWwEXPp0zofB_9swvTVJGOOr'
        q = Auth(access_key, secret_key)
        bucket_name = 'xunyingpicture'
        localfile = r"s1.jpg"
        token = q.upload_token(bucket_name, key)
        ret, info = put_file(token, key, localfile)
        # return the image URL
        return HttpResponse('http://files.g8.xmgc360.com/' + key)
    else:
        return HttpResponse("1")
Code example #2
File: test.py Project: SchroDr/WhistleWind
def AipImageCensoR(filePath):
    client = AipImageCensor(APP_ID, API_KEY, SECRET_KEY)
    result = client.imageCensorUserDefined(
        get_file_content(filePath))
    # result = client.imageCensorUserDefined('http://www.example.com/image.jpg')
    #print(result)
    return result['conclusionType']
Code example #3
def bad_words_filter(img_name):

    base_name = img_name.split('\\')[-1]
    text = ' '
    with open('data/results/' + 'res_{}.txt'.format(base_name.split('.')[0]),
              'r') as f:
        lines = f.readlines()
        for line in lines:
            str_arr = line.strip().split(',')
            if len(str_arr) > 4:
                for word in str_arr[4:]:
                    text += (word + ',')

    with open('data/results/' + 'res_{}.txt'.format(base_name.split('.')[0]),
              'a') as f:
        client = AipImageCensor(appId='18097522',
                                apiKey='bgeRXmpblbOMocErTxvMCiOF',
                                secretKey='SyRmwoFGsKXqC3LvAAYwlDlf1BckBOwG')
        spam_result = client.antiSpam(text)['result']  # call the API once and reuse the response
        if spam_result['spam'] == 0:
            f.write('无敏感信息')
        else:
            f.write('待检测的文本里面含有侮辱、色情、暴力和政治敏感词汇。\n')
            for i in spam_result['reject']:
                if (len(i['hit']) != 0):
                    f.write(str(i['hit']))
Code example #4
File: Util.py Project: wanghaoyu33437/ICare
 def __init__(self):
     # dummy test input
     test_case = [np.zeros(10)]
     print('**** Load dict ****')
     f = open(config._URL['URL_DICT_PATH'], encoding='utf8')
     char = f.read()
     url_dict = eval(char)
     f = open(config._DGA['DGA_DICT_PATH'], encoding='utf8')
     char = f.read()
     dga_dict = eval(char)
     f.close()
     self.DGA_dict = dga_dict
     self.Url_dict = url_dict
     # labels returned by the text-censoring step
     self.URL_labels = ['暴恐违禁', '文本色情', '政治敏感', '恶意推广', '低俗辱骂', '低质灌水']
     print('**** Load model ****')
     '''Create the variables needed to load the models'''
     self.graph1 = tf.Graph()
     self.sess1 = tf.Session(graph=self.graph1)
     self.graph2 = tf.Graph()
     self.sess2 = tf.Session(graph=self.graph2)
     '''Load each model in its own graph so it is not empty when used again'''
     with self.sess1.as_default():
         with self.graph1.as_default():
             self.DGAModel = tf.keras.models.load_model(
                 config._DGA['DGA_MODEL_PATH'])
             # run a dummy prediction at init time so the layers can be found
             test_case = tf.keras.preprocessing.sequence.pad_sequences(
                 test_case,
                 maxlen=self.DGA_dict['maxlen'],
             )
             self.DGAModel.predict(test_case, verbose=0)
             print('DGA模型初始化成功')
     keras.backend.clear_session()
     with self.sess2.as_default():
         with self.graph2.as_default():
             self.UrlModel = keras.models.load_model(
                 config._URL['URL_MODEL_PATH'])
             test_case = keras.preprocessing.sequence.pad_sequences(
                 test_case,
                 maxlen=self.Url_dict['maxlen'],
             )
             self.UrlModel.predict(test_case, verbose=0)
             print('URL模型初始化成功')
     self.iface = config.IFACE
     self.DGA_Flag = 1
     self.URL_Flag = 1
     self.mydb = connect(host='localhost',
                         user='******',
                         passwd='123456',
                         database='dgamonitoring')
     print('**** Connect mysql ****')
     self.Mycursor = self.mydb.cursor()
     self.mysqlPool = self.MysqlPool()
     print('**** Connect success ****')
     self.client = AipImageCensor(APP_ID, API_KEY, SECRET_KEY)
     pass
Code example #5
File: utils.py Project: wemecan/DjangoForum
def contentany(content):
    client = AipImageCensor('百度ai自己申请', '百度ai自己申请', '百度ai自己申请')
    result = client.textCensorUserDefined(content)  # the SDK already returns a dict
    if "error_code" in result:
        return result["error_msg"]
    if result["conclusionType"] != 1 \
            and result["conclusionType"] != 3:
        return result["data"][0]["msg"]
    return False
Code example #6
def test_article_filter():
    try:
        res = request.get_json()
        article_id = res.get('article_id')
        article_images = res.get('article_images')

        Logging.logger.info('request_args:{0}'.format(res))
        if not article_id:
            return jsonify(errno=-1, errmsg='参数错误,请传入要查询的文章的article_id')

        article = Article.query.get(article_id)
        if not article:
            return jsonify(errno=-1, errmsg='参数错误,该文章不存在')

        docs = mongo_store.articles.find({'title': article.title})
        doc = docs[0]
        article_dict = dict()
        content = doc.get('content')
        title = article.title.encode("utf-8")
        article_dict['title'] = title
        article_dict['content'] = content

        obj = SensitiveFilter()
        str11 = ''.join(
            [item.get('text') for item in content if item.get('text')])
        text = {'content': str11}
        txt_data = obj.content_check(text)
        if txt_data.get('errcode') == 40001:
            redis_store.delete('access_token')
            txt_data = obj.content_check(text)
        Logging.logger.info('res_data:{0}'.format(txt_data))

        APP_ID = '15791531'
        API_KEY = 'kajyVlP73XtSGBgoXDIHH5Za'
        SECRET_KEY = 'u2TClEW6LaHIIpRNdFcL2HIexcgG1ovC'

        client = AipImageCensor(APP_ID, API_KEY, SECRET_KEY)
        txt_resp = client.antiSpam(str11)
        Logging.logger.info('txt_resp:{0}'.format(txt_resp))

        for img in article_images:
            img_resp = client.imageCensorUserDefined(img)
            print(img_resp)
            Logging.logger.info('img_resp:{0}'.format(img_resp))
            # img_data = obj.img_check(img)
            # print img_data
        return jsonify(errno=0, errmsg="OK", data=txt_data)
    except Exception as e:
        Logging.logger.error('errmsg:{0}'.format(e))
        return jsonify(errno=-1, errmsg='文章详情查询失败')
Code example #7
    def __init__(self,
                 credential,
                 bounds=(0, 255),
                 channel_axis=3,
                 preprocessing=(0, 1)):

        from aip import AipImageCensor
        super(AipAntiPornModel, self).__init__(credential=credential,
                                               bounds=bounds,
                                               channel_axis=channel_axis,
                                               preprocessing=preprocessing)

        self._task = 'cls'
        self.model = AipImageCensor(self._appId, self._apiKey, self._secretKey)
Code example #8
def picture_review(content):
    pic = re.findall(r"<img src=(.*?)data", content)
    h = 0
    l = len(pic)
    client = AipImageCensor(APP_ID, API_KEY, SECRET_KEY)
    if l > 0:
        for i in range(l):
            url = pic[i][1:-2]  # use the i-th match, not always the first
            result = client.imageCensorUserDefined(url)
            print(result)
            if len(result['conclusion']) == 2:  # two characters => '合规' (compliant)
                h += 1
        print("图片通过:%d" % h)
        print("图片违规:%d" % (l - h))
    else:
        print("没有图片")
Code example #9
class AipAntiPornModel(AipModel):
    """Create a :class:`Model` instance from an `AipAntiPorn` model.

    Parameters
    ----------
    credential : tuple
        Tuple of (appId, apiKey, secretKey) for using AIP API.
    bounds : tuple
        Tuple of lower and upper bound for the pixel values, usually
        (0, 1) or (0, 255).
    channel_axis : int
        The index of the axis that represents color channels.
    preprocessing: 2-element tuple with floats or numpy arrays
        Elementwise preprocessing of input; we first subtract the first
        element of preprocessing from the input and then divide the input
        by the second element.

    """
    def __init__(self,
                 credential,
                 bounds=(0, 255),
                 channel_axis=3,
                 preprocessing=(0, 1)):

        from aip import AipImageCensor
        super(AipAntiPornModel, self).__init__(credential=credential,
                                               bounds=bounds,
                                               channel_axis=channel_axis,
                                               preprocessing=preprocessing)

        self._task = 'cls'
        self.model = AipImageCensor(self._appId, self._apiKey, self._secretKey)

    def predictions(self, image):
        """Get prediction for input image

        Parameters
        ----------
        image : `numpy.ndarray`
            The input image in [h, w, c] ndarray format.

        Returns
        -------
        list
            List of anti-porn prediction results.
            Each element is a dictionary containing:
            {'class_name', 'probability'}

        """

        image_bytes = ndarray_to_bytes(image)
        predictions = self.model.antiPorn(image_bytes)
        if 'result' in predictions:
            return predictions['result']
        return predictions

    def model_task(self):
        """Get the task that the model is used for."""
        return self._task
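
A minimal usage sketch based only on the docstring above (not part of the original project): it assumes AipAntiPornModel, its AipModel base class and the ndarray_to_bytes helper are importable from a local module of yours, and the credential values are placeholders to replace with real Baidu AIP keys.

import numpy as np
# from your_module import AipAntiPornModel  # hypothetical local module

# Placeholder credentials: (appId, apiKey, secretKey).
credential = ('your-app-id', 'your-api-key', 'your-secret-key')

# Defaults follow the docstring: pixel values in (0, 255), no extra preprocessing.
model = AipAntiPornModel(credential)

# Any [h, w, c] uint8 image array works as a smoke test.
image = np.zeros((224, 224, 3), dtype=np.uint8)

result = model.predictions(image)
if isinstance(result, list):
    for item in result:
        print(item['class_name'], item['probability'])
else:
    # when the API reports an error, predictions() returns the raw response dict
    print(result)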
Code example #10
File: function.py Project: Leoleocheng/MEETJINN
def baiduimg(s):
    """ 你的 APPID AK SK """
    APP_ID = '14440883'
    API_KEY = 'kT8o0IxopsPwp8dswWCV54ww'
    SECRET_KEY = '8jPrgXlKj7c2mvDtkeqcTQe83RhdKLzV'
    client = AipImageCensor(APP_ID, API_KEY, SECRET_KEY)

    def get_file_content(filePath):
        with open(filePath, 'rb') as fp:
            return fp.read()

    result = client.imageCensorUserDefined(get_file_content(r's1.' + s))
    print(result)
    if str(result['conclusionType']) == '1':
        return ('1')
    else:
        return (result['data'][0]['msg'])
Code example #11
File: myocr.py Project: onlinesen/rct
 def repic():
     from aip import AipImageCensor
     APP_ID = '10805647'
     API_KEY = '4BORuFSWdXtODzh8gjFVUzKB'
     SECRET_KEY = 'uZG60psAKFxRYZuqtQdTbree4ilaaPbB'
     client = AipImageCensor(APP_ID, API_KEY, SECRET_KEY)
     with open(os.getcwd() + "/tmp.png", 'rb') as fp:
         options = {
             'detect_direction': 'true',
             'language_type': 'CHN_ENG',
         }
         result = client.imageCensorComb(fp.read(), [
             'clarity',
             'antiporn',
         ])
     rrresult = json.dumps(result, ensure_ascii=False)  # keep non-ASCII text readable
     print(rrresult)
Code example #12
    def identity_picture(self, file_name, flag):
        """

        :param file_name: a web image URL, or the path to a local image file
        :param flag: pass 'url' for a web image or 'local' for a local file
        :return:
        """
        s_client = AipImageCensor(self.APP_ID, self.API_KEY, self.SECRET_KEY)
        if flag == 'local':
            with open(file_name, 'rb') as f:
                try:
                    res = s_client.imageCensorUserDefined(f.read())
                    print(res)
                except TypeError:
                    print("类型错误")
                    raise

        elif flag == 'url':
            res = s_client.imageCensorUserDefined(file_name)
            print(res)
Code example #13
class TextReview():
    def __init__(self):
        self.client = AipImageCensor(APP_ID, API_KEY, SECRET_KEY)

    def recognize(self, text):
        ans = self.client.antiSpam(text)
        if ('error_msg' in ans.keys()):
            print(ans['error_msg'])
            return 0
        ans = ans['result']
        if (ans['reject'] == [] and ans['review'] == []):  #pass
            return 0
        return 1
Code example #14
class BaiduAntiSpam(object):
    def __init__(self, api):
        """
        :param api: ['15052846', 'SiU9AAGaZn2Zja7d8iSVqce5', 'P6NZ07ROvKTXFnSlDMmH4hf1smOxbfAA']
        """
        self.api = api
        self.client = AipImageCensor(*self.api)

    @retry(tries=3, delay=2)
    def anti_spam(self, text):
        _ = self.client.antiSpam(text)
        _['log_id'] = text
        return _

    def get_result(self, file, corpus):
        with open(file, 'a') as f:
            for s in tqdm_notebook(corpus):
                res = self.anti_spam(s)
                f.writelines("%s\n" % res)
Code example #15
File: Util.py Project: wanghaoyu33437/ICare
class Util:
    class MysqlPool(object):
        """
        MySQL connection pool
        """
        def __init__(self,
                     host="127.0.0.1",
                     port="3306",
                     user="******",
                     password="******",
                     database="dgamonitoring",
                     pool_name="mypool",
                     pool_size=10):
            res = {}
            self._host = host
            self._port = port
            self._user = user
            self._password = password
            self._database = database
            res["host"] = self._host
            res["port"] = self._port
            res["user"] = self._user
            res["password"] = self._password
            res["database"] = self._database
            self.dbconfig = res
            self.pool = self.create_pool(pool_name=pool_name,
                                         pool_size=pool_size)

        def create_pool(self, pool_name="mypool", pool_size=10):
            pool = pooling.MySQLConnectionPool(pool_name=pool_name,
                                               pool_size=pool_size,
                                               pool_reset_session=True,
                                               **self.dbconfig)
            return pool

        def close(self, conn, cursor):
            cursor.close()
            conn.close()

        def execute(self, sql, args=None, commit=False):
            """
            Execute a SQL statement.
            args may be passed as a tuple such as (1, 2, 3,).
            DQL statements do not need the commit parameter;
            write statements must set commit=True.
            """
            # get connection form connection pool instead of create one.
            conn = self.pool.get_connection()
            cursor = conn.cursor()
            if args:
                cursor.execute(sql, args)
            else:
                cursor.execute(sql)
            if commit is True:
                conn.commit()
                self.close(conn, cursor)
                return None
            else:
                res = cursor.fetchall()
                self.close(conn, cursor)
                return res

        def executemany(self, sql, args, commit=False):

            conn = self.pool.get_connection()
            cursor = conn.cursor()
            cursor.executemany(sql, args)
            if commit is True:
                conn.commit()
                self.close(conn, cursor)
                return None
            else:
                res = cursor.fetchall()
                self.close(conn, cursor)
                return res

    def __init__(self):
        # dummy test input
        test_case = [np.zeros(10)]
        print('**** Load dict ****')
        f = open(config._URL['URL_DICT_PATH'], encoding='utf8')
        char = f.read()
        url_dict = eval(char)
        f = open(config._DGA['DGA_DICT_PATH'], encoding='utf8')
        char = f.read()
        dga_dict = eval(char)
        f.close()
        self.DGA_dict = dga_dict
        self.Url_dict = url_dict
        # labels returned by the text-censoring step
        self.URL_labels = ['暴恐违禁', '文本色情', '政治敏感', '恶意推广', '低俗辱骂', '低质灌水']
        print('**** Load model ****')
        '''Create the variables needed to load the models'''
        self.graph1 = tf.Graph()
        self.sess1 = tf.Session(graph=self.graph1)
        self.graph2 = tf.Graph()
        self.sess2 = tf.Session(graph=self.graph2)
        '''Load each model in its own graph so it is not empty when used again'''
        with self.sess1.as_default():
            with self.graph1.as_default():
                self.DGAModel = tf.keras.models.load_model(
                    config._DGA['DGA_MODEL_PATH'])
                # run a dummy prediction at init time so the layers can be found
                test_case = tf.keras.preprocessing.sequence.pad_sequences(
                    test_case,
                    maxlen=self.DGA_dict['maxlen'],
                )
                self.DGAModel.predict(test_case, verbose=0)
                print('DGA模型初始化成功')
        keras.backend.clear_session()
        with self.sess2.as_default():
            with self.graph2.as_default():
                self.UrlModel = keras.models.load_model(
                    config._URL['URL_MODEL_PATH'])
                test_case = keras.preprocessing.sequence.pad_sequences(
                    test_case,
                    maxlen=self.Url_dict['maxlen'],
                )
                self.UrlModel.predict(test_case, verbose=0)
                print('URL模型初始化成功')
        self.iface = config.IFACE
        self.DGA_Flag = 1
        self.URL_Flag = 1
        self.mydb = connect(host='localhost',
                            user='******',
                            passwd='123456',
                            database='dgamonitoring')
        print('**** Connect mysql ****')
        self.Mycursor = self.mydb.cursor()
        self.mysqlPool = self.MysqlPool()
        print('**** Connect success ****')
        self.client = AipImageCensor(APP_ID, API_KEY, SECRET_KEY)
        pass

    def Sniff_DGA(self):
        # clear old data
        # sql = 'delete from dga_response;'
        # self.Mycursor.execute(sql)
        # self.mydb.commit()
        # self.mysqlPool.execute(sql,commit=True)
        print('**** Start Monitoring traffic ****')
        # keep sniffing in a loop in this process
        self.DGA_Flag = 1
        while self.DGA_Flag:
            sniff(prn=self.callback_DGA,
                  iface=self.iface,
                  filter='udp port 53',
                  count=2)
        self.DGA_Flag = 1

        # sniff(prn=capture,iface='Realtek PCIe GBE Family Controller',filter='udp port 53',count=20)
    def callback_DGA(self, packet):
        if packet:
            # print("抓包:",packet)
            i = 0
            for p in packet:
                # some packets have no IP layer, only IPv6
                # print(p[IP].src)
                # query/response flag: 0 = query, 1 = response
                qr = str(p[i][DNS].qr)
                src = p[i][IP].src
                dst = p[i][IP].dst
                # return code: 0 = no error, 3 = name error, 2 = server failure
                rcode = str(p[i][DNS].rcode)
                if '0' in qr:
                    qr = 'Query'
                    # domain name
                    qname = p[i][DNS].qd.qname
                    if type(qname) == bytes:
                        qname = (qname.decode('utf-8'))[:-1]
                    domainArray = qname.split('.')[:-1]
                    domain = [[self.DGA_dict[x] for x in y]
                              for y in domainArray if len(y) > 1]
                    domain = tf.keras.preprocessing.sequence.pad_sequences(
                        domain, maxlen=self.DGA_dict['maxlen'])
                    with self.sess1.as_default():
                        with self.graph1.as_default():
                            pre = np.max(self.DGAModel.predict(domain))
                    # dga_request=Request(domain=domain,pre=float(pre))
                    # dga_request.save()
                    oldQname = ''
                    sql = "insert into dga_flow(src,dst,domain,type,prediction) values(%s,%s,%s,%s,%s)"
                    val = (src, dst, qname, 'request', float(pre))
                    self.mysqlPool.execute(sql, val, commit=True)
                    # self.Mycursor.execute(sql, val)
                    # self.mydb.commit()
                    print("Found DGA Request:-->", qname, "--- Pre :", pre)
                if '1' in qr:
                    if '0' in rcode:
                        for j in range(10):
                            try:
                                rrname = p[j][DNS].an[j].rrname
                                rdata = p[j][DNS].an[j].rdata
                                if type(rrname) == bytes:
                                    rrname = (rrname.decode('utf-8'))[:-1]
                                if type(rdata) == bytes:
                                    rdata = (rdata.decode('utf-8'))[:-1]
                                    # print("数据"+rdata)
                                domainArray = rrname.split('.')[:-1]
                                domain = [[self.DGA_dict[x] for x in y]
                                          for y in domainArray if len(y) > 1]
                                domain = tf.keras.preprocessing.sequence.pad_sequences(
                                    domain, maxlen=self.DGA_dict['maxlen'])
                                with self.sess1.as_default():
                                    with self.graph1.as_default():
                                        pre = np.max(
                                            self.DGAModel.predict(domain))
                                oldRname = ''
                                sql = "insert into dga_flow(src,dst,domain,type,prediction) values(%s,%s,%s,%s,%s)"
                                val = (src, dst, rrname, 'response',
                                       float(pre))
                                self.mysqlPool.execute(sql, val, commit=True)
                                print("Found DGA Response-->", rrname,
                                      "---Pre :", pre)
                            except Exception as e:
                                pass
                i = i + 1

    '''
    Monitor URLs
    '''

    def Sniff_URL(self):
        # sql = 'delete from URl_response'
        # sql1 = 'delete from URl_request'
        # self.Mycursor.execute(sql)
        # self.Mycursor.execute(sql1)
        # self.mydb.commit()
        print('**** StartMonitoring traffic ****')
        self.URL_Flag = 1
        # start a thread to monitor TCP port 80
        while self.URL_Flag:
            sniff(prn=self.callBack_URL,
                  iface=self.iface,
                  filter='tcp',
                  count=5)
        # sniff(prn=capture,iface='Realtek PCIe GBE Family Controller',filter='udp port 53',count=20)
        self.URL_Flag = 1
        print("****** 监测结束 *****")
        pass

    def callBack_URL(self, packet):
        if packet:
            i = 0
            src = '0.0.0.0'
            dst = '0.0.0.0'
            if (packet.haslayer(IP)):
                src = packet[IP].src
                dst = packet[IP].dst
            try:
                if packet.haslayer('HTTP'):
                    p = packet["HTTP"]
                    try:
                        if p.haslayer('HTTPRequest'):
                            a = p["HTTPRequest"]
                            method = bytes.decode(a.Method)
                            if (a.Host != None):
                                Url = 'http://' + bytes.decode(
                                    a.Host) + bytes.decode(a.Path)
                                url = [[self.Url_dict[x] for x in Url]]
                                url = keras.preprocessing.sequence.pad_sequences(
                                    url, maxlen=self.Url_dict['maxlen'])
                                with self.sess2.as_default():
                                    with self.graph2.as_default():
                                        pre = np.max(
                                            self.UrlModel.predict(url))
                                if (pre >= 0.7):
                                    '''if the URL's malicious score exceeds 0.7, crawl and analyse the page'''
                                    t = threading.Thread(
                                        target=self.AnalysisUrl(Url))
                                    t.start()
                                print("Request url is :", Url, "pre :", pre)
                                sql = "insert into url_flow(src,dst,url,pre) values(%s,%s,%s,%s)"
                                val = (src, dst, Url, float(pre))
                                self.mysqlPool.execute(sql, val, commit=True)
                    except IndexError:
                        pass
                    try:
                        a = p["HTTPResponse"]
                        print('响应Url:', a.Location)
                    except IndexError:
                        pass
                    try:
                        a = p["Raw"]
                        try:
                            b = p["HTTPRequest"]
                            print("请求数据:", a.load)
                        except IndexError:
                            pass
                        try:
                            b = p["HTTPResponse"]
                            print('响应数据', a.load)
                        except IndexError:
                            pass
                        '''
                        Parse the payload data here;
                        the packets could also be saved to disk.
                        '''
                    except IndexError:
                        pass
            except IndexError:
                pass

    def getBaiduTextDivideRes(self, text):
        url = 'https://aip.baidubce.com/rest/2.0/antispam/v2/spam?access_token=24.d6402afd2e5dc564214b9a69a68d48fd.2592000.1571732535.282335-17305394'
        data = urllib.parse.urlencode({
            'content': text
        }).encode(encoding='utf8')
        request = urllib.request.Request(url, data=data)
        # set the request header
        request.add_header('Content-Type', 'application/x-www-form-urlencoded')
        response = urllib.request.urlopen(request)
        res = response.read()
        j = json.loads(res.decode('utf8'))
        review = j.get('result').get('review')
        reject = j.get('result').get('reject')
        print(review, reject)
        return review, reject

    def AnalysisUrl(self, url):
        req = urllib.request.Request(url)
        try:
            res = urllib.request.urlopen(req, timeout=5)
            result = res.read()
            html = BeautifulSoup(result.decode('utf8'), 'lxml')
            # regex filtering
            text = html.get_text(strip=True)
            result_list = list()  # violation messages (stays empty if the page is compliant)
            if len(text) > 5000:
                limit = 0
                while 1:
                    # check 5,000 characters per request
                    if len(text[limit:limit + 5000]) == 0:
                        break
                    res = self.client.textCensorUserDefined(
                        (text[limit:limit + 5000]))
                    if res['conclusion'] != '合规':
                        result_list = list()
                        try:
                            datas = res['data']
                            for data in datas:
                                if (data['msg'] != '存在百度官方默认违禁词库不合规'):
                                    result_list.append(
                                        data['msg'].strip("存在").strip("不合规"))
                        except Exception as e:
                            print(e)
                    limit += 5000
            else:
                res = self.client.textCensorUserDefined((text))
                if res['conclusion'] != '合规':
                    result_list = list()
                    try:
                        datas = res['data']
                        for data in datas:
                            if (data['msg'] != '存在百度官方默认违禁词库不合规'):
                                result_list.append(
                                    data['msg'].strip("存在").strip("不合规"))
                    except Exception as e:
                        print(e)
            sql = "update url_flow set status=%s where url= %s"
            val = (str(result_list), url)
            self.mysqlPool.execute(sql, val, commit=True)
        except urllib.error.URLError:
            print('网页不可访问')
Code example #16
File: tp_check.py Project: hexiaowanmei/meets
from aip import AipImageCensor


# image censoring
filePath = '../media/upload/aa.jpg'


def get_picture(filePath):
    with open(filePath, 'rb') as f:
        return f.read()


APP_ID = '16295532'
API_KEY = 'gWSlD06wqf8yADLNw1PSdIhI'
SECRET_KEY = 'pqxslzYtZvoDrpEN3qtebSexw8vqDlR7'


client = AipImageCensor(APP_ID, API_KEY, SECRET_KEY)


result = client.imageCensorUserDefined(get_picture(filePath))
print(result)

if __name__ == '__main__':
    get_picture(filePath)


Code example #17
 def get_check_json(cls, content):
     client = AipImageCensor(cls.APP_ID, cls.API_KEY, cls.SECRET_KEY)
     return client.antiSpam(content=content)
Code example #18
File: urls.py Project: slxsxy/csgo_full
                password=GlobalVar.get_value('g_redis_password')))

# https://www.vaptcha.com
GlobalVar.set_value('g_vaptcha_id', '0')
GlobalVar.set_value('g_vaptcha_secretkey', '0')
websocket_clients = {}
GlobalVar.set_value('g_websocket_clients', websocket_clients)
# https://console.bce.baidu.com/

GlobalVar.set_value('g_baidu_APP_ID', '0')
GlobalVar.set_value('g_baidu_API_KEY', '0')
GlobalVar.set_value('g_baidu_APP_SECKEY', '0')
GlobalVar.set_value(
    'g_baidu_APP',
    AipImageCensor(GlobalVar.get_value('g_baidu_APP_ID'),
                   GlobalVar.get_value('g_baidu_API_KEY'),
                   GlobalVar.get_value('g_baidu_APP_SECKEY')))
websocket_urlpatterns = [
    path('websocket/room/', web_socket.websocket_main),
]
application = ProtocolTypeRouter({
    'websocket':
    AuthMiddlewareStack(URLRouter(websocket_urlpatterns)),
})
urlpatterns = [
    path('favicon.ico', serve, {'path': 'images/favicon.ico'}),
    path('ha4k1r_admin/', view.admin, name='admin_index'),
    path('bind_steam/', view.bind_steam, name='bind_steam'),
    re_path(r'^$', view.index, name='index'),
    re_path(r'^bind_steamid_process/(?P<key>\w+)/$',
            view.steam_login,
Code example #19
 def jh(self, file_name):
     client = AipImageCensor(self.W_APP_ID, self.API_KEY, self.SECRET_KEY)
     res = client.imageCensorUserDefined(file_name)
     print(res)
Code example #20
def initspam():
    global client1
    APP_ID = '14422159'
    API_KEY = 'psxn324sUoqFaNMjyob3FaqN'
    SECRET_KEY = 'jakFwZEk63dTPr52ibznaBXoI0To2GEs'
    client1 = AipImageCensor(APP_ID, API_KEY, SECRET_KEY)
Code example #21
File: CheckSetu.py Project: Heptazhou/liqibot2
def Check_Baidu():
    imgContent, imgType = downloadImg(imgurl)
    if len(imgContent) < 5e3 or len(imgContent) > 4e6:
        return
    if imgType not in ['jpg', 'jpeg', 'png']:
        return

    from aip import AipImageCensor

    censor_APP_ID = '15708523'
    censor_API_KEY = '6USQY453ZVSjxYYej1F195IZ'
    censor_SECRET_KEY = 'r0rtrpRj8eHRnkPCfEhjOPRh2eO997Uv'

    # a second Baidu account (the zero probability threshold keeps it disabled)
    if random.random() < 0:
        censor_APP_ID = '22908418'
        censor_API_KEY = 'Fz2zsXkSFmdH4BgcSwvGLeNP'
        censor_SECRET_KEY = 'VSbOwvqUNzG1cDW89O9iTnnweLvLyGGM'

    #classify_APP_ID = '17981247'
    #classify_API_KEY = '3HuleW8fwIPymQcRM1DNhigp'
    #classify_SECRET_KEY = 'LcClAOmKwGSIXR2st8ishMXUPXkiLaaI'

    censor_client = AipImageCensor(censor_APP_ID, censor_API_KEY,
                                   censor_SECRET_KEY)
    censor_result = censor_client.antiPorn(imgContent)

    #print(censor_result)
    if 'result_fine' in censor_result:
        # per-class probability thresholds above which the image is flagged
        # (the two "正常" classes use > 1.0 and therefore never trigger)
        thresholds = {
            '一般色情': 0.9, '卡通色情': 0.51, 'SM': 0.65, '艺术品色情': 0.9,
            '儿童裸露': 0.9, '低俗': 0.95, '性玩具': 0.7, '女性性感': 0.85,
            '卡通女性性感': 0.51, '男性性感': 0.95, '自然男性裸露': 0.95,
            '亲密行为': 0.90, '卡通亲密行为': 0.65, '特殊类': 0.95,
            '一般正常': 1.0, '卡通正常': 1.0, '臀部特写': 0.85,
            '裆部特写': 0.75, '脚部特写': 0.85, '孕肚裸露': 0.99,
        }
        for each in censor_result['result_fine']:
            limit = thresholds.get(each['class_name'])
            if limit is not None and each['probability'] > limit:
                print('色图!', end='')
                saveImg(imgurl)
                break
Code example #22
from aip import AipImageCensor
""" 你的 APPID AK SK """
APP_ID = '10699663'
API_KEY = 'h6laYDwumS3UzwiCCjYGQPkP'
SECRET_KEY = 'd9GFQc6ntq0pBGHRv6YrFxxfEfDYq3QH'

client = AipImageCensor(APP_ID, API_KEY, SECRET_KEY)


def get_file_content(filePath):
    with open(filePath, 'rb') as fp:
        return fp.read()


img = get_file_content('cb.jpg')
result = client.imageCensorUserDefined(img)
print(result)
# if isinstance(result, dict):
#     if 'conclusion' in result:
#         if result['conclusion'] != u'合规':
#             for data in result['data']:
#                 print(data['msg'])
Code example #23
from django.shortcuts import render
from django.http import HttpResponse, JsonResponse
from . import models
import json

import random
from aip import AipImageCensor

APP_ID = '20127419'
API_KEY = 'HnCMlNQBzVsQr4z3S57vClIK'
SECRET_KEY = 'hqpehal5EiZtqs3bjCgoroLpRF9PZwLW'
client = AipImageCensor(APP_ID, API_KEY, SECRET_KEY)


def check(text):
    result = client.textCensorUserDefined(text)
    if result.get('conclusion') == '不合规':
        return True
    return False


# add a comment
# todo: automatic moderation
def addComment(request):
    comment = models.Comment()
    comment.Department = request.GET['Department']
    comment.Grade = request.GET['Grade']
    comment.Identity = request.GET['Identity']
    comment.Name = request.GET['Name']
    comment.Post = request.GET['Post']
    comment.Phone = request.GET['Phone']
Code example #24
from aip import AipImageCensor

# """ 你的 APPID AK SK """
# APP_ID = '17899842'
# API_KEY = 'VM5S1ouZgwmk7Icc8x2l75pK'
# SECRET_KEY = '9Cvxl1qB4zGuidQod9Sw6zadQHcnIQZz'

""" API """
APP_ID = '19891414'  # your appid
API_KEY = 'usNMOs7sdzwep3QOgY4xoFV2'  # your apikey
SECRET_KEY = 'gw5vs1lKOLRGE6MstE8X8zbmxsXVBWIg'  # your secretkey


client = AipImageCensor(APP_ID, API_KEY, SECRET_KEY)
""" read the image """
def get_file_content(filePath):
    with open(filePath, 'rb') as fp:
        return fp.read()

""" 调用色情识别接口 """
result = client.imageCensorUserDefined(get_file_content('sex3.jpg'))
""" 如果图片是url调用如下 """
#result = client.imageCensorUserDefined('https://imgsa.baidu.com/forum/w%3D580/sign=559cf2cad93f8794d3ff4826e21a0ead/f3bc0ddda3cc7cd94ac925ab3401213fb90e91eb.jpg')
print(result)

#官方测试:
Code example #25
from aip import AipImageCensor
import passwd

APP_ID = passwd.sex.APP_ID  # your app id; hidden here and kept in passwd.py, apply for your own
API_KEY = passwd.sex.API_KEY  # your api key
SECRET_KEY = passwd.sex.SECRET_KEY  # your secret key

client = AipImageCensor(APP_ID, API_KEY, SECRET_KEY)


# read the image
def get_file_content(file_path):
    with open(file_path, 'rb') as fp:
        return fp.read()


# call the porn-detection endpoint
# result = client.imageCensorUserDefined(get_file_content('sex1.png'))
# result = client.imageCensorUserDefined(get_file_content('sex2.jpg'))
# result = client.imageCensorUserDefined(get_file_content('sex3.jpg'))
# if the image is a URL, call it as follows
result = client.imageCensorUserDefined(
    'https://imgsa.baidu.com/forum/w%3D580/sign=559cf2cad93f8794d3ff4826e21a0ead/f3bc0ddda3cc7cd94ac925ab3401213fb90e91eb.jpg'
)
print(result)

# official demo: https://ai.baidu.com/tech/imagecensoring
Code example #26
File: wz_check.py Project: hexiaowanmei/meets
	def check_Image(self, filepath):
		Image_client = AipImageCensor(self.APP_ID, self.API_KEY, self.SECRET_KEY)
		result = Image_client.imageCensorUserDefined(self.get_file_content(filepath))
		return result
Code example #27
from aip import AipImageCensor
""" 你的 APPID AK SK """
APP_ID = '11156578'
API_KEY = '3K73kH6H4aGoZbUrE1N0oTO5'
SECRET_KEY = 'YoL5g6BCnWG4mQvEo0TjyDPozlySdDRp'

client = AipImageCensor(APP_ID, API_KEY, SECRET_KEY)
""" 读取图片 """


def get_file_content(filePath):
    with open(filePath, 'rb') as fp:
        return fp.read()


""" 调用色情识别接口 """
result = client.imageCensorUserDefined(get_file_content('c:/code/11.jpg'))
result2 = client.imageCensorUserDefined(get_file_content('c:/code/22.jpg'))
print(result)
print(result2)
""" 如果图片是url调用如下 """
result3 = client.imageCensorUserDefined('http://www.example.com/image.jpg')
Code example #28
from aip import AipImageCensor

""" 你的 APPID AK SK """
APP_ID = '11156578'
API_KEY = '3K73kH6H4aGoZbUrE1N0oTO5'
SECRET_KEY = 'YoL5g6BCnWG4mQvEo0TjyDPozlySdDRp'
client = AipImageCensor(APP_ID, API_KEY, SECRET_KEY)
result = client.antiSpam('民警提醒说,此类骗局中,通常骗子要求汇款的理由包括“发红包”、“买礼物”、“生病就医”、“凑路费”、“生意需要资金”、“见面需要彩礼”等各种理由。因此,微信交友遇到这样情形要小心防骗')

print(result)
Code example #29
 def __init__(self):
     self.client = AipImageCensor(APP_ID, API_KEY, SECRET_KEY)
Code example #30
#
# # """ 调用人体检测与属性识别 """
# clients.bodyAttr(image)
#
# # """ 如果有可选参数 """
# options = {}
# options["age"] = "gender,age,upper_color"
#
# # """ 带参数调用人体检测与属性识别 """
# res = clients.bodyAttr(image, options)
# # print(res)
# c = res['person_info'][0]['attributes']
# # print(c)
# for k,v in c.items():
#     print(k,v['name'])

# 图片鉴黄
s_client = AipImageCensor(APP_ID, API_KEY, SECRET_KEY)
""" 读取图片 """


def get_file_content(filePath):
    with open(filePath, 'rb') as fp:
        return fp.read()


""" 调用色情识别接口 """
result = s_client.imageCensorUserDefined(
    get_file_content('images/12345678.jpg'))
print(result)
Code example #31
# -*- coding: utf-8 -*-

# define constants
APP_ID = '9839224'
API_KEY = '38aM2cGHnGXgfjwPgNv3hgHN'
SECRET_KEY = 'ze0DckCR2GTpFcz8LX17L61Ec8NV9Bc7'

# import the AipImageCensor SDK
from aip import AipImageCensor


# initialize the AipImageCensor object
aipImageCensor = AipImageCensor(APP_ID, API_KEY, SECRET_KEY)

# read the image
def get_file_content(filePath):
    with open(filePath, 'rb') as fp:
        return fp.read()

# call the porn-detection endpoint
result = aipImageCensor.antiPorn(get_file_content('antiporn.jpg'))
print(result)