Code example #1
File: BBScan.py  Project: MythHack/BBScan
def save_report_thread(q_results, file):
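        # Report writer thread: drains q_results, renders everything collected so far with the
        # selected (HTML or Markdown) template set, and rewrites report/<report_name> on each pass.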
        start_time = time.time()
        if args.md:
            a_template = template['markdown']
        else:
            a_template = template['html']

        t_general = Template(a_template['general'])
        t_host = Template(a_template['host'])
        t_list_item = Template(a_template['list_item'])
        output_file_suffix = a_template['suffix']

        all_results = []
        report_name = os.path.basename(file).lower().replace('.txt', '') \
            + '_' + time.strftime('%Y%m%d_%H%M%S', time.localtime()) + output_file_suffix

        global STOP_ME
        try:
            while not STOP_ME:
                if q_results.qsize() == 0:
                    time.sleep(0.1)
                    continue

                html_doc = ""
                while q_results.qsize() > 0:
                    all_results.append(q_results.get())

                for item in all_results:
                    host, results = item
                    _str = ""
                    for key in results.keys():
                        for _ in results[key]:
                            _str += t_list_item.substitute(
                                {'status': _['status'], 'url': _['url'], 'title': _['title']}
                            )
                    _str = t_host.substitute({'host': host, 'list': _str})
                    html_doc += _str

                cost_time = time.time() - start_time
                cost_min = int(cost_time / 60)
                cost_seconds = '%.2f' % (cost_time % 60)
                html_doc = t_general.substitute(
                    {'cost_min': cost_min, 'cost_seconds': cost_seconds, 'content': html_doc}
                )

                with codecs.open('report/%s' % report_name, 'w', encoding='utf-8') as outFile:
                    outFile.write(html_doc)

            if all_results:
                print '[%s] Scan report saved to report/%s' % (get_time(), report_name)
                if args.browser:
                    webbrowser.open_new_tab(os.path.abspath('report/%s' % report_name))
            else:
                lock.acquire()
                print '[%s] No vulnerabilities found on sites in %s.' % (get_time(), file)
                lock.release()

        except Exception, e:
            print '[save_report_thread Exception] %s %s' % (type(e), str(e))
            sys.exit(-1)
Code example #3
File: BBScan.py  Project: c4bbage/BBScan
def save_report_thread(q_results, file):
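    # Older variant: keeps polling q_results until STOP_ME is set and the queue is empty,
    # regenerating report/<report_name>.html whenever new results arrive.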
    start_time = time.time()
    t_html = Template(TEMPLATE_html)
    t_host = Template(TEMPLATE_host)
    t_normal = Template(TEMPLATE_list_item)
    all_results = []
    report_name = os.path.basename(file).lower().replace('.txt', '') + '_' + \
                  time.strftime('%Y%m%d_%H%M%S', time.localtime()) + '.html'

    last_qsize = 0
    global STOP_ME
    try:
        while not (STOP_ME and q_results.qsize() == 0):
            if q_results.qsize() == last_qsize:
                time.sleep(1.0)
                continue
            else:
                last_qsize = q_results.qsize()
            html_doc = ""
            while q_results.qsize() > 0:
                all_results.append(q_results.get())
            for item in all_results:
                host, results = item
                _str = ""
                for key in results.keys():
                    for _ in results[key]:
                        _str += t_normal.substitute({
                            'status': _['status'],
                            'url': _['url'],
                            'title': _['title']
                        })
                _str = t_host.substitute({'host': host, 'list': _str})
                html_doc += _str

            if all_results:
                cost_time = time.time() - start_time
                cost_min = int(cost_time / 60)
                cost_seconds = '%.2f' % (cost_time % 60)
                html_doc = t_html.substitute({
                    'cost_min': cost_min,
                    'cost_seconds': cost_seconds,
                    'content': html_doc
                })

                with open('report/%s' % report_name, 'w') as outFile:
                    outFile.write(html_doc)

        if all_results:
            print '[%s] Scan report saved to report/%s' % (get_time(),
                                                           report_name)
            if args.browser:
                webbrowser.open_new_tab(
                    os.path.abspath('report/%s' % report_name))
        else:
            lock.acquire()
            print '[%s] No vulnerabilities found on sites in %s.' % (
                get_time(), file)
            lock.release()
    except IOError, e:
        sys.exit(-1)
Code example #4
File: BBScan.py  Project: uppentest/BBScan
def save_report_thread(q_results, file):
        start_time = time.time()
        t_html = Template(TEMPLATE_html)
        t_host = Template(TEMPLATE_host)
        t_normal = Template(TEMPLATE_list_item)
        all_results = []
        report_name = os.path.basename(file).lower().replace('.txt', '') + '_' + \
                      time.strftime('%Y%m%d_%H%M%S', time.localtime()) + '.html'

        last_qsize = 0
        global STOP_ME
        try:
            while not (STOP_ME and q_results.qsize() == 0):
                if q_results.qsize() == last_qsize:
                    time.sleep(1.0)
                    continue
                else:
                    last_qsize = q_results.qsize()
                html_doc = ""
                while q_results.qsize() > 0:
                    all_results.append(q_results.get())
                for item in all_results:
                    host, results = item
                    _str = ""
                    for key in results.keys():
                        for _ in results[key]:
                            _str += t_normal.substitute( {'status': _['status'], 'url': _['url']} )
                    _str = t_host.substitute({'host': host, 'list': _str})
                    html_doc += _str

                if all_results:
                    cost_time = time.time() - start_time
                    cost_min = int(cost_time / 60)
                    cost_seconds = '%.2f' % (cost_time % 60)
                    html_doc = t_html.substitute({'cost_min': cost_min, 'cost_seconds': cost_seconds, 'content': html_doc})

                    with open('report/%s' % report_name, 'w') as outFile:
                        outFile.write(html_doc)


            if all_results:
                print '[%s] Scan report saved to report/%s' % (get_time(), report_name)
                if args.browser:
                    webbrowser.open_new_tab(os.path.abspath('report/%s' % report_name))
            else:
                lock.acquire()
                print '[%s] No vulnerabilities found on sites in %s.' % (get_time(), file)
                lock.release()
        except IOError, e:
            sys.exit(-1)
Code example #6
File: BBScan.py  Project: MythHack/BBScan
 def is_port_open(self):
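     # TCP connect check: returns True when host:port (default 443 for https, otherwise 80)
     # accepts a connection within 4 seconds.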
     try:
         s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
         s.settimeout(4.0)
         default_port = 443 if self.schema.lower() == 'https' else 80
         host, port = self.host.split(':') if self.host.find(':') > 0 else (self.host, default_port)
         if s.connect_ex((host, int(port))) == 0:
             s.close()
             print '[%s] Scan %s' % (get_time(), self.host)
             return True
         else:
             s.close()
             print '[%s] Fail to connect to %s' % (get_time(), self.host)
             return False
     except Exception, e:
         return False
Code example #7
def domain_lookup():
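    # Resolve each host pulled from q_hosts; IP literals and resolvable names are queued as
    # scan targets and their addresses appended to ips_to_scan.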
    r = Resolver()
    r.timeout = r.lifetime = 8.0
    while True:
        try:
            host = q_hosts.get(timeout=0.1)
            print "[%s] host=> %s" % ("domain_lookup", host)
        except:
            break
        _schema, _host, _path = parse_url(host)
        try:
            m = re.search('\d+\.\d+\.\d+\.\d+', _host.split(':')[0])
            if m:
                q_targets.put({'file': '', 'url': host})
                ips_to_scan.append(m.group(0))
            else:
                answers = r.query(_host.split(':')[0])
                if answers:
                    q_targets.put({'file': '', 'url': host})
                    for _ in answers:
                        ips_to_scan.append(_.address)
        except Exception, e:
            lock.acquire()
            print '[%s][Warning] Invalid domain: [%s]' % (get_time(), host)
            lock.release()
Code example #8
File: BBScan.py  Project: moss1993/BBScan
    def check_404(self):
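        # Request a path that should not exist to learn how the server answers missing
        # resources: a real 404, or a custom error page whose length is kept for comparison.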
        try:
            try:
                s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                s.settimeout(5.0)
                default_port = 443 if self.schema.lower() == 'https' else 80
                host, port = self.host.split(
                    ':') if self.host.find(':') > 0 else (self.host,
                                                          default_port)
                if s.connect_ex((host, int(port))) == 0:
                    s.close()
                    self._404_status, headers, html_doc = \
                        self._http_request('/bbscan_wants__your_response.php')
                else:
                    self._404_status, headers, html_doc = -1, {}, ''
            except:
                self._404_status, headers, html_doc = -1, {}, ''
            finally:
                s.close()

            if self._404_status == -1:
                print '[%s] [ERROR] Fail to connect to %s' % (get_time(),
                                                              self.host)
            self.has_404 = (self._404_status == 404)
            if not self.has_404:
                self.len_404_doc = len(html_doc)
            return self.has_404
        except Exception, e:
            logging.error("[Check_404] Exception %s" % e)
Code example #9
File: admin_interface.py  Project: AliceMye/yyy
def upload_movie(recv_dic,conn):
    # The movie file already exists on the client; upload it directly
    file_name = common.get_session() + recv_dic['movie_name']
    file_size = recv_dic['file_size']

    path = os.path.join(setttings.UPLOAD_MOVIE_PATH,file_name)

    movie_obj = models.Movie(name=file_name,
                 path=path,
                 is_free=recv_dic['is_free'],
                 file_md5=recv_dic['file_md5'],
                 is_delete=0,
                 upload_time=common.get_time(),
                 user_id=recv_dic['user_id'])

    # Write the file to disk
    recv_size = 0
    with open(path, 'wb') as f:
        while recv_size<file_size:
            data = conn.recv(1024)
            f.write(data)
            recv_size += len(data)
        print('下载成功')


    send_dic = {"flag":True,'msg':'上传成功'}

    movie_obj.save()
    common.send_msg(send_dic,conn)
Code example #10
File: BBScan.py  Project: uppentest/BBScan
    def check_404(self):
        try:
            try:
                s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                s.settimeout(5.0)
                default_port = 443 if self.schema.lower() == 'https' else 80
                host, port = self.host.split(':') if self.host.find(':') > 0 else (self.host, default_port)
                if s.connect_ex((host, int(port))) == 0:
                    s.close()
                    self._404_status, headers, html_doc = \
                        self._http_request('/bbscan_wants__your_response.php')
                else:
                    self._404_status, headers, html_doc = -1, {}, ''
            except:
                self._404_status, headers, html_doc = -1, {}, ''
            finally:
                s.close()

            if self._404_status == -1:
                print '[%s] [ERROR] Fail to connect to %s' % (get_time(), self.host)
            self.has_404 = (self._404_status == 404)
            if not self.has_404:
                self.len_404_doc = len(html_doc)
            return self.has_404
        except Exception, e:
            logging.error("[Check_404] Exception %s" % e)
Code example #11
File: BBScan.py  Project: uppentest/BBScan
def batch_scan(q_targets, q_results, lock, args):
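        # Scan worker: pulls targets (a URL or a crawler .log file) from q_targets, runs
        # InfoDisScanner against each one and pushes (host, results) onto q_results.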
        s = InfoDisScanner(args.timeout*60, args=args)
        while True:
            try:
                target = q_targets.get(timeout=0.1)
            except:
                break
            _url = target['url']
            _file = target['file']

            #lock.acquire()
            print '[%s] Scan %s' % (get_time(), _url if _url else os.path.basename(_file).rstrip('.log') )
            #lock.release()
            if _url:
                s.init_from_url(_url)
            else:
                if os.path.getsize(_file) > 0:
                    s.init_from_file(_file)
                    if s.host == '':
                        continue
                else:
                    continue
            host, results = s.scan(threads=args.t)
            if results:
                q_results.put( (host, results) )
                for key in results.keys():
                    for url in results[key]:
                        print  '[+] [%s] %s' % (url['status'], url['url'])
Code example #12
def download_movie_interface(client_back_dic, conn):
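    # Reply with the movie size (plus a wait time for non-VIP users when the movie is free),
    # hand movie_path to common.send_data for transfer, then record the download.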

    movie_id = client_back_dic.get('movie_id')
    movie_name = client_back_dic.get('movie_name')
    movie_type = client_back_dic.get('movie_type')
    user_id = client_back_dic.get('user_id')
    # movie_obj = models.Movie.select(id=movie_id)[0]
    # movie_path = movie_obj.path
    movie_path = os.path.join(settings.DOWNLOAD_PATH, movie_name)
    movie_size = os.path.getsize(movie_path)
    send_dic = {'flag': True, 'msg': '准备下载', 'movie_size': movie_size}
    user_obj = models.User.select(id=user_id)[0]

    if movie_type == '免费':
        wait_time = 0

        if not user_obj.is_vip:

            wait_time = 20

        send_dic['wait_time'] = wait_time

    print(send_dic)
    common.send_data(send_dic, conn, movie_path)

    obj = models.DownloadRecord(user_id=user_id,
                                movie_id=movie_id,
                                download_time=common.get_time())
    obj.save()
Code example #13
File: tmp.py  Project: z3roTo0ne/SubDomainsResultDeal
def domain_lookup(q_targets, q_hosts, lock, ips_to_scan):
    r = Resolver()
    r.timeout = r.lifetime = 8.0
    while True:
        try:
            host = q_hosts.get(timeout=0.1)
        except:
            break
        _schema, _host, _path = parse_url(host)
        #print "_schema:{0}\t_host:{1}\t_path:{2}".format(_schema, _host, _path)
        #print _host.split(":")[0]
        try:
            m = re.search('\d+\.\d+\.\d+\.\d+', _host.split(':')[0])
            if m:
                q_targets.put({'file': '', 'url': host})
                ips_to_scan.append(m.group(0))
                #print "in try->if"
            else:
                # Queue the url as a target whether or not the DNS lookup succeeds
                q_targets.put({'file': '', 'url': host})
                answers = r.query(_host.split(':')[0])
                if answers:
                    for _ in answers:
                        ips_to_scan.append(_.address)
        except Exception, e:
            lock.acquire()
            print '[%s][Warning] Invalid domain: [%s]' % (get_time(), host)
            print str(e)
            lock.release()
Code example #14
File: BBScan.py  Project: moss1993/BBScan
def batch_scan(q_targets, q_results, lock, args):
    s = InfoDisScanner(args.timeout * 60, args=args)
    while True:
        try:
            target = q_targets.get(timeout=0.1)
        except:
            break
        _url = target['url']
        _file = target['file']

        #lock.acquire()
        print '[%s] Scan %s' % (get_time(), _url if _url else
                                os.path.basename(_file).rstrip('.log'))
        #lock.release()
        if _url:
            s.init_from_url(_url)
        else:
            if os.path.getsize(_file) > 0:
                s.init_from_file(_file)
                if s.host == '':
                    continue
            else:
                continue
        host, results = s.scan(threads=args.t)
        if results:
            q_results.put((host, results))
            for key in results.keys():
                for url in results[key]:
                    print '[+] [%s] %s' % (url['status'], url['url'])
Code example #15
def register_interface(client_back_dic, conn):

    # Business logic
    # 1. Check whether the username already exists
    username = client_back_dic.get('username')
    # Query with the username as the filter
    user_obj_list = models.User.select(name=username)

    # If it exists, tell the client that the user already exists
    if user_obj_list:
        send_dic = {'flag': False, 'msg': '用户已存在!'}

    # If it does not exist, save the record to MySQL and report success to the client
    else:
        password = client_back_dic.get('password')
        user_obj = models.User(
            name=username,
            #  pwd, is_vip, is_locked, user_type, register_time
            pwd=common.get_md5_pwd(password),
            is_vip=0,  # 0 means not VIP, 1 means VIP
            is_locked=0,  # 0 means unlocked, 1 means locked
            user_type=client_back_dic.get('user_type'),
            register_time=common.get_time())

        user_obj.save()

        send_dic = {'flag': True, 'msg': '注册成功'}

    common.send_data(send_dic, conn)
Code example #16
def upload_movie_interface(client_back_dic, conn):
    print('炮王来交货啦!')

    # Make the movie name unique: random string + original movie name
    movie_name = common.get_random_code() + client_back_dic.get('movie_name')  # .mp4

    movie_size = client_back_dic.get('file_size')

    movie_path = os.path.join(
        settings.DOWNLOAD_PATH, movie_name
    )

    # 1. Receive the uploaded movie
    data_recv = 0
    with open(movie_path, 'wb') as f:
        while data_recv < movie_size:
            data = conn.recv(1024)
            f.write(data)
            data_recv += len(data)

    # 2. Save the movie metadata to MySQL
    movie_obj = models.Movie(
        name=movie_name, file_md5=client_back_dic.get('file_md5'),
        is_free=client_back_dic.get('is_free'), is_delete=0,
        path=movie_path, user_id=client_back_dic.get('user_id'),
        upload_time=common.get_time()
    )
    movie_obj.save()

    send_dic = {
        'flag': True, 'msg': f'{client_back_dic.get("movie_name")}电影上传成功!'
    }

    common.send_data(send_dic, conn)
Code example #17
 def is_port_open(self):
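     # Same connect check, but the finally block sets SO_LINGER with a zero timeout so
     # close() tears the connection down immediately rather than lingering in TIME_WAIT.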
     try:
         s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
         s.settimeout(5.0)
         default_port = 443 if self.schema.lower() == 'https' else 80
         host, port = self.host.split(':') if self.host.find(':') > 0 else (self.host, default_port)
         if s.connect_ex((host, int(port))) == 0:
             print '[%s] Scan %s' % (get_time(), self.host)
             return True
         else:
             print '[%s] [ERROR] Fail to connect to %s' % (get_time(), self.host)
             return False
     except Exception as  e:
         return False
     finally:
         s.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER, struct.pack('ii', 1, 0))
         s.close()
Code example #18
File: AllRecipes.py  Project: killix/recipes
def prep_time():
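	# Pull the prep-time text out of the parsed recipe page (id='liPrep') and convert it
	# via common.get_time(); return 0 if the element is missing.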
	if tree is None:
		raise Exception("parsing.set() a doc first")
	try:
		time_str = ' '.join(tree.find(id='liPrep').text.split()[1:])	
		return common.get_time(time_str.strip())
	except AttributeError:
		return 0
Code example #19
File: admin_interface.py  Project: AliceMye/yyy
def release_notice(recv_dic,conn):
    # Publish the notice directly
    # Record which administrator posted it via user_id

    notice_obj = models.Notice(title=recv_dic['title'],
                               content=recv_dic['content'],
                               user_id=recv_dic['user_id'],
                               create_time=common.get_time())
    notice_obj.save()
    send_dic = {'flag': True, 'msg': '发布公告成功'}
    common.send_msg(send_dic, conn)
Code example #20
def put_notice_interface(client_back_dic, conn):
    title = client_back_dic.get('title')
    content = client_back_dic.get('content')
    user_id = client_back_dic.get('user_id')
    notice_obj = models.Notice(title=title, content=content, user_id=user_id,
                  create_time=common.get_time())

    notice_obj.save()

    send_dic = {
        'msg': '公告发布成功!'
    }

    common.send_data(send_dic, conn)
Code example #21
File: BBScan.py  Project: uppentest/BBScan
 def init_final(self):
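     # Work out the maximum crawl depth, fingerprint the 404 behaviour (unless
     # args.no_check404 is set), then seed the URL queue and optionally crawl the index.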
     self.max_depth = self._cal_depth(self.path)[1] + 3     # max depth to scan
     if self.args.no_check404:
         self._404_status = 404
         self.has_404 = True
     else:
         self.check_404()           # check existence of HTTP 404
         if self._404_status == -1:
             return
     if not self.has_404:
         print '[%s] [Warning] %s has no HTTP 404.' % (get_time(), self.host)
     _path, _depth = self._cal_depth(self.path)
     self._enqueue('/')
     self._enqueue(_path)
     if not self.args.no_crawl and not self.file:
         self.crawl_index(_path)
Code example #22
File: common_interface.py  Project: AliceMye/yyy
def register(recv_dic, conn):
    user_list = models.User.select(name=recv_dic['name'])

    if not user_list:
        # Register the new user
        user_obj = models.User(name=recv_dic['name'],
                               password=recv_dic['password'],
                               is_vip=0,
                               is_locked=0,
                               user_type=recv_dic['user_type'],
                               register_time=common.get_time())
        user_obj.save()
        send_dic = {'flag': True, 'msg': '注册成功'}
    else:
        send_dic = {'flag': False, 'msg': '用户名已存在'}
    common.send_msg(send_dic, conn)
Code example #23
 def init_final(self):
     if not self.is_port_open():
         return
     self.base_url = '%s://%s' % (self.schema, self.host)
     self.max_depth = self._cal_depth(self.path)[1] + 5
     if self.args.no_check404:
         self._404_status = 404
         self.has_404 = True
     else:
         self.check_404()           # check existence of HTTP 404
     if not self.has_404:
         print '[%s] [Warning] %s has no HTTP 404.' % (get_time(), self.host)
     _path, _depth = self._cal_depth(self.path)
     # self._enqueue('/')
     self._enqueue(_path)
     if not self.args.no_crawl and not self.log_file:
         self.crawl_index(_path)
Code example #24
File: BBScan.py  Project: moss1993/BBScan
 def init_final(self):
     self.max_depth = self._cal_depth(self.path)[1] + 3  # max depth to scan
     if self.args.no_check404:
         self._404_status = 404
         self.has_404 = True
     else:
         self.check_404()  # check existence of HTTP 404
         if self._404_status == -1:
             return
     if not self.has_404:
         print '[%s] [Warning] %s has no HTTP 404.' % (get_time(),
                                                       self.host)
     _path, _depth = self._cal_depth(self.path)
     self._enqueue('/')
     self._enqueue(_path)
     if not self.args.no_crawl and not self.file:
         self.crawl_index(_path)
Code example #25
File: BBScan.py  Project: MythHack/BBScan
 def init_final(self):
     if not self.is_port_open():
         return
     self.base_url = '%s://%s' % (self.schema, self.host)
     self.max_depth = self._cal_depth(self.path)[1] + 5
     self.session = requests.session()
     if self.args.no_check404:
         self._404_status = 404
         self.has_404 = True
     else:
         self.check_404()           # check existence of HTTP 404
     if not self.has_404:
         print '[%s] [Warning] %s has no HTTP 404.' % (get_time(), self.host)
     _path, _depth = self._cal_depth(self.path)
     self._enqueue('/')
     self._enqueue(_path)
     if not self.args.no_crawl and not self.log_file:
         self.crawl_index(_path)
Code example #26
def upload_movie(user_dic, conn):
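    # Receive file_size bytes over the socket, store them under BASE_MOVIE_LIST with a
    # timestamped name, then save the Movie record.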
    recv_size = 0
    print('----->', user_dic['file_name'])
    file_name = common.get_time() + user_dic['file_name']
    path = os.path.join(setting.BASE_MOVIE_LIST, file_name)
    with open(path, 'wb') as f:
        while recv_size < user_dic['file_size']:
            recv_data = conn.recv(1024)
            f.write(recv_data)
            recv_size += len(recv_data)
            # print('recvsize:%s filesize:%s' % (recv_size, user_dic['file_size']))
    print('%s :上传成功' % file_name)
    movie = models.Movie(name=file_name,
                         path=path,
                         is_free=user_dic['is_free'],
                         user_id=user_dic['user_id'],
                         file_md5=user_dic['file_md5'])
    movie.save()
    back_dic = {'flag': True, 'msg': '上传成功'}
    return back_dic
Code example #27
File: BBScan.py  Project: MythHack/BBScan
def domain_lookup():
    r = Resolver()
    r.timeout = r.lifetime = 8.0
    while True:
        try:
            host = q_hosts.get(timeout=0.1)
        except:
            break
        _schema, _host, _path = parse_url(host)
        try:
            m = re.search('\d+\.\d+\.\d+\.\d+', _host.split(':')[0])
            if m:
                q_targets.put({'file': '', 'url': host})
                ips_to_scan.append(m.group(0))
            else:
                answers = r.query(_host.split(':')[0])
                if answers:
                    q_targets.put({'file': '', 'url': host})
                    for _ in answers:
                        ips_to_scan.append(_.address)
        except Exception, e:
            lock.acquire()
            print '[%s][Warning] Invalid domain: [%s]' % (get_time(), host)
            lock.release()
Code example #28
        input_files = glob.glob(args.d + '/*.txt')
    elif args.crawler:
        input_files = ['crawler']
    elif args.host:
        input_files = ['hosts']    # several hosts on command line

    ips_to_scan = []    # all IPs to be scanned during current scan

    for file in input_files:
        if args.host:
            lines = [' '.join(args.host)]
        elif args.f or args.d:
            with open(file) as inFile:
                lines = inFile.readlines()
        try:
            print '[%s] Batch Web Scan start.' % get_time()
            q_results = multiprocessing.Manager().Queue()
            q_targets = multiprocessing.Manager().Queue()
            lock = multiprocessing.Manager().Lock()
            STOP_ME = False

            threading.Thread(target=save_report_thread, args=(q_results, file)).start()
            print '[%s] Report thread created, prepare target Queue...' % get_time()

            if args.crawler:
                _input_files = glob.glob(args.crawler + '/*.log')
                for _file in _input_files:
                    q_targets.put({'file': _file, 'url': ''})

            if args.host or args.f or args.d:
                q_hosts = Queue()
Code example #29
File: BBScan.py  Project: MythHack/BBScan
    def _scan_worker(self):
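        # Core scan loop: expand rule placeholders ({sub}, {hostname}, {hostname_or_folder},
        # {parent}) in each queued URL, request it, and apply status/content-type/tag/length
        # heuristics before recording a finding under its rule prefix.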
        while self.url_queue.qsize() > 0:
            if time.time() - self.START_TIME > self.TIME_OUT:
                self.url_queue.queue.clear()
                print '[%s] [ERROR] Timed out task: %s' % (get_time(), self.host)
                return
            try:
                item = self.url_queue.get(timeout=0.1)
            except:
                return
            try:
                url_description, tag, code, content_type, content_type_no = item
                prefix = url_description['prefix']
                url = url_description['full_url']
                url = url.replace('{sub}', self.host.split('.')[0])
                if url.find('{hostname_or_folder}') >= 0:
                    _url = url[: url.find('{hostname_or_folder}')]
                    folders = _url.split('/')
                    for _folder in reversed(folders):
                        if _folder not in ['', '.', '..']:
                            url = url.replace('{hostname_or_folder}', _folder)
                            break
                url = url.replace('{hostname_or_folder}', self.host.split(':')[0])
                url = url.replace('{hostname}', self.host.split(':')[0])
                if url.find('{parent}') > 0:
                    if url.count('/') < 2:
                        continue
                    ret = url.split('/')
                    ret[-2] = ret[-1].replace('{parent}', ret[-2])
                    url = '/' + '/'.join(ret[:-1])

            except Exception, e:
                logging.error('[_scan_worker Exception] [1] %s' % str(e))
                continue
            if not item or not url:
                break

            # print '[%s]' % url.strip()
            try:
                status, headers, html_doc = self._http_request(url)
                cur_content_type = headers.get('content-type', '')

                if cur_content_type.find('image/') >= 0:    # exclude image type
                    continue

                if len(html_doc) < 10:    # data too short
                    continue

                if not self.exclude_text(html_doc):    # exclude text found
                    continue

                valid_item = False
                if self.find_text(html_doc):
                    valid_item = True
                else:
                    if status != code and status in [301, 302, 400, 404, 500, 501, 502, 503, 505]:
                        continue
                    if cur_content_type.find('application/json') >= 0 and \
                            not url.endswith('.json'):    # no json
                        continue

                    if tag:
                        if html_doc.find(tag) >= 0:
                            valid_item = True
                        else:
                            continue    # tag mismatch

                    if content_type and cur_content_type.find(content_type) < 0 \
                            or content_type_no and cur_content_type.find(content_type_no) >= 0:
                        continue    # type mismatch

                    if self.has_404 or status != self._404_status:
                        if code and status != code and status != 206:    # code mismatch
                            continue
                        elif code != 403 and status == 403:
                            continue
                        else:
                            valid_item = True

                    if not self.has_404 and status in (200, 206) and url != '/' and not tag:
                        _len = len(html_doc)
                        _min = min(_len, self.len_404_doc)
                        if _min == 0:
                            _min = 10.0
                        if float(_len - self.len_404_doc) / _min > 0.3:
                            valid_item = True

                    if status == 206:
                        if cur_content_type.find('text') < 0 and cur_content_type.find('html') < 0:
                            valid_item = True

                if valid_item:
                    self.lock.acquire()
                    # print '[+] [Prefix:%s] [%s] %s' % (prefix, status, 'http://' + self.host +  url)
                    if prefix not in self.results:
                        self.results[prefix] = []
                    m = re.search('<title>(.*?)</title>', html_doc)
                    title = m.group(1) if m else ''

                    _ = {'status': status, 'url': '%s%s' % (self.base_url, url), 'title': title}
                    if _ not in self.results[prefix]:
                        self.results[prefix].append(_)
                    self.lock.release()

                if len(self.results) >= 10:
                    print '[ERROR] Over 10 vulnerabilities found [%s], seems to be false positives.' % prefix
                    self.url_queue.queue.clear()
            except Exception, e:
                logging.error('[_scan_worker.Exception][2][%s] %s' % (url, str(e)))
                traceback.print_exc()
Code example #30
File: BBScan.py  Project: uppentest/BBScan
    def _scan_worker(self):
        while self.url_queue.qsize() > 0:
            if time.time() - self.START_TIME > self.TIME_OUT:
                print '[%s] [ERROR] Timed out task: %s' % (get_time(), self.host)
                return
            try:
                item = self.url_queue.get(timeout=0.1)
            except:
                return
            try:
                url_description, tag, code, content_type, content_type_no = item
                url = url_description['full_url']
                url = url.replace('{sub}', self.host.split('.')[0])
                prefix = url_description['prefix']
                if url.find('{hostname_or_folder}') >= 0:
                    _url = url[: url.find('{hostname_or_folder}')]
                    if _url.count('/') == 1:
                        url = url.replace('{hostname_or_folder}', self.host)
                    elif _url.count('/') > 1:
                        url = url.replace('{hostname_or_folder}', _url.split('/')[-2])
                url = url.replace('{hostname}', self.host)
                if url.find('{parent}') > 0:
                    if url.count('/') >= 2:
                        ret = url.split('/')
                        ret[-2] = ret[-1].replace('{parent}', ret[-2])
                        url =  '/' + '/'.join(ret[:-1])
                    else:
                        continue
            except Exception, e:
                logging.error('[_scan_worker Exception 1] %s' % e)
                continue
            if not item or not url:
                break

            #print '[%s]' % url.strip()
            try:
                status, headers, html_doc = self._http_request(url)

                if headers.get('content-type', '').find('image/') >= 0:    # exclude image type
                    continue

                if html_doc.strip() == '' or len(html_doc) < 10:    # data too short
                    continue

                if not self.exclude_text(html_doc):    # exclude text found
                    continue

                valid_item = False
                if status == 200 and  self.find_text(html_doc):
                    valid_item = True
                else:
                    if status in [400, 404, 503, 502, 301, 302]:
                        continue
                    if  headers.get('content-type', '').find('application/json') >= 0 and \
                            not url.endswith('.json'):    # no json
                        continue

                    if tag:
                        if html_doc.find(tag) >= 0:
                            valid_item = True
                        else:
                            continue    # tag mismatch

                    if content_type and headers.get('content-type', '').find(content_type) < 0 or \
                        content_type_no and headers.get('content-type', '').find(content_type_no) >=0:
                        continue    # type mismatch

                    if self.has_404 or status!=self._404_status:
                        if code and status != code and status != 206:    # code mismatch
                            continue
                        elif code!= 403 and status == 403:
                            continue
                        else:
                            valid_item = True

                    if (not self.has_404) and status in (200, 206) and item[0]['full_url'] != '/' and (not tag):
                        _len = len(html_doc)
                        _min = min(_len, self.len_404_doc)
                        if _min == 0:
                            _min = 10
                        if abs(_len - self.len_404_doc) / float(_min) > 0.3:
                            valid_item = True

                    if status == 206:
                        if headers.get('content-type', '').find('text') < 0 and headers.get('content-type', '').find('html') < 0:
                            valid_item = True
                        else:
                            continue

                if valid_item:
                    self.lock.acquire()
                    # print '[+] [Prefix:%s] [%s] %s' % (prefix, status, 'http://' + self.host +  url)
                    if not prefix in self.results:
                        self.results[prefix]= []
                    _ = {'status': status, 'url': '%s://%s%s' % (self.schema, self.host, url)}
                    if _ not in self.results[prefix]:
                        self.results[prefix].append(_)
                    self.lock.release()

                if len(self.results) >= 10:
                    print 'More than 10 vulnerabilities found for [%s], seems to be false positives, exit.' % prefix
                    return
            except Exception, e:
                logging.error('[InfoDisScanner._scan_worker][2][%s] Exception %s' % (url, e))
                import traceback
                traceback.print_exc()
Code example #31
File: BBScan.py  Project: uppentest/BBScan
        input_files = glob.glob(args.d + '/*.txt')
    elif args.crawler:
        input_files = ['crawler']
    elif args.host:
        input_files = ['hosts']    # several hosts on command line

    scanned_ips = []    # all scanned IPs in current scan

    for file in input_files:
        if args.host:
            lines = [' '.join(args.host)]
        elif args.f or args.d:
            with open(file) as inFile:
                lines = inFile.readlines()
        try:
            print '[%s] Batch web scan start.' % get_time()
            q_results = multiprocessing.Manager().Queue()
            q_targets = multiprocessing.Manager().Queue()
            lock = multiprocessing.Manager().Lock()

            STOP_ME = False
            threading.Thread(target=save_report_thread, args=(q_results, file)).start()
            print '[%s] Report thread created, prepare target Queue...' % get_time()

            if args.crawler:
                _input_files = glob.glob(args.crawler + '/*.log')
                for _file in _input_files:
                    q_targets.put({'file': _file, 'url': ''})

            if args.host or args.f or args.d:
                for line in lines:
Code example #32
File: BBScan.py  Project: moss1993/BBScan
    def _scan_worker(self):
        while self.url_queue.qsize() > 0:
            if time.time() - self.START_TIME > self.TIME_OUT:
                print '[%s] [ERROR] Timed out task: %s' % (get_time(),
                                                           self.host)
                return
            try:
                item = self.url_queue.get(timeout=0.1)
            except:
                return
            try:
                url_description, tag, code, content_type, content_type_no = item
                url = url_description['full_url']
                url = url.replace('{sub}', self.host.split('.')[0])
                prefix = url_description['prefix']
                if url.find('{hostname_or_folder}') >= 0:
                    _url = url[:url.find('{hostname_or_folder}')]
                    if _url.count('/') == 1:
                        url = url.replace('{hostname_or_folder}', self.host)
                    elif _url.count('/') > 1:
                        url = url.replace('{hostname_or_folder}',
                                          _url.split('/')[-2])
                url = url.replace('{hostname}', self.host)
                if url.find('{parent}') > 0:
                    if url.count('/') >= 2:
                        ret = url.split('/')
                        ret[-2] = ret[-1].replace('{parent}', ret[-2])
                        url = '/' + '/'.join(ret[:-1])
                    else:
                        continue
            except Exception, e:
                logging.error('[_scan_worker Exception 1] %s' % e)
                continue
            if not item or not url:
                break

            #print '[%s]' % url.strip()
            try:
                status, headers, html_doc = self._http_request(url)

                if headers.get('content-type',
                               '').find('image/') >= 0:  # exclude image type
                    continue

                if html_doc.strip(
                ) == '' or len(html_doc) < 10:  # data too short
                    continue

                if not self.exclude_text(html_doc):  # exclude text found
                    continue

                valid_item = False
                if status == 200 and self.find_text(html_doc):
                    valid_item = True
                else:
                    if status in [400, 404, 503, 502, 301, 302]:
                        continue
                    if  headers.get('content-type', '').find('application/json') >= 0 and \
                            not url.endswith('.json'):    # no json
                        continue

                    if tag:
                        if html_doc.find(tag) >= 0:
                            valid_item = True
                        else:
                            continue  # tag mismatch

                    if content_type and headers.get('content-type', '').find(content_type) < 0 or \
                        content_type_no and headers.get('content-type', '').find(content_type_no) >=0:
                        continue  # type mismatch

                    if self.has_404 or status != self._404_status:
                        if code and status != code and status != 206:  # code mismatch
                            continue
                        elif code != 403 and status == 403:
                            continue
                        else:
                            valid_item = True

                    if (not self.has_404) and status in (
                            200,
                            206) and item[0]['full_url'] != '/' and (not tag):
                        _len = len(html_doc)
                        _min = min(_len, self.len_404_doc)
                        if _min == 0:
                            _min = 10
                        if abs(_len - self.len_404_doc) / float(_min) > 0.3:
                            valid_item = True

                    if status == 206:
                        if headers.get('content-type',
                                       '').find('text') < 0 and headers.get(
                                           'content-type',
                                           '').find('html') < 0:
                            valid_item = True
                        else:
                            continue

                if valid_item:
                    self.lock.acquire()
                    # print '[+] [Prefix:%s] [%s] %s' % (prefix, status, 'http://' + self.host +  url)
                    if not prefix in self.results:
                        self.results[prefix] = []
                    _ = {
                        'status': status,
                        'url': '%s://%s%s' % (self.schema, self.host, url),
                        'title': self.get_title(html_doc)
                    }
                    if _ not in self.results[prefix]:
                        self.results[prefix].append(_)
                    self.lock.release()

                if len(self.results) >= 10:
                    print 'More than 10 vulnerabilities found for [%s], seems to be false positives, exit.' % prefix
                    return
            except Exception, e:
                logging.error(
                    '[InfoDisScanner._scan_worker][2][%s] Exception %s' %
                    (url, e))
                import traceback
                traceback.print_exc()
Code example #33
File: BBScan.py  Project: mgcfish/BBScan
    def _scan_worker(self):
        while self.url_queue.qsize() > 0:
            if time.time() - self.START_TIME > self.TIME_OUT:
                self.url_queue.queue.clear()
                print '[%s] [ERROR] Timed out task: %s' % (get_time(),
                                                           self.host)
                return
            try:
                item = self.url_queue.get(timeout=0.1)
            except:
                return
            try:
                url_description, tag, code, content_type, content_type_no = item
                prefix = url_description['prefix']
                url = url_description['full_url']
                # print url
                url = url.replace('{sub}', self.host.split('.')[0])
                if url.find('{hostname_or_folder}') >= 0:
                    _url = url[:url.find('{hostname_or_folder}')]
                    folders = _url.split('/')
                    for _folder in reversed(folders):
                        if _folder not in ['', '.', '..']:
                            url = url.replace('{hostname_or_folder}', _folder)
                            break
                url = url.replace('{hostname_or_folder}',
                                  self.host.split(':')[0])
                url = url.replace('{hostname}', self.host.split(':')[0])
                if url.find('{parent}') > 0:
                    if url.count('/') < 2:
                        continue
                    ret = url.split('/')
                    ret[-2] = ret[-1].replace('{parent}', ret[-2])
                    url = '/' + '/'.join(ret[:-1])

            except Exception as e:
                logging.error('[_scan_worker Exception] [1] %s' % str(e))
                continue
            if not item or not url:
                break

            # print '[%s]' % url.strip()
            try:
                status, headers, html_doc = self._http_request(url)
                cur_content_type = headers.get('content-type', '')

                if cur_content_type.find('image/') >= 0:  # exclude image
                    continue

                if ('html' in cur_content_type or 'text' in cur_content_type) and \
                        0 <= len(html_doc) <= 10:    # text too short
                    continue

                if self.find_exclude_text(html_doc):  # excluded text found
                    continue

                valid_item = False
                if self.find_text(html_doc):
                    valid_item = True
                else:
                    if status != code and status in [
                            301, 302, 400, 404, 501, 502, 503, 505
                    ]:
                        continue
                    if cur_content_type.find('application/json') >= 0 and \
                            not url.endswith('.json'):    # no json
                        continue

                    if tag:
                        if html_doc.find(tag) >= 0:
                            valid_item = True
                        else:
                            continue  # tag mismatch

                    if content_type and cur_content_type.find(content_type) < 0 \
                            or content_type_no and cur_content_type.find(content_type_no) >= 0:
                        continue  # type mismatch

                    if self.has_404 or status != self._404_status:
                        if code and status != code and status != 206:  # code mismatch
                            continue
                        elif code != 403 and status == 403:
                            continue
                        else:
                            valid_item = True

                    if not self.has_404 and status in (
                            200, 206) and url != '/' and not tag:
                        _len = len(html_doc)
                        _min = min(_len, self.len_404_doc)
                        if _min == 0:
                            _min = 10.0
                        if float(_len - self.len_404_doc) / _min > 0.3:
                            valid_item = True

                    if status == 206:
                        if cur_content_type.find(
                                'text') < 0 and cur_content_type.find(
                                    'html') < 0:
                            valid_item = True

                if valid_item:
                    self.lock.acquire()
                    # print '[+] [Prefix:%s] [%s] %s' % (prefix, status, 'http://' + self.host +  url)
                    if prefix not in self.results:
                        self.results[prefix] = []
                    m = re.search('<title>(.*?)</title>', html_doc)
                    title = m.group(1) if m else ''

                    _ = {
                        'status': status,
                        'url': '%s%s' % (self.base_url, url),
                        'title': title
                    }
                    if _ not in self.results[prefix]:
                        self.results[prefix].append(_)
                    self.lock.release()

                if len(self.results) >= 10:
                    print '[ERROR] Over 10 vulnerabilities found [%s], seems to be false positives.' % prefix
                    self.url_queue.queue.clear()
            except Exception as e:
                logging.error('[_scan_worker.Exception][2][%s] %s' %
                              (url, str(e)))
                traceback.print_exc()
Code example #34
File: BBScan.py  Project: moss1993/BBScan
        input_files = glob.glob(args.d + '/*.txt')
    elif args.crawler:
        input_files = ['crawler']
    elif args.host:
        input_files = ['hosts']  # several hosts on command line

    scanned_ips = []  # all scanned IPs in current scan

    for file in input_files:
        if args.host:
            lines = [' '.join(args.host)]
        elif args.f or args.d:
            with open(file) as inFile:
                lines = inFile.readlines()
        try:
            print '[%s] Batch web scan start.' % get_time()
            q_results = multiprocessing.Manager().Queue()
            q_targets = multiprocessing.Manager().Queue()
            lock = multiprocessing.Manager().Lock()

            STOP_ME = False
            threading.Thread(target=save_report_thread,
                             args=(q_results, file)).start()
            print '[%s] Report thread created, prepare target Queue...' % get_time(
            )

            if args.crawler:
                _input_files = glob.glob(args.crawler + '/*.log')
                for _file in _input_files:
                    q_targets.put({'file': _file, 'url': ''})
Code example #35
def release_notice(user_dic):
    notice = models.Notice(user_dic['notice_name'], user_dic['notice_content'],
                           user_dic['name'], common.get_time())
    notice.save()
    back_dic = {'flag': True, 'msg': '发布公告成功'}
    return back_dic