Example #1
    def server_command(**kwargs):
        command = 'appium -a {ip} -p {port} -U {deviceName} -g {log}'.format(
            ip=kwargs.get('ip'),
            port=kwargs.get('port'),
            deviceName=kwargs.get('deviceName'),
            log=kwargs.get('log_path'))

        logger.info('Starting server: {}'.format(command))
        # Launch the Appium server in the background, appending its stdout to the log file
        res = subprocess.Popen(command, stdout=open(kwargs.get('log_path'), 'a+'),
                               stderr=subprocess.PIPE, shell=True)
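
A minimal usage sketch for the snippet above, assuming `server_command` is callable as shown, an `appium` binary is on the PATH, and a `logger` is configured; all values are hypothetical:

server_command(ip='127.0.0.1',
               port=4723,
               deviceName='emulator-5554',
               log_path='./appium.log')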
Example #2
def sendMail():
    content = '''
    hi ALL :
            The UI automation run has finished, please see the attached report.


                                tester
    '''
    config = Configure().read(ConfType.MAIL)
    title = 'Main-flow regression'
    # Files to attach
    filepath = getError.getpng()
    # Sender address
    sender = config.get('sender')
    # Sender account user name
    senderName = config.get('username')
    # Sender account password
    senderPwd = config.get('passwd')
    # Recipients (the config value is a Python list literal)
    receiverList = eval(config.get('receiverlist'))
    # Timestamp for the subject line
    timeStr = time.strftime("%Y%m%d%H%M%S", time.localtime())
    # Subject
    subject = u"[UI automation test report] " + title + "_" + timeStr
    # Mail message object
    msg = MIMEMultipart()
    # The body text is rendered as HTML
    part = MIMEText(content, _subtype='html', _charset="utf-8")
    msg.attach(part)
    for v in filepath:
        if v[0].endswith('.html'):
            logger.debug('Opening file: %s' % v[1])
            part = MIMEText(open(v[1], 'r', encoding='utf8').read())
            part["Content-Type"] = 'application/octet-stream'
            part["Content-Disposition"] = 'attachment; filename="%s"' % v[0]  # The filename set here is the name shown in the mail client
            msg.attach(part)
        elif v[0].endswith('.png'):
            part = MIMEImage(open(v[1], 'rb').read())
            part["Content-Type"] = 'application/octet-stream'
            part["Content-Disposition"] = 'attachment; filename="%s"' % v[0]  # The filename set here is the name shown in the mail client
            msg.attach(part)
    msg['Subject'] = Header(subject, 'utf-8')
    # From ...
    msg['From'] = sender
    # To ...
    if len(receiverList) > 1:
        msg['To'] = ";".join(receiverList)
    else:
        msg['To'] = receiverList[0]
    logger.info('Recipient list: %s' % msg['To'])
    try:
        smtp = smtplib.SMTP()
        smtp.connect(config.get('smtpserver'), 25)  # Connect to the mail server
        smtp.login(senderName, senderPwd)  # Log in to the mail server
        smtp.sendmail(sender, receiverList, msg.as_string())  # Send the mail
        smtp.quit()
    except Exception as e:
        logger.error(e)
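
A sketch of the mail configuration this function expects, inferred from the keys it reads (`sender`, `username`, `passwd`, `receiverlist`, `smtpserver`); the shape and values below are hypothetical, assuming `Configure().read(ConfType.MAIL)` returns a dict-like object:

# Hypothetical MAIL section as a dict; the real code loads it via Configure().
mail_conf = {
    'sender': 'tester@example.com',
    'username': 'tester@example.com',
    'passwd': '********',
    # Stored as a string; the snippet eval()s it into a list of addresses.
    'receiverlist': "['dev@example.com', 'qa@example.com']",
    'smtpserver': 'smtp.example.com',
}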
Example #3
    def parse(self, steps, *args, **kargs):
        '''
        Perform the configured actions on elements.
        :param steps:
        :param args:
        :param kargs:
        :return:
        '''
        for step in steps:
            by = step['by']
            locator = step['locator']
            action = step['action']
            skip = step.get("skip")
            logger.debug(f'Performing {action} on element ({by}, {locator})')
            action = action.split(',')
            try:
                if 'clear' in action:  # Clear an input field
                    self.find(by, locator, *args, **kargs).clear()
                if 'click' in action:  # Click the element
                    self.find(by, locator, *args, **kargs).click()
                if 'send_keys' in action:  # Type text into the element
                    self.find(by, locator, *args, **kargs).send_keys(
                        self.function_analysis(step['context']))
                if 'right_click' in action:  # Right-click
                    el = self.find(by, locator, *args, **kargs)
                    ActionChains(self.driver).context_click(el).perform()
                if 'double_click' in action:  # Double-click
                    el = self.find(by, locator, *args, **kargs)
                    ActionChains(self.driver).double_click(el).perform()
                if 'drag_and_drop' in action:  # Drag and drop
                    el = self.find(by, locator, *args, **kargs)
                    target = self.find(step['by'], step['locator_target'],
                                       *args, **kargs)
                    ActionChains(self.driver).drag_and_drop(el,
                                                            target).perform()
                if 'clicks' in action:  # Click every matching element
                    eles = self.finds(by, locator, *args, **kargs)
                    # Click each element in the collection
                    for ele in eles:
                        # Ignore elements that cannot be clicked
                        try:
                            ele.click()
                        except Exception:
                            pass
            except TimeoutException:
                if skip:
                    logger.info(f"Element set ({by}, {locator}) is empty, skipping")
                else:
                    raise TimeoutException(
                        f"Element set ({by}, {locator}) is empty and cannot be "
                        f"skipped; check that the element exists")

            except Exception as e:
                logger.debug(f'Error performing {action} on element ({by}, {locator}): {e}')
                raise e
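
A minimal sketch of the step list this parser consumes, inferred from the keys the loop reads (`by`, `locator`, `action`, plus the optional `skip`, `context`, and `locator_target`); the locators and values are hypothetical:

steps = [
    # Clear the search box, then type a query; 'context' feeds
    # self.function_analysis(), so a plain string is assumed here.
    {'by': 'id', 'locator': 'search-input',
     'action': 'clear,send_keys', 'context': 'appium'},
    # Click the submit button; if it never appears, skip instead of failing.
    {'by': 'xpath', 'locator': '//button[@type="submit"]',
     'action': 'click', 'skip': True},
]

page.parse(steps)  # assuming `page` is an instance of the class above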
Example #4
    def assert_result(self, element, timeout=20):
        '''
        Assert that the expected text appears in the page source.
        :param element:
        :param timeout:
        :return:
        '''
        result = WebDriverWait(self.driver, timeout).until(
            lambda x: element in x.page_source)  # Waiting avoids false failures on a laggy device
        logger.info(f'Assertion result: {result}')
        return result
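
A hedged usage sketch, assuming the method lives on a page object with a live `driver`; the `page` instance and the asserted text are hypothetical:

# Raises a TimeoutException if the text does not appear within 20 seconds.
page.assert_result('Login successful')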
Example #5
    def _redirect_process(self, response):
        location = response.headers['location']
        if 'sorry' not in location:
            host = urlparse(location).scheme + '://' + urlparse(location).netloc
            self.host = host

            if self.debug:
                logger.warning('Host redirect detected: "%s"' % self.host)
                logger.info('New host (%s) used' % self.host)
        else:  # redirected to a captcha ("sorry") page
            if self.debug:
                logger.warning("Captcha verification detected, can't load a plugin to process it...")
                logger.warning('Exiting...')

            sys.exit()
Example #6
    def run(self):
        self.controller.server()
        # test_server occasionally fails its check; in that special case it returns early and the run proceeds
        if self.controller.test_server:
            driver = self.controller.driver()
            logger.info('Starting cases! [%s] driver(s) launched!' % driver.qsize())

            # Cases to execute
            for case in range(driver.qsize()):
                # Start one thread per driver to run cases, naming each thread after its device
                t = threading.Thread(target=self.case, name=device_q.get())
                self.threads.append(t)
                t.start()
            for t in self.threads:
                t.join()
Example #7
def download_work(args):
    if args.PROXY:
        init_proxy(args.PROXY)

    cookie = args.COOKIE if args.COOKIE else None

    n_success = 0
    n_fail = 0
    if args.poc != 'all':
        poc_id = args.poc
        if not re.search(_ID_REGEX, poc_id):
            logger.error('Invalid poc id format, please re-enter.')
        else:
            if download_poc(poc_id, cookie):
                n_success += 1
            else:
                n_fail += 1
    else:
        logger.info('Download all pocs from "beebeeto.com"')
        logger.warning(
            'Existing PoCs will be overwritten, press [Enter] to continue.')
        raw_input()
        if True:
            # Map of listing pages -> crawled flag; loop until every page is crawled
            crawl_dic = {'http://beebeeto.com/pdb/?page=1': False}

            while False in crawl_dic.values():
                crawl_url = choice([
                    link for link, crawled in crawl_dic.items() if not crawled
                ])

                try:
                    content = requests.get(crawl_url).content
                    crawl_dic[crawl_url] = True
                except Exception, e:
                    logger.error('Exception occurred "%s" (%s)' %
                                 (Exception, e))
                    break

                if content:
                    # Discover newly linked listing pages
                    crawl_dic = parse_page_from_content(content, crawl_dic)

                    ids = parse_poc_id_from_content(content)
                    for poc_id in ids:
                        if download_poc(poc_id, cookie):
                            n_success += 1
                        else:
                            n_fail += 1
        else:
            logger.info('Download cancel.')
            return

    logger.info('total number: %d, success number: %d, failed number: %d'
                % (n_success + n_fail, n_success, n_fail))
Example #8
def init_proxy(proxy):
    res = urlparse(proxy)

    use_proxy = True
    if res.scheme == 'socks4':
        mode = socks.SOCKS4
    elif res.scheme == 'socks5':
        mode = socks.SOCKS5
    elif res.scheme == 'http':
        mode = socks.HTTP
    else:
        use_proxy = False
        logger.warning('Unknown proxy "%s", starting without proxy...' % proxy)

    if use_proxy:
        host, port = res.netloc.split(':')
        # Monkey-patch the socket module so every connection goes through the proxy
        socks.set_default_proxy(mode, host, int(port))
        socket.socket = socks.socksocket
        logger.info('Using proxy "%s"' % proxy)
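
A minimal usage sketch; the proxy address is hypothetical:

# Route all subsequent socket connections through a local SOCKS5 proxy.
init_proxy('socks5://127.0.0.1:1080')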
Example #9
    def fetch_results(self, query):
        url_collection = []
        #host_collection = []

        start = 0
        logger.info('Starting search with google: %s' % query)
        logger.warning('You can interrupt this process with [Ctrl+c]')

        next_url = None
        while True:
            try:
                if next_url:
                    content = self.access(next_url)
                else:
                    content = self.search(query, page_num=100, start=start)
            except GoogleSearchLimitError, e:
                logger.error('%s' % e)
                return url_collection
            except GoogleSearchInitError, e:
                logger.error('%s' % e)
                return url_collection
            except KeyboardInterrupt, e:
                return url_collection
            except Exception, e:
                continue

            if content:
                next_url = parse_next_url_from_content(content)

                temp_urls = parse_url_from_content(content)
                if len(temp_urls) > 0:
                    url_collection.extend(temp_urls)
                    logger.info('Collected %d results so far' % len(url_collection))

                    start += 100
                else:
                    logger.warning('No more results found, no longer continuing the search')
                    return url_collection

                if not next_url:
                    logger.warning('No more results found, no longer continuing the search')
                    return url_collection


def fetch_work(args):
    if args.PROXY:
        init_proxy(args.PROXY)
Example #10
def quit_process(filename, results):
    logger.info('Processing results...')
    pre_num = len(results)
    # De-duplicate by turning the results into dictionary keys
    results = {}.fromkeys(results).keys()
    num = len(results)

    if filename:
        save_results(filename, results)
        logger.info('Saved results to "%s"' % filename)
    else:
        for r in results:
            print r

    logger.info('All: %d, Unique: %d' % (pre_num, num))
Example #11
def search_work(args):
    repi = SearchPoC(args.keyword, path=args.PATH)
    res = repi.search()

    for key, value in res.items():
        logger.info('%s    %s' % (key, value))
Example #12
def batch_work(args):
    if args.METHOD not in ['verify', 'exploit']:
        logger.error('Invalid method, please check...')
        sys.exit()

    if args.PROXY:
        init_proxy(args.PROXY)

    if args.poc != 'all':
        poc = import_module_with_path(args.poc)
        funcs = (poc.__name__, (poc.MyPoc.verify if args.METHOD == 'verify'
                                else poc.MyPoc.exploit))
        outfile = 'batch_%s_result_' % args.METHOD + os.path.splitext(
            os.path.basename(args.poc))[0] + '.txt'

        logger.info('Batch starting with "%s"' %
                    ('verify' if args.METHOD == 'verify' else 'exploit'))
        start_time = time.time()
        bt = BatchTest(seed_file=args.targets,
                       funcs2run=funcs,
                       result_file=outfile,
                       thread_num=args.THREADS,
                       verbose=False)

        bt.start(norm_target_func=normalize_url)
        logger.info('total number: %d, success number: %d, failed number: %d' %
                    (bt.total_num, bt.success_num,
                     (bt.total_num - bt.success_num)))
        logger.info('cost %f seconds.' % (time.time() - start_time))
    else:
        # Run every PoC module found in the module directory
        path = args.MODULE_DIR
        module_path = _default_module_path if not path else os.path.expanduser(
            path)
        pocs = import_all_modules_with_dirname(module_path)
        funcs = [(poc.__name__, poc.MyPoc.verify
                  if args.METHOD == 'verify' else poc.MyPoc.exploit)
                 for poc in pocs]
        outfile = 'batch_%s_result_all' % args.METHOD + '.txt'

        logger.info('Batch-all starting with "%s"' %
                    ('verify' if args.METHOD == 'verify' else 'exploit'))

        start_time = time.time()
        bt = BatchTest(seed_file=args.targets,
                       funcs2run=funcs,
                       result_file=outfile,
                       thread_num=args.THREADS,
                       verbose=False)

        bt.start(norm_target_func=normalize_url)
        logger.info('total number: %d, success number: %d, failed number: %d' %
                    (bt.total_num, bt.success_num,
                     (bt.total_num - bt.success_num)))
        logger.info('cost %f seconds.' % (time.time() - start_time))
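
A sketch of the argument object `batch_work` consumes, inferred from the attributes it reads (`METHOD`, `PROXY`, `poc`, `targets`, `THREADS`, `MODULE_DIR`); using `argparse.Namespace` directly is an assumption, and all values are hypothetical:

import argparse

args = argparse.Namespace(
    METHOD='verify',                 # 'verify' or 'exploit'
    PROXY=None,                      # e.g. 'socks5://127.0.0.1:1080'
    poc='pocs/poc_example.py',       # one PoC module, or 'all'
    targets='targets.txt',           # seed file, one target per line
    THREADS=10,
    MODULE_DIR=None,                 # PoC directory used when poc == 'all'
)
batch_work(args)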