Example #1
def get_battery(devices):
    """
    获取设备当前电量  adb -s 设备唯一标识(devices) shell dumpsys battery
    :param devices: 被测设备唯一标识
    :return:
    """
    try:
        logging.info('Getting the current battery level of the device')
        cmd = "adb -s " + devices + " shell dumpsys battery"
        logging.info(cmd)
        output = subprocess.check_output(cmd).split()
        if not output:
            raise ConnectAdbError
        st = ".".join([x.decode() for x in output])  # 转换为string
        logging.debug("st = " + st)
        battery2 = int(re.findall(r"level:.(\d+)", st, re.S)[0])
    except ConnectAdbError as e:
        logging.error(e)
        raise
    except Exception as e:
        logging.error(e)
        battery2 = 90
    logging.info('Battery level read: ' + str(battery2))
    # Pickle.write_info(battery2, PATH("../info/" + devices + "_battery.pickle"))
    Pickle.write_info(
        battery2,
        Path.scan_files(select_path=Path.info_path(),
                        postfix=''.join(devices.split(':')) +
                        '_battery.pickle'))
    return battery2
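
For reference, a minimal sketch of the parsing step above, run against an assumed fragment of dumpsys battery output (the exact output format varies by device; the regex only relies on the "level:" line):

import re

# Hypothetical excerpt of `adb shell dumpsys battery` output (format assumed).
sample = b"""Current Battery Service state:
  AC powered: false
  USB powered: true
  level: 85
  scale: 100"""

# Same idea as the function above: join the whitespace-split tokens with '.'
# and pull out the digits that follow 'level:'.
st = ".".join(x.decode() for x in sample.split())
battery = int(re.findall(r"level:.(\d+)", st, re.S)[0])
print(battery)  # -> 85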
Example #2
def get_men(pkg_name, devices):
    """
    获取应用占用内存 adb -s 设备唯一标识(devices) shell dumpsys meminfo 包名(pkg_name)
    :param pkg_name: 被测应用包名
    :param devices: 被测设备唯一标识
    :return:
    """
    try:
        logging.info('Reading memory usage of ' + pkg_name)
        cmd = "adb -s " + devices + " shell  dumpsys  meminfo %s" % pkg_name
        logging.info(cmd)
        output = subprocess.check_output(cmd).split()
        if not output:
            raise ConnectAdbError
        s_men = ".".join([x.decode() for x in output])  # 转换为string
        logging.debug("s_men = " + s_men)
        men2 = int(re.findall(r"TOTAL.(\d+)", s_men, re.S)[0])
    except ConnectAdbError as e:
        logging.error(e)
        raise
    except Exception as e:
        logging.error(e)
        men2 = 0
    logging.info('Memory usage read: ' + str(men2))
    Pickle.write_info(
        men2,
        Path.scan_files(select_path=Path.info_path(),
                        postfix=''.join(devices.split(':')) + '_men.pickle'))
    # Pickle.write_info(men2, PATH("../info/" + devices + "_men.pickle"))
    return men2
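
The same pattern as Example #1, but here the regex pulls the TOTAL figure out of dumpsys meminfo. A minimal sketch against an assumed output fragment (real meminfo output is much longer; only the TOTAL row matters to the regex):

import re

# Hypothetical fragment of `adb shell dumpsys meminfo <pkg>` output (values made up).
sample = b"             TOTAL   152340    40060     1088    12204    65536    35924    29612"

s_men = ".".join(x.decode() for x in sample.split())
men = int(re.findall(r"TOTAL.(\d+)", s_men, re.S)[0])
print(men)  # -> 152340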
Example #3
def get_fps(pkg_name, devices):
    """
    获取应用运行时的FPS  adb -s 设备的唯一标识(devices) shell dumpsys gfxinfo 包名(pkg_name)
    :param pkg_name:  被测应用包名
    :param devices:  被测设备的唯一标识
    :return:
    """
    logging.info('Reading runtime FPS of ' + pkg_name)
    try:
        _adb = "adb -s " + devices + " shell dumpsys gfxinfo %s" % pkg_name
        logging.info(_adb)
        results = os.popen(_adb).read().strip()
        if not results:
            raise ConnectAdbError
        logging.debug("results = " + results)
        frames = [x for x in results.split('\n') if validator(x)]
        logging.debug(frames)
        frame_count = len(frames)
        jank_count = 0
        vsync_overtime = 0
        render_time = 0
        for frame in frames:
            time_block = re.split(r'\s+', frame.strip())
            if len(time_block) == 3:
                try:
                    render_time = float(time_block[0]) + float(
                        time_block[1]) + float(time_block[2])
                except:
                    render_time = 0
            '''
            If the render time exceeds 16.67 ms, then under the vsync mechanism that frame has already missed its deadline.
            If it is an exact multiple of 16.67, e.g. 66.68, it consumed 4 vsync pulses; subtracting the one it needs anyway, it overran by 3.
            If it is not an exact multiple, e.g. 67, the number of pulses it consumed is rounded up, i.e. 5; subtracting the one it needs anyway,
            it overran by 4, which is simply the ratio rounded down.

            Overall approach:
            One run of the command collects m frames in total (ideally m = 128). Some of those frames took more than 16.67 ms to render;
            each one counts as a jank, and every jank consumes extra vsync pulses. Every other frame, even if it stayed under 16.67 ms,
            is counted as one pulse (ideally one pulse is enough to render one frame).

            So the FPS formula becomes:
            m / (m + extra vsync pulses) * 60
            '''
            if render_time > 16.67:
                jank_count += 1
                if render_time % 16.67 == 0:
                    vsync_overtime += int(render_time / 16.67) - 1
                else:
                    vsync_overtime += int(render_time / 16.67)

        _fps = int(frame_count * 60 / (frame_count + vsync_overtime))
    except ConnectAdbError as e:
        logging.error(e)
        raise
    except Exception as e:
        logging.error(e)
        _fps = 0
    logging.info('FPS read: ' + str(_fps))
    Pickle.write_info(
        _fps,
        Path.scan_files(select_path=Path.info_path(),
                        postfix=''.join(devices.split(':')) + '_fps.pickle'))
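
A worked instance of the jank / vsync arithmetic described in the comment above, using three assumed per-frame render times (Draw + Process + Execute, in ms):

# Assumed render times; real values come from the gfxinfo frame table.
render_times = [10.0, 33.4, 40.0]

frame_count = len(render_times)
jank_count = 0
vsync_overtime = 0
for render_time in render_times:
    if render_time > 16.67:
        jank_count += 1
        if render_time % 16.67 == 0:
            vsync_overtime += int(render_time / 16.67) - 1
        else:
            vsync_overtime += int(render_time / 16.67)

# 10.0 ms -> fits in one vsync pulse, no overtime
# 33.4 ms -> needs 3 pulses, so 2 extra: int(33.4 / 16.67) == 2
# 40.0 ms -> also needs 3 pulses, 2 extra: int(40.0 / 16.67) == 2
fps = int(frame_count * 60 / (frame_count + vsync_overtime))  # int(3 * 60 / 7) == 25
print(jank_count, vsync_overtime, fps)  # -> 2 4 25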
Example #4
def evaluate_phodopus():
    home_dir = Path(os.path.expanduser('~'))
    hfm_dir = Path(
        '/home/acgtyrant/Projects/HamsterForMTK/'
        'Hamster_Android_SDK/src/com/mapbar/hamster')
    testset_dir = home_dir / 'BigDatas/car/testset'
    num_stages = (14, 15, 16)
    ratios = (0.25, 0.5, 0.75, 1)
    for num_stage, ratio in itertools.product(num_stages, ratios):
        cascade_model_dir = Path(
                '/home/acgtyrant/BigDatas/car/scaled_cascade-{}-{}'.format(
                        num_stage, ratio))
        parameters = phodopus.Parameters(
                hfm_dir / 'deploy.prototxt',
                hfm_dir / 'mb_confirm__iter_60000.caffemodel',
                hfm_dir / 'net_mean_file',
                None,
                hfm_dir / 'location_finer_1026_1_test.prototxt',
                hfm_dir / 'lf1__iter_60000.caffemodel',
                hfm_dir / 'mean_file')
        qin_rate = 0.5
        # The KITTI standard is 70%, i.e. Phodopus.kitti_rate, but Qin said the numbers might look bad, so use 50% instead
        logging.basicConfig(
                format='%(levelname)s:%(message)s', level=logging.DEBUG)
        phodopus.evaluate_cascade(
                testset_dir,
                cascade_model_dir,
                parameters,
                qin_rate)
Example #5
def main():
    trainset_directory = Path('/home/acgtyrant/BigDatas/car/trainset')
    backup_directory = trainset_directory / 'backup'
    try:
        backup_directory.mkdir()
    except Exception as e:
        logging.warning(e, exc_info=True)
    label_pathnames = [
        pathname for pathname in trainset_directory.iterdir()
        if pathname.suffix == '.txt'
    ]
    for label_pathname in label_pathnames:
        backup(label_pathname, backup_directory / label_pathname.name)
    for label_pathname in label_pathnames:
        try:
            sample_pathname = find_sample(label_pathname)
        except Exception:
            logging.warning(
                '{}\'s sample does not exist'.format(label_pathname))
        else:
            practical_remove_unvalid_line = partial(
                remove_unvalid_line, sample_pathname=sample_pathname)
            functions = [
                dos2unix, sort, uniq, practical_remove_unvalid_line,
                remove_duplicated_lines, sort
            ]
            for function in functions:
                wrapper(label_pathname, function)
        logging.info('regularize {} done'.format(label_pathname))
Example #6
def get_flow(pd, devices):
    """
    获取应用的流量信息   adb -s 设备的唯一标识(devices) shell cat /proc/PID(pd)/net/dev
    :param pd:   应用的pid
    :param devices: 被测设备唯一标识
    :return:
    """
    logging.info('Getting the app traffic info')
    try:
        up_flow = down_flow = 0
        if pd is not None:
            cmd = "adb -s " + devices + " shell cat /proc/" + pd + "/net/dev"
            logging.info(cmd)
            _flow = subprocess.Popen(
                cmd,
                shell=True,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE).stdout.readlines()
            logging.debug(_flow)
            for item in _flow:
                logging.debug(item.split()[0].decode())
                if item.split()[0].decode() == "wlan0:":  # wifi
                    # column 1 and column 9 are recorded as upload / download byte counts
                    up_flow = int(item.split()[1].decode())
                    down_flow = int(item.split()[9].decode())
                    logging.debug('Upload traffic: ' + str(up_flow))
                    logging.debug('Download traffic: ' + str(down_flow))
                elif item.split()[0].decode() == "rmnet0:":  # gprs
                    logging.info("-----flow---------")
                    up_flow = int(item.split()[1].decode())
                    down_flow = int(item.split()[9].decode())
                    logging.debug('Upload traffic: ' + str(up_flow))
                    logging.debug('Download traffic: ' + str(down_flow))
                # else:
                #     up_flow = 0
                #     down_flow = 0
                #     logging.debug('Upload traffic: '+str(up_flow))
                #     logging.debug('Download traffic: '+str(down_flow))
                #     break
        Pickle.write_flow_info(
            up_flow, down_flow,
            Path.scan_files(select_path=Path.info_path(),
                            postfix=''.join(devices.split(':')) +
                            '_flow.pickle'))
    except ConnectAdbError as e:
        logging.error(e)
        raise
    except Exception as e:
        logging.error('Failed to get the app traffic info')
        logging.error(e)
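
A minimal sketch of the /proc/<pid>/net/dev parsing above, against one assumed "wlan0:" line (the column layout follows the kernel's net/dev format; the byte values are made up):

# One assumed line from /proc/<pid>/net/dev, as returned by the adb command above.
line = b"wlan0: 1843706 2543 0 0 0 0 0 0  329451 1894 0 0 0 0 0 0"

fields = line.split()
if fields[0].decode() == "wlan0:":
    up_flow = int(fields[1].decode())    # counter the function records as upload
    down_flow = int(fields[9].decode())  # counter the function records as download
    print(up_flow, down_flow)            # -> 1843706 329451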
Example #7
def delete_fp():
    try:
        lg = [Path.father_path + '\\log', Path.report_path()]
        for j in lg:
            if os.path.exists(j):
                ls = os.listdir(j)
                for i in range(0, len(ls)):
                    path = os.path.join(j, ls[i])
                    if os.path.exists(path):
                        create_time = time.localtime(
                            os.stat(path).st_ctime)  # file creation time
                        y = time.strftime('%Y', create_time)
                        m = time.strftime('%m', create_time)
                        d = time.strftime('%d', create_time)
                        h = time.strftime('%H', create_time)
                        M = time.strftime('%M', create_time)
                        d2 = datetime.datetime(int(y), int(m), int(d), int(h),
                                               int(M))  # build a datetime from the parts
                        time_difference = (datetime.datetime.now() -
                                           d2).days  # age in days
                        if time_difference >= 2:  # delete anything older than 2 days
                            shutil.rmtree(path)
            else:
                logging.warning('Log or test report folder does not exist')
    except Exception as e:
        logging.error(e)
Example #8
def main():
    parser = argparse.ArgumentParser(
        description='Load cascade_model and call detectMultiScale directly for full-frame multi-target detection')
    parser.add_argument('-v', action='store', dest='video_pathname_str')
    parser.add_argument('-c', action='store', dest='cascade_pathname_str')
    parser.add_argument('--noshow', action='store_true', default=False)
    special_help = 'Unused argument, kept only for compatibility with the existing parameters of makefile/phodopus.'
    parser.add_argument('--proto', action='store', help=special_help)
    parser.add_argument('--model', action='store', help=special_help)
    parser.add_argument('--mean', action='store', help=special_help)
    parser.add_argument('--lf_proto', action='store', help=special_help)
    parser.add_argument('--lf_model', action='store', help=special_help)
    parser.add_argument('--lf_mean', action='store', help=special_help)
    # The six arguments above are currently unused by this script; they exist only for compatibility with makefile/phodopus.
    args = parser.parse_args(sys.argv[1:])
    video = video_generator(Path(args.video_pathname_str))
    cascade = cv2.CascadeClassifier(args.cascade_pathname_str)
    for frame_filename_str, frame in video:
        rects = cascade.detectMultiScale(frame, 1.2, 3, 0, (20, 20))
        new_rects = []
        for x, y, width, height in rects:
            if not args.noshow:
                frame = cv2.rectangle(frame, (x, y), (x + width, y + height),
                                      (0, 255, 0))
            new_rect = Rect(1 * x, 1 * y, 1 * width, 1 * height)
            new_rects.append(new_rect)
        rects_str = ' '.join([','.join(map(str, rect)) for rect in new_rects])
        sys.stdout.write('{} {}\n'.format(frame_filename_str, rects_str))
        if not args.noshow:
            cv2.imshow('fight!', frame)
            cv2.waitKey(0)
Example #9
def start_server():
    cmd = 'taskkill /F /IM node.exe'
    logging.info(cmd)
    os.system(cmd)
    logging.info('Starting the appium server')
    try:
        cd = 'start /b appium -a 127.0.0.1 -p 4723 --bootstrap-port 9517 --session-override --command-timeout 600'
        logging.info(cd)
        subprocess.call(cd,
                        shell=True,
                        stdout=open(
                            Path.log_path() + runtime.test_start_time() +
                            'appium.log', 'w'),
                        stderr=subprocess.STDOUT)
        appium_server_url = 'http://localhost:4723/wd/hub/'
        logging.info(appium_server_url)
        time.sleep(5)
        response = requests.get(appium_server_url)
        print(response.status_code)
        if response.status_code == 404:
            logging.info('Appium server started successfully!!')
        else:
            raise Exception
    except Exception as a:
        logging.error('Failed to start the appium server %s' % a)
Example #10
def log_config():
    try:
        logger = logging.getLogger()
        logger.setLevel(logging.DEBUG)  # master switch for the log level
        # Step 2: create a handler that writes to a log file
        logfile = Path.log_path() + runtime.test_start_time() + '.log'
        fh = logging.FileHandler(logfile, mode='w+')
        fh.setLevel(logging.DEBUG)  # log level for output to the file
        # Step 3: create another handler that writes to the console
        ch = logging.StreamHandler()
        ch.setLevel(logging.DEBUG)  # log level for output to the console
        # Step 4: define the output format for both handlers
        formatter = logging.Formatter(
            "%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s"
        )
        fh.setFormatter(formatter)
        ch.setFormatter(formatter)
        # Step 5: attach the handlers to the logger
        logger.addHandler(fh)
        logger.addHandler(ch)
        logging.info('Test start time: %s' % runtime.test_start_time())
        # logging.basicConfig(level=logging.DEBUG,
        #                     format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
        #                     datefmt='%a, %d %b %Y %H:%M:%S',
        #                     filename=Path.log_path()+runtime.test_start_time()+'.log',
        #                     filemode='w')
    except Exception as e:
        print(e)
        raise Custom_exception.LogConfigError
Example #11
def main():
    directory = Path(os.path.expanduser('~')) / 'BigDatas/car'
    info_pathname = directory / 'positive_samples.txt'
    new_info_pathname = directory / 'scaled_positive_samples.txt'
    with info_pathname.open() as info_file, \
            new_info_pathname.open('w') as new_info_file:
        for line in info_file:
            frame_filename_str, num_str, rects_str = (line.rstrip('\n').split(
                ' ', 2))
            image_pathname = directory / frame_filename_str
            image = cv2.imread(str(image_pathname))
            image_height, image_width, _ = image.shape
            rects_strs = rects_str.split()
            rects = []
            for index in range(0, len(rects_strs), 4):
                x, y, width, height = map(int, rects_strs[index:index + 4])
                new_width = int(width * 1.2)
                new_x = int(x - width * 0.1)
                new_height = int(height * 1.2)
                new_y = int(y - height * 0.1)
                if (new_x >= 0 and new_y >= 0 and new_width < image_width
                        and new_height < image_height):
                    rects.append(Rect(new_x, new_y, new_width, new_height))
                else:
                    rects.append(Rect(x, y, width, height))
            new_rects_str = ' '.join(
                [' '.join(map(str, tuple(rect))) for rect in rects])
            new_info_file.write('{} {} {}\n'.format(frame_filename_str,
                                                    num_str, new_rects_str))
Example #12
def watch_squirrel():
    logging.basicConfig(
            format='%(levelname)s:%(message)s', level=logging.DEBUG)
    home_dir = Path(os.path.expanduser('~'))
    testset_dir = home_dir / 'BigDatas/squirrel/testset'
    video_pathname = testset_dir / '1970-1-2_08-40-06.avi'
    label_pathname = video_pathname.with_suffix('.txt')
    tp_pathname = video_pathname.with_suffix('.tp')
    fp_pathname = video_pathname.with_suffix('.fp')
    fn_pathname = video_pathname.with_suffix('.fn')
    # tp_count, fp_count, fn_count = squirrel.parse(
            # log_pathname,
            # label_pathname,
            # tp_pathname,
            # fp_pathname,
            # fn_pathname)
    # precision, recall, fb_measure, _ = squirrel.statistics(
            # tp_count,
            # fp_count,
            # fn_count)
    # print('precision: {:.3}'.format(precision))
    # print('recall: {:.3}'.format(recall))
    # print('fb_measure: {:.3}'.format(fb_measure))
    squirrel.watch(
            video_pathname,
            label_pathname,
            tp_pathname,
            fp_pathname,
            fn_pathname,
            is_evaluate_save=True)
Example #13
def watch_phodopus():
    home_dir = Path(os.path.expanduser('~'))
    testset_dir = home_dir / 'BigDatas/daytime/trainset'
    video_pathname = testset_dir / '1362384843.mov'
    label_pathname = video_pathname.with_suffix('.txt')
    log_pathname = video_pathname.with_suffix('.log')
    tp_pathname = video_pathname.with_suffix('.tp')
    fp_pathname = video_pathname.with_suffix('.fp')
    fn_pathname = video_pathname.with_suffix('.fn')
    tp_count, fp_count, fn_count = phodopus.parse(
            log_pathname,
            label_pathname,
            tp_pathname,
            fp_pathname,
            fn_pathname)
    precision, recall, fb_measure, _ = phodopus.statistics(
            tp_count,
            fp_count,
            fn_count)
    print('precision: {:.3}'.format(precision))
    print('recall: {:.3}'.format(recall))
    print('fb_measure: {:.3}'.format(fb_measure))
    phodopus.watch(
            video_pathname,
            label_pathname,
            tp_pathname,
            fp_pathname,
            fn_pathname)
Example #14
def day_task():
    home_dir = Path(os.path.expanduser('~'))
    testset_dir = home_dir / 'BigDatas/daytime/testset'
    tp_counts, fp_counts, fn_counts = 0, 0, 0
    for video_pathname in [
            pathname
            for pathname in testset_dir.iterdir()
            if pathname.suffix == '.avi']:
        label_pathname = video_pathname.with_suffix('.txt')
        log_pathname = video_pathname.with_suffix('.log')
        tp_pathname = video_pathname.with_suffix('.tp')
        fp_pathname = video_pathname.with_suffix('.fp')
        fn_pathname = video_pathname.with_suffix('.fn')
        tp_count, fp_count, fn_count = phodopus.parse(
                log_pathname,
                label_pathname,
                tp_pathname,
                fp_pathname,
                fn_pathname)
        tp_counts += tp_count
        fp_counts += fp_count
        fn_counts += fn_count
    precision, recall, fb_measure, _ = phodopus.statistics(
            tp_counts,
            fp_counts,
            fn_counts)
    print('precision: {:.3}'.format(precision))
    print('recall: {:.3}'.format(recall))
    print('fb_measure: {:.3}'.format(fb_measure))
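
The precision, recall and fb_measure printed above follow the usual definitions over TP/FP/FN counts. Here is a sketch of those definitions for reference; the actual phodopus.statistics may weight or combine the counts differently (e.g. a non-default F-measure weight):

# Standard precision / recall / F-beta from TP, FP, FN counts (illustrative only).
def statistics_sketch(tp_count, fp_count, fn_count, fb_weight=1.0):
    precision = tp_count / (tp_count + fp_count) if tp_count + fp_count else 0.0
    recall = tp_count / (tp_count + fn_count) if tp_count + fn_count else 0.0
    if precision + recall == 0:
        return precision, recall, 0.0
    fb_measure = ((1 + fb_weight ** 2) * precision * recall /
                  (fb_weight ** 2 * precision + recall))
    return precision, recall, fb_measure

print(statistics_sketch(80, 20, 40))  # -> (0.8, 0.666..., 0.727...)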
Example #15
def mkdirInit(devices, app):
    # destroy(devices)
    # cpu = PATH("../info/" + devices + "_cpu.pickle")
    # men = PATH("../info/" + devices + "_men.pickle")
    # flow = PATH("../info/" + devices + "_flow.pickle")
    # battery = PATH("../info/" + devices + "_battery.pickle")
    # fps = PATH("../info/" + devices + "_fps.pickle")
    dev = ''.join(devices.split(':'))
    cpu = Path.info_path() + '\\' + dev + "_cpu.pickle"
    men = Path.info_path() + '\\' + dev + "_men.pickle"
    flow = Path.info_path() + '\\' + dev + "_flow.pickle"
    battery = Path.info_path() + '\\' + dev + "_battery.pickle"
    fps = Path.info_path() + '\\' + dev + "_fps.pickle"
    app[devices] = {
        "cpu": cpu,
        "men": men,
        "flow": flow,
        "battery": battery,
        "fps": fps,
        "header": get_phone(devices)
    }
    OperateFile(cpu).mkdir_file()
    OperateFile(men).mkdir_file()
    OperateFile(flow).mkdir_file()
    OperateFile(battery).mkdir_file()
    OperateFile(fps).mkdir_file()
    # OperateFile(PATH("../info/info.pickle")).remove_file()
    # OperateFile(PATH("../info/info.pickle")).mkdir_file()  # 用于记录统计结果的信息,是[{}]的形式
    OperateFile(Path.info_path() + '\\' + dev + "_info.pickle").remove_file()
    OperateFile(Path.info_path() + '\\' + dev + "_info.pickle").mkdir_file()
Example #16
 def __init__(self):
     global case
     try:
         self.data = xlrd.open_workbook(Path.scan_files(postfix='case.xls'))
         case = self.data.sheet_by_name(u'测试用例')
         self.case_num = case.nrows  # number of rows of test cases
     except Exception as e:
         creat_case.exception_handling(e)
         raise Custom_exception.OpenXlsError
Example #17
def evaluate():
    logging.basicConfig(
            format='%(levelname)s:%(message)s', level=logging.DEBUG)
    home_dir = Path(os.path.expanduser('~'))
    hfm_dir = Path(
            '/home/acgtyrant/Projects/HamsterForMTK/'
            'Hamster_Android_SDK/src/com/mapbar/hamster')
    testset_dir = home_dir / 'BigDatas/car/testset'
    parameters = phodopus.Parameters(
            hfm_dir / 'deploy.prototxt',
            hfm_dir / 'mb_confirm__iter_60000.caffemodel',
            hfm_dir / 'net_mean_file',
            Path('/home/acgtyrant/BigDatas/car/cascade/cascade15.xml'),
            hfm_dir / 'location_finer_1026_1_test.prototxt',
            hfm_dir / 'lf1__iter_60000.caffemodel',
            hfm_dir / 'mean_file')
    qin_rate = 0.5
    # The KITTI standard is 70%, i.e. Phodopus.kitti_rate, but Qin said the numbers might look bad, so use 50% instead
    tp_counts, fp_counts, fn_counts = 0, 0, 0
    for pathname in testset_dir.iterdir():
        if pathname.suffix in video_suffixes:
            log_pathname = pathname.with_suffix('.log')
            label_pathname = pathname.with_suffix('.txt')
            logging.basicConfig(
                    format='%(levelname)s:%(message)s', level=logging.INFO)
            phodopus.evaluate_pathname(
                    parameters,
                    pathname,
                    log_pathname,
                    qin_rate)
            tp_count, fp_count, fn_count = phodopus.parse(
                    log_pathname,
                    label_pathname,
                    overlap_rate=0.5)
            tp_counts += tp_count
            fp_counts += fp_count
            fn_counts += fn_count
    precision, recall, fb_measure, _ = phodopus.statistics(
            tp_counts,
            fp_counts,
            fn_counts)
    logging.info('precision: {:.3}'.format(precision))
    logging.info('recall: {:.3}'.format(recall))
    logging.info('fb_measure: {:.3}'.format(fb_measure))
Example #18
def append2PATH(paths, tmp_path):
  """ Appends the given argument to the PATH in the registry.
      The paths argument can contain multiple paths separated by ';'. """
  from common import is_win32, Path, call_read, subprocess, \
    chunks, tounicode, tounicodes
  paths, tmp_path = tounicodes((paths, tmp_path))
  sep = ";"
  path_list = paths.split(sep)

  # 1. Get the current PATH value.
  echo_cmd = ["wine", "cmd.exe", "/c", "echo", "%PATH%"][is_win32:]
  CUR_PATH = call_read(echo_cmd).rstrip('\r\n') # Remove trailing \r\n.
  CUR_PATH = tounicode(CUR_PATH)
  if not is_win32 and '/' in paths: # Convert the Unix paths to Windows paths.
    winepath = lambda p: call_read(["winepath", "-w", p])[:-1]
    path_list = map(winepath, path_list)
    path_list = tounicodes(path_list)

  # 2. Avoid appending the same path(s) again. Strip '\' before comparing.
  op_list = [p.rstrip('\\') for p in CUR_PATH.split(sep)]
  path_list = filter((lambda p: p.rstrip('\\') not in op_list), path_list)
  if not path_list: return

  # 3. Create a temporary reg file.
  paths = sep.join(path_list)
  NEW_PATH = CUR_PATH + sep + paths + '\0' # Join with the current PATH.
  if 1 or is_win32: # Encode in hex.
    NEW_PATH = NEW_PATH.encode('utf-16')[2:].encode('hex')
    NEW_PATH = ','.join(chunks(NEW_PATH, 2)) # Comma separated byte values.
    var_type = 'hex(2)'
  #else: # Escape backslashes and wrap in quotes.
    #NEW_PATH = '"%s"' % NEW_PATH.replace('\\', '\\\\')
    #var_type = 'str(2)'
  # Write to "tmp_path/newpath.reg".
  tmp_reg = Path(tmp_path)/"newpath.reg"
  tmp_reg.open("w", "utf-16").write("""Windows Registry Editor Version 5.00\r
\r
[HKEY_LOCAL_MACHINE\\System\\CurrentControlSet\
Control\\Session Manager\\Environment]\r
"Path"=%s:%s\r\n""" % (var_type, NEW_PATH))

  # 4. Apply the reg file to the registry. "/s" means run silently.
  regedit = ["wine", "regedit.exe", "/s", tmp_reg][is_win32:]
  subprocess.call(regedit)
Example #19
def append2PATH(paths, tmp_path=""):
  """ Appends the given argument to the PATH variable in the Windows registry.
      The paths argument can contain multiple paths separated by ';'. """
  from common import is_win32, Path, call_proc, call_read, chunks
  sep = ";"
  # Split by sep, make absolute and normalize.
  path_list = map(lambda p: Path(p).abspath.normpath, paths.split(sep))

  # 1. Get the current PATH value.
  echo_cmd = ["wine", "cmd.exe", "/c", "echo", "%PATH%"][is_win32:]
  CUR_PATH = call_read(echo_cmd).rstrip('\r\n') # Remove trailing \r\n.
  if not is_win32 and '/' in paths: # Convert the Unix paths to Windows paths.
    winepath = lambda p: call_read("winepath", "-w", p)[:-1]
    path_list = map(winepath, path_list)

  # 2. Avoid appending the same path(s) again. Strip '\' before comparing.
  op_list = [p.rstrip('\\') for p in CUR_PATH.split(sep)]
  path_list = filter((lambda p: p.rstrip('\\') not in op_list), path_list)
  if not path_list: return

  # 3. Create a temporary reg file.
  paths = sep.join(path_list)
  NEW_PATH = CUR_PATH + sep + paths + '\0' # Join with the current PATH.
  if 1 or is_win32: # Encode in hex.
    NEW_PATH = NEW_PATH.encode('utf-16')[2:].encode('hex')
    NEW_PATH = ','.join(chunks(NEW_PATH, 2)) # Comma separated byte values.
    var_type = 'hex(2)'
  #else: # Escape backslashes and wrap in quotes.
    #NEW_PATH = '"%s"' % NEW_PATH.replace('\\', '\\\\')
    #var_type = 'str(2)'
  # Write to "tmp_path/newpath.reg".
  tmp_reg = Path(tmp_path)/"newpath.reg"
  tmp_reg.write("""Windows Registry Editor Version 5.00\r
\r
[HKEY_LOCAL_MACHINE\\System\\CurrentControlSet\\\
Control\\Session Manager\\Environment]\r
"Path"=%s:%s\r\n""" % (var_type, NEW_PATH), encoding="utf-16")

  # 4. Apply the reg file to the registry. "/s" means run silently.
  regedit = ["wine", "regedit.exe", "/s", tmp_reg][is_win32:]
  call_proc(regedit)
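
The interesting detail in the two append2PATH variants above is step 3: the new PATH value is written as a hex(2) (REG_EXPAND_SZ) registry entry, i.e. its UTF-16 bytes rendered as comma-separated hex pairs. A Python 3 sketch of just that encoding (the snippets above are Python 2, where bytes still have an encode('hex') method):

# Sketch of the hex(2) encoding used in step 3 above (Python 3 syntax).
def chunks(s, n):
    return [s[i:i + n] for i in range(0, len(s), n)]

new_path = "C:\\old;C:\\new" + "\0"        # value must be NUL-terminated
raw = new_path.encode("utf-16")[2:]         # drop the UTF-16 BOM, keep the little-endian bytes
hex_str = raw.hex()                         # e.g. '43003a005c006f...'
reg_value = ",".join(chunks(hex_str, 2))    # comma-separated byte values for the .reg file
print('"Path"=hex(2):' + reg_value)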
Example #20
def evaluate_pathname(parameters,
                      video_pathname,
                      log_pathname=None,
                      overlap_rate=kitti_rate):
    phodopus_evaluate_command = (
        command_pathname,
        '-v',
        video_pathname,
        '--proto',
        parameters.proto_pathname,
        '--model',
        parameters.model_pathname,
        '--mean',
        parameters.mean_pathname,
        '-c',
        parameters.cascade_model_pathname,
        '--lf_proto',
        parameters.lf_proto_pathname,
        '--lf_model',
        parameters.lf_model_pathname,
        '--lf_mean',
        parameters.lf_mean_pathname,
        '--noshow',
    )
    phodopus_evaluate_command = map(str, phodopus_evaluate_command)
    logging.debug('start to use {} to evaluate {}'.format(
        parameters.cascade_model_pathname.name, video_pathname.name))
    log_file = _unwrap_or_tempfile(log_pathname, 'w')
    null_file = open(os.devnull, 'w')
    try:
        start = time.time()
        subprocess.call(phodopus_evaluate_command,
                        stdout=log_file,
                        stderr=null_file)
        end = time.time()
        elapsed_time = end - start
        if video_pathname.suffix == '':
            frame_count = len(list(video_pathname.iterdir()))
        else:
            video = cv2.VideoCapture(str(video_pathname))
            frame_count = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
            video.release()
        logging.debug('use {} to evaluate {} done'.format(
            parameters.cascade_model_pathname.name, video_pathname.name))
        log_pathname = Path(log_file.name)
        label_pathname = video_pathname.with_suffix('.txt')
        return (
            parse(log_pathname, label_pathname, overlap_rate=overlap_rate) +
            (frame_count, elapsed_time))
    finally:
        log_file.close()
        null_file.close()
Example #21
def exception_handling(e,
                       index=None,
                       test_name=None,
                       method_name=None,
                       op=None):
    """
    错误处理
    :param e: 报错内容
    :param index: 用例编号
    :param test_name: 测试用例名称
    :param method_name: 测试用例对应方法
    :param op: 操作驱动
    :return:
    """
    global l
    logging.error(e)
    path = Path.report_path() + runtime.test_start_time() + '_error'
    mkdir_log_directory.mk_dir(path)  # create the error log directory
    path1 = Path.log_path() + runtime.test_start_time() + '.log'
    if index:
        log_error = path + '\\' + test_name.decode('utf8') + '.txt'  # error log file for this case
        way = path + '\\' + method_name + runtime.test_start_time() + '.png'
        op.screen(way, path)  # take a screenshot
        log.error_log(path1, log_error, test_name)
        if 'AssertionError' in e:
            for i in range(0, len(l)):
                if index == l[i][0]:
                    l[i].append('fail')
                    l[i].append(log_error)
                    l[i].append(way)
        else:
            for i in range(0, len(l)):
                if index == l[i][0]:
                    l[i].append('error')
                    l[i].append(log_error)
                    l[i].append(way)
    else:
        log_error = path + '\\' + 'error.txt'  # error log file
        log.error_log(path1, log_error)
Example #22
def cpu_rate(pd, cpu_num, devices):
    """
    计算某进程的cpu使用率
    100*( processCpuTime2 – processCpuTime1) / (totalCpuTime2 – totalCpuTime1) (按100%计算,如果是多核情况下还需乘以cpu的个数);
    cpu_num cpu几核
    pid 进程id
    :param pd:  应用运行的PID
    :param cpu_num:   CPU个数
    :param devices:  设备的唯一标识
    :return:
    """
    process_cpu_time1 = process_cpu_time(pd, devices)
    # print(process_cpu_time1)
    time.sleep(1)
    process_cpu_time2 = process_cpu_time(pd, devices)
    # print(process_cpu_time2)
    process_cpu_time3 = process_cpu_time2 - process_cpu_time1
    # print(process_cpu_time3)
    total_cpu_time1 = total_cpu_time(devices)
    # print(total_cpu_time1)
    time.sleep(1)
    total_cpu_time2 = total_cpu_time(devices)
    # print(total_cpu_time2)
    total_cpu_time3 = (total_cpu_time2 - total_cpu_time1) * cpu_num
    # print(total_cpu_time3)
    logging.info("totalCpuTime3=" + str(total_cpu_time3))
    logging.info("processCpuTime3=" + str(process_cpu_time3))
    # try:
    #     cpu = 100 * process_cpu_time3 / total_cpu_time3
    # except:
    #     cpu = 0
    cpu = 100 * process_cpu_time3 / total_cpu_time3
    # Pickle.write_info(cpu, PATH("../info/" + devices + "_cpu.pickle"))
    Pickle.write_info(
        cpu,
        Path.scan_files(select_path=Path.info_path(),
                        postfix=''.join(devices.split(':')) + '_cpu.pickle'))
    logging.info("CPU使用率: " + str(cpu) + '%')
Example #23
def main():
    command_pathname = caffe_directory / 'build/tools/caffe'
    solver_pathname = Path('')
    command = [
        command_pathname,
        'train',
        '-gpu',
        'all',
        '-solver',
        solver_pathname,
        '-shuffle',
    ]
    command = list(map(str, command))
    logging.info(' '.join(command))
    subprocess.call(command)
Example #24
    def __init__(self):
        try:
            desired_caps = {}
            desired_caps['platformName'] = 'Android'
            desired_caps['platformVersion'] = Get_Phone.get_android_version()     # device OS version
            desired_caps['deviceName'] = Get_Phone.get_device_name()        # device name
            desired_caps['app'] = PATH(Path.scan_files(postfix='.apk'))     # app under test
#           desired_caps['appPackage'] = 'com.sixty.nidoneClient'
#           desired_caps['appActivity'] = 'com.sixty.nidoneClient.view.activity.SDK_WebApp'
            desired_caps['unicodeKeyboard'] = True
            desired_caps['resetKeyboard'] = True
#           If 'app' is set to the APK's path on the computer, appPackage and appActivity are not needed, and vice versa
            self.driver = webdriver.Remote("http://localhost:4723/wd/hub", desired_caps)
        except Exception as e:
            creat_case.exception_handling(e)
            raise Custom_exception.GetDriverError
Example #25
def _evaluate_num_stage(testset_dir, cascade_model_dir, parameters,
                        overlap_rate, fb_measure_weight, num_stage):
    logging.info('starting to evaluate cascade{}.xml'.format(num_stage))
    filename = Path('{}{}{}'.format('cascade', num_stage, '.xml'))
    parameters = parameters._replace(cascade_model_pathname=cascade_model_dir /
                                     filename)
    tuple_results = [
        evaluate_pathname(parameters,
                          pathname,
                          log_pathname=None,
                          overlap_rate=overlap_rate)
        for pathname in testset_dir.iterdir()
        if pathname.suffix in video_suffixes
    ]
    results = zip(*tuple_results)
    return statistics(*map(sum, results), fb_measure_weight=fb_measure_weight)
Example #26
def send_email(test_time):
    address = configemail.ConfigEmail()
    # sender address
    From = address.get_sender()
    # recipient addresses; separate multiple recipients with commas
    To = address.get_addressee()
    # attachment file name
    file_name = Path.report_path() + test_time + '.html'

    server = smtplib.SMTP(address.get_smtp())
    # only needed when the SMTP server requires authentication
    server.login(address.get_login(), address.get_authorization_code())

    # build a MIMEMultipart object as the root container
    main_msg = email.MIMEMultipart.MIMEMultipart()

    # build a MIMEText object as the message body and attach it to the root container
    text_msg = email.MIMEText.MIMEText("Nidone test report", charset="utf-8")
    main_msg.attach(text_msg)

    # build a MIMEBase object for the file attachment and attach it to the root container
    ctype, encoding = mimetypes.guess_type(file_name)
    if ctype is None or encoding is not None:
        ctype = 'application/octet-stream'
    maintype, subtype = ctype.split('/', 1)
    file_msg = email.MIMEImage.MIMEImage(open(file_name, 'rb').read(), subtype)
    logging.info('%s %s', ctype, encoding)
    # set the attachment header
    basename = os.path.basename(file_name)
    # modify the message header
    file_msg.add_header('Content-Disposition', 'attachment', filename=basename)
    main_msg.attach(file_msg)

    # set attributes on the root container
    main_msg['From'] = From
    main_msg['To'] = To
    main_msg['Subject'] = "Nidone test report"
    main_msg['Date'] = email.Utils.formatdate()

    # get the full formatted message text
    fulltext = main_msg.as_string()

    # send the mail via SMTP
    try:
        server.sendmail(From, To, fulltext)
    finally:
        server.quit()
Example #27
def _evaluate_pathname(video_pathname, log_pathname, threshold):
    squirrel_evaluate_command = (
            command_pathname,
            '-v', video_pathname,
            '--noshow',  # this script deliberately does not show the video, to speed up evaluation
    )
    squirrel_evaluate_command = map(str, squirrel_evaluate_command)
    logging.debug('start to evaluate {}'.format(video_pathname.name))
    log_file = _unwrap_or_tempfile(log_pathname, 'w')
    null_file = open(os.devnull, 'w')
    try:
        start = time.time()
        subprocess.call(
                squirrel_evaluate_command,
                stdout=log_file,
                stderr=null_file)
        end = time.time()
        elapsed_time = end - start
        if video_pathname.suffix == '':
            frame_count = len(list(video_pathname.iterdir()))
        else:
            video = cv2.VideoCapture(str(video_pathname))
            frame_count = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
            video.release()
        logging.debug('evaluate {} done'.format(video_pathname.name))
        log_pathname = Path(log_file.name)
        label_pathname = video_pathname.with_suffix('.txt')
        tp_pathname = video_pathname.with_suffix('.tp')
        fp_pathname = video_pathname.with_suffix('.fp')
        fn_pathname = video_pathname.with_suffix('.fn')
        tp_count, fp_count, fn_count = parse(
                log_pathname,
                label_pathname,
                tp_pathname,
                fp_pathname,
                fn_pathname,
                threshold)
        return statistics(
                tp_count,
                fp_count,
                fn_count,
                frame_count,
                elapsed_time)
    finally:
        log_file.close()
        null_file.close()
Example #28
 def __init__(self):
     config = configparser.ConfigParser()
     config.read(Path.scan_files(prefix='email_address'))
     try:
         # sender address
         self.Sender = config['email_address']['Sender']
         # recipient addresses; separate multiple recipients with commas
         self.Addressee = config['email_address']['Addressee']
         # third-party SMTP server, e.g. NetEase's smtp.163.com
         self.smtp = config['email_address']['smtp']
         # login account for authorization
         self.login = config['email_address']['login']
         # authorization code
         self.AuthorizationCode = config['email_address'][
             'AuthorizationCode']
     except Exception as e:
         creat_case.exception_handling(e, "邮件信息初始化")
         raise Custom_exception.MailInitializationError
Example #29
def report(info, devices):
    try:
        logging.info('Initializing the test report')
        workbook = xlsxwriter.Workbook(Path.report_path() +
                                       ''.join(devices.split(':')) + 'report.xlsx')
        bo = OperateReport(workbook)
        logging.info('Generating the monitoring report')
        bo.monitor(info)
        logging.info('Generating the crash log')
        bo.crash()
        logging.info('Generating the detailed report')
        bo.analysis(info)
        bo.close()
        logging.info('Report generated successfully')
    except Exception as e:
        logging.error('Failed to generate the test report')
        logging.error(e)
        raise
Example #30
def generate_background(cascade_pathname, scale_down_ratio):
    background_pathname = directory / 'background.txt'
    for image_pathname in background_directory.iterdir():
        image_pathname.unlink()
    with background_pathname.open('w') as background_file:
        for pathname in trainset_directory.iterdir():
            if pathname.suffix == '' and Path(pathname / '0.jpg').exists():
                logging.debug('detect {}'.format(pathname))
                log_pathname = pathname.with_suffix('.log')
                label_pathname = pathname.with_suffix('.txt')
                fp_pathname = pathname.with_suffix('.fp')
                with log_pathname.open('w') as log_file:
                    detect(pathname, cascade_pathname, scale_down_ratio,
                           log_file)
                phodopus.parse(log_pathname,
                               label_pathname,
                               fp_pathname=fp_pathname)
                with fp_pathname.open() as fp_file:
                    for line in fp_file:
                        image_filename_str, rects_str = line.split(' ', 1)
                        for index, rect_str in enumerate(rects_str.split()):
                            rect = Rect(rect_str)
                            image = cv2.imread(
                                str(pathname / image_filename_str))
                            image = image[rect.y:rect.y + rect.height,
                                          rect.x:rect.x + rect.width]
                            width = int(image.shape[1] / scale_down_ratio)
                            height = int(image.shape[0] / scale_down_ratio)
                            image = cv2.resize(image, (width, height))
                            new_image_filename_str = '{}-{}-{}.jpg'.format(
                                pathname.name,
                                image_filename_str.split('.')[0], index)
                            new_image_pathname = (background_directory /
                                                  new_image_filename_str)
                            cv2.imwrite(str(new_image_pathname), image)
                            background_file.write('background/{}\n'.format(
                                new_image_filename_str))
    shuf_command = [
        'shuf',
        str(background_pathname), '-o',
        str(background_pathname)
    ]
    shuf_command = map(str, shuf_command)
    subprocess.call(shuf_command)
Example #31
def main():
    directory = Path('/home/acgtyrant/BigDatas/car')
    samples_directory = directory / 'samples'
    if not samples_directory.exists():
        samples_directory.mkdir()
    trainset_directory = directory / 'trainset'
    info_pathname = directory / 'positive_samples.txt'
    for label_pathname in [
            pathname for pathname in trainset_directory.iterdir()
            if pathname.suffix == '.txt'
    ]:
        try:
            sample_pathname = find_sample(label_pathname)
            _handle_video(label_pathname, sample_pathname, samples_directory)
            _handle_info(sample_pathname, info_pathname)
        except Exception:
            samples_directory.rmdir()
            info_pathname.unlink()
            raise
        logging.info('handle {} done'.format(label_pathname))