Example #1
def juggle_route(route_name, segment_number, qlog):
    r = Route(route_name)

    logs = r.qlog_paths() if qlog else r.log_paths()
    if segment_number is not None:
        logs = logs[segment_number:segment_number + 1]

    if None in logs:
        fallback_answer = input(
            "At least one of the rlogs in this segment does not exist, would you like to use the qlogs? (y/n) : "
        )
        if fallback_answer == 'y':
            logs = r.qlog_paths()
            if segment_number is not None:
                logs = logs[segment_number:segment_number + 1]
        else:
            print(
                f"Please try a different {'segment' if segment_number is not None else 'route'}"
            )
            return

    all_data = []
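    # Load every segment in parallel and concatenate the returned messages into one list.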
    with multiprocessing.Pool(24) as pool:
        for d in pool.map(load_segment, logs):
            all_data += d

    tempfile = NamedTemporaryFile(suffix='.rlog')
    save_log(tempfile.name, all_data, compress=False)
    del all_data

    juggle_file(tempfile.name)
Example #2
def juggle_route(route_or_segment_name, segment_count, qlog, can, layout):
    segment_start = 0
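    # Accept a cabana share URL, a plain URL or local file path, or a route/segment name.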
    if 'cabana' in route_or_segment_name:
        query = parse_qs(urlparse(route_or_segment_name).query)
        api = CommaApi(get_token())
        logs = api.get(
            f'v1/route/{query["route"][0]}/log_urls?sig={query["sig"][0]}&exp={query["exp"][0]}'
        )
    elif route_or_segment_name.startswith(("http://", "https://")) or os.path.isfile(route_or_segment_name):
        logs = [route_or_segment_name]
    else:
        route_or_segment_name = SegmentName(route_or_segment_name,
                                            allow_route_name=True)
        segment_start = max(route_or_segment_name.segment_num, 0)

        if route_or_segment_name.segment_num != -1 and segment_count is None:
            segment_count = 1

        r = Route(route_or_segment_name.route_name.canonical_name)
        logs = r.qlog_paths() if qlog else r.log_paths()

    segment_end = segment_start + segment_count if segment_count else None
    logs = logs[segment_start:segment_end]

    if None in logs:
        ans = input(
            f"{logs.count(None)}/{len(logs)} of the rlogs in this segment are missing, would you like to fall back to the qlogs? (y/n) "
        )
        if ans == 'y':
            logs = r.qlog_paths()[segment_start:segment_end]
        else:
            print("Please try a different route or segment")
            return

    all_data = []
    with multiprocessing.Pool(24) as pool:
        for d in pool.map(load_segment, logs):
            all_data += d

    if not can:
        all_data = [d for d in all_data if d.which() not in ['can', 'sendcan']]

    # Infer DBC name from logs
    dbc = None
    for cp in [m for m in all_data if m.which() == 'carParams']:
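        # Only the first carParams message is used; the unconditional break below stops the loop after it.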
        try:
            DBC = __import__(f"selfdrive.car.{cp.carParams.carName}.values",
                             fromlist=['DBC']).DBC
            dbc = DBC[cp.carParams.carFingerprint]['pt']
        except (ImportError, KeyError, AttributeError):
            pass
        break

    with tempfile.NamedTemporaryFile(suffix='.rlog', dir=juggle_dir) as tmp:
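        # Write the merged messages to a temporary rlog and launch the juggler on it.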
        save_log(tmp.name, all_data, compress=False)
        del all_data
        start_juggler(tmp.name, dbc, layout)
Example #3
def juggle_route(route_name, segment_number, segment_count, qlog, can, layout):
    if 'cabana' in route_name:
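        # A cabana share link carries signed log URLs; fetch them via the comma API.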
        query = parse_qs(urlparse(route_name).query)
        api = CommaApi(get_token())
        logs = api.get(
            f'v1/route/{query["route"][0]}/log_urls?sig={query["sig"][0]}&exp={query["exp"][0]}'
        )
    elif route_name.startswith("http://") or route_name.startswith(
            "https://") or os.path.isfile(route_name):
        logs = [route_name]
    else:
        r = Route(route_name)
        logs = r.qlog_paths() if qlog else r.log_paths()

    if segment_number is not None:
        logs = logs[segment_number:segment_number + segment_count]

    if None in logs:
        fallback_answer = input(
            "At least one of the rlogs in this segment does not exist, would you like to use the qlogs? (y/n) : "
        )
        if fallback_answer == 'y':
            logs = r.qlog_paths()
            if segment_number is not None:
                logs = logs[segment_number:segment_number + segment_count]
        else:
            print(
                f"Please try a different {'segment' if segment_number is not None else 'route'}"
            )
            return

    all_data = []
    with multiprocessing.Pool(24) as pool:
        for d in pool.map(load_segment, logs):
            all_data += d

    if not can:
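        # Drop raw CAN traffic unless it was explicitly requested.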
        all_data = [d for d in all_data if d.which() not in ['can', 'sendcan']]

    # Infer DBC name from logs
    dbc = None
    for cp in [m for m in all_data if m.which() == 'carParams']:
        try:
            DBC = __import__(f"selfdrive.car.{cp.carParams.carName}.values",
                             fromlist=['DBC']).DBC
            dbc = DBC[cp.carParams.carFingerprint]['pt']
        except (ImportError, KeyError, AttributeError):
            pass
        break

    tempfile = NamedTemporaryFile(suffix='.rlog', dir=juggle_dir)
    save_log(tempfile.name, all_data, compress=False)
    del all_data

    start_juggler(tempfile.name, dbc, layout)
Example #4
def juggle_route(route_name, segment_number, qlog, can, layout):

    if route_name.startswith("http://") or route_name.startswith(
            "https://") or os.path.isfile(route_name):
        logs = [route_name]
    else:
        r = Route(route_name)
        logs = r.qlog_paths() if qlog else r.log_paths()

    if segment_number is not None:
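        # Restrict the log list to the single requested segment.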
        logs = logs[segment_number:segment_number + 1]

    if None in logs:
        fallback_answer = input(
            "At least one of the rlogs in this segment does not exist, would you like to use the qlogs? (y/n) : "
        )
        if fallback_answer == 'y':
            logs = r.qlog_paths()
            if segment_number is not None:
                logs = logs[segment_number:segment_number + 1]
        else:
            print(
                f"Please try a different {'segment' if segment_number is not None else 'route'}"
            )
            return

    all_data = []
    with multiprocessing.Pool(24) as pool:
        for d in pool.map(load_segment, logs):
            all_data += d

    if not can:
        all_data = [d for d in all_data if d.which() not in ['can', 'sendcan']]

    # Infer DBC name from logs
    dbc = None
    for cp in [m for m in all_data if m.which() == 'carParams']:
        try:
            DBC = __import__(f"selfdrive.car.{cp.carParams.carName}.values",
                             fromlist=['DBC']).DBC
            dbc = DBC[cp.carParams.carFingerprint]['pt']
        except (ImportError, KeyError, AttributeError):
            pass
        break

    tempfile = NamedTemporaryFile(suffix='.rlog', dir=juggle_dir)
    save_log(tempfile.name, all_data, compress=False)
    del all_data

    juggle_file(tempfile.name, dbc, layout)
Example #5
def juggle_route(route_name):
  r = Route(route_name)
  all_data = []
  with multiprocessing.Pool(24) as pool:
    for d in pool.map(load_segment, r.log_paths()):
      all_data += d

  tempfile = NamedTemporaryFile(suffix='.rlog')
  save_log(tempfile.name, all_data, compress=False)
  del all_data

  juggle_file(tempfile.name)
Example #6
def run_test_process(data):
  segment, cfg, args, cur_log_fn, ref_log_path, lr_dat = data
  res = None
  if not args.upload_only:
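    # Replay this process over the segment and compare the output against the reference log.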
    lr = LogReader.from_bytes(lr_dat)
    res, log_msgs = test_process(cfg, lr, ref_log_path, args.ignore_fields, args.ignore_msgs)
    # save logs so we can upload when updating refs
    save_log(cur_log_fn, log_msgs)

  if args.update_refs or args.upload_only:
    print(f'Uploading: {os.path.basename(cur_log_fn)}')
    assert os.path.exists(cur_log_fn), f"Cannot find log to upload: {cur_log_fn}"
    upload_file(cur_log_fn, os.path.basename(cur_log_fn))
    os.remove(cur_log_fn)
  return (segment, cfg.proc_name, res)
Example #7
        ]
        results: Any = {TEST_ROUTE: {}}
        results[TEST_ROUTE]["modeld"] = compare_logs(cmp_log,
                                                     log_msgs,
                                                     ignore_fields=ignore)
        diff1, diff2, failed = format_diff(results, ref_commit)

        print(diff1)
        with open("model_diff.txt", "w") as f:
            f.write(diff2)

    if update or failed:
        from selfdrive.test.openpilotci import upload_file

        print("Uploading new refs")

        new_commit = get_git_commit()
        log_fn = "%s_%s_%s.bz2" % (TEST_ROUTE, "model", new_commit)
        save_log(log_fn, log_msgs)
        try:
            upload_file(log_fn, os.path.basename(log_fn))
        except Exception as e:
            print("failed to upload", e)

        with open(ref_commit_fn, 'w') as f:
            f.write(str(new_commit))

        print("\n\nNew ref commit: ", new_commit)

    sys.exit(int(failed))
Example #8
    ref_commit = get_git_commit()
    if ref_commit is None:
        raise Exception("couldn't get ref commit")
    with open(ref_commit_fn, "w") as f:
        f.write(ref_commit)

    for car_brand, segment in segments:
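        # Inject fresh model outputs into the original rlog; the result is named after the new ref commit.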
        rlog_fn = get_segment(segment, original=True)

        if rlog_fn is None:
            print("failed to get segment %s" % segment)
            sys.exit(1)

        lr = LogReader(rlog_fn)
        print('injecting model into %s' % segment)
        lr = inject_model(lr, segment)

        route_name, segment_num = segment.rsplit("--", 1)
        log_fn = "%s/%s/rlog_%s.bz2" % (route_name.replace(
            "|", "/"), segment_num, ref_commit)
        tmp_name = 'tmp_%s_%s' % (route_name, segment_num)
        save_log(tmp_name, lr)

        if not no_upload:
            upload_file(tmp_name, log_fn)
            print('uploaded %s' % log_fn)
            os.remove(tmp_name)
        os.remove(rlog_fn)

    print("done")
Example #9
import argparse

from selfdrive.test.process_replay.compare_logs import save_log
from selfdrive.test.process_replay.process_replay import CONFIGS, replay_process
from tools.lib.logreader import MultiLogIterator
from tools.lib.route import Route

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Run process on route and create new logs",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument("route", help="The route name to use")
    parser.add_argument("process", help="The process to run")
    args = parser.parse_args()

    cfg = [c for c in CONFIGS if c.proc_name == args.process][0]

    route = Route(args.route)
    lr = MultiLogIterator(route.log_paths())
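    # Materialize every message from all of the route's rlogs.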
    inputs = list(lr)

    outputs = replay_process(cfg, inputs)

    # Remove message generated by the process under test and merge in the new messages
    produces = {o.which() for o in outputs}
    inputs = [i for i in inputs if i.which() not in produces]
    outputs = sorted(inputs + outputs, key=lambda x: x.logMonoTime)

    fn = f"{args.route}_{args.process}.bz2"
    save_log(fn, outputs)
Example #10
        w = msg.which()

        if w == 'liveCalibration':
            pm.send(w, msg.as_builder())

        if w == 'frame':
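            # Attach the decoded camera image to the frame message before publishing it for the model.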
            msg = msg.as_builder()

            frame_id = msg.frame.frameId
            img = frame_reader.get(frame_id, pix_fmt="rgb24")[:, :, ::-1]

            msg.frame.image = img.flatten().tobytes()
            pm.send(w, msg)

            model = recv_one(model_sock)
            model = model.as_builder()
            model.logMonoTime = 0
            model = model.as_reader()
            out_msgs.append(model)

    save_log(args.output_path, out_msgs)

    # tm = model.logMonoTime / 1.0e9
    # model = model.model
    #     append_dict("model/data/path", tm, model.path.to_dict(), values)
    #     append_dict("model/data/left_lane", tm, model.leftLane.to_dict(), values)
    #     append_dict("model/data/right_lane", tm, model.rightLane.to_dict(), values)
    #     append_dict("model/data/lead", tm, model.lead.to_dict(), values)

    # save_dict_as_column_store(values, os.path.join(args.output_path, "LiveVisionD", args.segment_name))
Example #11
        for cfg in CONFIGS:
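            # Respect the process whitelist/blacklist arguments.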
            if (len(args.whitelist_procs) and cfg.proc_name not in args.whitelist_procs) or \
               (not len(args.whitelist_procs) and cfg.proc_name in args.blacklist_procs):
                continue

            cur_log_fn = os.path.join(
                FAKEDATA, f"{segment}_{cfg.proc_name}_{cur_commit}.bz2")
            if not args.upload_only:
                ref_log_fn = os.path.join(
                    FAKEDATA, f"{segment}_{cfg.proc_name}_{ref_commit}.bz2")
                results[segment][cfg.proc_name], log_msgs = test_process(
                    cfg, lr, ref_log_fn, args.ignore_fields, args.ignore_msgs)

                # save logs so we can upload when updating refs
                save_log(cur_log_fn, log_msgs)

            if upload:
                print(f'Uploading: {os.path.basename(cur_log_fn)}')
                assert os.path.exists(
                    cur_log_fn), f"Cannot find log to upload: {cur_log_fn}"
                upload_file(cur_log_fn, os.path.basename(cur_log_fn))
                os.remove(cur_log_fn)

    diff1, diff2, failed = format_diff(results, ref_commit)
    if not args.upload_only:
        with open(os.path.join(PROC_REPLAY_DIR, "diff.txt"), "w") as f:
            f.write(diff2)
        print(diff1)

        if failed: