Exemplo n.º 1
0
def reducto_optimizer(video):
    """Grid-search Reducto (distance, safe) threshold pairs for *video*.

    Evaluates every (distance, safe) combination in parallel via a process
    pool, dumps all evaluation records to a JSON file under
    data/focus/optimization/, and prints the configuration with the lowest
    'frac_mean' among those exceeding the query's target accuracy.

    Args:
        video: dict with at least 'queries' (list of query dicts containing
            'metrics' and 'target_acc') and 'dataset' keys — assumed schema,
            confirm against callers.
    """
    # distance sweeps 0.0..2.0 (step 0.1); safe sweeps -0.100..0.095 (step 0.005).
    distances = [i / 100 for i in range(0, 201, 10)]
    safes = [(i - 100) / 1000 for i in range(0, 200, 5)]
    dist_safe_prod = list(product(distances, safes))

    # Only the first query of the video is optimized.
    query = video['queries'][0]
    with multiprocessing.Pool() as pool:
        result = pool.map(
            partial(reducto_optimizer_acc, _video=video, _query=query),
            dist_safe_prod)

    # Pair each (distance, safe) combination with its evaluation record;
    # pool.map preserves input order, so zip aligns them correctly.
    output = [{
        'distance': distance,
        'safe': safe,
        **record,
    } for (distance, safe), record in zip(dist_safe_prod, result)]
    output_path = Path(
        'data'
    ) / 'focus' / 'optimization' / f'{video["dataset"]}-{query["metrics"]}-{query["target_acc"]:.2f}.json'
    dump_json(output, output_path, mkdir=True)
    print(f'dumped to {output_path}')

    # Among configurations beating the target accuracy (accs[1] is the
    # relevant accuracy — presumably; verify against reducto_optimizer_acc),
    # report the one processing the smallest fraction of frames.
    data_above_acc = [d for d in output if d['accs'][1] > query['target_acc']]
    if not data_above_acc:
        # Previously this raised IndexError; report the situation instead.
        print('no configuration reached the target accuracy')
        return
    pprint(min(data_above_acc, key=lambda x: x['frac_mean']))
Exemplo n.º 2
0
        ).first()
        if inference_record:
            inference = inference_record.to_json()
        else:
            inference = model.infer_video(segment)
            inference_record = Inference(
                segment=segment_record,
                model=model.name,
                result=[
                    InferenceResult.from_json(inf)
                    for _, inf in inference.items()
                ],
            )
            inference_record.save()
        dump_json(
            inference,
            f'data/inference/{dataset_name}/{segment.parent.name}/{segment.stem}.json',
            mkdir=True)

        # -- skip if required ------------------------------------------
        if args.skip_diffeval:
            pbar.update()
            continue

        # -- evaluation ------------------------------------------------
        frame_pairs = evaluator.get_frame_pairs(inference, diff_results)

        per_frame_evaluations = {}
        for metric in evaluator.keys:
            metric_evaluations = FrameEvaluation.objects(
                segment=segment_record, evaluator=metric)
            pairs = [(me.ground_truth, me.comparision)
Exemplo n.º 3
0
            if len(seg_inf.keys()) != len(seg_scaled_inf.keys()):
                continue

            frame_evals = [
                evaluator.evaluate_single_frame(seg_inf[fid],
                                                seg_scaled_inf[fid])
                for fid in list(seg_inf.keys())
            ]
            # only valid for mAP metrics
            avg_evals = {
                metric:
                sum(evl[metric] for evl in frame_evals) / len(frame_evals)
                for metric in evaluator.keys
            }
            result.append({
                'subset': seg.parent.name,
                'segment': seg.name,
                'scale': scale,
                **avg_evals,
            })

            eval_str = f'{seg.parent.name}/{seg.stem} ' + ','.join(
                f'{k}={v:.4f}' for k, v in avg_evals.items())
            pbar.set_postfix_str(eval_str)

            pbar.update()

        dump_json(result, result_json_path)
        pbar.close()
        mongoengine.disconnect()