Code Example #1
# Assumed imports for this excerpt; log, now, pcache, make_cluster, notifier,
# and TestFailure are helpers from the surrounding project codebase.
import traceback
import subprocess as sp
from time import strftime

import pandas as pd

def bench(name, args, run_pipeline, configs, force=False, no_delete=False):
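    """Run `run_pipeline` for every (cluster config, job config) pair, cache the
    results, and write an HTML report of cost per video and CPU/memory usage."""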

    sample_size = len(args['videos'])

    def run_name(cluster_config, job_config):
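        # Encode every tuning parameter into a unique run key, used both for
        # caching results and for naming trace files.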
        worker_type = cluster_config.worker.type
        return '{name}-{cpu}cpu-{mem}mem-{batch}batch-{wpkt}wpkt-{iopkt}iopkt-{ldwk}ldwk-{svwk}svwk-{vid}vid'.format(
            name=name,
            cpu=worker_type.get_cpu(),
            mem=worker_type.get_mem(),
            batch=job_config.batch,
            wpkt=job_config.work_packet_size,
            iopkt=job_config.io_packet_size,
            ldwk=cluster_config.num_load_workers,
            svwk=cluster_config.num_save_workers,
            vid=sample_size)

    def run_config(args, db_wrapper, job_config):
        db = db_wrapper.db

        # Start the Scanner job
        log.info('Starting Scanner job')

        run_opts = {
            'io_packet_size': job_config.io_packet_size,
            'work_packet_size': job_config.work_packet_size,
        }
        ppw = job_config.pipelines_per_worker
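        # A value of -1 leaves Scanner's pipeline_instances_per_node at its default.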
        if ppw != -1:
            run_opts['pipeline_instances_per_node'] = ppw

        run_pipeline(db, detach=True, run_opts=run_opts, **args)

        # Wait until it succeeds or crashes
        start = now()
        log.info('Monitoring cluster')
        result, metrics = db_wrapper.cluster.monitor(db)
        elapsed = now() - start

        # If the job crashed, restart the cluster (it may be in a bad state)
        # and surface the failure; in practice this is usually a worker OOM.
        if not result:
            db_wrapper.cluster.start()
            raise TestFailure("Out of memory")

        # Write out profile if run succeeded
        outputs = run_pipeline(db, no_execute=True, **args)
        try:
            outputs[0]._column._table.profiler().write_trace(
                '/app/data/traces/{}.trace'.format(
                    run_name(db_wrapper.cluster.config(), job_config)))
        except Exception:
            log.error('Failed to write trace')
            traceback.print_exc()

        return elapsed, pd.DataFrame(metrics)

    def test_config(args, db_wrapper, cluster_config, job_config):
        elapsed, metrics = run_config(args, db_wrapper, job_config)

        if elapsed is not None:
            # Amortize the hourly cluster price (master excluded) over the sampled videos.
            price_per_hour = cluster_config.price(no_master=True)
            price_per_video = (elapsed / 3600.0) * price_per_hour / float(sample_size)
            return price_per_video, metrics
        else:
            return None

    results = []

    for (cluster_config, job_configs) in configs:

        # Only bring up the cluster if there exists a job config that hasn't been computed
        if not force and all([
                pcache.has(run_name(cluster_config, job_config))
                for job_config in job_configs
        ]):
            results.append([
                pcache.get(run_name(cluster_config, job_config))
                for job_config in job_configs
            ])

        else:
            with make_cluster(cluster_config,
                              no_delete=no_delete) as db_wrapper:
                log.info('Cluster config: {}'.format(cluster_config))

                def try_config(job_config):
                    log.info('Job config: {}'.format(job_config))
                    try:
                        return test_config(args, db_wrapper, cluster_config,
                                           job_config)
                    except TestFailure as e:
                        print(e)
                        return (str(e), None)
                    except Exception as e:
                        traceback.print_exc()
                        return (traceback.format_exc(), None)

                def try_config_cached(job_config):
                    return pcache.get(run_name(cluster_config, job_config),
                                      force=force,
                                      fn=lambda: try_config(job_config))

                results.append(list(map(try_config_cached, job_configs)))

    # Import matplotlib lazily and force the 'agg' backend, so that incidentally
    # importing this file into Jupyter doesn't pull in an interactive backend.
    import matplotlib
    matplotlib.use('agg')
    import matplotlib.pyplot as plt

    def plot(metrics, name):
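        # Plot one metrics column against TIME and return the SVG markup for inlining.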
        ax = metrics.plot('TIME', name)
        ax.set_title(name)
        ax.set_ylabel('Percent')
        ax.set_xlabel('Sample')
        fig = ax.get_figure()
        fig.tight_layout()
        fig.savefig('/tmp/graph.svg')
        fig.clf()
        # Return the SVG text so it can be inlined into the HTML report.
        with open('/tmp/graph.svg', 'r') as f:
            return f.read()

    report_template = '''
    <!DOCTYPE html>
    <html>
      <head>
        <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0-beta.2/css/bootstrap.min.css" integrity="sha384-PsH8R72JQ3SOdhVi3uxftmaW6Vc51MKb0q5P2rRUpPvrszuE4W1povHYgTpBfshb" crossorigin="anonymous">
        <style>
          svg {{ width: 50%; margin: 0; float: left; }}
          p {{ margin-bottom: 0; }}
        </style>
      </head>
      <body>
        <div class="container">
          <h1>Scanner benchmark report</h1>
          {report}
        </div>
      </body>
    </html>
    '''

    blocks = ''
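    # Emit one HTML block per (cluster config, job config) result.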
    for ((cluster_config, job_configs),
         cluster_results) in zip(configs, results):
        for (job_config, (job_result, metrics)) in zip(job_configs,
                                                       cluster_results):
            if metrics is None:
                blocks += '<div><h3>{name}</h3><p>{result}</p></div>'.format(
                    name=run_name(cluster_config, job_config),
                    result=job_result)
                continue

            cpu = plot(metrics, 'CPU%')
            mem = plot(metrics, 'MEMORY%')
            block = '''
            <div>
              <h3>{name}</h3>
              <p>${result:.05f}/video</p>
              <div>
                {cpu}
                {mem}
              </div>
            </div>
            '''.format(name=run_name(cluster_config, job_config),
                       result=job_result,
                       cpu=cpu,
                       mem=mem)
            blocks += block

    report = report_template.format(report=blocks)

    with open(
            '/app/data/benchmarks/{}-{}.html'.format(
                name, strftime('%Y-%m-%d-%H-%M')), 'w') as f:
        f.write(report)

    # Collect all traces into a tarfile
    sp.check_call('cd /app/data && tar -czf bench.tar.gz traces benchmarks',
                  shell=True)

    # Let desktop know bench is complete, and should download benchmark files
    notifier.notify('Benchmark complete', action='bench')
Code Example #2
detect_clothing = ClothingDetectionPipeline.make_runner()
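# The runner is a callable invoked later as detect_clothing(db, videos=..., frames=...).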

videos = list(Video.objects.all().order_by('id'))

cfg = cluster_config(num_workers=100,
                     worker=worker_config('n1-standard-16', gpu=1),
                     pipelines=[clothing_detection.ClothingDetectionPipeline])

with make_cluster(cfg, sql_pool=2, no_delete=True) as db_wrapper:
    # Alternative for local runs (instead of make_cluster):
    # db_wrapper = ScannerWrapper.create()

    db = db_wrapper.db

    print('Fetching frames')
    frames = pcache.get('clothing_frames',
                        lambda: par_for(frames_for_video, videos, workers=8))
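    # Keep only videos that have at least one frame to process.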
    videos, frames = unzip([(v, f) for (v, f) in zip(videos, frames)
                            if len(f) > 0])
    videos = list(videos)
    frames = list(frames)

    # One SQL-backed face-bbox table per video, sampling one frame every ~3 seconds.
    bbox_tables = [
        ScannerSQLTable(
            Face,
            v,
            num_elements=len(f),
            filter='MOD(query_frame.number, '
            'CAST(FLOOR(query_video.fps * 3) AS INTEGER)) = 0')
        for v, f in zip(videos, frames)
    ]
    # (closing of the comprehension inferred; the source snippet is truncated here)
Code Example #3
def try_config_cached(job_config):
    return pcache.get(run_name(cluster_config, job_config),
                      force=force,
                      fn=lambda: try_config(job_config))
Code Example #4
def run_pipeline(db, videos, frames, **kwargs):
    return face_detection.detect_faces(
        db, videos=[v.for_scannertools() for v in videos],
        frames=frames, cache=False, **kwargs)

cfg = cluster_config(num_workers=5, worker=worker_config('n1-standard-32'))
configs = [(cfg, [ScannerJobConfig(io_packet_size=1000, work_packet_size=20, batch=1)])]
bench('face',
      {'videos': videos,
       'frames': [[f['number'] for f in Frame.objects.filter(video=v).values('number').order_by('number')]
                  for v in videos]},
      run_pipeline, configs, no_delete=True, force=True)


with Timer('run'):

    print('Getting frames')
    def load_frames():
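        # Ordered frame numbers per video, skipping shot-boundary frames.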
        return [[f['number'] for f in Frame.objects.filter(video=v, shot_boundary=False).values('number').order_by('number')]
                for v in tqdm(videos)]
    frames = pcache.get('face_frames', load_frames)

    cfg = cluster_config(
        num_workers=100,
        worker=worker_config('n1-standard-64'),
        num_load_workers=2,
        num_save_workers=2)
    with make_cluster(cfg, sql_pool=4, no_delete=True) as db_wrapper:

        # Alternative for local runs (instead of make_cluster):
        # db_wrapper = ScannerWrapper.create(enable_watchdog=False)

        db = db_wrapper.db

        print('Starting detection')
        # Argument list inferred from the run_pipeline wrapper above; the
        # source snippet is truncated here.
        detect_faces(
            db,
            videos=[v.for_scannertools() for v in videos],
            frames=frames)
Code Example #5
        # Opening `cfg = cluster_config(` inferred; the source snippet begins mid-call.
        cfg = cluster_config(
            num_workers=5,
            worker=worker_config('n1-standard-32'),
            pipelines=[face_embedding.FaceEmbeddingPipeline])
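        # Sweep io/work packet sizes and pipelines-per-worker to compare cost per video.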
        configs = [(cfg, [
            ScannerJobConfig(io_packet_size=500, work_packet_size=20, pipelines_per_worker=4),
            ScannerJobConfig(io_packet_size=1000, work_packet_size=20, pipelines_per_worker=4),
            ScannerJobConfig(io_packet_size=1000, work_packet_size=80, pipelines_per_worker=4),
            ScannerJobConfig(io_packet_size=1000, work_packet_size=20, pipelines_per_worker=8),
        ])]
        bench('embedding', {'videos': videos, 'frames': [frames_for_video(v) for v in videos]},
              run_pipeline, configs, no_delete=True, force=True)

    exit()

videos = list(Video.objects.filter(threeyears_dataset=True).order_by('id'))
def load_frames():
    return par_for(frames_for_video, videos, workers=8)
frames = pcache.get('emb_frames', load_frames, force=True)
videos, frames = unzip([(v, f) for (v, f) in zip(videos, frames)
                        if len(f) > 0])
videos = list(videos)
frames = list(frames)

# Export packed embeddings and IDs into single files
if False:
    def get_ids(video):
        return [f['id'] for f in Face.objects.filter(frame__video=video).order_by('frame__number', 'id').values('id')]

    all_ids = pcache.get('emb_ids', (lambda: par_for(get_ids, videos, workers=4)))

    import struct
    with open('/app/data/embs/sevenyears_ids.bin', 'wb') as f:
        for i, ids in tqdm(enumerate(all_ids)):
Code Example #6
              force=True)

    exit()

cfg = cluster_config(num_workers=100,
                     worker=worker_config('n1-standard-64'),
                     pipelines=[gender_detection.GenderDetectionPipeline])

with make_cluster(cfg, sql_pool=2, no_delete=True) as db_wrapper:
    db = db_wrapper.db

    # Alternative for local runs (instead of make_cluster):
    # db_wrapper = ScannerWrapper.create()

    frames = pcache.get('gender_frames',
                        lambda: par_for(frames_for_video, videos, workers=8))
    videos, frames = unzip([(v, f) for (v, f) in zip(videos, frames)
                            if len(f) > 0])
    videos = list(videos)
    frames = list(frames)
    detect_genders(db,
                   videos=[v.for_scannertools() for v in videos],
                   db_videos=videos,
                   frames=frames,
                   faces=[
                       ScannerSQLTable(
                           Face,
                           v,
                           num_elements=len(f),
                           filter='query_frame.shot_boundary = false')
                       for v, f in zip(videos, frames)
                   ])
    # (closing brackets inferred; the source snippet is truncated here)
Code Example #7
    cfg = cluster_config(
        num_workers=50,
        worker=worker_config('n1-standard-16', gpu=2),
        pipelines=[hairstyle_detection.HairStyleDetectionPipeline])
        # pipelines=[clothing_detection.ClothingDetectionPipeline])

    # with make_cluster(cfg, sql_pool=2, no_delete=True) as db_wrapper:
    if True:  # use an existing Scanner instance instead of bringing up a new cluster
        db_wrapper = ScannerWrapper.create()

        db = db_wrapper.db

        print('Fetching frames')
        frames = pcache.get(
            'clothing_frames',
            lambda: par_for(frames_for_video, videos, workers=8))
        videos, frames = unzip([(v, f) for (v, f) in zip(videos, frames)
                                if len(f) > 0])
        videos = list(videos)
        frames = list(frames)

        print('Running pipeline')

        clothing = detect_clothing(
            db,
            videos=[v.for_scannertools() for v in videos],
            frames=frames)
        # (remaining arguments and following code truncated in the source)
Code Example #8
# Assumed imports for this excerpt; pcache, SEGMENT_SIZE, SEGMENT_STRIDE, and
# doc_len are helpers from the surrounding project codebase.
import requests
import numpy as np
from torch.utils.data import Dataset

def word_counts():
    r = requests.get('http://localhost:8111/wordcounts')
    return r.json()

VOCAB_THRESHOLD = 100

def load_vocab():
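    # Pull word counts from the local service and drop rare words.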
    counts = word_counts()
    print('Full vocabulary size: {}'.format(len(counts)))

    vocabulary = sorted([word for (word, count) in counts.items() if count > VOCAB_THRESHOLD])
    print('Filtered vocabulary size: {}'.format(len(vocabulary)))

    return vocabulary

vocabulary = pcache.get('vocabulary', load_vocab)
vocab_size = len(vocabulary)


class SegmentTextDataset(Dataset):
    def __init__(self, docs, vocabulary=None, segment_size=SEGMENT_SIZE,
                 segment_stride=SEGMENT_STRIDE, use_cuda=False):
        self._segment_size = segment_size
        self._use_cuda = use_cuda
        self._vocabulary = vocabulary
        self._doc_names = docs
        self._doc_lens = doc_len()
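        # Number of sliding-window segments each document yields, given the stride.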
        self._num_segs = np.array([
            len(range(0, self._doc_lens[doc]-segment_size+1, segment_stride))
            for doc in self._doc_names
        ])
        self._back_index = [