Example #1
    def worker_killer_task(config, master_address, worker_address):
        from scannerpy import ProtobufGenerator, Config, start_worker, ScannerException
        import time
        import grpc
        import subprocess
        import signal
        import os

        c = Config(None)

        import scanner.metadata_pb2 as metadata_types
        import scanner.engine.rpc_pb2 as rpc_types
        import scanner.types_pb2 as misc_types
        import libscanner as bindings

        protobufs = ProtobufGenerator(config)

        # Kill worker
        channel = grpc.insecure_channel(
            worker_address,
            options=[('grpc.max_message_length', 24499183 * 2)])
        worker = protobufs.WorkerStub(channel)

        try:
            worker.Shutdown(protobufs.Empty())
        except grpc.RpcError as e:
            status = e.code()
            if status == grpc.StatusCode.UNAVAILABLE:
                print('could not shutdown worker!')
                exit(1)
            else:
                raise ScannerException('Worker errored with status: {}'
                                       .format(status))

        # Spawn a worker that we will force kill
        script_dir = os.path.dirname(os.path.realpath(__file__))
        with open(os.devnull, 'w') as fp:
            p = subprocess.Popen(
                ['python ' +  script_dir + '/spawn_worker.py'],
                shell=True,
                stdout=fp, stderr=fp,
                preexec_fn=os.setsid)

            # Wait a bit for the worker to do its thing
            time.sleep(10)

            # Force kill worker process to trigger fault tolerance
            os.killpg(os.getpgid(p.pid), signal.SIGTERM) 
            p.communicate()

            # Wait for fault tolerance to kick in
            time.sleep(25)

            # Spawn the worker again
            subprocess.call(['python ' +  script_dir + '/spawn_worker.py'],
                            shell=True)
Example #2
def fault_db():
    # Create new config
    #with tempfile.NamedTemporaryFile(delete=False) as f:
    with open('/tmp/config_test', 'w') as f:
        cfg = Config.default_config()
        cfg['storage']['db_path'] = tempfile.mkdtemp()
        cfg['network']['master'] = 'localhost'
        cfg['network']['master_port'] = '5010'
        cfg['network']['worker_port'] = '5011'
        f.write(toml.dumps(cfg))
        cfg_path = f.name

    # Setup and ingest video
    with Database(master='localhost:5010',
                  workers=[],
                  config_path=cfg_path, no_workers_timeout=120) as db:
        # Download video from GCS
        url = "https://storage.googleapis.com/scanner-data/test/short_video.mp4"
        with tempfile.NamedTemporaryFile(delete=False, suffix='.mp4') as f:
            host = socket.gethostname()
            # HACK: special proxy case for Ocean cluster
            if host in ['ocean', 'crissy', 'pismo', 'stinson']:
                resp = requests.get(
                    url,
                    stream=True,
                    proxies={'https': 'http://proxy.pdl.cmu.edu:3128/'})
            else:
                resp = requests.get(url, stream=True)
            assert resp.ok
            for block in resp.iter_content(1024):
                f.write(block)
            vid1_path = f.name

        # Make a second one shorter than the first
        with tempfile.NamedTemporaryFile(delete=False, suffix='.mp4') as f:
            vid2_path = f.name
        run([
            'ffmpeg', '-y', '-i', vid1_path, '-ss', '00:00:00', '-t',
            '00:00:10', '-c:v', 'libx264', '-strict', '-2', vid2_path
        ])

        db.ingest_videos([('test1', vid1_path), ('test2', vid2_path)])

        yield db

        # Tear down
        run([
            'rm', '-rf', cfg['storage']['db_path'], cfg_path, vid1_path,
            vid2_path
        ])
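
A minimal usage sketch, assuming the generator above is registered as a pytest fixture; `has_table` is an assumption about the scannerpy `Database` API, and the table names are the ones ingested by the fixture:

    import pytest

    # Register the generator function above as a pytest fixture.
    fault_db = pytest.fixture(fault_db)

    def test_fault_db_tables(fault_db):
        assert fault_db.has_table('test1')
        assert fault_db.has_table('test2')
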
Example #3
    def worker_killer_task(config, master_address):
        from scannerpy import ProtobufGenerator, Config, start_worker
        import time
        import grpc
        import subprocess
        import signal
        import os

        c = Config(None)

        import scanner.metadata_pb2 as metadata_types
        import scanner.engine.rpc_pb2 as rpc_types
        import scanner.types_pb2 as misc_types
        import scannerpy.libscanner as bindings

        protobufs = ProtobufGenerator(config)

        # Spawn a worker that we will force kill
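        # NOTE: force_kill_spawn_port (and normal_spawn_port below) are captured
        # from the enclosing test's scope; they are not defined in this snippet.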
        script_dir = os.path.dirname(os.path.realpath(__file__))
        with open(os.devnull, 'w') as fp:
            p = subprocess.Popen(
                [
                    'python ' + script_dir +
                    '/spawn_worker.py {:d}'.format(force_kill_spawn_port)
                ],
                shell=True,
                stdout=fp,
                stderr=fp,
                preexec_fn=os.setsid)

            # Wait a bit for the worker to do its thing
            time.sleep(10)

            # Force kill worker process to trigger fault tolerance
            os.killpg(os.getpgid(p.pid), signal.SIGTERM)
            p.kill()
            p.communicate()

            # Wait for fault tolerance to kick in
            time.sleep(15)

            # Spawn the worker again
            subprocess.call(
                [
                    'python ' + script_dir +
                    '/spawn_worker.py {:d}'.format(normal_spawn_port)
                ],
                shell=True)
Example #4
def make_config(master_port=None, worker_port=None, path=None):
    cfg = Config.default_config()
    cfg['network']['master'] = 'localhost'
    cfg['storage']['db_path'] = tempfile.mkdtemp()
    if master_port is not None:
        cfg['network']['master_port'] = master_port
    if worker_port is not None:
        cfg['network']['worker_port'] = worker_port

    if path is not None:
        with open(path, 'w') as f:
            cfg_path = path
            f.write(toml.dumps(cfg))
    else:
        with tempfile.NamedTemporaryFile(delete=False) as f:
            cfg_path = f.name
            f.write(bytes(toml.dumps(cfg), 'utf-8'))
    return (cfg_path, cfg)
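
A short sketch of how make_config pairs with the Database setup shown in Example #2; the port values here are illustrative:

    cfg_path, cfg = make_config(master_port='5010', worker_port='5011')
    with Database(master='localhost:5010', workers=[],
                  config_path=cfg_path) as db:
        pass  # run jobs against the temporary database here
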
Example #5
    def worker_shutdown_task(config, master_address, worker_address):
        from scannerpy import ProtobufGenerator, Config, start_worker, ScannerException
        import time
        import grpc
        import subprocess
        import os

        c = Config(None)

        import scanner.metadata_pb2 as metadata_types
        import scanner.engine.rpc_pb2 as rpc_types
        import scanner.types_pb2 as misc_types
        import libscanner as bindings

        protobufs = ProtobufGenerator(config)

        # Wait to kill worker
        time.sleep(8)
        # Kill worker
        channel = grpc.insecure_channel(
            worker_address,
            options=[('grpc.max_message_length', 24499183 * 2)])
        worker = protobufs.WorkerStub(channel)

        try:
            worker.Shutdown(protobufs.Empty())
        except grpc.RpcError as e:
            status = e.code()
            if status == grpc.StatusCode.UNAVAILABLE:
                print('could not shutdown worker!')
                exit(1)
            else:
                raise ScannerException('Worker errored with status: {}'
                                       .format(status))

        # Wait a bit
        time.sleep(15)
        script_dir = os.path.dirname(os.path.realpath(__file__))
        subprocess.call(['python ' +  script_dir + '/spawn_worker.py'],
                        shell=True)
Example #6
def db():
    # Create new config
    with tempfile.NamedTemporaryFile(delete=False) as f:
        cfg = Config.default_config()
        cfg['storage']['db_path'] = tempfile.mkdtemp()
        f.write(bytes(toml.dumps(cfg), 'utf-8'))  # NamedTemporaryFile is opened in binary mode
        cfg_path = f.name

    # Setup and ingest video
    with Database(config_path=cfg_path, debug=True) as db:
        url = "https://storage.googleapis.com/scanner-data/test/short_video.mp4"
        with tempfile.NamedTemporaryFile(delete=False) as f:
            resp = requests.get(url, stream=True)
            assert resp.ok
            for block in resp.iter_content(1024):
                f.write(block)
            vid_path = f.name
        db.ingest_videos([('test', vid_path)])

        yield db

        # Tear down
        subprocess.check_call(
            ['rm', '-rf', cfg['storage']['db_path'], cfg_path, vid_path])
Example #7
from django.core.management.base import BaseCommand
from query.models import Video, Face, LabelSet, Frame
from scannerpy import ProtobufGenerator, Config
import os
import cv2
import math
import numpy as np
import tensorflow as tf
import align.detect_face
from collections import defaultdict
from array import *
from functools import wraps
import inspect

cfg = Config()
proto = ProtobufGenerator(cfg)


def initializer(func):
    """
    Automatically assigns the parameters.

    >>> class process:
    ...     @initializer
    ...     def __init__(self, cmd, reachable=False, user='root'):
    ...         pass
    >>> p = process('halt', True)
    >>> p.cmd, p.reachable, p.user
    ('halt', True, 'root')
    """
    names, varargs, keywords, defaults = inspect.getargspec(func)
Example #8
import os
import sys
import math
import logging
import itertools
import numpy as np
from operator import itemgetter
import traceback
from pprint import pprint
from scannerpy import Config  # Config() is called below

ESPER_ENV = os.environ.get('ESPER_ENV')
BUCKET = os.environ.get('BUCKET')
DATASET = os.environ.get('DATASET')  # TODO(wcrichto): move from config to runtime
DATA_PATH = os.environ.get('DATA_PATH')
FALLBACK_ENABLED = False
logger = logging.getLogger(__name__)

# TODO(wcrichto): find a better way to do this
Config()
from scanner.types_pb2 import BoundingBox

DIFF_BBOX_THRESHOLD = 0.35
# 24 frames/sec - so this requires more than a sec overlap
FRAME_OVERLAP_THRESHOLD = 25


def _print(*args):
    print(*args)
    sys.stdout.flush()


def index(request):
    schemas = []
    for name, ds in ModelDelegator().datasets().iteritems():
Example #9
from scannerpy import ProtobufGenerator, Config, start_worker
import time
import grpc
import sys

c = Config(None)

import scanner.metadata_pb2 as metadata_types
import scanner.engine.rpc_pb2 as rpc_types
import scanner.types_pb2 as misc_types
import scannerpy.libscanner as bindings

con = Config(config_path='/tmp/config_test')
protobufs = ProtobufGenerator(con)

master_address = str(con.master_address) + ':' + str(con.master_port)
port = int(sys.argv[1])

params = bindings.default_machine_params()
mp = protobufs.MachineParameters()
mp.ParseFromString(params)
del mp.gpu_ids[:]
params = mp.SerializeToString()

start_worker(master_address, machine_params=params, config=con, block=True,
             port=port,
             watchdog=False)
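
This is the kind of spawn_worker.py script invoked by the killer tasks in Examples #1, #3, and #5: it loads the test configuration from /tmp/config_test, clears the GPU list from the default machine parameters, and starts a blocking worker on the port given as the first command-line argument (Example #3 passes that port explicitly).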