Example #1
    def collate_fn(self, batch):
        # transpose: a batch of per-example tuples becomes one sequence per dataset
        inputs = list(zip(*batch))

        # apply each augmentation across the whole batch
        for augment in self.augments:
            inputs = augment.forward(inputs)

        # collate each dataset's prepared inputs into a batched tensor
        for i, dataset in enumerate(self.datasets):
            inputs[i] = util.collate(dataset['DataType'], inputs[i])

        return inputs
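The list(zip(*batch)) call is the transpose idiom: a batch of per-example tuples becomes one sequence per dataset. A standalone illustration, with toy strings standing in for real tensors:

batch = [('img0', 'label0'), ('img1', 'label1'), ('img2', 'label2')]
inputs = list(zip(*batch))
assert inputs[0] == ('img0', 'img1', 'img2')        # all images
assert inputs[1] == ('label0', 'label1', 'label2')  # all labels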
Example #2
# get arch, comps
with open('exec_ops/pytorch/archs/yolov5.json', 'r') as f:
    arch = json.load(f)
with open('python/skyhook/pytorch/components/yolov5.json', 'r') as f:
    comps = {'yolov5': {'ID': 'yolov5', 'Params': json.load(f)}}

# set mode
comp_params = json.loads(arch['Components'][0].get('Params', '{}'))
comp_params['mode'] = mode
arch['Components'][0]['Params'] = json.dumps(comp_params)

# example inputs
im_data = numpy.zeros((416, 416, 3), dtype='uint8')
example_inputs = [
    util.collate('image', [util.prepare_input('image', im_data, {}, {})]),
    util.collate(
        'detection',
        [util.prepare_input('detection', [], {'CanvasDims': [416, 416]}, {})]),
]
util.inputs_to_device(example_inputs, device)

# example metadata
with open(os.path.join(yolo_path, 'data', 'coco.yaml'), 'r') as f:
    d = yaml.load(f, Loader=yaml.FullLoader)
    categories = d['names']
example_metadatas = [{}, {'Categories': categories}]

net = model.Net(arch, comps, example_inputs, example_metadatas, device=device)

# make the yolov5 repository importable for the code that follows
sys.path.append(yolo_path)
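Presumably the full script, like Example #4 below, ends by serializing the constructed network. A hedged sketch of that step (out_fname here is a hypothetical output path, not part of this snippet):

out_fname = 'yolov5.pt'  # hypothetical output path
torch.save(net.get_save_dict(), out_fname)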
Example #3
# example inputs
im_data = numpy.zeros((300, 300, 3), dtype='uint8')
detection_data = {
    'Metadata': {
        'CanvasDims': [300, 300]
    },
    'Detections': [{
        'Left': 100,
        'Right': 100,
        'Top': 100,
        'Bottom': 100
    }],
}
# Need to repeat the inputs twice because SSD requires batch_size>1 for normalization.
example_inputs = [
    util.collate('image', 2 * [util.prepare_input('image', im_data, {})]),
    util.collate('detection',
                 2 * [util.prepare_input('detection', detection_data, {})]),
]
util.inputs_to_device(example_inputs, device)

# example metadata
categories = [
    'aeroplane',
    'bicycle',
    'bird',
    'boat',
    'bottle',
    'bus',
    'car',
    'cat',
Example #4
# get arch, comps (the arch path here is assumed, by analogy with Examples #2 and #6)
with open('exec_ops/pytorch/archs/resnet.json', 'r') as f:
    arch = json.load(f)
with open('python/skyhook/pytorch/components/resnet.json', 'r') as f:
    comps = {'resnet': {'ID': 'resnet', 'Params': json.load(f)}}

# set mode
comp_params = json.loads(arch['Components'][0].get('Params', '{}'))
comp_params['mode'] = mode
arch['Components'][0]['Params'] = json.dumps(comp_params)

# example inputs
im_data = numpy.zeros((224, 224, 3), dtype='uint8')
int_data = {
    'Metadata': {},
    'Ints': 0,
}
example_inputs = [
    util.collate('image', [util.prepare_input('image', im_data, {})]),
    util.collate('int', [util.prepare_input('int', int_data, {})]),
]
util.inputs_to_device(example_inputs, device)

# example metadata
with open('scripts/prepare_pretrained/imagenet.txt', 'r') as f:
    categories = [line.strip() for line in f.readlines() if line.strip()]
example_metadatas = [{}, {'Categories': categories}]

# tell the resnet component to initialize from pretrained weights
import skyhook.pytorch.components.resnet
skyhook.pytorch.components.resnet.Pretrain = True

net = model.Net(arch, comps, example_inputs, example_metadatas, device=device)
torch.save(net.get_save_dict(), out_fname)
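A quick way to sanity-check the saved file is to load it back and inspect its top-level keys; a minimal sketch, assuming the save dict is an ordinary torch-serializable dict:

save_dict = torch.load(out_fname)
print(sorted(save_dict.keys()))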
Example #5
def callback_func(*args):
    job_desc = args[0]
    args = args[1:]
    if job_desc['type'] == 'finish':
        lib.output_data_finish(job_desc['key'], job_desc['key'])
        return
    elif job_desc['type'] != 'job':
        return

    input_len = lib.data_len(meta['InputTypes'][0], args[0])
    # process the inputs one batch size at a time
    for inp_start in range(0, input_len, batch_size):
        inp_end = min(inp_start + batch_size, input_len)

        # find the dimensions of the first input image
        # we currently use this to fill canvas_dims of detection/shape outputs
        canvas_dims = None

        # get the slice corresponding to current batch from args
        # and convert it to our pytorch input form
        datas = []
        for ds_idx, arg in enumerate(args):
            t = meta['InputTypes'][ds_idx]

            if t == 'video':
                # we optimize inference over video by handling input options in golang
                # so here we just need to transpose
                data = torch.from_numpy(
                    arg[inp_start:inp_end, :, :, :]).permute(0, 3, 1, 2)
            else:
                opts = input_options.get(ds_idx, {})
                cur_datas = []
                for i in range(inp_start, inp_end):
                    item = lib.data_index(t, arg, i)
                    data = util.prepare_input(t, item, opts)
                    if canvas_dims is None and t in ('image', 'video', 'array'):
                        canvas_dims = [data.shape[2], data.shape[1]]
                    cur_datas.append(data)
                data = util.collate(t, cur_datas)

            datas.append(data)
        if not canvas_dims:
            canvas_dims = [1280, 720]

        # process this batch through the model
        util.inputs_to_device(datas, device)
        y = net(*datas)

        # extract and emit outputs
        out_datas = []
        for out_idx, t in enumerate(meta['OutputTypes']):
            cur = y[out_idx]
            if t in ['image', 'video', 'array']:
                out_datas.append(cur.permute(0, 2, 3, 1).cpu().numpy())
            elif t == 'detection':
                # detections are represented as a dict
                # - cur['counts'] is # detections in each image
                # - cur['detections'] is the flat list of detections (cls, xyxy, conf)
                # - cur['categories'] is optional string category list
                # first, convert from boxes to skyhookml detections
                flat_detections = []
                for box in cur['detections'].tolist():
                    cls, left, top, right, bottom, conf = box
                    if 'categories' in cur:
                        category = cur['categories'][int(cls)]
                    else:
                        category = 'category{}'.format(int(cls))
                    flat_detections.append({
                        'Left': int(left * canvas_dims[0]),
                        'Top': int(top * canvas_dims[1]),
                        'Right': int(right * canvas_dims[0]),
                        'Bottom': int(bottom * canvas_dims[1]),
                        'Score': float(conf),
                        'Category': category,
                    })
                # second, group up the boxes
                prefix_sum = 0
                detections = []
                for count in cur['counts']:
                    detections.append(flat_detections[prefix_sum:prefix_sum + count])
                    prefix_sum += count
                out_datas.append({
                    'Detections': detections,
                    'Metadata': {
                        'CanvasDims': canvas_dims,
                    },
                })
            elif t == 'int':
                out_datas.append({
                    'Ints': cur.tolist(),
                })
            else:
                out_datas.append(cur.tolist())
        lib.output_datas(job_desc['key'], job_desc['key'], out_datas)
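The grouping step above re-nests the flat detection list into per-image lists using cur['counts']. The same prefix-sum logic in isolation:

flat_detections = ['a', 'b', 'c', 'd', 'e']
counts = [2, 0, 3]  # image 0 has 2 boxes, image 1 has none, image 2 has 3

prefix_sum = 0
detections = []
for count in counts:
    detections.append(flat_detections[prefix_sum:prefix_sum + count])
    prefix_sum += count

assert detections == [['a', 'b'], [], ['c', 'd', 'e']]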
Example #6
with open('exec_ops/pytorch/archs/ssd.json', 'r') as f:
    arch = json.load(f)
with open('python/skyhook/pytorch/components/ssd.json', 'r') as f:
    comps = {'ssd': {'ID': 'ssd', 'Params': json.load(f)}}

# set mode
comp_params = json.loads(arch['Components'][0].get('Params', '{}'))
comp_params['mode'] = mode
arch['Components'][0]['Params'] = json.dumps(comp_params)

# example inputs
im_data = numpy.zeros((300, 300, 3), dtype='uint8')
detection_data = [{'Left': 100, 'Right': 100, 'Top': 100, 'Bottom': 100}]
# Need to repeat the inputs twice because SSD requires batch_size>1 for normalization.
example_inputs = [
    util.collate('image', 2 * [util.prepare_input('image', im_data, {}, {})]),
    util.collate(
        'detection', 2 * [
            util.prepare_input('detection', detection_data,
                               {'CanvasDims': [300, 300]}, {})
        ]),
]
util.inputs_to_device(example_inputs, device)

# example metadata
categories = [
    'aeroplane',
    'bicycle',
    'bird',
    'boat',
    'bottle',