Example #1
def get_coco_stream(tags, ann_file, data_dir, threshold=0.1):
    # Build an mPyPl stream of COCO images for the given category tags:
    # attach image metadata, load the annotations, turn them into
    # ground-truth boxes, record the most common class per image and
    # download the images into data_dir.
    coco = COCO(ann_file)
    catIds = coco.getCatIds(catNms=tags)
    imgIds = sum([coco.getImgIds(catIds=catId) for catId in catIds], [])
    stream = (coco.loadImgs(imgIds)
              | mp.as_field('meta')
              | mp.apply('meta', 'width', lambda x: x['width'])
              | mp.apply('meta', 'height', lambda x: x['height'])
              | mp.apply('meta', 'url', lambda x: x['coco_url'])
              | mp.apply('meta', 'filename', lambda x: x['file_name'])
              | mp.apply(
                  'meta', 'anns_ids', lambda x: coco.getAnnIds(
                      imgIds=x['id'], catIds=catIds, iscrowd=None))
              | mp.apply('anns_ids', 'anns', lambda x: coco.loadAnns(x))
              | mp.apply(
                  'anns', 'ground_truth', lambda x: x
                  | mp.select(lambda m: bbox_to_dict(
                      m['bbox'], coco.cats[m['category_id']]['name']))
                  | mp.as_list)
              | mp.apply(
                  'ground_truth', 'class_id',
                  lambda x: most_common(x
                                        | mp.select(lambda m: m['tag'])
                                        | mp.as_list))
              | mp.iter('meta', lambda x: coco.download(data_dir, [x['id']]))
              | mp.delfield(['meta', 'anns_ids', 'anns']))
    return stream
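
bbox_to_dict and most_common are project helpers rather than mPyPl functions and are not shown in this snippet. A minimal sketch of what they might look like, assuming COCO's [x, y, width, height] box format and the field names consumed in Example #2:

from collections import Counter

def bbox_to_dict(bbox, tag):
    # Hypothetical helper: COCO boxes are [x, y, width, height] in pixels.
    x, y, w, h = bbox
    return {'x1': x, 'y1': y, 'width': w, 'height': h, 'tag': tag}

def most_common(values):
    # Hypothetical helper: return the most frequent element of a list.
    return Counter(values).most_common(1)[0][0]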
Example #2
def coco_to_custom_vision(stream, project_id, trainer, data_dir):
    # Upload a COCO-derived stream to an Azure Custom Vision project:
    # create the tags, drop boxes smaller than 10% of the image in either
    # dimension, convert the rest to normalized Regions and upload the
    # tagged images in batches of 50.
    stream = stream | mp.as_list
    tags = stream | mp.select_field('class_id') | mp.dedup() | mp.as_list
    cv_tags = {tag: trainer.create_tag(project_id, tag) for tag in tags}

    stream = (
        stream
        | mp.apply(['width', 'height', 'ground_truth'], 'ground_truth',
                   lambda x: x[2]
                   | mp.where(lambda box: (box['width'] >= x[0] * 0.1) and
                              (box['height'] >= x[1] * 0.1))
                   | mp.as_list)
        | mp.filter('ground_truth', lambda x: len(x) > 0)
        | mp.apply(
            ['width', 'height', 'ground_truth'], 'regions', lambda x: x[2]
            | mp.select(lambda box: Region(tag_id=cv_tags[box['tag']].id,
                                           left=box['x1'] / x[0],
                                           top=box['y1'] / x[1],
                                           width=box['width'] / x[0],
                                           height=box['height'] / x[1]))
            | mp.as_list)
        | mp.apply(['filename', 'regions'], 'tagged_img',
                   lambda x: ImageFileCreateEntry(
                       name=x[0],
                       contents=open(join(data_dir, x[0]), mode="rb").read(),
                       regions=x[1]))
        | mp.as_list)
    tagged_images_with_regions = stream | mp.select_field(
        'tagged_img') | mp.as_list
    for i in range(0, len(tagged_images_with_regions), 50):
        trainer.create_images_from_files(
            project_id, images=tagged_images_with_regions[i:i + 50])
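
A possible way to wire Examples #1 and #2 together; the paths are placeholders, and trainer / project_id are assumed to come from an already configured Azure Custom Vision training client and project (setup not shown):

# Hypothetical call site: the paths, trainer and project_id are placeholders.
data_dir = 'coco_data'
stream = get_coco_stream(['cat', 'dog'],
                         'annotations/instances_train2017.json', data_dir)
coco_to_custom_vision(stream, project_id, trainer, data_dir)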
Example #3
def apply_quantized_model(stream, data_dir, model, dest_field):
    # Run model.predict_image on every image file in the stream and store
    # the predictions, formatted with format_dict, in dest_field.
    return (stream
            | mp.apply(
                'filename', dest_field + '_raw',
                lambda x: model.predict_image(Image.open(join(data_dir, x))))
            | mp.apply([dest_field + '_raw', 'width', 'height'], dest_field,
                       lambda x: x[0]
                       | mp.select(lambda p: format_dict(p, x[1], x[2]))
                       | mp.as_list)
            | mp.delfield([dest_field + '_raw']))
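
format_dict is another helper that is not shown; presumably it converts one raw prediction back into the pixel-space box dictionaries used in the earlier examples, but that is an assumption. A possible call site, where stream, data_dir and model are placeholders:

# Hypothetical usage; 'prediction' is an arbitrary destination field name.
scored = apply_quantized_model(stream, data_dir, model, 'prediction')
for item in scored | mp.take(3) | mp.as_list:
    print(item['filename'], item['prediction'])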
Example #4
        | stratify_sample_tt(shuffle=True)
        | summary()
        | mp.iter('filename', lambda x: print("Processing {}".format(x)))
        | mp.apply(
            'filename',
            'dflow',
            lambda x: np.load(x.replace('.resized.mp4', '.optflow.npy')),
            eval_strategy=mp.EvalStrategies.LazyMemoized)
        | mp.apply_npy('dflow', 'flowdescr', get_flow_descriptor)
        | mp.delfield('dflow')
        | mp.as_list)

# Calculate min/max to normalize
A = np.array(data
             | mp.select_field('flowdescr')
             | mp.select(lambda x: (x[..., 0].min(), x[..., 0].max(), x[..., 1]
                                    .min(), x[..., 1].max()))
             | mp.as_list)
min_ang, max_ang = min(A[:, 0]), max(A[:, 1])
min_amp, max_amp = min(A[:, 2]), max(A[:, 3])


def normalize(fd):
    fd[:, :, 0] = (fd[:, :, 0] - min_ang) / (max_ang - min_ang)
    fd[:, :, 1] = (fd[:, :, 1] - min_amp) / (max_amp - min_amp)
    return fd


trainstream, valstream = data | mp.apply('flowdescr', 'fdn',
                                         normalize) | mp.make_train_test_split

no_frames = 125
Example #5
def generate_img(data):
    # Pick args.mix random samples, extract their transformed faces and
    # merge them with random weights into a single image.
    x = (data
         | mp.pshuffle
         | mp.take(args.mix)
         | mp.apply(['image', 'landmarks'], 'face', transform)
         | mp.select_field('face')
         | mp.as_list)
    return merge(x, np.random.random(len(x)))

def imprint(img):
    # Alpha-blend the RGBA signature image into the bottom-right corner,
    # using its alpha channel as the blending mask.
    if args.nosign:
        return img
    overlay_image = sign[..., :3]
    mask = sign[..., 3:] / 255.0
    h, w = sign.shape[0], sign.shape[1]
    x, y = args.size - w, args.size - h
    img[y:y+h, x:x+w] = (1.0 - mask) * img[y:y+h, x:x+w] + mask * overlay_image
    return img

print(" + Generating images...")

os.makedirs(out_dir, exist_ok=True)

(range(args.num)
 | mp.select(lambda _: generate_img(data))
 | mp.select(imprint)
 | savepics(os.path.join(out_dir, args.name_template)))

print("All done")
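
merge is shared with Example #6 and is not shown in either snippet; a plausible sketch, assuming it produces a weighted average of equally sized images:

import numpy as np

def merge(images, weights):
    # Hypothetical helper: blend same-shape images by the given weights.
    weights = np.asarray(weights, dtype=np.float32)
    weights = weights / weights.sum()
    return sum(w * img.astype(np.float32) for w, img in zip(weights, images))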

Example #6
imgs = (
    data
    | mp.pshuffle
    | mp.take(30)
    | mp.apply(
        'filename', 'image', lambda x: cv2.cvtColor(
            cv2.imread(os.path.splitext(x)[0] + '.jpg'), cv2.COLOR_BGR2RGB))
    | mp.apply(['image', 'descr'], 'face', transform)
    | mp.apply('face', 'facesmall',
               functools.partial(im_resize, size=(100, 150)))
    | mp.select_field('facesmall')
    | mp.as_list)

(range(10)
 | mp.select(lambda _: merge(imgs, np.random.random(len(imgs))))
 | mp.pexec(functools.partial(show_images, cols=2)))
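
transform, show_images and im_resize are helpers defined elsewhere; a minimal sketch of im_resize, assuming OpenCV semantics where size is (width, height):

import cv2

def im_resize(img, size):
    # Hypothetical helper: resize an image to the given (width, height).
    return cv2.resize(img, size)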


def generate_img(data):
    n = random.randint(3, 30)
    x = (
        data
        | mp.pshuffle
        | mp.take(n)
        | mp.apply(
            'filename', 'image',
            lambda x: cv2.cvtColor(cv2.imread(os.path.splitext(x)[0] + '.jpg'),
                                   cv2.COLOR_BGR2RGB))
        | mp.apply(['image', 'descr'], 'face', transform)
        #| mp.apply('face','facesmall',functools.partial(im_resize,size=(100,150)))
Example #7
def convtime(s):
    # Convert an "HH:MM:SS" timestamp into seconds.
    t = s.split(':') | mp.select(float) | mp.as_list
    return t[2] + t[1] * 60 + t[0] * 60 * 60
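
A quick usage example:

convtime('01:02:03')   # 1 h 2 min 3 s -> 3723.0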