Example #1
# Assumed imports (the snippet itself does not show them):
import mPyPl as mp
from os.path import join
from azure.cognitiveservices.vision.customvision.training.models import (
    ImageFileCreateEntry, Region)


def coco_to_custom_vision(stream, project_id, trainer, data_dir):
    stream = stream | mp.as_list  # materialize: the stream is traversed more than once
    # create a Custom Vision tag for every distinct class in the dataset
    tags = stream | mp.select_field('class_id') | mp.dedup() | mp.as_list
    cv_tags = {tag: trainer.create_tag(project_id, tag) for tag in tags}

    stream = (
        stream
        # keep only boxes at least 10% of the image size in both dimensions
        | mp.apply(['width', 'height', 'ground_truth'], 'ground_truth',
                   lambda x: x[2]
                   | mp.where(lambda box: (box['width'] >= x[0] * 0.1) and
                              (box['height'] >= x[1] * 0.1))
                   | mp.as_list)
        # drop images that are left without any boxes
        | mp.filter('ground_truth', lambda x: len(x) > 0)
        # Custom Vision expects region coordinates normalized to [0, 1]
        | mp.apply(
            ['width', 'height', 'ground_truth'], 'regions', lambda x: x[2]
            | mp.select(lambda box: Region(tag_id=cv_tags[box['tag']].id,
                                           left=box['x1'] / x[0],
                                           top=box['y1'] / x[1],
                                           width=box['width'] / x[0],
                                           height=box['height'] / x[1]))
            | mp.as_list)
        | mp.apply(['filename', 'regions'], 'tagged_img',
                   lambda x: ImageFileCreateEntry(
                       name=x[0],
                       contents=open(join(data_dir, x[0]), mode="rb").read(),
                       regions=x[1]))
        | mp.as_list)
    tagged_images_with_regions = stream | mp.select_field(
        'tagged_img') | mp.as_list
    # upload in chunks: the Custom Vision service caps each batch at 64 images
    for i in range(0, len(tagged_images_with_regions), 50):
        trainer.create_images_from_files(
            project_id, images=tagged_images_with_regions[i:i + 50])
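A hypothetical invocation, assuming an object-detection project already exists; training_key, endpoint, obj_detection_domain and stream are placeholders, and client construction varies across versions of the azure-cognitiveservices-vision-customvision SDK:

from azure.cognitiveservices.vision.customvision.training import CustomVisionTrainingClient

trainer = CustomVisionTrainingClient(training_key, endpoint=endpoint)  # placeholder credentials
project = trainer.create_project("coco-detection", domain_id=obj_detection_domain.id)
coco_to_custom_vision(stream, project.id, trainer, data_dir)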
Example #2
def generate_img(data):
    # pick `args.mix` random faces, align them, and blend with random weights
    x = (data
         | mp.pshuffle
         | mp.take(args.mix)
         | mp.apply(['image', 'landmarks'], 'face', transform)
         | mp.select_field('face')
         | mp.as_list)
    return merge(x, np.random.random(len(x)))
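merge is not defined in this snippet; it presumably blends the selected faces using the given per-image weights. A minimal sketch, assuming equally-sized NumPy images (the body is an assumption, not the original helper):

import numpy as np

def merge(images, weights):
    # blend equally-sized images into one, weighting each by `weights`
    weights = np.asarray(weights, dtype=np.float32)
    weights /= weights.sum()                     # normalize so values stay in range
    acc = np.zeros_like(images[0], dtype=np.float32)
    for img, w in zip(images, weights):
        acc += w * img.astype(np.float32)
    return acc.astype(np.uint8)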
Example #3
def generate_img(data):
    n = random.randint(3, 30)
    x = (
        data
        | mp.pshuffle
        | mp.take(n)
        | mp.apply(
            'filename', 'image',
            lambda x: cv2.cvtColor(cv2.imread(os.path.splitext(x)[0] + '.jpg'),
                                   cv2.COLOR_BGR2RGB))
        | mp.apply(['image', 'descr'], 'face', transform)
        #| mp.apply('face','facesmall',functools.partial(im_resize,size=(100,150)))
        #| mp.select_field('facesmall')
        | mp.select_field('face')
        | mp.as_list)
    return merge(x, np.random.random(len(x)))
Example #4
# Assumed imports (the snippet itself does not show them):
import pickle
from functools import partial
from moviepy.editor import VideoFileClip
from tf_pose.estimator import TfPoseEstimator
from tf_pose.networks import get_graph_path
import mPyPl as mp


def calc_sub(filename, new_filename, model, get_func):
    print("Processing {}".format(filename))
    clp = VideoFileClip(filename)
    frames = list(clp.iter_frames())
    boxes = pickle.load(
        open(filename.replace('.resized.mp4', '.boxes.pickle'), 'rb'))
    poses = []
    for f, bs in zip(frames, boxes):
        fposes = []
        for box in bs:
            x1, y1, x2, y2 = box.astype(int)
            # expand the crop ~30px beyond the box, clamping at the top/left edge
            sub = f[max(0, y1 - 30):y2 + 30, max(0, x1 - 30):x2 + 30]
            fposes.append(get_func(model, sub))
        poses.append(fposes)
    pickle.dump(poses, open(new_filename, 'wb'))
    clp.reader.close()
    if clp.audio is not None:  # resized clips may have no audio track
        clp.audio.reader.close_proc()


# Dense Pose Calculation

pose_model = TfPoseEstimator(get_graph_path('cmu'), target_size=(432, 368))
pcalc_pose = partial(calc_sub, model=pose_model, get_func=get_poses)

stream = (
    mp.get_datastream(data_dir, ext='.resized.mp4')
    | mp.select_field('filename')
    | cachecompute('.resized.mp4', '.poses.pickle', pcalc_pose,
                   lambda x, nx: print("Skipping {}".format(x)))
    | execute
)
Example #5
if len(sys.argv) > 1:
    # optional batch mode: process only part k of n
    k = int(sys.argv[1])
    n = int(sys.argv[2])
    config.base_dir = config.base_dir_batch
    config.data_dir = config.data_dir_batch
else:
    k, n = 0, 1

# resize the raw videos
(mp.get_datastream(data_dir, ext=".full.mp4")
 | batch(k, n)
 | mp.fapply('video', resize_video.load_resize)
 | execute)

resized_file_names = (mp.get_datastream(data_dir, ext=".resized.mp4")
                      | mp.select_field("filename")
                      | mp.as_list)

# use only the first threshold
scene_changes = get_scene_changes(resized_file_names, data_dir)[40]

# compute dense optical flow, VGG and audio features, skipping videos whose
# derived files already exist
(mp.get_datastream(data_dir, ext=".resized.mp4")
 | mp.filter("filename", lambda f: os.path.abspath(f) not in scene_changes)
 | cachecomputex(".resized.mp4", ".optflow.npy", create_denseflow.calcflow,
                 functools.partial(skip, s="creating dense flow"))
 | cachecomputex(".resized.mp4", ".vgg.npy", create_vgg.calcvgg,
                 functools.partial(skip, s="creating VGG"))
 | cachecomputex(".resized.mp4", ".audiofeatures.npy",
                 create_audio_features.calcaudio,
                 functools.partial(skip, s="creating audio features"))
 | close_moviepy_video())
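cachecompute and cachecomputex are not shown in these snippets; they appear to run a computation only when the derived file does not exist yet. A minimal sketch of that idea, as a plain generator (an illustration, not the actual helpers):

import os

def cachecompute_sketch(filenames, src_ext, dst_ext, compute, skip):
    # for each source file, produce the derived file only if it is missing
    for fn in filenames:
        target = fn.replace(src_ext, dst_ext)
        if os.path.isfile(target):
            skip(fn, target)        # derived file already cached on disk
        else:
            compute(fn, target)     # compute and persist the derived file
        yield fn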
Example #6
import itertools
import cv2
import math
import matplotlib.pyplot as plt

import keras
from keras.models import Sequential
from keras.layers import *
from keras.regularizers import l2

# assumed from context: these modules are also referenced below
import os
import numpy as np
import mPyPl as mp

# Get list of test videos from matches.json
test_names = (from_json(os.path.join(source_dir, 'matches.json'))
              | mp.where(lambda x: 'Test' in x.keys() and int(x['Test']) > 0)
              | mp.apply(['Id', 'Half'], 'pattern',
                         lambda x: "{}_{}_".format(x[0], x[1]))
              | mp.select_field('pattern')
              | mp.as_list)
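datasplit_by_pattern is not shown here; it presumably tags each record as train or test depending on whether its filename matches one of the test patterns. A rough sketch of the idea (an assumption, not the library code):

def datasplit_by_pattern_sketch(records, test_pattern):
    # tag each record by matching its filename against the test patterns
    for r in records:
        is_test = any(p in r['filename'] for p in test_pattern)
        r['split'] = 'test' if is_test else 'train'
        yield r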

data = (mp.get_datastream(data_dir, ext=".resized.mp4")
        | datasplit_by_pattern(test_pattern=test_names)
        | stratify_sample_tt(shuffle=True)
        | summary()
        | mp.take(1000)
        | mp.iter('filename', lambda x: print("Processing {}".format(x)))
        | mp.apply(
            'filename', 'aud',
            lambda x: np.load(x.replace('.resized.mp4', '.audiofeatures.npy')))
        | normalize_npy_value('aud', interval=(-1, 1))
        | mp.as_list)

tr, te = data | mp.apply(
Example #7
print(mp.__version__)

train_dir = os.path.join(base_dir, 'training_set')
test_dir = os.path.join(base_dir, 'test_set')

# we need to explicitly get classes so that class names map to the same
# integers for both the train and test sets
classes = mp.get_classes(train_dir)

# Show first few images from the training set
seq = (mp.get_datastream(train_dir, classes=classes)
       | mp.take(10)
       | mp.apply(
           'filename', 'image',
           lambda fn: mpui.im_resize_pad(cv2.imread(fn), size=(100, 100)))
       | mp.select_field('image')
       | mp.pexec(functools.partial(mpui.show_images, cols=2)))
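To load the test set with the same class-to-integer mapping, the same classes list would be passed again, e.g. (a hypothetical line mirroring the call above):

test_seq = mp.get_datastream(test_dir, classes=classes)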

transform = keras.preprocessing.image.ImageDataGenerator(
    rotation_range=40,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
    fill_mode='nearest')

scale_transform = keras.preprocessing.image.ImageDataGenerator(rescale=1. / 255)

# Show first few images from the training set with transform
Example #8
def savepics(seq, fn):
    for i, im in enumerate(seq):
        cv2.imwrite(fn.format(i), cv2.cvtColor(im, cv2.COLOR_RGB2BGR))


def get_transform(descr):
    f = descr['faceLandmarks']
    mc_x = (f['mouthLeft']['x'] + f['mouthRight']['x']) / 2.0
    mc_y = (f['mouthLeft']['y'] + f['mouthRight']['y']) / 2.0
    return cv2.getAffineTransform(
        np.float32([(f['pupilLeft']['x'], f['pupilLeft']['y']),
                    (f['pupilRight']['x'], f['pupilRight']['y']),
                    (mc_x, mc_y)]), np.float32(target_triangle))


def transform(args):
    image, descr = args
    tr = get_transform(descr)
    return cv2.warpAffine(image, tr, (size, size))
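size and target_triangle are globals not shown in this snippet; a plausible setup maps the pupils and mouth center to fixed points in a square output image (the concrete values below are assumptions):

size = 300                          # output image side in pixels (assumed)
target_triangle = [(130.0, 120.0),  # where the left pupil should land (assumed)
                   (170.0, 120.0),  # right pupil
                   (150.0, 160.0)]  # mouth center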


(data
 | mp.apply(
     'filename', 'image', lambda x: cv2.cvtColor(
         cv2.imread(os.path.splitext(x)[0] + '.jpg'), cv2.COLOR_BGR2RGB))
 | mp.apply(['image', 'descr'], 'face', transform)
 | mp.select_field('face')
 # savepics takes (seq, fn), so wrap it for use as a pipeline sink
 | mp.pexec(lambda seq: savepics(seq, os.path.join(args.output_dir, args.template))))

print("All done")
Example #9
def plot_flow_descriptor(fd):
    # function head assumed; only the plotting loop below is from the snippet
    step = fd.shape[0] // 5
    fig, ax = plt.subplots(5, 2)
    for i in range(5):
        ax[i, 0].plot(fd[i * step, :, 0])
        ax[i, 1].plot(fd[i * step, :, 1])
    plt.show()


# Plot to see how it works
# fd = get_flow_descriptor(flow)
# plot_flow_descriptor(np.log(fd))

# Get list of test videos from matches.json
test_names = (from_json(os.path.join(source_dir, 'matches.json'))
              | mp.where(lambda x: 'Test' in x.keys() and int(x['Test']) > 0)
              | mp.apply(['Id', 'Half'], 'pattern',
                         lambda x: "{}_{}_".format(x[0], x[1]))
              | mp.select_field('pattern')
              | mp.as_list)

data = (mp.get_datastream(data_dir, ext=".resized.mp4")
        | datasplit_by_pattern(test_pattern=test_names)
        | stratify_sample_tt(shuffle=True)
        | summary()
        | mp.iter('filename', lambda x: print("Processing {}".format(x)))
        | mp.apply(
            'filename',
            'dflow',
            lambda x: np.load(x.replace('.resized.mp4', '.optflow.npy')),
            eval_strategy=mp.EvalStrategies.LazyMemoized)
        | mp.apply_npy('dflow', 'flowdescr', get_flow_descriptor)
        | mp.delfield('dflow')
        | mp.as_list)
Example #10
def savepics(seq, fn):
    # head assumed, mirroring the savepics helper shown in Example #8
    for i, im in enumerate(seq):
        cv2.imwrite(fn.format(i), im)


(data
 | mp.pshuffle
 | mp.take(15)
 | mp.apply(
     'filename', 'image', lambda x: cv2.cvtColor(
         cv2.imread(os.path.splitext(x)[0] + '.jpg'), cv2.COLOR_BGR2RGB))
 | mp.apply(['image', 'descr'], 'face', transform)
 | mp.apply('face', 'facesmall', functools.partial(im_resize, size=(150, 150)))
 | mp.apply(
     'descr', 'ypr', lambda x: "Y={},P={},R={}".format(
         x['faceAttributes']['headPose']['yaw'], x['faceAttributes'][
             'headPose']['pitch'], x['faceAttributes']['headPose']['roll']))
 | mp.select_field('facesmall')
 | mp.pexec(functools.partial(show_images, cols=3)))

imgs = (
    data
    | mp.pshuffle
    | mp.take(30)
    | mp.apply(
        'filename', 'image', lambda x: cv2.cvtColor(
            cv2.imread(os.path.splitext(x)[0] + '.jpg'), cv2.COLOR_BGR2RGB))
    | mp.apply(['image', 'descr'], 'face', transform)
    | mp.apply('face', 'facesmall',
               functools.partial(im_resize, size=(100, 150)))
    | mp.select_field('facesmall')
    | mp.as_list)
Example #11
    season = match["Season"].replace("/", "")

    h = max(0, int(match["Half"]))

    print(f"Processing {match['Video']} ({match['Id']})\n")
    videoFilePath = os.path.join(source_dir, match["Video"])
    marksFilePath = os.path.join(source_dir, "Marks.jsonl")
    video = VideoFileClip(videoFilePath)

    data = []
    with open(marksFilePath, 'r') as f:
        for line in f:
            data.append(json.loads(line))

    # Get list of times when match halves start
    mt = (data
          | mp.filter('eventType', lambda x: "start" in x)
          | mp.select_field('matchTime')
          | mp.as_list)
    mt = convtime(mt[h])

    # Shots are fine; also add Goal events, which are not marked as shots
    cuts = (data
            | mp.filter('eventType', shotFilter)
            | mp.apply('matchTime', 'start', convtime))

    # For negative examples, also consider Attacks, i.e. shots near the goal
    # area (or elsewhere on the field); they are filtered later to remove any
    # that overlap with shots/goals
    attacks = (data
               | mp.filter('eventType', attackFilter)
               | mp.apply('matchTime', 'start', convtime))

    clipTrimTime = float(args.clipTrimTime)                       # +/- X seconds centered on event time

    clipTrimAttackTimeRange = float(args.clipTrimAttackTimeRange) # +/- X seconds centered on event time
    clipTrimAttackStart = float(args.clipTrimAttackStart)         # when attack clip starts (before event time)
    clipTrimAttackEnd = float(args.clipTrimAttackEnd)             # when attack clip ends (after event time)
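shotFilter, attackFilter and convtime are helpers not shown in this snippet; a rough sketch of what they might look like (the bodies, including the assumed "MM:SS" matchTime format, are guesses, not the original code):

def shotFilter(event_type):
    # assumption: shots and goals both count as positive events
    return "shot" in event_type.lower() or "goal" in event_type.lower()

def attackFilter(event_type):
    # assumption: attacks are the candidate negative events
    return "attack" in event_type.lower()

def convtime(t):
    # assumption: matchTime is formatted as "MM:SS"
    m, s = t.split(":")
    return 60 * int(m) + float(s)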
Example #12
def main(data_dir):
    x = (mp.get_datastream(data_dir, ext=".resized.mp4")
         | mp.select_field("filename")
         | mp.as_list)
    return detect_and_write(x, filename=os.path.join(data_dir, "scene.changes.pkl"))
Example #13
import numpy as np
import itertools
import cv2
import math
import matplotlib.pyplot as plt

import keras
from keras.models import Sequential
from keras.layers import *
from keras.regularizers import l2

# assumed from context: these modules are also referenced below
import os
import mPyPl as mp

test_names = (from_json(os.path.join(source_dir, 'matches.json'))
              | mp.where(lambda x: 'Test' in x.keys() and int(x['Test']) > 0)
              | mp.apply(['Id', 'Half'], 'pattern',
                         lambda x: "{}_{}_".format(x[0], x[1]))
              | mp.select_field('pattern')
              | mp.as_list)

no_frames = 126

data = (mp.get_datastream(data_dir, ext=".resized.mp4")
        | mp.pshuffle
        | datasplit_by_pattern(test_pattern=test_names)
        | mp.apply('filename', 'vgg',
                   lambda x: np.load(x.replace('.resized.mp4', '.vgg.npy')))
        | mp.apply('vgg', 'vggflat',
                   lambda x: np.reshape(x, (no_frames, -1, 1)))
        | mp.take(500)
        | mp.as_list)

trainstream, valstream = data | mp.make_train_test_split
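mp.make_train_test_split presumably partitions the stream into two lists according to the split field assigned by datasplit_by_pattern. A minimal sketch of the idea (an assumption, not the mPyPl implementation):

def make_train_test_split_sketch(records):
    # partition records by the 'split' field set earlier in the pipeline
    train = [r for r in records if r.get('split') != 'test']
    test = [r for r in records if r.get('split') == 'test']
    return train, test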