示例#1
0
def generate_img(data):
    """Sample `args.mix` random records from `data`, compute each record's
    'face' image via `transform`, and merge the faces with random weights.

    Relies on module-level `args`, `transform`, `merge`, and `np`.
    """
    # Stage 1: shuffle the stream and keep a fixed-size random sample.
    sampled = data | mp.pshuffle | mp.take(args.mix)
    # Stage 2: derive the 'face' field from image + landmarks.
    with_faces = sampled | mp.apply(['image', 'landmarks'], 'face', transform)
    # Stage 3: materialize just the face images.
    faces = with_faces | mp.select_field('face') | mp.as_list
    # One random blend weight per sampled face.
    weights = np.random.random(len(faces))
    return merge(faces, weights)
示例#2
0
def generate_img(data, min_take=3, max_take=30):
    """Build a merged image from a random sample of `data`.

    Draws a uniformly random number of records in [min_take, max_take]
    (inclusive, per random.randint), loads each record's .jpg converted
    from OpenCV's BGR to RGB, computes the aligned 'face' via `transform`,
    and merges the faces with random weights.

    Args:
        data: mPyPl-style datastream of records with 'filename' and 'descr'.
        min_take: smallest sample size to draw (default 3, as before).
        max_take: largest sample size to draw (default 30, as before).

    Returns:
        The result of `merge` over the face list and the random weights.
    """
    n = random.randint(min_take, max_take)
    x = (
        data
        | mp.pshuffle
        | mp.take(n)
        | mp.apply(
            'filename', 'image',
            # cv2.imread yields BGR; convert so downstream code sees RGB.
            lambda x: cv2.cvtColor(cv2.imread(os.path.splitext(x)[0] + '.jpg'),
                                   cv2.COLOR_BGR2RGB))
        | mp.apply(['image', 'descr'], 'face', transform)
        | mp.select_field('face')
        | mp.as_list)
    return merge(x, np.random.random(len(x)))
示例#3
0
from keras.layers import *
from keras.regularizers import l2

# Get list of test videos from matches.json
# Keep only entries explicitly flagged for testing ('Test' key present and
# > 0), then build a "<Id>_<Half>_" filename prefix for each one.
test_names = (from_json(os.path.join(source_dir, 'matches.json'))
              | mp.where(lambda x: 'Test' in x.keys() and int(x['Test']) > 0)
              | mp.apply(['Id', 'Half'], 'pattern',
                         lambda x: "{}_{}_".format(x[0], x[1]))
              | mp.select_field('pattern')
              | mp.as_list)

# Load up to 1000 resized videos, assigning train/test split membership by
# the filename prefixes above, and attach each video's precomputed audio
# features (presumably scaled into [-1, 1] by normalize_npy_value — confirm).
data = (mp.get_datastream(data_dir, ext=".resized.mp4")
        | datasplit_by_pattern(test_pattern=test_names)
        | stratify_sample_tt(shuffle=True)
        | summary()
        | mp.take(1000)
        | mp.iter('filename', lambda x: print("Processing {}".format(x)))
        | mp.apply(
            'filename', 'aud',
            lambda x: np.load(x.replace('.resized.mp4', '.audiofeatures.npy')))
        | normalize_npy_value('aud', interval=(-1, 1))
        | mp.as_list)

# Add 'amean', the mean of the audio features over axis 1, then split the
# materialized list into train and test streams.
tr, te = data | mp.apply(
    'aud', 'amean', lambda x: np.mean(x, axis=1)) | mp.make_train_test_split


def unzip(l):
    """Split an iterable of pairs into two lists (the inverse of zip).

    Fixes the empty-input case: ``t1, t2 = zip(*[])`` raises ValueError
    because ``zip()`` with no arguments yields nothing to unpack.

    Args:
        l: iterable of 2-item sequences.

    Returns:
        Tuple ``(firsts, seconds)`` of two lists.
    """
    pairs = list(l)  # materialize so an empty iterator is detectable
    if not pairs:
        return [], []
    t1, t2 = zip(*pairs)
    return list(t1), list(t2)
示例#4
0
def transform(args):
    """Warp the image in `args` by the affine matrix derived from its
    face description, producing a (size x size) output image.

    Args:
        args: 2-item sequence of (image, description).
    """
    image = args[0]
    descr = args[1]
    matrix = get_transform(descr)
    warped = cv2.warpAffine(image, matrix, (size, size))
    return warped


@mp.Pipe
def savepics(seq, fn):
    """Pipe sink: write every image from `seq` to disk, formatting each
    image's index into the `fn` filename template."""
    for index, picture in enumerate(seq):
        # NOTE(review): this prints the full image array each iteration —
        # looks like debug output; confirm before removing.
        print(picture)
        cv2.imwrite(fn.format(index), picture)

# Preview: take 15 random samples, load each record's .jpg (converted from
# OpenCV BGR to RGB), compute the aligned 'face', resize it to 150x150,
# build a "Y=..,P=..,R=.." head-pose caption from the Face API description,
# and display the thumbnails via show_images with 3 columns.
(data
 | mp.pshuffle
 | mp.take(15)
 | mp.apply(
     'filename', 'image', lambda x: cv2.cvtColor(
         cv2.imread(os.path.splitext(x)[0] + '.jpg'), cv2.COLOR_BGR2RGB))
 | mp.apply(['image', 'descr'], 'face', transform)
 | mp.apply('face', 'facesmall', functools.partial(im_resize, size=(150, 150)))
 | mp.apply(
     'descr', 'ypr', lambda x: "Y={},P={},R={}".format(
         x['faceAttributes']['headPose']['yaw'], x['faceAttributes'][
             'headPose']['pitch'], x['faceAttributes']['headPose']['roll']))
 | mp.select_field('facesmall')
 | mp.pexec(functools.partial(show_images, cols=3)))

# NOTE(review): this pipeline looks like a mis-paste — after shuffling the
# image stream `data` it applies the matches.json filtering steps
# (x['Test'], x['Id'], x['Half']), duplicating the `test_names` pipeline
# defined earlier, and the odd indentation matches that copied block.
# Confirm the intended steps before relying on `imgs`.
imgs = (
    data
    | mp.pshuffle
              | mp.where(lambda x: 'Test' in x.keys() and int(x['Test']) > 0)
              | mp.apply(['Id', 'Half'], 'pattern',
                         lambda x: "{}_{}_".format(x[0], x[1]))
              | mp.select_field('pattern')
              | mp.as_list)

# Frames per video; used below to reshape the VGG feature arrays and as the
# model's input height (see AveragePooling2D input_shape).
no_frames = 126

# Load precomputed VGG features for up to 500 shuffled videos, reshape each
# feature array to (no_frames, -1, 1), and assign train/test membership by
# the filename prefixes in `test_names`.
data = (mp.get_datastream(data_dir, ext=".resized.mp4")
        | mp.pshuffle
        | datasplit_by_pattern(test_pattern=test_names)
        | mp.apply('filename', 'vgg',
                   lambda x: np.load(x.replace('.resized.mp4', '.vgg.npy')))
        | mp.apply('vgg', 'vggflat',
                   lambda x: np.reshape(x, (no_frames, -1, 1)))
        | mp.take(500)
        | mp.as_list)

# Split the materialized list into training and validation streams.
trainstream, valstream = data | mp.make_train_test_split

# Count samples per split for reporting.
no_train = data | mp.filter('split',
                            lambda x: x == mp.SplitType.Train) | mp.count
no_test = data | mp.filter('split',
                           lambda x: x == mp.SplitType.Test) | mp.count
print("Training samples = {}\nTesting samples = {}".format(no_train, no_test))
batchsize = 2

model = Sequential()
model.add(AveragePooling2D((10, 10), input_shape=(no_frames, 16384, 1)))
model.add(
    Conv2D(8, (3, 3),