Example #1
File: run_amal.py  Project: ml-lab/a-mal
# Imports shared by the run_amal.py excerpts shown below; other helpers referenced
# in these functions (e.g. get_connection_mean_lengths, parse_model_file,
# produce_feedback) are defined elsewhere in the same file.
import os

import numpy as np
import skvp
import simpledtw


def list_of_vectors_of_concatenated_active_joint_gradients(vid, active_joints):
    grad = skvp.gradient(vid)
    lst = []
    for frame in grad.frames:
        vec = []
        for j in active_joints:
            vec.extend(frame[j])
        lst.append(np.array(vec))

    return lst
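
A minimal usage sketch (the SKVP file path and the active-joint indices below are hypothetical; skvp.load mirrors how videos are loaded in Examples #3 and #4):

import skvp

# Hypothetical input: a skeleton video in SKVP format plus the joints treated
# as "active" for the motion being analyzed.
vid = skvp.load('example_motion.skvp')
active_joints = [4, 5, 6]

# One concatenated gradient vector per gradient frame (len(vid) - 1 of them),
# each of length 3 * len(active_joints).
vectors = list_of_vectors_of_concatenated_active_joint_gradients(vid, active_joints)
print(len(vectors), vectors[0].shape)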
Example #2
File: run_amal.py  Project: ml-lab/a-mal
def detect_rest_sequences(vid, active_joints, exponent=-1.5):
    der = skvp.median(skvp.gradient(vid))
    rest_index_counter = {}
    for joint in active_joints:
        stats = [np.linalg.norm(f[joint])**exponent for f in der.frames]
        stats_mean = np.mean(stats)
        rest_indices = [i for i, val in enumerate(stats) if val > stats_mean]
        margin = int(round(len(der) * 0.075))
        rest_indices = [
            val for val in rest_indices
            if val > margin and val < len(der) - margin
        ]
        for ri in rest_indices:
            if ri not in rest_index_counter:
                rest_index_counter[ri] = 0
            rest_index_counter[ri] += 1
    thresh = int(len(active_joints) * 0.3)
    rest_indices = sorted(
        [ri for ri in rest_index_counter if rest_index_counter[ri] > thresh])
    sequences = []
    first = None
    last = None
    for ind in rest_indices:
        if first is None:
            first = ind
            last = ind
            continue
        if ind - last > len(der) * 0.1:
            sequences.append((first, last))
            first = ind
        last = ind
    if last is not None:
        sequences.append((first, last))

    index_trans_func = lambda x: x + 1

    return sequences, index_trans_func
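
To illustrate the statistic this function relies on, here is a small self-contained sketch (synthetic per-frame speeds, not SKVP data): raising gradient norms to a negative exponent makes slow frames score high, so frames whose score exceeds the mean become candidate rest frames.

import numpy as np

# Toy version of the per-joint statistic: 50 frames, 10 of which (indices
# 20-29) barely move. Rest frames get large values of norm**exponent.
np.random.seed(0)
speeds = np.concatenate([np.random.uniform(0.5, 1.0, 20),    # motion
                         np.random.uniform(0.01, 0.05, 10),   # rest
                         np.random.uniform(0.5, 1.0, 20)])    # motion
stats = speeds ** -1.5
candidates = [i for i, v in enumerate(stats) if v > stats.mean()]
print(candidates)  # [20, 21, ..., 29]: exactly the low-speed (rest) segment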
Example #3
File: run_amal.py  Project: ml-lab/a-mal
def train(input_dir, output_file, warp_mode, output_ref_vid):
    vid_files = [
        os.path.join(input_dir, fname) for fname in os.listdir(input_dir)
    ]
    print('Loading training videos...')
    vids = [skvp.load(path) for path in vid_files]
    print('Normalizing training videos...')
    vids = [skvp.project_to_body_plane(vid, 0, 4, 8) for vid in vids]
    connections_mean_lengths = get_connection_mean_lengths(vids)
    vids = [
        skvp.scaled_connections(vid, connections_mean_lengths) for vid in vids
    ]
    vids_original_lengths_before_filters = [len(vid) for vid in vids]
    vids_mean_len_before_filters = int(
        round(np.mean(vids_original_lengths_before_filters)))
    print('Scaling training videos to have the same length')
    vids = [
        skvp.create_length_scaled_video(
            vid, num_frames=vids_mean_len_before_filters) for vid in vids
    ]
    print('Applying filters')
    vids = [skvp.median(vid) for vid in vids]
    vid_pyramids = [skvp.pyramid(vid, [3, 3, 3], [1, 1, 1]) for vid in vids]
    vids = [pyr.get_level(1) for pyr in vid_pyramids]
    vids_new_len = len(vids[0])
    # Saving original videos before temporal alignment
    vids_nowrap = [vid[:] for vid in vids]
    print('Applying temporal alignment')
    ref_vid_index, vids, active_joints, num_rests_in_motion, ref_vid_warping_indices, all_vids_warping_sequences = our_warping_function(
        vids)
    if warp_mode == 'none':
        vids = vids_nowrap
    if warp_mode == 'dtw':
        vids = warp_using_dtw(vids_nowrap, ref_vid_index, active_joints)
    if output_ref_vid is not None:
        skvp.dump(vids_nowrap[ref_vid_index], output_ref_vid)
    # Computing discrete temporal gradients
    ders = [skvp.gradient(vid) for vid in vids]
    connections = skvp.distinct_connections(vids[0])
    # Writing motion metadata into output model file
    f = open(output_file, 'w')
    f.write('ActiveJoints={0}\n'.format(str(active_joints)))
    f.write('VidLength={0:d}\n'.format(vids_mean_len_before_filters))
    f.write('NumRests={0:d}\n'.format(num_rests_in_motion))
    f.write('WarpIndices={0}\n'.format(ref_vid_warping_indices))
    f.write('ConnectionLengths={0}\n'.format(str(connections_mean_lengths)))
    f.write('--stats--\n')
    print('Calculating stats and writing to output file...')
    # Computing and writing time-related statistics - original video lengths and aligned sequence original lengths
    mean, dist_mean, dist_std = get_centroid_dist_mean_and_dist_std(
        vids_original_lengths_before_filters)
    data_unit = {
        'mean': mean,
        'dist_mean': dist_mean,
        'dist_std': dist_std,
        'type': 'video_length_frames_raw'
    }
    f.write('{0}\n'.format(str(data_unit)))
    for i in range(len(all_vids_warping_sequences[0]) + 1):
        if i == 0:
            seq_lengths = [v[0] + 1 for v in all_vids_warping_sequences]
        elif i < len(all_vids_warping_sequences[0]):
            seq_lengths = [
                v[i] - v[i - 1] + 1 for v in all_vids_warping_sequences
            ]
        else:
            seq_lengths = [
                vids_new_len - v[i - 1] + 1 for v in all_vids_warping_sequences
            ]
        mean, dist_mean, dist_std = get_centroid_dist_mean_and_dist_std(
            seq_lengths)
        data_unit = {
            'mean': mean,
            'dist_mean': dist_mean,
            'dist_std': dist_std,
            'type': 'sequence_length',
            'sequence_num': i
        }
        f.write('{0}\n'.format(str(data_unit)))
    # Computing and writing joint-related statistics
    for frame_num in range(vids_new_len):
        for joint in range(vids[0].get_num_joints()):
            points = [vid.frames[frame_num][joint] for vid in vids]
            xs = [p[0] for p in points]
            ys = [p[1] for p in points]
            zs = [p[2] for p in points]
            mean, dist_mean, dist_std = get_centroid_dist_mean_and_dist_std(xs)
            data_unit = {
                'frame': frame_num,
                'joint': joint,
                'mean': mean,
                'dist_mean': dist_mean,
                'dist_std': dist_std,
                'type': 'location_x'
            }
            f.write('{0}\n'.format(str(data_unit)))
            mean, dist_mean, dist_std = get_centroid_dist_mean_and_dist_std(ys)
            data_unit = {
                'frame': frame_num,
                'joint': joint,
                'mean': mean,
                'dist_mean': dist_mean,
                'dist_std': dist_std,
                'type': 'location_y'
            }
            f.write('{0}\n'.format(str(data_unit)))
            mean, dist_mean, dist_std = get_centroid_dist_mean_and_dist_std(zs)
            data_unit = {
                'frame': frame_num,
                'joint': joint,
                'mean': mean,
                'dist_mean': dist_mean,
                'dist_std': dist_std,
                'type': 'location_z'
            }
            f.write('{0}\n'.format(str(data_unit)))
            if frame_num < vids_new_len - 1:  # We have one frame less in the gradient
                der_points = [vid.frames[frame_num][joint] for vid in ders]
                xs = [p[0] for p in der_points]
                ys = [p[1] for p in der_points]
                zs = [p[2] for p in der_points]
                mean, dist_mean, dist_std = get_centroid_dist_mean_and_dist_std(
                    xs)
                data_unit = {
                    'frame': frame_num,
                    'joint': joint,
                    'mean': mean,
                    'dist_mean': dist_mean,
                    'dist_std': dist_std,
                    'type': 'gradient_x'
                }
                f.write('{0}\n'.format(str(data_unit)))
                mean, dist_mean, dist_std = get_centroid_dist_mean_and_dist_std(
                    ys)
                data_unit = {
                    'frame': frame_num,
                    'joint': joint,
                    'mean': mean,
                    'dist_mean': dist_mean,
                    'dist_std': dist_std,
                    'type': 'gradient_y'
                }
                f.write('{0}\n'.format(str(data_unit)))
                mean, dist_mean, dist_std = get_centroid_dist_mean_and_dist_std(
                    zs)
                data_unit = {
                    'frame': frame_num,
                    'joint': joint,
                    'mean': mean,
                    'dist_mean': dist_mean,
                    'dist_std': dist_std,
                    'type': 'gradient_z'
                }
                f.write('{0}\n'.format(str(data_unit)))
        for joint_i in range(vids[0].get_num_joints()):
            for joint_j in range(joint_i + 1, vids[0].get_num_joints()):
                if (joint_i, joint_j) in connections:
                    # Skipping neighbors!!
                    continue
                dists = [
                    np.linalg.norm(vid.frames[frame_num][joint_i] -
                                   vid.frames[frame_num][joint_j])
                    for vid in vids
                ]
                centroid, dist_mean, dist_std = get_centroid_dist_mean_and_dist_std(
                    dists)
                data_unit = {
                    'frame': frame_num,
                    'joint_i': joint_i,
                    'joint_j': joint_j,
                    'mean': float(centroid),
                    'dist_mean': dist_mean,
                    'dist_std': dist_std,
                    'type': 'joint_distance'
                }
                f.write('{0}\n'.format(str(data_unit)))
        for lower_joint_index, join_index, higher_joint_index in find_all_joint_trios(
                vids[0].get_connections()):
            angles = []
            for vid in vids:
                vec_1 = vid.frames[frame_num][lower_joint_index] - vid.frames[
                    frame_num][join_index]
                vec_2 = vid.frames[frame_num][higher_joint_index] - vid.frames[
                    frame_num][join_index]
                theta = np.arccos(
                    vec_1.dot(vec_2) /
                    (np.linalg.norm(vec_1) * np.linalg.norm(vec_2)))
                angles.append(theta)
            centroid, dist_mean, dist_std = get_centroid_dist_mean_and_dist_std(
                angles)
            data_unit = {
                'frame': frame_num,
                'joint_trio':
                [lower_joint_index, join_index, higher_joint_index],
                'mean': float(centroid),
                'dist_mean': dist_mean,
                'dist_std': dist_std,
                'type': 'joint_angles'
            }
            f.write('{0}\n'.format(str(data_unit)))
    f.close()
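
The helper get_centroid_dist_mean_and_dist_std is not included in these excerpts. Based on how its three return values ('mean', 'dist_mean', 'dist_std') are consumed in test() below, a plausible minimal sketch is the following; the real implementation in run_amal.py may differ.

import numpy as np

def get_centroid_dist_mean_and_dist_std(values):
    # Hypothetical reconstruction: the mean of the training values, plus the
    # mean and standard deviation of each value's absolute distance from that
    # mean, matching how test() normalizes |val - mean| against dist_mean and
    # dist_std when turning a measurement into a cost.
    values = np.asarray(values, dtype=float)
    centroid = values.mean()
    dists = np.abs(values - centroid)
    return float(centroid), float(dists.mean()), float(dists.std())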
Example #4
File: run_amal.py  Project: ml-lab/a-mal
def test(model_file, input_video_file, warp_mode, ablation, ref_vid_path):
    vid = skvp.load(input_video_file)
    vid = skvp.project_to_body_plane(vid, 0, 4, 8)
    orig_vid_len_frames = len(vid)
    vid_len, active_joints, num_rests, warp_indices, connection_lengths, stats = parse_model_file(
        model_file)
    vid = skvp.scaled_connections(vid, connection_lengths)
    vid = skvp.create_length_scaled_video(vid, num_frames=vid_len)
    vid = skvp.median(vid)
    pyr = skvp.pyramid(vid, [3, 3, 3], [1, 1, 1])
    vid = pyr.get_level(1)
    rest_sequences = []
    exponent = -1.5
    ignore_subsequence_lengths = False
    while len(rest_sequences) < num_rests:
        exponent *= 0.9
        if exponent > -0.5:
            rest_sequences = [(warp_indices[i - 1], warp_indices[i])
                              for i in range(len(warp_indices)) if i % 2 == 1]
            index_transform_function = lambda x: x + 1
            ignore_subsequence_lengths = True
            break
        rest_sequences, index_transform_function = detect_rest_sequences(
            vid, active_joints, exponent)
    vec = []
    for sq in rest_sequences:
        if sq[0] == sq[1]:
            sq = (sq[0] - 1, sq[1] + 1)
        vec.extend(sq)
    vec = [index_transform_function(val) for val in vec]
    if warp_mode == 'our':
        vid = warp_video(vid, vec, warp_indices)
    elif warp_mode == 'dtw':
        refvid = skvp.load(ref_vid_path)
        ref_vals = list_of_vectors_of_concatenated_active_joint_gradients(
            refvid, active_joints)
        vid_vals = list_of_vectors_of_concatenated_active_joint_gradients(
            vid, active_joints)
        matches, cost, mapping_1, mapping_2, matrix = simpledtw.dtw(
            ref_vals, vid_vals)
        warped = vid[:1]  # Starting from one frame, as grad has n-1 frames
        for mapped_indices in mapping_1:
            warped.add_frame(vid.frames[mapped_indices[-1]])
        vid = warped
    der = skvp.gradient(vid)
    costs = []
    group_weights = {'ActiveJoint': 0.73, 'NonActiveJoint': 0.02, 'Time': 0.25}
    if 'active' in ablation:
        group_weights = {
            'ActiveJoint': 0.15,
            'NonActiveJoint': 0.6,
            'Time': 0.25
        }
    if 'time' in ablation:
        group_weights['ActiveJoint'] /= (1.0 - group_weights['Time'])
        group_weights['NonActiveJoint'] /= (1.0 - group_weights['Time'])
        group_weights['Time'] = 0
    for stat in stats:
        if stat['type'] == 'sequence_length':
            stat['start_frame'] = (vec[stat['sequence_num'] - 1] +
                                   1) if stat['sequence_num'] > 0 else 0
            stat['end_frame'] = vec[
                stat['sequence_num']] if stat['sequence_num'] < len(
                    vec) else len(vid) - 1
        if ignore_subsequence_lengths and stat['type'] == 'sequence_length':
            print('Ignoring seq lengths')
            continue
        if 'joint' in stat or 'joint_trio' in stat:
            stat['type_group'] = 'NonActiveJoint'
        elif 'joint_i' in stat:
            stat[
                'type_group'] = 'NoGroupIgnoreMe' if 'active' not in ablation else 'NonActiveJoint'
        else:
            stat['type_group'] = 'Time'
        if 'joint' in stat and stat['joint'] in active_joints:
            stat['type_group'] = 'ActiveJoint'
        if 'joint_i' in stat and stat[
                'joint_i'] in active_joints and 'joint_j' in stat and stat[
                    'joint_j'] in active_joints:
            stat['type_group'] = 'ActiveJoint'
        if 'joint_trio' in stat:
            for j in stat['joint_trio']:
                if j in active_joints:
                    stat['type_group'] = 'ActiveJoint'
        if stat['type'] == 'location_x':
            val = vid.frames[stat['frame']][stat['joint']][0]
        elif stat['type'] == 'location_y':
            val = vid.frames[stat['frame']][stat['joint']][1]
        elif stat['type'] == 'location_z':
            val = vid.frames[stat['frame']][stat['joint']][2]
        elif stat['type'] == 'gradient_x':
            val = der.frames[stat['frame']][stat['joint']][0]
        elif stat['type'] == 'gradient_y':
            val = der.frames[stat['frame']][stat['joint']][1]
        elif stat['type'] == 'gradient_z':
            val = der.frames[stat['frame']][stat['joint']][2]
        elif stat['type'] == 'joint_distance':
            val = np.linalg.norm(vid.frames[stat['frame']][stat['joint_i']] -
                                 vid.frames[stat['frame']][stat['joint_j']])
        elif stat['type'] == 'joint_angles':
            joint_trio = stat['joint_trio']
            vec_1 = vid.frames[stat['frame']][joint_trio[0]] - vid.frames[
                stat['frame']][joint_trio[1]]
            vec_2 = vid.frames[stat['frame']][joint_trio[2]] - vid.frames[
                stat['frame']][joint_trio[1]]
            val = np.arccos(
                vec_1.dot(vec_2) /
                (np.linalg.norm(vec_1) * np.linalg.norm(vec_2)))
        elif stat['type'] == 'video_length_frames_raw':
            val = orig_vid_len_frames
        elif stat['type'] == 'sequence_length':
            if stat['sequence_num'] == 0:
                val = vec[0] + 1
            elif stat['sequence_num'] < len(vec):
                val = vec[stat['sequence_num']] - vec[stat['sequence_num'] -
                                                      1] + 1
            else:
                val = len(vid) - vec[stat['sequence_num'] - 1] + 1
        else:
            continue
        # Avoiding division by 0 when we normalize the distance
        val = round(val, 7)
        stat['mean'] = round(stat['mean'], 7)
        stat['dist_std'] = round(stat['dist_std'], 7)
        stat['dist_mean'] = round(stat['dist_mean'], 7)
        try:
            dist = abs(val - stat['mean'])
        except Exception:
            print('stat is: {0}'.format(str(stat)))
            print('val is: {0}'.format(str(val)))
            print('mean is: {0}'.format(str(stat['mean'])))
            raise  # re-raise after logging; 'dist' would be stale or unbound below otherwise
        if stat['dist_std'] == 0:
            if dist - stat['dist_mean'] == 0:
                dist_in_stds = 0
            else:
                dist_in_stds = np.inf
        else:
            dist_in_stds = (dist - stat['dist_mean']) / stat['dist_std']
        stat['cost'] = dist_in_stds
        stat['test_val'] = val
    costed_stats = [s for s in stats
                    if 'cost' in s]  # and s['type'] == 'joint_angles']
    costed_active_joint_stats = [
        s for s in costed_stats if s['type_group'] == 'ActiveJoint'
    ]
    costed_nonactive_joint_stats = [
        s for s in costed_stats if s['type_group'] == 'NonActiveJoint'
    ]
    costed_time_stats = [s for s in costed_stats if s['type_group'] == 'Time']
    lambdaa = 2
    group_to_stats = {
        'Time': costed_time_stats,
        'ActiveJoint': costed_active_joint_stats,
        'NonActiveJoint': costed_nonactive_joint_stats
    }

    feedback_items, avg_active_bad_segment_length, avg_param_num_active_segments, avg_param_num_nonactive_segments = produce_feedback(
        costed_stats, group_weights, ablation)

    scorelambda = 2.9
    scorelambdatime = 7.5

    for item in feedback_items:
        # Here we duplicate the weight manipulation, just to be able to sort the feedback items.
        # The same manipulation will be applied to the real items (depending on ablation settings) - we don't apply ablation to feedback.
        item['cost'] *= group_weights[item['type_group']]
        item['cost'] /= (scorelambdatime
                         if item['type_group'] == 'Time' else scorelambda)
        item['cost'] /= len(
            [s['cost'] for s in group_to_stats[item['type_group']]])
        if item['type_group'] == 'ActiveJoint':
            item['cost'] *= (0.75**avg_param_num_active_segments)
    feedback_items.sort(key=lambda x: x['cost'], reverse=True)

    if len(feedback_items) > 5:
        feedback_items = feedback_items[:5]
    last_cost = None
    for i, fi in enumerate(feedback_items):
        if last_cost is None or last_cost < 2 * fi['cost']:
            last_cost = fi['cost']
            continue
        # Removing irrelevant feedback
        feedback_items = feedback_items[:i]
        break
    feedback_items = [generate_feedback_text(fi, 15) for fi in feedback_items]
    if len(feedback_items) == 0:
        print('FEEDBACK: empty! congrats :)')
    for feedback_item in feedback_items:
        print('FEEDBACK: ' + str(feedback_item))

    if 'diminish' not in ablation and 'active' not in ablation:
        for s in costed_active_joint_stats:
            s['cost'] *= (0.75**avg_param_num_active_segments)

    #Vector scores
    active_joint_score = max(
        0, 1.0 - abs(
            sum(s['cost']
                for s in costed_active_joint_stats if s['cost'] > 0) /
            (scorelambda * len([s['cost']
                                for s in costed_active_joint_stats]))))
    nonactive_joint_score = max(
        0, 1.0 - abs(
            sum(s['cost']
                for s in costed_nonactive_joint_stats if s['cost'] > 0) /
            (scorelambda *
             len([s['cost'] for s in costed_nonactive_joint_stats]))))
    time_score = max(
        0, 1.0 - abs(
            sum(s['cost'] for s in costed_time_stats if s['cost'] > 2.5) /
            (scorelambdatime * len([s['cost'] for s in costed_time_stats]))))

    print('Scores: Active: {0:f}, NonActive: {1:f}, Time: {2:f}'.format(
        active_joint_score, nonactive_joint_score, time_score))
    final_score = active_joint_score * group_weights[
        'ActiveJoint'] + nonactive_joint_score * group_weights[
            'NonActiveJoint'] + time_score * group_weights['Time']

    print('Score: {0:f}'.format(final_score))
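
For reference, each statistic's cost above reduces to a normalized deviation; a self-contained sketch with made-up numbers (chosen to be exact in binary floating point):

import numpy as np

# How test() turns one statistic into a cost: the distance of the test value
# from the training mean, normalized by the training distance statistics.
val, mean, dist_mean, dist_std = 0.5, 0.25, 0.125, 0.0625
dist = abs(val - mean)
if dist_std == 0:
    cost = 0 if dist - dist_mean == 0 else np.inf
else:
    cost = (dist - dist_mean) / dist_std
print(cost)  # 2.0 -> two "distance standard deviations" beyond the typical training spread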