Code Example #1
    def get_frames(self):
        # Convert the input video to AVI, and get a list of all the frames
        convert_to_avi(self.input_video, self.input_avi,
                       self.fps,
                       self.start_sec, self.end_sec)

        self.in_file, self.out_file, self.frames = get_frames(self.input_avi, self.output_avi)
Code Example #2
def process_video(video_path, fps):

    video_frames = get_frames(video_path, fps)

    face_img_queue = mp.Queue()
    video_preds_queue = mp.Queue()

    face_detection_process = mp.Process(target=run_retinaface,
                                        args=(video_frames, face_img_queue))

    face_detection_process.start()
    video_face_imgs = face_img_queue.get()
    face_detection_process.join()

    emotion_detection_process = mp.Process(target=run_emotion_net,
                                           args=(video_face_imgs,
                                                 video_preds_queue))

    emotion_detection_process.start()
    video_frame_preds = video_preds_queue.get()
    emotion_detection_process.join()

    print('Video Successfully Processed')

    pred_dict = to_dict(video_frame_preds)
    r = send_response(pred_dict)

    if r.status_code == 200:
        print("Predictions successfully posted.")

    return video_frame_preds
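Note on the pattern above: the parent calls Queue.get() before Process.join(), and that ordering matters, since a child that has put a large object on a multiprocessing queue will not exit until the data has been drained, so joining first can deadlock. A minimal sketch of a worker compatible with this handoff is shown below; the body is hypothetical and only illustrates the contract the example relies on (consume the frames, put one result on the queue).

def run_retinaface(video_frames, face_img_queue):
    # Hypothetical stand-in for the real face detector: crop a fixed region
    # from each frame and put the whole batch on the queue exactly once.
    face_imgs = [frame[:64, :64] for frame in video_frames]
    face_img_queue.put(face_imgs)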
Code Example #3
File: main.py Project: igemsoftware/AMU-Poznan2013
def main(input_str):
    """
    Main function takes a string input and returns the best results according
    to scoring. Each result includes the sh-miR sequence, its score and a link
    to the 2D structure from the mfold program.
    """
    sequence = check_input(input_str)
    seq1, seq2, shift_left, shift_right = sequence
    if not seq2:
        seq2 = reverse_complement(seq1)
    all_frames = get_all()
    if 'error' in all_frames: #database error handler
        return all_frames

    frames = get_frames(seq1, seq2, shift_left, shift_right, all_frames)
    original_frames = [Backbone(**elem) for elem in all_frames]


    frames_with_score = []
    for frame_tuple, original in zip(frames, original_frames):
        score = 0
        frame, insert1, insert2 = frame_tuple
        mfold_data = mfold(frame.template(insert1, insert2))
        if 'error' in mfold_data:
            return mfold_data
        pdf, ss = mfold_data[0], mfold_data[1]
        score += score_frame(frame_tuple, ss, original)
        score += score_homogeneity(original)
        score += two_same_strands_score(seq1, original)
        frames_with_score.append((score, frame.template(insert1, insert2), frame.name, pdf))

    sorted_frames = [elem for elem in
                     sorted(frames_with_score, key=lambda x: x[0], reverse=True)
                     if elem[0] > 60]
    return {'result': sorted_frames[:3]}
Code Example #4
def main(input_str):
    """
    Main function takes string input and returns the best results depending
    on scoring. Single result include sh-miR sequence,
    score and link to 2D structure from mfold program
    """
    sequence = check_input(input_str)
    seq1, seq2, shift_left, shift_right = sequence
    if not seq2:
        seq2 = reverse_complement(seq1)
    all_frames = get_all()
    if 'error' in all_frames:  #database error handler
        return all_frames

    frames = get_frames(seq1, seq2, shift_left, shift_right, all_frames)
    original_frames = [Backbone(**elem) for elem in all_frames]

    frames_with_score = []
    for frame_tuple, original in zip(frames, original_frames):
        score = 0
        frame, insert1, insert2 = frame_tuple
        mfold_data = mfold(frame.template(insert1, insert2))
        if 'error' in mfold_data:
            return mfold_data
        pdf, ss = mfold_data[0], mfold_data[1]
        score += score_frame(frame_tuple, ss, original)
        score += score_homogeneity(original)
        score += two_same_strands_score(seq1, original)
        frames_with_score.append(
            (score, frame.template(insert1, insert2), frame.name, pdf))

    sorted_frames = [elem for elem in
                     sorted(frames_with_score, key=lambda x: x[0], reverse=True)
                     if elem[0] > 60]
    return {'result': sorted_frames[:3]}
Code Example #5
def calculate_irr(video=None):
    annotations = utils.get_frames()
    if video is not None:
        print('Overall IRR for video', video, 'is:',
              annotations[annotations['video'] == video]['majority'].mean() / 3)

        # TODO: get more stats, like number of frames that have no, 2/3, full agreement
    else:
        print('Overall IRR is:', annotations['majority'].mean() / 3)
Code Example #6
File: main.py Project: tambulkar/CV-HW2
def main():
    frames = get_frames('../data/rohan_input.mov')
    first_frame = frames[0]
    imwrite('../output/first_frame.png', first_frame)
    template = first_frame[670:870, 1075:1275]  #first_frame[800:1350,555:1030]
    imwrite('../output/template.png', template)
    cross_corr = match_template(image=first_frame,
                                template=template,
                                pad_input=True)
    plt.imshow(cross_corr, cmap="gray")
    plt.savefig('../output/corr_matrix.png')
    plt.clf()

    if not path.exists('../data/x_shifts.npy') or not path.exists(
            '../data/y_shifts.npy'):
        x_shifts = []
        y_shifts = []
        for i, frame in enumerate(frames):
            cross_corr = match_template(image=frame,
                                        template=template,
                                        pad_input=True)
            shift = np.unravel_index(np.argmax(cross_corr), cross_corr.shape)
            x, y = shift[::-1]
            x_shifts.append(x)
            y_shifts.append(y)
            print('Found shift for {num} of {length} frames'.format(
                num=i + 1, length=len(frames)))
        # Cache the computed shifts so the load branch below can be used on
        # later runs (assumed intent of the path.exists() check above).
        np.save('../data/x_shifts.npy', x_shifts)
        np.save('../data/y_shifts.npy', y_shifts)
    else:
        x_shifts = np.load('../data/x_shifts.npy')
        y_shifts = np.load('../data/y_shifts.npy')

    plt.plot(x_shifts, y_shifts)
    plt.xlabel('X Pixel Shift')
    plt.ylabel('Y Pixel Shift')
    plt.savefig('../output/pixel_shift.png')
    plt.clf()

    # frames = get_frames('../data/input.mov', grayscale=False)
    shifted_frames = []
    for i, frame in enumerate(frames):
        frame = np.array(frame, dtype='uint8')
        shifted_frame = frame.copy()
        x_shift = x_shifts[i]
        y_shift = y_shifts[i]
        M = np.array([[1, 0, x_shift], [0, 1, y_shift]], dtype='float32')
        # dsize is (width, height); use the returned array so the shift is
        # actually applied (cv2 may reallocate dst when sizes differ)
        shifted_frame = warpAffine(src=frame, M=M,
                                   dsize=(frame.shape[1], frame.shape[0]))
        shifted_frame = np.array(shifted_frame, dtype='int32')
        shifted_frames.append(shifted_frame)
        print('Shifted {num} of {length} frames'.format(num=i + 1,
                                                        length=len(frames)))
    res = shifted_frames[0]
    for i in range(1, len(shifted_frames)):
        res += shifted_frames[i]
    res = np.divide(res, len(shifted_frames))
    imwrite('../output/output.png', res)
Code Example #7
def remove_frame(in_fname, out_fname, n_sample_frames=100):
    sample_frames = get_frames(in_fname, n_sample_frames)
    input_frame = get_median_frame(sample_frames)
    res = get_frame_box_coords(input_frame)
    if res is None:
        print("No border was detected in {}".format(in_fname))
        return None
    else:
        x, y, w, h = res
    clip = VideoFileClip(in_fname)
    crop_clip = crop(clip, x1=x, y1=y, x2=x + w, y2=y + h)
    crop_clip.write_videofile(out_fname)
Code Example #8
def file_upload():
    print('DEBUG: uploading file...')
    if request.method == 'POST':
        result = {}
        # Check username existence
        if 'username' not in request.form:
            flash("No username")
            result['result'] = 'False'
            return jsonify(result)
        username = request.form['username']
        print("DEBUG: username = ", username)
        # Check file existence
        if 'file' not in request.files:
            flash('No file part')
            result['result'] = 'False'
            return jsonify(result)
        file = request.files['file']
        if file.filename == '':
            flash('No selected file')
            result['result'] = 'False'
            return jsonify(result)
        if file and allowed_file(file.filename):
            filename = secure_filename(file.filename)
            file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
            path = os.path.join(app.config['UPLOAD_FOLDER'], filename)
            clear_frames(username)
            get_frames(username, path)
            tester = Analyzer(username)
            is_me = False
            is_lively = tester.detect_liveness(path)
            if tester.identify() > THRESHOLD:
                is_me = True
            if is_me and is_lively:
                result['result'] = 'True'
            else:
                result['result'] = 'False'
            return jsonify(result)
            # return redirect(request.url)

    return render_template('upload.html')
Code Example #9
File: eval.py Project: YeezVonway/AICourse-lab
def eval_file(model, audioFile, cfg=CONFIG.GLOBAL):

    try:
        arr, _ = librosa.load(audioFile, sr=cfg.DATA.SR)
    except Exception:  # the audio file could not be read or decoded
        return None

    arr = utils.get_frames(arr, cfg.DATA)
    arr = np.expand_dims(arr, 0)
    x = torch.tensor(arr, dtype=torch.float32, device=torch.device(cfg.DEVICE))
    y: torch.Tensor = model(x)

    return y.squeeze(dim=0).cpu().detach().numpy()
Code Example #10
    def section_cutout(self):
        """ 
        Go through all frames on which event was detected than cutout the section of the image 
        """

        frames = get_frames(self.filename, self.start_frame, self.end_frame)
        section = self.get_section()
        min_x = (((section % (1920 // 120))) * 120) - 1
        min_y = (((section // (1920 // 120))) * 120) - 1
        max_x = (min_x + 120) - 1
        max_y = (min_y + 120) - 1

        frames = [[frame[min_y:max_y, min_x:max_x]] for frame in frames]
        return frames
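For reference, the index arithmetic above maps a section number onto a grid of 120x120 tiles that is 1920 // 120 = 16 tiles wide. A quick check with a hypothetical section index:

tiles_per_row = 1920 // 120                                      # 16
section = 17                                                     # hypothetical index
col, row = section % tiles_per_row, section // tiles_per_row     # (1, 1)
min_x, min_y = col * 120 - 1, row * 120 - 1                      # (119, 119)
max_x, max_y = min_x + 120 - 1, min_y + 120 - 1                  # (238, 238)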
Code Example #11
def calculate_representation():
    annotations = utils.get_frames()
    print('Currently we have',
          annotations.shape[0], 'frames.')

    # print('\nEthnicity representation:\n')
    # print(annotations['ETHNICITY'].value_counts(normalize=True) * 100)
    # print('\nGender representation:\n')
    # print(annotations['GENDER'].value_counts(normalize=True) * 100)

    print('\nRepresentation of combinations:\n')
    for index, row in annotations.groupby(['GENDER', 'ETHNICITY'])['frame'].count().reset_index().iterrows():
        print(row['GENDER'], row['ETHNICITY'], '-',
              "{0:.2%}".format(row['frame'] / len(annotations)))

    print('\nAbsolute frames of combinations including flow (only annotated):\n')
    for index, row in annotations.groupby(['FLOW_majority', 'GENDER', 'ETHNICITY'])['frame'].count().reset_index().iterrows():
        print('flow' if row['FLOW_majority'] > 0 else 'noflow',
              row['GENDER'], row['ETHNICITY'], '-', row['frame'])
Code Example #12
def predict_ActionLabel_to_NewVideo_new(video_path):
    frames_before_resize = get_frames(video_path, divide=1, show=False)

    frames = []
    for i in range(len(frames_before_resize)):

        image = cv2.resize(frames_before_resize[i], (960, 540))
        # print(image.shape)
        frames.append(image)

    frames = np.array(frames) / 255.0

    x_test = []
    for i in range(2, len(frames) - 3):
        motion_1 = frames[i - 1] - frames[i]
        motion_2 = frames[i + 1] - frames[i]
        motion_3 = frames[i - 2] - frames[i]
        motion_4 = frames[i + 2] - frames[i]
        x_test.append(
            np.concatenate((frames[i], motion_1, motion_2, motion_3, motion_4),
                           axis=2))

    x_test = np.array(x_test)

    print('input shape:', x_test.shape)
    print('frames shape:', frames.shape)
    model = get_resnet50_model()
    results = np.array(model.predict(x_test))
    # print(results)
    labels = []
    action = {0: 'idle', 1: 'pick', 2: 'push'}
    for result in results:
        index = result.argmax()
        labels.append(action[index])
    extract_actions(labels)

    video_name = video_path.split('/')[-1]
    video = add_ActionLabel_to_frames(labels, frames_before_resize[2:-3])
    frames_to_video(video, video_name)
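As a quick sanity check, the concatenation above stacks each resized frame with its four difference images along the channel axis, so every model input has 15 channels. A tiny sketch with dummy data, assuming 3-channel (BGR) frames as implied by the resize above:

import numpy as np

frame = np.zeros((540, 960, 3))
diffs = [frame - frame for _ in range(4)]        # motion_1 .. motion_4
sample = np.concatenate([frame] + diffs, axis=2)
print(sample.shape)                              # (540, 960, 15)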
Code Example #13
def predict_ActionLabel_to_NewVideo(video_path):
    frames_before_resize = get_frames(video_path, show=False)
    frames = []
    for i in range(len(frames_before_resize)):

        image = cv2.resize(frames_before_resize[i], (960, 540))
        # print(image.shape)
        frames.append(image)

    frames = np.array(frames)
    print(frames.shape)
    model = get_resnet50_model()
    results = np.array(model.predict(frames / 255.0))
    # print(results)
    labels = []
    action = {0: 'idle', 1: 'pick', 2: 'push'}
    for result in results:
        index = result.argmax()
        labels.append(action[index])
    extract_actions(labels)

    video_name = video_path.split('/')[-1]
    video = add_ActionLabel_to_frames(labels, frames)
    frames_to_video(video, video_name)
Code Example #14
from tqdm import tqdm
import ipdb

for d in cfg.datasets:

    print('preparing training data for %s' % d)

    # get summary images
    folder = os.path.join(cfg.data_dir, 'datasets', 'images_' + d)
    
    # Define the batches
    total_frames = len(glob.glob(os.path.join(folder, '*.tif')))
    batch_inds = np.arange(0, total_frames, cfg.summary_frames)

    # Get the size of the images and initialize the image stack
    img0 = utils.get_frames(folder, frame_numbers=[0])
    height, width = img0.shape
    
    # Define the number of batches
    batches = min(total_frames // cfg.summary_frames, cfg.max_batches)
    
    # X = dict.fromkeys(['corr', 'mean', 'median', 'max', 'std'], np.zeros((batches, _.shape[0], _.shape[1])))
    
    # Initialize the summary images
    summary_titles = ['corr', 'mean', 'median', 'max', 'std']
    X = {key: np.zeros((batches, height, width)) for key in summary_titles}

    # get summary images for each batch in video
    for b in tqdm(range(batches)):
        img_stack = utils.get_frames(folder, 
            frame_numbers=np.arange(batch_inds[b], batch_inds[b]+cfg.summary_frames))
Code Example #15
try:
    # sends command line output to /dev/null when trying to open ffmpeg so it doesn't muck up our beautiful command line
    null = open("/dev/null", "w")
    # it tries to open ffmpeg
    subprocess.Popen("ffmpeg", stdout=null, stderr=null)
    # politely closes /dev/null
    null.close()

# if the OS can't find ffmpeg an error is printed and the program quits
except OSError:
    print("ffmpeg was not found. Please install it. Thanks.")
    sys.exit()

mosher = Mosher(input_video, 22)
# Refrain from converting to avi on same vid...
# mosher.get_frames()
mosher.in_file, mosher.out_file, mosher.frames = utils.get_frames(mosher.input_avi, mosher.output_avi)
# mosher.get_resolutions()

profiles = [MoshProfile(1, 5, 3, "grid.jpg"), ]  # MoshProfile(2, 3, 10), MoshProfile(5, 10, 10), MoshProfile(8, 7, 50)]

# mosher.get_first_frames()
# mosher.mutate_to_deltaframes()
# mosher.remove_deltaframes()
# mosher.remove_keyframes()
# mosher.reset_frames()
# mosher.mask(profiles)

# mosher.finish()
mosher.analyze()
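The subprocess probe in the snippet above is how this project checks that ffmpeg is installed; a lighter-weight alternative (a sketch, not part of the original code) is to look the binary up on the PATH with shutil.which:

import shutil
import sys

if shutil.which("ffmpeg") is None:
    print("ffmpeg was not found. Please install it. Thanks.")
    sys.exit(1)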
Code Example #16
    # print(similarity)
    similarity = np.mean(similarity, axis=0)
    # print("ReID similarity score: ", similarity)
    # print(similarity.shape)
    
    scores *= np.squeeze(similarity)
    return scores, normalized_features

count = 0
first_frame = True
flag_lost = False
lost_frame_count = 0
search_instance_size = 0

import time 
for frame in get_frames(data_dir):

#         width = int(frame.shape[1] / 2)
#         height = int(frame.shape[0] / 2)

#         frame = cv2.resize(frame, (width, height))
    # print(frame.shape)
    
    count += 1

    if count % 100 == 0:

        print("progress: ", count)

    if lost_frame_count > 20: 
Code Example #17
def prepare_trigger_frame(trigger, size=(1920 // 120, 1080 // 120)):
    frames = get_frames(trigger.filename, trigger.start_frame,
                        trigger.end_frame)
    result = combine_frames(frames)
    result = add_marker(result, trigger)
    return result
Code Example #18
## write sample training images

import utils
utils.write_sample_imgs(X_contrast=(0, 99))
utils.write_sample_border_imgs(channels=['corr', 'median'], contrast=(1, 99))
# write_sample_border_imgs(channels=['corr'], contrast=(0,99))

## try auto-correlation images

dataset = 'J123'

folder = os.path.join(cfg.data_dir, 'caiman', 'datasets', 'images_' + dataset)
imgs = utils.get_frames(folder, frame_inds=np.arange(0, 1000))

##
offset = 1
acorr = np.sum(np.multiply(imgs[:-offset], imgs[offset:]), axis=0)
acorr = acorr / (np.sqrt(np.sum(np.square(imgs[:-offset]), 0)) *
                 np.sqrt(np.sum(np.square(imgs[offset:]), 0)))
# acorr = np.sum(np.multiply(imgs[:-offset], imgs[offset:]), 0)
# acorr = np.divide(acorr, np.square(np.linalg.norm(imgs, axis=0)))

# plt.close('all')
imshow = plt.imshow(utils.scale_img(acorr))  # create the image handle so set_data below has a target
imshow.set_data(utils.scale_img(acorr))
Code Example #19
File: tracking.py Project: shen338/tracker
    # print(similarity.shape)
    
    scores *= np.squeeze(similarity)
    return scores, normalized_features


out = None
init_rect = list(map(int, init_string.split(',')))
count = 0
first_frame = True
flag_lost = True

search_instance_size = 0

import time 
for frame in get_frames(video_name):
    
    count += 1
    
    if flag_lost: 
        search_instance_size = LOST_INSTANCE_SIZE
        window_influence = LOST_WINDOW_INFLUENCE
        
    else: 
        search_instance_size = INSTANCE_SIZE
        window_influence = WINDOW_INFLUENCE
        
    if first_frame:
        
        frame_size = frame.shape
        init_rect = list(map(int, init_string.split(',')))
Code Example #20
File: eval_lasot.py Project: shen338/tracker
    lost_frame_count = 0
    search_instance_size = 0

    eco = []
    acc = []

    img_path = os.path.join(folder, 'img/')
    # img_files = glob.glob(img_path)
    # print(img_path)

    filename = os.path.join(folder, "groundtruth.txt")
    with open(filename) as f:
        annotation = f.readlines()

    import time
    for frame in get_frames(img_path):

        width = int(frame.shape[1] / 2)
        height = int(frame.shape[0] / 2)

        frame = cv2.resize(frame, (width, height))
        # print(frame.shape)

        count += 1

        if count % 100 == 0:

            print(basename + " progress: ", count)

        if lost_frame_count > 20:
Code Example #21
if __name__ == '__main__':

    if len(sys.argv) != 3:
        sys.exit("Improper number of args")

    train_path = sys.argv[1]
    #test_path = '/Users/pallekc/Jobs/comma/speed_challenge_2017/data/test.mp4'
    train_targets = sys.argv[2]

    model = get_nvidia_model()
    if t.cuda.is_available():
        model.cuda()
    device = t.device("cuda:0" if t.cuda.is_available() else "cpu")
    print('device ', device)
    train_frames, train_frames_count = get_frames(train_path)
    train_targets, train_targets_count = get_targets(train_targets)
    assert train_frames_count == train_targets_count, 'Number of train frames != targets'

    train_processed = process_frames(
        train_frames, train_targets)  # remember the first one is missing
    train_x, train_y, test_x, test_y = split_data(train_processed)

    train_dataset = SpeedDataset(train_x, train_y)
    training_generator = t.utils.data.DataLoader(train_dataset,
                                                 batch_size=8,
                                                 shuffle=True)

    # train_x, train_y, val_x, val_y = split_data(train_x, train_y)
    # val_dataset = SpeedDataset(val_x, val_y)
    # val_generator = t.utils.data.DataLoader(val_dataset, batch_size=8, shuffle=True)