Example #1
def validate_video(video_filepath, ffprobe_path, video_info):
    """
    Take video file and sanity check basic info.

    Args:
        video_filepath:  Path to output video file
                         (Type: str)

        ffprobe_path:    Path to ffprobe executable
                         (Type: str)

        video_info:      Video info dictionary
                         (Type: dict)
    """
    import skvideo
    import skvideo.io

    if not os.path.exists(video_filepath):
        error_msg = 'Output file {} does not exist.'.format(video_filepath)
        raise FfmpegValidationError(error_msg)

    skvideo.setFFmpegPath(os.path.dirname(ffprobe_path))

    # Check to see if we can open the file
    try:
        skvideo.io.vread(video_filepath)
    except Exception as e:
        raise FfmpegUnopenableFileError(video_filepath, e)

    ffprobe_info = ffprobe(ffprobe_path, video_filepath)
    if not ffprobe_info:
        error_msg = 'Could not analyse {} with ffprobe'
        raise FfmpegValidationError(error_msg.format(video_filepath))
    if ffprobe_info["format"]["nb_streams"] < 2:
        error_msg = '{} has no audio streams!'
        raise FfmpegValidationError(error_msg.format(video_filepath))
    # Get the video stream data
    if not ffprobe_info.get('streams'):
        error_msg = '{} has no video streams!'
        raise FfmpegValidationError(error_msg.format(video_filepath))
    ffprobe_info = next(stream for stream in ffprobe_info['streams']
                        if stream['codec_type'] == 'video')

    for k, v in video_info.items():
        output_v = ffprobe_info[k]

        # Convert numeric types to float, since we may get strings from ffprobe
        try:
            v = float(v)
        except ValueError:
            pass
        try:
            output_v = float(output_v)
        except ValueError:
            pass

        if v != output_v:
            error_msg = 'Output video {} should have {} = {}, but got {}.'.format(
                video_filepath, k, v, output_v)
            raise FfmpegValidationError(error_msg)
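
# Hypothetical usage sketch: the paths and the expected-info dict below are
# illustrative assumptions; keys must match ffprobe's video-stream fields.
expected_info = {'width': 1920, 'height': 1080}
validate_video('/tmp/output.mp4', '/usr/bin/ffprobe', expected_info)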
Example #2
    def create(cls, hertz, filename):
        """
        Creates a new instance of a currently recording video.

        hertz: input frame rate, passed to FFmpeg as '-r'
        filename: path of the video file to create
        """

        skvideo.setFFmpegPath('./skvideo/')
        # TODO: Side effect; should try/except or return a Result
        writer = FFmpegWriter(filename, inputdict={'-r': str(hertz)})

        print(f"Creating video file {filename} in directory {os.getcwd()}...")

        return Video(cls.__create_key, filename, writer)
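
    # Hypothetical call site, assuming this is a classmethod on the enclosing
    # Video class:
    #   video = Video.create(hertz=30, filename='session.mp4')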
Example #3
def main():
    skvideo.setFFmpegPath(
        "D:\\Computer Vision\\Homework1\\opencv_traffic_counting\\ffmpeg-20190519-fbdb3aa-win64-static\\bin"
    )
    log = logging.getLogger("main")

    base = np.zeros(SHAPE + (3, ), dtype='uint8')
    exit_mask = cv2.fillPoly(base, EXIT_PTS, (255, 255, 255))[:, :, 0]

    bg_subtractor = cv2.createBackgroundSubtractorMOG2(history=500,
                                                       detectShadows=True)

    pipeline = ProcessPipelineRunner(pipeline=[
        ContourDetection(bg_subtractor=bg_subtractor,
                         save_image=True,
                         image_dir=Image_dir),
        VehicleCounter(exit_masks=[exit_mask], y_weight=2.0),
        Vis(image_dir=Image_dir),
        write_csv(path='./', name='report.csv')
    ],
                                     log_level=logging.DEBUG)

    cap = skvideo.io.vreader(Vid_src)

    # Feed the first `num` frames to the background subtractor so it can learn the background
    bg_Subtractor_train(bg_subtractor, cap, num=1000)

    _frame_number = -1
    frame_number = -1
    for frame in cap:
        if not frame.any():
            log.error("Frame capture failed, stopping...")
            break

        _frame_number += 1

        if _frame_number % 2 != 0:
            continue

        frame_number += 1

        pipeline.set_context({
            'frame': frame,
            'frame_number': frame_number,
        })
        pipeline.run()
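

# A plausible sketch of the bg_Subtractor_train helper called above (the name
# and signature come from the call site; the body is an assumption):
def bg_Subtractor_train(bg_subtractor, cap, num=500):
    # Feed `num` frames to the subtractor so it can learn the background model.
    for i, frame in enumerate(cap):
        bg_subtractor.apply(frame, None, 0.001)
        if i >= num:
            break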
Example #4
def set_skvideo_path(ffmpeg_path=None, libav_path=None):
    """Sets the path to the FFMPEG and/or LibAV libraries.

    If scikit-video complains that either ffmpeg or libav cannot be found,
    you can set the path to the executables directly. On Unix, these binaries
    usually live in /usr/bin. On Windows, point to the directory that contains
    your ffmpeg.exe.

    Parameters
    ----------
    ffmpeg_path : str, optional, default: system's default path
        Path to the directory containing the FFmpeg binaries.
    libav_path : str, optional, default: system's default path
        Path to the directory containing the LibAV binaries.

    """
    if libav_path is not None:
        skvideo.setLibAVPath(libav_path)
    if ffmpeg_path is not None:
        skvideo.setFFmpegPath(ffmpeg_path)
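
# Hypothetical usage: point scikit-video at the directory that holds the
# ffmpeg binary (the path below is illustrative).
set_skvideo_path(ffmpeg_path='/usr/bin')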
Example #5
"""
@author: Mark Lundine
"""

import numpy as np
import cv2
import PIL
import sys
from PIL import Image
from PIL.ExifTags import TAGS
Image.MAX_IMAGE_PIXELS = None
import skvideo
import os
wd = os.getcwd()
ffmpeg_path = os.path.join(wd, 'ffmpeg', 'bin')
skvideo.setFFmpegPath(ffmpeg_path)
import skvideo.io
import glob
from PIL import ImageFilter
from os.path import join
import datetime as dt
### Code adapted from the JavaScript code available at https://github.com/nikolajbech/underwater-image-color-correction

def getColorFilterMatrix(img_array, width, height, avg, MIN_AVG_RED, MAX_HUE_SHIFT, BLUE_MAGIC_VALUE):

    ### Magic values:
    NUM_PIXELS = width * height
    THRESHOLD_RATIO = 2000
    THRESHOLD_LEVEL = NUM_PIXELS/THRESHOLD_RATIO

    ## Objects:
Example #6
import skvideo
import skvideo.io as skv
import numpy as np

from bc.settings import FFMPEG_PATH

skvideo.setFFmpegPath(FFMPEG_PATH)


def write_video(frames, path):
    skv.vwrite(path, np.array(frames).astype(np.uint8))
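

# Hypothetical usage: write 30 black 64x64 RGB frames (the path is illustrative).
frames = [np.zeros((64, 64, 3), dtype=np.uint8) for _ in range(30)]
write_video(frames, 'out.mp4')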
Example #7
import argparse
import torch
import networks
import skvideo
skvideo.setFFmpegPath(r'C:\Program Files\ffmpeg\bin')
import skvideo.io as io
import numpy as np
import matplotlib.cm as cm

# Argument parser
parser = argparse.ArgumentParser()
parser.add_argument('-video_in',
                    required=True,
                    help='Path to the input video file')
parser.add_argument('-video_out',
                    required=True,
                    help='Path to the output video file')
parser.add_argument('-saved_model',
                    required=True,
                    help='Path to the trained model')
parser.add_argument('-device', default=0, type=int, help='CUDA device')
parser.add_argument('-axis',
                    default=1,
                    type=int,
                    help='Axis to stack RGB image and depth prediction')
args = parser.parse_args()

# CUDA device
device = "cuda:{}".format(args.device) if torch.cuda.is_available() else 'cpu'

# Create the reader and writer
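# The snippet is truncated here; a plausible continuation given the imports
# above (an assumption, not the original code):
reader = io.vreader(args.video_in)
writer = io.FFmpegWriter(args.video_out)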
Example #8
import plotly.graph_objs as go

import skvideo
skvideo.setFFmpegPath('/usr/bin/')
from skvideo.io import FFmpegWriter, FFmpegReader

import tempfile

import numpy as np
import time
import imageio

import os
import warnings
import cv2

import math

from multiprocessing import Queue, Process
import datetime

import torch
import visdom  # used by instantiante_visdom below

PYCHARM_VISDOM = 'PYCHARM_RUN'


def instantiante_visdom(port, server='http://localhost'):
    return visdom.Visdom(port=port, server=server, use_incoming_socket=True)
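
# Hypothetical usage (8097 is visdom's default port):
# viz = instantiante_visdom(8097)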


if 'NO_VISDOM' not in os.environ:
Example #9
import os, sys, posixpath
import numpy as np
import cv2
from PIL import Image
from multiprocessing import Pool
import argparse
from IPython import embed #to debug
import skvideo
skvideo.setFFmpegPath('D:\\ffmpeg\\bin')
import skvideo.io
import scipy.misc


def ToImg(raw_flow, bound):
    '''
    Scales the input pixels to 0-255, clipping values to [-bound, bound].

    :param raw_flow: input raw pixel values (not in 0-255)
    :param bound: upper and lower bound (-bound, bound)
    :return: pixel values scaled from 0 to 255
    '''
    flow = raw_flow
    flow[flow > bound] = bound
    flow[flow < -bound] = -bound
    flow += bound  # shift [-bound, bound] to [0, 2*bound]
    flow *= (255 / float(2 * bound))
    return flow
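
# Quick sanity check (illustrative): -bound -> 0, 0 -> 127.5, +bound -> 255.
print(ToImg(np.array([[-20.0, 0.0, 20.0]]), bound=20.0))  # [[  0.  127.5 255. ]]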

def save_flows(flows,image,save_dir,num,bound):
    '''
    To save the optical flow images and raw images
Example #10
sys.path.append(os.path.join(subfolder, "pose-tensorflow"))
sys.path.append(os.path.join(subfolder, "config"))
# Dependencies for video:

import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from tqdm import tqdm

from skimage.draw import circle_perimeter, circle
from VideoProcessor import VideoProcessorSK as vp

from skvideo import setFFmpegPath
setFFmpegPath('/n/helmod/apps/centos7/Core/ffmpeg/2.7.2-fasrc01/bin')

####################################################
# Loading descriptors of model
####################################################

from myconfig_analysis import videofolder, cropping, Task, date, cam_fps, \
    resnet, shuffle, trainingsiterations, pcutoff, deleteindividualframes, x1, x2, y1, y2, videotype, alphavalue, dotsize, colormap
from myconfig_analysis import scorer as humanscorer

# loading meta data / i.e. training & test files
#basefolder = os.path.join('..','pose-tensorflow','models')
#datafolder = os.path.join(basefolder , "UnaugmentedDataSet_" + Task + date)
#Data = pd.read_hdf(os.path.join(datafolder , 'data-' + Task , 'CollectedData_' + humanscorer + '.h5'),'df_with_missing')

# Name for the scorer based on parameters passed in from myconfig_analysis. Make sure they refer to the network of interest.
Example #11
import matplotlib.pyplot as plt
import numpy as np
import scipy.misc

import skvideo.datasets

import skvideo

skvideo.setFFmpegPath('C:/Users/Zber/Documents/ffmpeg/bin')

import skvideo.io
import skvideo.motion
import cv2

try:
    xrange
except NameError:
    xrange = range


def getPlots(motionData):
    motionMagnitude = np.sqrt(np.sum(motionData**2, axis=2))

    fig = plt.figure()
    plt.quiver(motionData[::-1, :, 0], motionData[::-1, :, 1])
    fig.axes[0].get_xaxis().set_visible(False)
    fig.axes[0].get_yaxis().set_visible(False)
    plt.tight_layout()
    fig.canvas.draw()

    # Get the RGBA buffer from the figure
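    # Plausible continuation (an assumption): grab the canvas buffer as an
    # RGBA numpy array via the Agg backend's buffer_rgba().
    buf = np.asarray(fig.canvas.buffer_rgba())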
"""
Created on Thu Sep 24 16:30:25 2020

@author: svc_ccg
"""
import numpy as np
import matplotlib.pyplot as plt
import cv2
from sync_dataset import Dataset as sync_dataset
import probeSync
import os, glob
import json
from numba import njit

import skvideo
skvideo.setFFmpegPath(r'C:\Users\svc_ccg\Documents\ffmpeg\bin')
import skvideo.io

annotated_video_dirs = [
r"\\10.128.50.20\sd7\habituation\1050235366_530862_20200914",
r"\\10.128.50.20\sd7\habituation\1050264010_524926_20200914",
r"\\10.128.50.20\sd7\habituation\1051845969_532246_20200921",
r"\\10.128.50.20\sd7\habituation\1052096186_533537_20200922"
]

annotation_category_dict = {
                'no label': 0,
                'tongue': 1,
                'paw': 2,
                'groom': 3,
                'air lick': 4,
Example #13
from imutils.video import FPS
import argparse
import os
import imutils
import cv2
from keras.applications.vgg16 import VGG16
from scipy import misc # run "pip install pillow"
from imutils import face_utils
import dlib
import skvideo
import glob
import numpy as np
from keras.models import load_model
from sklearn.metrics import confusion_matrix, classification_report
import pandas
skvideo.setFFmpegPath("C:\\ffmpeg\\bin")

predictor_path = 'C:/Users/gayat/Downloads/shape_predictor_68_face_landmarks.dat\shape_predictor_68_face_landmarks.dat'
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(predictor_path)

RECTANGLE_LENGTH = 90

# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-n", "--num-frames", type=int, default=20,
				help="# of frames to loop over for FPS test")
ap.add_argument("-d", "--display", type=int, default=-1,
				help="Whether or not frames should be displayed")
args = vars(ap.parse_args())
Example #14
import os
from torch.utils.data import Dataset
import librosa
import scipy.io.wavfile as scp
import torch
import math
import pickle
from utils.NoiseInjection import *
import numpy as np  # np is used explicitly below
import skvideo
skvideo.setFFmpegPath('C:/FFmpeg/bin/')
import skvideo.io
import torchvision.transforms
import re

#Audio ground truth
AudioGt = {}
AudioGt["Speaker1"] = np.array(
    [[1.7, 2.7], [5, 6.9], [10.3, 14.1], [18.3, 20.0], [23.5, 25.2],
     [30.4, 32.6], [38.3, 41], [46.2, 50.1], [55.2, 57.3], [63.7, 65.2],
     [68.3, 69.2], [72.6, 74.7], [78.2, 81.4], [86, 87.5], [91.1, 93],
     [96.8, 99], [102.6, 103.6], [106.5, 108.3], [112.4, 115.9],
     [118.9, 119.9]],
    dtype=np.float32)
AudioGt["Speaker2"] = np.array(
    [[1.4, 2.4], [2.9, 5.5], [9.5, 11.4], [11.7, 13.4], [18.2, 24.6],
     [28, 28.8], [32, 35.7], [39.7, 44.9], [49.3, 50.3], [55.3, 57.1],
     [60.6, 65.2], [69.3, 72.8], [77.6, 84], [87.5, 88.8], [94, 99.4],
     [104.1, 107.7], [112.8, 116]],
    dtype=np.float32)
AudioGt["Speaker3"] = np.array(
    [[1.7, 7.2], [9.9, 19.7], [25.1, 29], [32.3, 39.5], [44.1, 48.2],
Example #15
import os, sys
import numpy as np
from multiprocessing import Pool

from PIL import Image
import skvideo

skvideo.setFFmpegPath(r'C:\ffmpeg-4.2-win64-shared\bin')
import skvideo.io

root_dir = r'E:\talking_heads\data\voxceleb2\vox2_dev_mp4'
output_dir = r'E:\talking_heads\data\voxceleb2\vox2_dev_frames'
k = 6
sample_count = 20


def initialize():
    global pid
    pid = os.getpid()
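

# A plausible driver (an assumption; the original __main__ block is not shown).
# It would sit at the end of the file, after main_routine is defined:
#
#     if __name__ == '__main__':
#         with Pool(processes=k, initializer=initialize) as pool:
#             pool.map(main_routine, enumerate(os.listdir(root_dir)))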


def main_routine(input_var):
    i, pv_id = input_var
    video_root = os.path.join(root_dir, pv_id)
    output_root = os.path.join(output_dir, pv_id)
    videos = os.listdir(video_root)

    print('%d: %d %s %d' % (pid, i, video_root, len(videos)))

    # extract
    for video in videos:
Example #16
import tensorflow as tf

videoPath = "C:\\Vomit\\Vomit7.mp4"

from keras.models import load_model

model = load_model('C:\\Vomit\\my_model.h5')
model.summary()
#test = np.zeros(1024,)
test = []
#test = np.expand_dims(test,axis = 0)
#predict = model.predict(test)
#predict[0]
testFrames = []
#testFrames.shape
import skvideo
skvideo.setFFmpegPath(r'C:\Vomit\ffmpeg-4.2.2-win64-static\bin')
import skvideo.io

#testFrames = skvideo.io.vread(videoPath, height = 224, width = 224)
testFrames = skvideo.io.vread(videoPath,
                              outputdict={"-pix_fmt": "gray"})[:, :, :, 0]
i = 0
fileJson = {}
fileJson["Vomit7.mp4"] = []
print(testFrames.shape)

while (i < testFrames.shape[0]):
    #test = testFrames[i:i+5,:,:,:]

    test = testFrames[i, :, :]
    print(test.shape)
Example #17
import cv2
print(cv2.__version__)
import numpy as np
import h5py
import matplotlib
matplotlib.use('PS')
import matplotlib.pyplot as plt
from PIL import Image
from tqdm import tqdm
import skvideo
skvideo.setFFmpegPath('../../usr/bin')
import skvideo.io
from matplotlib.pyplot import imshow
import os


def array_to_vid(a, output_directory, output_name):
    skvideo.io.vwrite(output_directory+'/'+output_name+".mp4", a)

def frstack_to_subdir(frame, subdir_name, output_dir):

    subdirectory = output_dir + '/' + subdir_name + '/'
    os.makedirs(subdirectory)

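    # Note: the input stack stores the middle frame last (order: first, last,
    # middle), so indices 1 and 2 are swapped when unpacking below.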
    frame0 = frame[0,:,:,:]
    frame2 = frame[1,:,:,:]
    frame1 = frame[2,:,:,:]

    Image.fromarray(frame0).save(subdirectory+'frame0.png')
    Image.fromarray(frame1).save(subdirectory+'frame1.png')
    Image.fromarray(frame2).save(subdirectory+'frame2.png')
Example #18
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 24 17:42:10 2020

@author: svc_ccg
"""

from __future__ import division
import fileIO
import skvideo

skvideo.setFFmpegPath('C:\\Users\\svc_ccg\\Desktop\\ffmpeg\\bin')
import skvideo.io

savePath = fileIO.saveFile('Save movie as', fileType='*.mp4')

inputParams = {'-r': '30'}
outputParams = {
    '-r': '30',
    '-pix_fmt': 'yuv420p',
    '-vcodec': 'libx264',
    '-crf': '23',
    '-preset': 'slow'
}

# '-pix_fmt': 'yuv420p' important to avoid green screen on mac; number of pixels needs to be even

v = skvideo.io.FFmpegWriter(savePath,
                            inputdict=inputParams,
                            outputdict=outputParams)
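
# Plausible continuation (an assumption): feed frames to the writer, then
# finalize the file. The dummy frames below are only illustrative.
import numpy as np  # assumed available; used only for the dummy frames
for frame in np.zeros((30, 480, 640, 3), dtype=np.uint8):  # 30 black frames
    v.writeFrame(frame)
v.close()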
Example #19
import skvideo
import glob
import os
import numpy as np
from moviepy.editor import ImageClip, concatenate_videoclips, VideoFileClip
import pandas as pd
import cv2

skvideo.setFFmpegPath(r'C:\Users\Chris\Documents\projects\cs172b\ffmpeg-4.2.2-win64-shared\bin')

import skvideo.io

def convert_data(data_path):
    videos = glob.glob(data_path)
    size = 224

    for v in videos:
        # if '13_4m_r' not in v:
        #     continue
        # print (v)
        file_path = '.'.join(v.split('.')[:-2])

        videodata = skvideo.io.vread(v)
        cam = cv2.VideoCapture(v)
        fps = cam.get(cv2.CAP_PROP_FPS)
        framelength = videodata.shape[0]
        try:
            df = pd.read_csv(file_path + '.csv', delimiter=',')
        except FileNotFoundError:
            print('Failed to find csv for', os.path.basename(v))
            continue  # df would be undefined below
        threshold = df.max()['ECG']*0.6
Example #20
# TO DO:
# (1) report potential # missed frames (maybe use counter to count Line 1 edges and write to video file)
# (2) try using ImageEvent instead of blocking GetNextImage(timeout) call
# (3) explicitly setup camera onboard buffer
# (4) use multiprocess or other package to implement better parallel processing
# (5) try FFMPEG GPU acceleration: https://developer.nvidia.com/ffmpeg
# =============================================================================

import PySpin, time, os, threading, queue
from datetime import datetime
import tkinter as tk
from PIL import Image, ImageTk
import numpy as np
import skvideo

skvideo.setFFmpegPath('C:/Anaconda3/Lib/site-packages/ffmpeg')  # set path to ffmpeg installation before importing io
import skvideo.io

#constants
SAVE_FOLDER_ROOT = 'C:/video'
FILENAME_ROOT = 'mj_'  # optional identifier
EXPOSURE_TIME = 500  #in microseconds
WAIT_TIME = 0.0001  #in seconds - this limits polling time and should be less than the frame rate period
GAIN_VALUE = 0  #in dB, 0-40;
GAMMA_VALUE = 0.4  #0.25-1
IMAGE_HEIGHT = 400  #540 pixels default; this should be divisible by 16 for H264 compressed encoding
IMAGE_WIDTH = 400  #720 pixels default; this should be divisible by 16 for H264 compressed encoding
HEIGHT_OFFSET = 72  #round((540-IMAGE_HEIGHT)/2) # Y, to keep in middle of sensor; must be divisible by 4
WIDTH_OFFSET = 160  # round((720-IMAGE_WIDTH)/2) # X, to keep in middle of sensor; must be divisible by 4
FRAMES_PER_SECOND = 250  #this is determined by triggers sent from behavior controller
FRAMES_TO_RECORD = 600 * FRAMES_PER_SECOND  #frame rate * num seconds to record; this should match # expected exposure triggers from DAQ counter output
Example #21
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 20 18:40:20 2019

@author: secret_wang
"""
import os
import cv2
import time
import torch
import numpy as np
import skvideo
skvideo.setFFmpegPath("C:/Program Files/ffmpeg/bin")
import skvideo.io
from tqdm import tqdm
from torch.utils.data import Dataset, DataLoader


def find_idx(idx, len_sep):
    start, end = 0, len(len_sep) - 1
    while start <= end:
        mid = start + (end - start) // 2
        if len_sep[mid] < idx:
            start = mid + 1
        elif len_sep[mid] > idx:
            end = mid - 1
        else:
            out1, out2 = mid, idx - len_sep[mid]
            if out2 < 0:
                out1 = out1 - 1
Example #22
import numpy as np
import cv2

import skvideo

# Set the FFmpeg path before importing skvideo.io so the probe picks it up
skvideo.setFFmpegPath(r'D:\Program\ffmpeg-N-101375-g82131293b0-win64-gpl\bin')

import skvideo.io


def video_info_parsing(video_info, num_person_in=5, num_person_out=2):
    data_numpy = np.zeros((3, len(video_info['data']), 18, num_person_in))
    for frame_info in video_info['data']:
        frame_index = frame_info['frame_index']
        for m, skeleton_info in enumerate(frame_info["skeleton"]):
            if m >= num_person_in:
                break
            pose = skeleton_info['pose']
            score = skeleton_info['score']
            data_numpy[0, frame_index, :, m] = pose[0::2]
            data_numpy[1, frame_index, :, m] = pose[1::2]
            data_numpy[2, frame_index, :, m] = score

    # centralization
    data_numpy[0:2] = data_numpy[0:2] - 0.5
    data_numpy[0][data_numpy[2] == 0] = 0
    data_numpy[1][data_numpy[2] == 0] = 0

    sort_index = (-data_numpy[2, :, :, :].sum(axis=1)).argsort(axis=1)
    for t, s in enumerate(sort_index):
        data_numpy[:, t, :, :] = data_numpy[:, t, :, s].transpose((1, 2, 0))
    data_numpy = data_numpy[:, :, :, :num_person_out]
Example #23
def validate_video(video_filepath,
                   ffprobe_path,
                   video_info,
                   end_past_video_end=False):
    """
    Take video file and sanity check basic info.

    Args:
        video_filepath:  Path to output video file
                         (Type: str)

        ffprobe_path:    Path to ffprobe executable
                         (Type: str)

        video_info:      Video info dictionary
                         (Type: dict)

        end_past_video_end:  If True, don't fail when the expected duration
                             extends past the end of the video
                             (Type: bool)
    """
    import skvideo
    import skvideo.io

    if not os.path.exists(video_filepath):
        error_msg = 'Output file {} does not exist.'.format(video_filepath)
        raise FfmpegValidationError(error_msg)

    skvideo.setFFmpegPath(os.path.dirname(ffprobe_path))

    # Check to see if we can open the file
    try:
        skvideo.io.vread(video_filepath)
    except Exception as e:
        raise FfmpegUnopenableFileError(video_filepath, e)

    ffprobe_info = ffprobe(ffprobe_path, video_filepath)
    if not ffprobe_info:
        error_msg = 'Could not analyse {} with ffprobe'
        raise FfmpegValidationError(error_msg.format(video_filepath))

    # Get the video stream data
    if not ffprobe_info.get('streams'):
        error_msg = '{} has no video streams!'
        raise FfmpegValidationError(error_msg.format(video_filepath))
    ffprobe_info = next(stream for stream in ffprobe_info['streams']
                        if stream['codec_type'] == 'video')

    # If duration specifically doesn't match, catch that separately so we can
    # retry with a different duration
    target_duration = video_info['duration']
    try:
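        # Note: dict.get evaluates its default eagerly, so a missing
        # 'avg_frame_rate' key raises KeyError here even when 'r_frame_rate'
        # is present; the except clause below covers that case.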
        actual_fr_ratio = ffprobe_info.get('r_frame_rate',
                                           ffprobe_info['avg_frame_rate'])
        fr_num, fr_den = actual_fr_ratio.split('/')
        actual_framerate = float(fr_num) / float(fr_den)
    except KeyError:
        error_msg = 'Could not get frame rate from {}'
        raise FfmpegValidationError(error_msg.format(video_filepath))
    actual_duration = float(ffprobe_info['nb_frames']) / actual_framerate
    if target_duration != actual_duration:
        if not (end_past_video_end) and actual_duration < target_duration:
            raise FfmpegIncorrectDurationError(video_filepath, target_duration,
                                               actual_duration)

    for k, v in video_info.items():
        if k == 'duration' and (end_past_video_end
                                and actual_duration < target_duration):
            continue

        output_v = ffprobe_info[k]

        # Convert numeric types to float, since we may get strings from ffprobe
        try:
            v = float(v)
        except ValueError:
            pass
        try:
            output_v = float(output_v)
        except ValueError:
            pass

        if v != output_v:
            error_msg = 'Output video {} should have {} = {}, but got {}.'.format(
                video_filepath, k, v, output_v)
            raise FfmpegValidationError(error_msg)
Example #24
                    help='scene-detection threshold')
parser.add_argument('--rescene',
                    dest='rescene',
                    type=str,
                    default="mix",
                    help="copy/mix   duplicate frames / blend frames")
parser.add_argument('--exp',
                    dest='exp',
                    type=int,
                    default=1,
                    help='interpolate 2**exp - 1 intermediate frames')

args = parser.parse_args()
assert args.scale in [0.25, 0.5, 1.0, 2.0, 4.0]
spent = time.time()
skvideo.setFFmpegPath(args.ffmpeg)

if not os.path.exists(args.output):
    os.mkdir(args.output)

if args.device_id != -1:
    device = torch.device("cuda")
    torch.cuda.set_device(args.device_id)
    torch.set_grad_enabled(False)
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = True
    if args.fp16:
        torch.set_default_tensor_type(torch.cuda.HalfTensor)
    try:
        from model.RIFE_HDv2 import Model
        model = Model()
Example #25
import os
import sys
import time
import numpy as np
import cv2
import skvideo
skvideo.setFFmpegPath(os.path.dirname(sys.executable))
import skvideo.io
from face_detector_YOLOv2 import YoloFace
from face_tracker_DLib import DLibFaceTracker
from itertools import count
'''
Allows this file to be imported from anywhere; the model location is updated to be generalizable.
'''
file_path = os.path.realpath(__file__)
model_path = '/YOLO_face_detection/darknet_face_release'
end_ind = file_path.rfind(
    'detection_tracking'
)  #file_path.find(substring, string.find(substring) + 1)
end_ind = file_path[0:end_ind].rfind('detection_tracking')
#print(file_path)
#print("FINAL PATH")
#print(file_path[0:end_ind] + model_path)
#print("#########################################")
yolo_model = YoloFace(file_path[0:end_ind] + model_path)

#print()

#yolo_model = YoloFace('../YOLO_face_detection/darknet_face_release')

Example #26
import pickle
from attrdict import AttrDict
#from cnn.model_cnn import TrajEstimator
#from cnn.model_cnn_moving_threshold import TrajEstimatorThreshold
from vgg_analysis.model import TrajectoryGenerator #as TrajectoryGenerator_sgan
from model_sgan_tokka import TrajectoryGenerator_sgan
from semantic_embedding.model import TrajectoryGenerator_sem
from vgg_analysis.model_rnn import TrajectoryGenerator_R
from vgg_analysis.old_model_segnet_camvid.model_segnet import TrajectoryGenerator_seg_camvid
from vgg_analysis.model_segnet import TrajectoryGenerator_seg #full segnet

from vgg_analysis.model_gt import TrajectoryGenerator_gt
from utils import relative_to_abs
import torch
import skvideo
skvideo.setFFmpegPath('C:/Users/arsal/Anaconda3/pkgs/ffmpeg-2.7.0-0/Scripts')
import skvideo.io
import cv2
#"C:\Users\arsal\PycharmProjects\SGAN-AE-master\SGAN-AE-master\scene\full-psp\out-hotel"
"""RNN-AE-PSP"""
with open("C://Users//arsal//PycharmProjects//SGAN-AE-master//SGAN-AE-master//scene//full-psp//out-eth", 'rb') as open_file:
    dat_seg = torch.Tensor(pickle.load(open_file))

"""VGG-16"""
path_traj_vid = "D:/traj_vid"
dat = cv2.imread(path_traj_vid + "/eth/frame0.jpg")
dat = cv2.resize(dat, (224, 224))
dat = torch.tensor(dat)

"""SEG-Net-full"""
#with (open("C:/Users/arsal/PycharmProjects/SGAN-AE-master/SGAN-AE-master/trained_models/Tokka/SegNet-AE-Scene/hotel/hotel.pkl",'rb+')) as ff: (Segnet-camvid)
Example #27
import os
import skvideo

skvideo.setFFmpegPath("/home/ercong.cc/software/ffmpeg/bin")
import skvideo.io
import os.path
import random
import cv2
import numpy as np


def get_class_Ind():
    path = os.listdir('hmdb51')
    path.sort()
    return path
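
# Illustrative usage (the class names are HMDB51 folder names):
# classes = get_class_Ind()  # e.g. ['brush_hair', 'cartwheel', 'catch', ...]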


def get_train_list(path, split='_test_split1'):
    train_list = open('train_list1.txt', 'w+')
    test_list = open('test_list1.txt', 'w+')
    for i in range(len(path)):
        file_name = path[i] + split
        file_name = 'test_train_splits/' + file_name + '.txt'
        with open(file_name) as file:
            path_list = [row.strip() for row in list(file)]
            file_path_list = [row.split(' ')[0] for row in path_list]
            file_label_list = [row.split(' ')[1] for row in path_list]
            for j in range(len(file_path_list)):
                if file_label_list[j] == '1':
                    string_name = path[i] + '/' + file_path_list[
                        j] + ' ' + str(i)
Example #28
import numpy as np
import pandas as pd
import pickle
from matplotlib import pyplot as plt
from datetime import datetime
from collections import OrderedDict
import skvideo
skvideo.setFFmpegPath("/usr/local/bin")  # set before importing skvideo.io
import skvideo.io
import copy

# Local import
from algorithms.mcts import MCTS, CalculateScore, GetActionPrior, SelectNextAction, SelectChild, Expand, RollOut, backup, \
    InitializeChildren, HeuristicDistanceToTarget
import envMujoco as env
import reward
from envSheepChaseWolf import stationaryWolfPolicy, WolfPolicyForceDirectlyTowardsSheep, DistanceBetweenActualAndOptimalNextPosition, \
    GetAgentPosFromTrajectory, GetAgentPos, GetAgentActionFromTrajectoryDf
from envMujoco import Reset, TransitionFunction, IsTerminal
from play import SampleTrajectory


def drawHistogram(dataDf, axForDraw):  # Line
    print('dataDf', dataDf)
    countDf = dataDf['action'].value_counts()
    countDf.plot.bar(ax=axForDraw, rot='horizontal')


class RunTrial:
Example #29
import numpy as np
import pandas as pd
from tqdm import tqdm
import os
import cv2

import local_processing.analysis_util.analysis_util as au
from skimage.draw import circle, line, bezier_curve
import skvideo
skvideo.setFFmpegPath('C:/Program Files/ffmpeg/bin/')
from local_processing.video_maker.video_processor import VideoProcessorSK as vp
import matplotlib.pyplot as plt

base_folder = 'Z:/Data/BarthAirPuff/'
task = 'air-puff'
date = 'Dec7'
shuffle = 1
train_fraction = 0.95
snapshot_index = 0
video_name = '9 psi.MOV'
pcutoff = 0.3
dotsize = 4
resnet = 50
snapshot = 600000

# for ts plotting
pick_bodypart = 'tip11'
def_color = [255, 0, 0]


def create_video(clip, data_frame):
Example #30
import os
from torch.utils.data import Dataset
import librosa
import scipy.io.wavfile as scp
import torch
import math
import pickle
from utils.NoiseInjection import *
import numpy as np  # np is used explicitly below
import skvideo
skvideo.setFFmpegPath('C:/Deep/Tools/ffmpeg/bin/')
import skvideo.io
import torchvision.transforms
import re

#Audio ground truth
AudioGt = {}
AudioGt["Speaker1"] = np.array(
    [[1.7, 2.7], [5, 6.9], [10.3, 14.1], [18.3, 20.0], [23.5, 25.2],
     [30.4, 32.6], [38.3, 41], [46.2, 50.1], [55.2, 57.3], [63.7, 65.2],
     [68.3, 69.2], [72.6, 74.7], [78.2, 81.4], [86, 87.5], [91.1, 93],
     [96.8, 99], [102.6, 103.6], [106.5, 108.3], [112.4, 115.9],
     [118.9, 119.9]],
    dtype=np.float32)
AudioGt["Speaker2"] = np.array(
    [[1.4, 2.4], [2.9, 5.5], [9.5, 11.4], [11.7, 13.4], [18.2, 24.6],
     [28, 28.8], [32, 35.7], [39.7, 44.9], [49.3, 50.3], [55.3, 57.1],
     [60.6, 65.2], [69.3, 72.8], [77.6, 84], [87.5, 88.8], [94, 99.4],
     [104.1, 107.7], [112.8, 116]],
    dtype=np.float32)
AudioGt["Speaker3"] = np.array(
    [[1.7, 7.2], [9.9, 19.7], [25.1, 29], [32.3, 39.5], [44.1, 48.2],