Example #1
def figure1k1(info,path):
    if not os.path.exists(path):
        os.makedirs(path)
        
    sessions = info.groupby(level=['subject','session'])
    for (subject,session),sinfo in sessions:
        subjectpath = os.path.join(activitymovies.datafolder,subject)
        sact = activitytables.read_subjects(subjectpath,days=[session])
        scr = activitytables.read_subjects(subjectpath,days=[session],
                                           selector = activitytables.crossings)
        features = activitytables.stepfeature(sact,scr,4,3)
        leftwards = features.side == 'leftwards'
        features.xhead[leftwards] = max_width_cm - features.xhead[leftwards]
        median = np.sort(features.xhead)[len(features) // 2 + 1]
        median = features[features.xhead == median].iloc[-1,:]
        
        vidpaths = activitymovies.getmoviepath(sinfo)
        timepaths = activitymovies.gettimepath(sinfo)
        movie = [video.video(mpath,mtimepath)
                 for mpath,mtimepath in zip(vidpaths,timepaths)][0]
        medianframe = movie.frame(sact.index.get_loc(median.name))
        flip = median.side == 'rightwards'
        stepindex = 3 if flip else 4
        medianframe = activitytables.cropstep(medianframe,stepindex,flip=flip)
        
        fname = "{0}_session_{1}_median_step_posture.png".format(
            subject, session)
        fpath = os.path.join(path,subject)
        if not os.path.exists(fpath):
            os.makedirs(fpath)
        fpath = os.path.join(fpath,fname)
        cv2.imwrite(fpath,medianframe)
def savefirstpoke(path):
    subfolders = os.walk(path)
    name = os.path.split(path)[1]
    # os.walk yields the starting directory first, then its subfolders;
    # advance twice to land on the first nested session folder
    path = next(subfolders)[0]
    path = next(subfolders)[0]
    basepath = r'C:/Users/IntelligentSystem/Desktop'
    leftrewards = os.path.join(path,'left_rewards.csv')
    videopath = os.path.join(path,'top_video.avi')
    timepath = os.path.join(path,'top_video.csv')
    frontvideo = video.video(videopath,timepath)
    with open(leftrewards) as f:
        timestamp = f.readline().strip()  # timestamp of the first poke
        pokeframe = frontvideo.frameindex(timestamp)
        vidutils.savemovie(videopath, 0, pokeframe - 120, pokeframe + 120,
                           os.path.join(basepath, name + '.avi'))
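savefirstpoke descends two directory levels with back-to-back next() calls on the os.walk generator. A minimal sketch of why that works (the folder name below is hypothetical):

import os

# os.walk yields (dirpath, dirnames, filenames) tuples, starting with the
# root directory itself before descending into each subdirectory.
walker = os.walk('/data/sessions')    # hypothetical root folder
root = next(walker)[0]                # first yield: '/data/sessions' itself
first_child = next(walker)[0]         # second yield: its first subfolder
print(root, first_child)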
def clusterroiframes(act, roiactivity, info, leftroi, rightroi, roicenter_cm, cropframes):
    # Compute step times
    roidiff = roiactivity.diff()
    roipeaks = activitytables.findpeaks(roidiff, 1500)
    pksloc = [[roidiff.index.get_loc(peak) for peak in roi] for roi in roipeaks]

    # Tile step frames
    vidpaths = activitymovies.getmoviepath(info)
    timepaths = activitymovies.gettimepath(info)
    backpaths = activitymovies.getbackgroundpath(info)
    videos = [video.video(path, timepath) for path, timepath in zip(vidpaths, timepaths)]

    def getroiframes(roiindex, flip=False):
        roicenterxcm = roicenter_cm[roiindex][1]
        headdistance = [act.xhead[p] - roicenterxcm for p in pksloc[roiindex]]
        print(headdistance)
        framehead = [
            p
            for i, p in enumerate(pksloc[roiindex])
            if (-25 < headdistance[i] < -5 if not flip else 5 < headdistance[i] < 25)
        ]

        frames = [cropframes(videos[0].frame(p), roiindex) for p in framehead]
        backgrounds = [
            cropframes(activitymovies.getbackground(backpaths[0], videos[0].timestamps[p]), roiindex) for p in framehead
        ]
        frames = [cv2.subtract(f, b) for f, b in zip(frames, backgrounds)]
        if flip:
            frames = [cv2.flip(f, 1) for f in frames]
        return frames, framehead

    leftframes, leftindices = getroiframes(leftroi, False)
    rightframes, rightindices = getroiframes(rightroi, True)
    print "==========================="
    frames = np.array(leftframes + rightframes)
    frameindices = np.array(leftindices + rightindices)
    sortindices = np.argsort(frameindices)
    frames = frames[sortindices]
    frameindices = frameindices[sortindices]

    Z, R, labels, h = imgproc.cluster(frames, videos[0], frameindices)
    return frames, roidiff, roipeaks, pksloc, Z, R, labels
def roiframes(activity,crossings,info,leftroi,rightroi,roiframeindices,croproi,
               cropsize=(300,300),subtractBackground=False):
    # Tile step frames    
    vidpaths = activitymovies.getmoviepath(info)
    timepaths = activitymovies.gettimepath(info)
    backpaths = activitymovies.getbackgroundpath(info)
    videos = [video.video(path,timepath) for path,timepath in zip(vidpaths,timepaths)]
    
    frames = []
    indices, sides = roiframeindices(activity, crossings, leftroi, rightroi)
    for frameindex, side in zip(indices, sides):
        leftwards = side == 'leftwards'
        roiindex = leftroi if leftwards else rightroi
        
        frame = videos[0].frame(frameindex)
        background = None
        if subtractBackground:
            timestamp = videos[0].timestamps[frameindex]
            background = activitymovies.getbackground(backpaths[0],timestamp)
        frame = croproi(frame,roiindex,cropsize,background,roiindex == rightroi)
        frames.append(frame)
    return frames
def clusterstepframes(cr,info,leftstep,rightstep):
    # Compute step times
    stepactivity = cr.iloc[:,16:24]  # the step activation columns
    stepdiff = stepactivity.diff()
    steppeaks = siphon.findpeaksMax(stepdiff,1500)
    pksloc = [[stepdiff.index.get_loc(peak) for peak in step] for step in steppeaks]    
    
    # Tile step frames
    vidpaths = activitymovies.getmoviepath(info)
    timepaths = activitymovies.gettimepath(info)
    backpaths = activitymovies.getbackgroundpath(info)
    videos = [video.video(path,timepath) for path,timepath in zip(vidpaths,timepaths)]

    def getstepframes(stepindex,flip=False):
        stepcenterxcm = stepcenters[stepindex][1] * preprocess.width_pixel_to_cm
        framehead = [p for p in pksloc[stepindex]
                     if (cr.xhead[p] < stepcenterxcm if not flip else cr.xhead[p] > stepcenterxcm)]
        
        frames = [imgproc.croprect(stepcenters[stepindex],(200,200),videos[0].frame(p))
                  for p in framehead]
        backgrounds = [imgproc.croprect(stepcenters[stepindex],(200,200),activitymovies.getbackground(backpaths[0],videos[0].timestamps[p]))
                       for p in framehead]
        frames = [cv2.subtract(f,b) for f,b in zip(frames,backgrounds)]
        if flip:
            frames = [cv2.flip(f,1) for f in frames]
        return frames,framehead

    leftframes,leftindices = getstepframes(leftstep,False)
    rightframes,rightindices = getstepframes(rightstep,True)
    frames = np.array(leftframes + rightframes)
    frameindices = np.array(leftindices + rightindices)
    sortindices = np.argsort(frameindices)
    frames = frames[sortindices]
    frameindices = frameindices[sortindices]
    
    R,labels,h = imgproc.cluster(frames,videos[0],frameindices)
    return frames,stepdiff,steppeaks,pksloc,R,labels
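Both clustering helpers rely on opaque peak detectors (activitytables.findpeaks, siphon.findpeaksMax) applied to a diffed activity trace. A minimal sketch of what a threshold-based peak finder over a DataFrame might look like; the name and exact semantics are assumptions, not the libraries' actual implementations:

def findpeaks_sketch(diff, threshold):
    # diff: pandas DataFrame of per-frame differences, one column per sensor.
    # For each column, collect the index labels where the difference first
    # rises above the threshold (one label per excursion above it).
    peaks = []
    for column in diff:
        above = diff[column] > threshold
        onsets = above & ~above.shift(1, fill_value=False)
        peaks.append(diff.index[onsets].tolist())
    return peaks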
Example #6
    def get_videos(self):
        """Docstring for get_videos.

        :returns: returns video frames in each sub folder of vot directory

        """

        logger = self.logger
        vot_folder = self.vot_folder
        sub_vot_dirs = self.find_subfolders(vot_folder)
        for vot_sub_dir in sub_vot_dirs:
            video_path = glob.glob(
                os.path.join(vot_folder, vot_sub_dir, '*.jpg'))
            objVid = video(video_path)
            list_of_frames = sorted(video_path)
            if not list_of_frames:
                # logger.error('vot folders should contain only .jpg images')
                print('ERROR: vot folders should contain only .jpg images')
                continue

            objVid.all_frames = list_of_frames
            bbox_gt_file = os.path.join(vot_folder, vot_sub_dir,
                                        'groundtruth.txt')
            with open(bbox_gt_file, 'r') as f:
                for i, line in enumerate(f):
                    co_ords = line.strip().split(',')
                    co_ords = [float(co_ord) for co_ord in co_ords]
                    ax, ay, bx, by, cx, cy, dx, dy = co_ords
                    # axis-aligned box around the four polygon corners;
                    # the -1 presumably shifts 1-based VOT coordinates to 0-based
                    x1 = min(ax, bx, cx, dx) - 1
                    y1 = min(ay, by, cy, dy) - 1
                    x2 = max(ax, bx, cx, dx) - 1
                    y2 = max(ay, by, cy, dy) - 1
                    bbox = BoundingBox(x1, y1, x2, y2)
                    bbox.frame_num = i
                    objVid.annotations.append(bbox)
            self.videos[vot_sub_dir] = [objVid.all_frames, objVid.annotations]
        return self.videos
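A hedged usage sketch for the loader above; the class name and constructor argument are assumptions for illustration only:

loader = VotLoader(vot_folder='/data/vot2015')   # hypothetical class/ctor
videos = loader.get_videos()
for name, (frames, annotations) in videos.items():
    print(name, len(frames), 'frames,', len(annotations), 'boxes')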
Example #7
from playlist import Plist
from video import video
import sys

while True:
    isVidOrPlay = input(
        'Do you need to download a playlist or Video: ').lower()

    if isVidOrPlay == 'pl':
        url = input('Enter the playlist URL: ')
        Plist(url)

    elif isVidOrPlay == 'vi':
        url = input('Enter the video URL: ')
        video(url)
    else:
        print("please enter 'pl' for a playlist or 'vi' for a video")
        continue
    repeat = input(
        'Do you need to download another Video or Playlist [enter Y for yes or N for No]: '
    ).lower()

    x = 1
    while repeat != 'y' and repeat != 'n' and x <= 3:
        repeat = input("please enter 'Y' or 'N': ").lower()
        if x == 3:
            print('Sorry.. invalid input')
            sys.exit()
        x += 1
    if repeat == 'n':
        break
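The retry loop above could be factored into a small helper; a minimal sketch (the helper name is ours, not part of the original script):

import sys

def ask_yes_no(prompt, attempts=3):
    # Ask until the user answers 'y' or 'n'; give up after `attempts` tries.
    for _ in range(attempts):
        answer = input(prompt).lower()
        if answer in ('y', 'n'):
            return answer == 'y'
    print('Sorry.. invalid input')
    sys.exit()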
Example #8
#coding=utf-8
import feature,  video
import sys, os, logging
import numpy as np
from sqlite3 import dbapi2 as sqlite3

logging.basicConfig(level=logging.DEBUG,
    format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
    datefmt='%a, %d %b %Y %H:%M:%S',
    filename='./log/classify.log',
    filemode='w')

if len(sys.argv) > 3:
    name = sys.argv[1]
    out = sys.argv[2]
    fname = sys.argv[3]
else:
    sys.exit()
v = video.video(name)
v.run(out)


pwd = os.getcwd()
rv = sqlite3.connect(pwd + '/tasklist.db')

rv.execute('update tasks set prograss = "1" where filename = "%s"' %fname)
rv.commit()
rv.close()
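String-formatting values into SQL is fragile; sqlite3 also accepts '?' placeholders, so a safer variant of the update above (keeping the 'prograss' column name the script's schema uses) would be:

rv.execute('update tasks set prograss = ? where filename = ?', ('1', fname))
rv.commit()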

Example #9
def escuchar(gui):

    session_user = servidor_descubrimiento.read_session_user()

    # Argument check
    if (not session_user['ip']) or (not session_user['puerto']):
        return -1

    # Copied again from servidor_descubrimiento
    my_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

    address = (session_user['ip'], int(session_user['puerto']))
    my_socket.bind(address)

    # Assume we will only listen for a single request
    my_socket.listen(1)

    conn, address = my_socket.accept()

    # Main loop
    while True:
        # Receive the request
        peticion = conn.recv(1024)  # 1024 bytes should be enough
        """
            Possibilities:
                -CALLING <user>:
                    + We reject => CALL_DENIED <our nickname>
                    + We accept => CALL_ACCEPTED <our nickname>
                      We look the caller up
                      and start transmitting video.

                -CALL_BUSY:
                    + report it and move on

                -CALL_END:
                    + report it and move on

                -CALL_DENIED:
                    + report it and move on
        """

        peticion_aux = peticion.decode(encoding='utf-8').split(" ")

        # CALLING case
        if peticion_aux[0] == "CALLING":

            # We are receiving a call; answer yes or no
            acepta = gui.app.yesNoBox(
                "CALL", "Accept the call from " + peticion_aux[1] + "?")
            # Call declined
            if not acepta:
                msg = "CALL_DENIED " + session_user['nickname']

                conn.sendall(msg.encode(encoding='utf-8'))
            # Call accepted
            else:
                calling_user = servidor_descubrimiento.query(peticion_aux[1])

                msg = "CALL_ACCEPTED " + session_user['nickname'] + " " + str(
                    session_user['puerto_udp_escucha']
                )  # TODO str(5100) = UDP listening port
                calling_user['puerto_udp_escucha'] = int(peticion_aux[2])
                servidor_descubrimiento.save_calling_user(calling_user)
                conn.sendall(msg.encode(encoding='utf-8'))

                # Start the video transmission
                my_video = video.video(gui)
                gui.cola_imagenes = my_video.cola_imagenes
                gui.video = my_video
                my_video.iniciar()

                return 1

        # CALL_BUSY case
        elif peticion_aux[0] == "CALL_BUSY":
            gui.app.infoBox("CALL", "CALL_BUSY.")
            return 1

        # CALL_END case
        elif peticion_aux[0] == "CALL_END":
            gui.video.finalizar()
            gui.app.infoBox("CALL", "CALL_END.")
            return 1

        # CALL_DENIED case
        elif peticion_aux[0] == "CALL_DENIED":
            gui.app.infoBox("CALL", "CALL_DENIED.")
            return 1
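escuchar only implements the callee side of this text protocol. A minimal sketch of the matching caller side, assuming the peer address is already known and reusing the space-separated message format seen above (none of this is in the original module):

import socket

def llamar_sketch(peer_ip, peer_port, nickname, udp_port):
    # Send CALLING <nickname> <udp_port> and wait for the callee's verdict.
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.connect((peer_ip, peer_port))
    s.sendall('CALLING {0} {1}'.format(nickname, udp_port).encode('utf-8'))
    # Expected replies: CALL_ACCEPTED <nick> <udp_port> or CALL_DENIED <nick>.
    respuesta = s.recv(1024).decode('utf-8').split(' ')
    s.close()
    return respuesta[0] == 'CALL_ACCEPTED', respuesta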
Example #10
import Util
from video import video
from inputFile import inputFile
from stego import stuff, extract

if __name__ == "__main__":
    video1 = video('./flame.avi')
    file1 = inputFile('./tes.txt')
    fileName = 'kudukudu'
    key = 'diar'
    isRandom = True

    stuffer = stuff(video1, file1, key, isRandom)
    stuffer.insertLength()
    stuffer.insertExtension()
    stuffer.insertFile()
    stuffer.write('tes.avi')

    video2 = video('./tes.avi')
    extractor = extract(video2, key, isRandom, fileName)
    print('length :', extractor.readBitLength(), "bit")
    print('extension :', extractor.readExtension())
    extractor.readBits()
Example #11
	def showVideo(self):
		vid = video()
		vid.show_webcam()
		msg = Alert("Suspicious Activity has been Detected")
Example #12
File: v.py Project: Lihhhan/tianji
#coding=utf-8
import video, sys
if len(sys.argv) > 1:
    print('file %s' % sys.argv[1])
    i = video.video(sys.argv[1])
    while True:
        # print(i.snow_region())
        i.Temporal_feature()
Example #13

N_vn = len(videoName_chunks_list)

vn = 4 * 13 + 8  # video Number

videoName = videoName_chunks_list[vn]
fileName = videoName[:-4] + "-obj-" + str(obj_id) + "-BBox-continued.npz"
print("Feading File: ", fileName)

#videoName = '1439328827509_000000_AZ324hrsno5and8_1-30000.flv'
#fileName = '1439328827509_000000_AZ324hrsno5and8_1-30000-BBox-obj-0.npz'

in_res = [1200, 500]  # input video resolution
out_res = [32, 32]  # output boxed video resolution

v = video(path + videoName)
out = cv2.VideoWriter(
    path + videoName[:-4] + "-obj-" + str(obj_id) + '-Boxed.avi', -1, 25.0,
    (out_res[0], out_res[1]))
#fourcc = cv2.cv.CV_FOURCC(*'XVID')
#out = cv2.VideoWriter(path+videoName[:-4]+'-Boxed.avi',fourcc, 25.0, (out_res[0],out_res[1]))

fopen = np.load(path + fileName)

fn_list = fopen["fn_list"]
pt_0_0_list = fopen["pt_0_0_list"]  #top-left x
pt_0_1_list = fopen["pt_0_1_list"]  #top-left y
pt_1_0_list = fopen["pt_1_0_list"]  #bottom-right x
pt_1_1_list = fopen["pt_1_1_list"]  #bottom-right y

print("data have been read!")
Example #14
	def __init__(self):
		self.HG=histogramas()
		self.Video=video()
Example #15
from video import video
import os
import json
if not os.path.exists("conf.json"):
    print("conf.json does not exist")
    os.system("pause")
    exit()
f = open('conf.json', 'r', encoding='gb2312')
conf = json.load(f)
print(conf)
f.close()
username = conf['username']
password = conf['password']
course = conf['coursename']
chapter = conf['chapter']
driverpath = conf['chromedriver']
log = Loginer()
log.login(username, password)
log.findcourse(course)
if log.courseurl != '':
    video = video()
    video.Input(log.courseurl, log.cookielist, driverpath)
    video.Read(chapter)
    print("You have finished! Please thank coder wangzw and give him some money.")
os.system("pause")
Example #16
    def video(self):
        v = video.video()
        v.videotest()
Example #17
def sessionsummary(path):
    labelh5path = labelpath(path)
    activity = activitytables.read_activity(path)
    crossings = activitytables.read_crossings(path, activity)
    rewards = activitytables.read_rewards(path)
    #steptimes = activitytables.steptimes(activity)
    vidpath = os.path.join(path, 'front_video.avi')
    vid = video.video(vidpath)

    selected = []

    def onselect(ind):
        selector.ind[:] = ind
        selector.updateselection()
        clearhandles(selected)
        if len(ind) <= 0:
            return

        for s in crossings.slices[ind]:
            h = axs[0, 1].plot(activity.xhead[s], activity.yhead[s], 'r')
            selected.append(h)

    markers = []

    def updateplots():
        onselect([])
        clearcollection(markers)
        valid = crossings.label == 'valid'

        axs[0, 1].clear()
        trajectoryplot(activity, crossings[valid], axs[0, 1], alpha=0.2)

        axs[1, 2].clear()
        slowdownsummary(crossings[valid], axs[1, 2])

        axs[1, 1].clear()

        invalid = crossings.label == 'invalid'
        if invalid.any():
            rows = crossings[invalid]
            pts = axs[0, 2].scatter(rows.duration,
                                    rows.yhead_max,
                                    s=20,
                                    marker='x',
                                    facecolors='none',
                                    edgecolors='r')
            markers.append(pts)
        fig.canvas.draw_idle()

    def onkeypress(evt):
        label = None
        if evt.key == 'q':
            crossings.label.to_hdf(labelh5path, 'label')
        if evt.key == 'x':
            label = 'invalid'
        if evt.key == 'c':
            label = 'valid'
        if evt.key == 'z' and len(selector.ind) == 1:
            frameslice = crossings.iloc[selector.ind[0], :].slices
            video.showmovie(vid,
                            frameslice.start,
                            fps=frames_per_second,
                            frameend=frameslice.stop)
        if label is not None:
            crossings.label[selector.ind] = label
            updateplots()

    fig, axs = plt.subplots(3, 3)
    fpshist(activity, axs[0, 0])
    selector = featuresummary(crossings, axs[0, 2], onselect)
    updateplots()
    rewardrate(rewards, axs[1, 0])
    fig.canvas.mpl_connect('key_press_event', onkeypress)

    plt.tight_layout()
    return activity, crossings, rewards, selector
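The interactivity above hangs on matplotlib's event API: mpl_connect registers onkeypress, and the handler mutates crossings.label before redrawing. A stripped-down sketch of the same pattern with a plain list in place of the crossings table:

import matplotlib.pyplot as plt

labels = []

def onkeypress_sketch(evt):
    # 'x' marks invalid, 'c' marks valid, mirroring the handler above.
    if evt.key in ('x', 'c'):
        labels.append('invalid' if evt.key == 'x' else 'valid')
        print(labels)

fig, ax = plt.subplots()
fig.canvas.mpl_connect('key_press_event', onkeypress_sketch)
plt.show()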
Example #18
    def __init__(self, frameVideo):
        video(frameVideo, '/alpheus_cam/bottom/image_raw', 1, 1, 'Raw Image')
        video(frameVideo, '/alpheus_cam/bottom/image_processed', 1, 2, 'Processed Image')
        hsvDynClient(frameVideo, 1, 3, 'HSV Range')
Example #19
import numpy as np
import cv2
from video import video

if __name__ == '__main__':
    my_video = video()
    my_video.readVideo2()
Example #20
#! /usr/local/bin/python
# This is the inference algorithm to determine if the attributes at the selected 
# points are up to par with the known target. I will pepper this code with comments
# for ease of understanding. 

import cv, cv2, numpy, math, edge, contrast, convert, random, MonteCarlo, array, video
feed = video.video() #NOTE: feed is in color & is unchanged video frame
lines = edge.main() #REMEMBER THIS VARIABLE!!!!
#print lines

def parallel(lines):
	good_lines = [line for line in lines if (line[2] - line[0]) != 0]	
	sort_slopes = sorted(good_lines, key = slope)
	return sort_slopes 		
		
def slope(line):
	return float(line[3] - line[1]) / float(line[2] - line[0])

#we are going to assume that q is our image

#DO NOT FORGET TO REMOVE ERROR CHECKING PARTS OF CODE!!!
k = []
q = contrast.main()
k = q  # q is the working contrast frame passed from the module

slopearray = []
for line in parallel(lines[0]):
	orderedSlope = slope(line)
	slopearray.append(orderedSlope)

#below I find value from orderedSlope
Example #21
# import original modules
import video as v
from scipy.integrate import odeint

m = 1.0  # mass [kg]
k = 10.0  # spring constant [N/m]
g = 9.8  # gravitational accelaration[m/s^2]
c = 1.0  # damping coefficient [N*s/m]

params = [m, k, g]  # parameters


def MassSpring(p, t):

    x, dx = p

    ddx = ((-k * x - c * dx) / m)

    return [dx, ddx]


# initial conditions(x0, dx0)
max_t = 10.0  # max_time [s]
dt = 0.01  # dt [s]

t = v.np.arange(0.0, max_t, dt)  # time series from 0.0 to max_t (step dt)
x0 = [0.5, 1.0]  # initial conditions x=0.5, dx=1.0
p = odeint(MassSpring, x0, t)  # ode calculation

v.video(p, dt, max_t, params)
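odeint returns one row per time step and one column per state variable (position, then velocity). A short hedged addition showing how the trajectory could be inspected with matplotlib (not part of the original script):

import matplotlib.pyplot as plt

plt.plot(t, p[:, 0], label='x [m]')     # position
plt.plot(t, p[:, 1], label='dx [m/s]')  # velocity
plt.xlabel('t [s]')
plt.legend()
plt.show()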
Example #22
        default="DJI_0074.MP4",
        help="Path to videos input, overwrite device input if used")
    ap.add_argument('-w',
                    '--num-workers',
                    dest='num_workers',
                    type=int,
                    default=1,
                    help='Number of workers.')
    ap.add_argument('-q-size',
                    '--queue-size',
                    dest='queue_size',
                    type=int,
                    default=5,
                    help='Size of the queue.')
    ap.add_argument('-l',
                    '--logger-debug',
                    dest='logger_debug',
                    type=int,
                    default=0,
                    help='Print logger debug')
    ap.add_argument('-f',
                    '--fullscreen',
                    dest='full_screen',
                    type=int,
                    default=0,
                    help='enable full screen')
    args = vars(ap.parse_args())

    video(args)
    print("progress over")
Example #23
# import the necessary packages
from __future__ import print_function
from video import video
from imutils.video import VideoStream
import argparse
import time

# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-o",
                "--output",
                required=True,
                help="path to output directory to store snapshots")
ap.add_argument("-p",
                "--picamera",
                type=int,
                default=-1,
                help="whether or not the Raspberry Pi camera should be used")
args = vars(ap.parse_args())

# initialize the video stream and allow the camera sensor to warmup
print("[INFO] warming up camera...")
vs = VideoStream(usePiCamera=args["picamera"] > 0).start()
time.sleep(2.0)

# start the app
pba = video(vs, args["output"])
pba.root.mainloop()
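VideoStream hides the cv2.VideoCapture/picamera split behind a threaded .read()/.stop() interface. A minimal hedged sketch of consuming it directly, independent of the video app class used above:

from imutils.video import VideoStream
import cv2
import time

vs = VideoStream(usePiCamera=False).start()
time.sleep(2.0)                  # let the camera sensor warm up
while True:
    frame = vs.read()            # latest frame, non-blocking
    cv2.imshow('preview', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cv2.destroyAllWindows()
vs.stop()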
Example #24
    def __init__(self, frameVideo):
        video(frameVideo, '/image/compressed', 1, 1, 'Raw Image')
        #video(frameVideo, '/alpheus_cam/front/image_processed', 1, 2, 'Processed Image')
        video(frameVideo, '/front/gate', 1, 2, 'Processed Image')
        hsvDynClient(frameVideo, 1, 3, 'HSV Range')
Example #25
            # Write groundtruth to txt
            groundtruth_log.write(' ' + str(center[0]) + ' ' + str(center[1]))
            # Update Gaussian map
            raw_Gaussian_map = np.dstack(
                (raw_Gaussian_map,
                 heatmap_object.generate_Gaussian_map(center[0], center[1],
                                                      config.variance_x,
                                                      config.variance_y)))
            # Plot a red circle on frame
            cv2.circle(img,
                       center,
                       radius=5,
                       color=(0, 0, 255),
                       thickness=2,
                       lineType=8,
                       shift=0)
            # Save plotted frames
            cv2.imwrite(config.dir_to_save_groundtruth + 'frame%s.jpg' % index,
                        img)
            # Convert Gaussian map to heatmap using 'maximum pixels'
            heatmap = np.amax(raw_Gaussian_map, axis=2)
            # Save as npz
            np.savez(config.dir_to_save_heatmap + 'heatmap%s' % index,
                     heatmap=heatmap)

    groundtruth_log.close()
    ''' Generate a groundtruth video '''
    video_object = video.video(config.dir_to_load_frames_for_video,
                               config.dir_write_video)
    video_object.to_video()
Example #27
"""
This file does the actual mosaicing of the two streams.

@author: Arnov
"""

import video
from mosaicBuilder import mosaic
from motion import motion
import imutils
import time
import cv2

leftVideo = video.video(src=0).start()  #The webcam attached to the laptop
rightVideo = video.video(src=1).start()  #The inbuilt camera of the laptop

panImage = mosaic()
motion = motion(area=500)

time.sleep(2.0)

while True:
    l = leftVideo.read()
    r = rightVideo.read()
    left = imutils.resize(l, width=400)
    right = imutils.resize(r, width=400)

    result = panImage.pack(left, right)

    if result is None:
        print("Something went wrong, no results :(")
Example #28
File: 1.py Project: Lihhhan/tianji
import video

a = video.video('snow.avi')
b = video.video('snow.avi')
a = a.run()
b = b.Temporal_feature()

print(a - b)


Example #29
    u = up(uid)
    videos = list(set(u.getvideos()))
    videos = ';'.join(videos)
    try:
        cursor = conn.cursor()
        sql = "insert into bvlist(UID,BVlist)values('%s','%s')" % (uid, videos)
        cursor.execute(sql)
        conn.commit()
        print('UP video list scraped successfully!')
    except Exception as e:
        print(e)
    listvideo = list(set(u.getvideos()))

    # Scrape basic video info
    for av in listvideo:
        av1 = video(av)
        bv = av1.putbv()
        videos = video(bv)
        # print(video.getbasic())
        basic = videos.getbasic()
        # print(basic.keys())
        # print(basic.values())
        # for i in basic.values():
        #     print(type(i))
        videolist = u.moreinfo
        last = videolist[int(av)]
        typeid_union = list(last.values())  # video info apart from the video itself
        values = list(basic.values())
        print(values)
        values.extend(typeid_union)
        try: