Example #1
def identify(filename):
    info = AVI.identify(filename)
    if info is not None:
        video = Video()
        video.privatedata = info
        video.format = 'AVI (%s)' % info['format']
        video.size = info['video_size']
        video.frames = info['frames']
        video.plugin = 'AVI'
        video.moviefile = filename
        return video

    info = QuickTime.identify(filename)
    if info is not None:
        video = Video()
        video.privatedata = info
        video.format = 'QuickTime (%c%c%c%c)' % (
            ((info['format'] >> 24) & 255), ((info['format'] >> 16) & 255),
            ((info['format'] >> 8) & 255), ((info['format']) & 255))
        video.size = (info['width'], info['height'])
        video.frames = info['frames']
        video.plugin = 'QuickTime'
        video.moviefile = filename
        return video

    raise IOError("Format not recognized")
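
A minimal usage sketch for the identifier above. The call below is hypothetical: 'clip.mov' is a placeholder path, and only identify() itself comes from the example.

try:
    video = identify('clip.mov')
    print(video.format, video.size, video.frames)
except IOError as err:
    print('Could not identify file:', err)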
Example #2
    def __init__(self, parent=None):
        super(VideoWindow, self).__init__(parent)
        self.setWindowTitle("Video Annotator")

        self.video1 = Video(self)
        self.video2 = Video(self)

        self.playButton = QPushButton()
        self.playButton.setEnabled(False)
        self.playButton.setIcon(self.style().standardIcon(QStyle.SP_MediaPlay))
        self.playButton.clicked.connect(self.play)

        self.positionSlider = QSlider(Qt.Horizontal)
        self.positionSlider.setRange(0, 0)
        self.positionSlider.sliderMoved.connect(self.setPosition)

        self.errorLabel = QLabel()
        self.errorLabel.setSizePolicy(QSizePolicy.Preferred,
                                      QSizePolicy.Maximum)

        # Create new action
        openAction1 = QAction(QIcon('open.png'), '&Open1', self)
        openAction2 = QAction(QIcon('open.png'), '&Open2', self)

        openAction1.triggered.connect(self.video1.openFile)
        openAction2.triggered.connect(self.video2.openFile)

        # Create exit action
        exitAction = QAction(QIcon('exit.png'), '&Exit', self)
        exitAction.setShortcut('Ctrl+Q')
        exitAction.setStatusTip('Exit application')
        exitAction.triggered.connect(self.exitCall)

        # Create menu bar and add action
        menuBar = self.menuBar()
        fileMenu = menuBar.addMenu('&File')
        #fileMenu.addAction(newAction)
        fileMenu.addAction(openAction1)
        fileMenu.addAction(openAction2)
        fileMenu.addAction(exitAction)

        # Create a widget for window contents
        wid = QWidget(self)
        self.setCentralWidget(wid)

        # Create layouts to place inside widget
        controlLayout = QHBoxLayout()
        controlLayout.setContentsMargins(0, 0, 0, 0)
        controlLayout.addWidget(self.playButton)
        controlLayout.addWidget(self.positionSlider)

        layout = QVBoxLayout()
        layout.addWidget(self.video1.videoWidget)
        layout.addWidget(self.video2.videoWidget)
        layout.addLayout(controlLayout)
        layout.addWidget(self.errorLabel)

        # Set widget to contain window contents
        wid.setLayout(layout)
Example #3
def get_url2(url_list, a_href_line):
    # String variable that holds the current URL
    url = ''
    # Whether the opening quote of the href attribute has been seen yet
    first_quote_found = False
    # Variable that holds the position of the first letter after the first quote
    beginning = 0
    # Looping through current line to isolate the URL
    for i in range(0, len(a_href_line)):
        # If a double quote is found, determine whether it's the first or the second
        if a_href_line[i] == '"':
            # If it's the first, flag it and record where the URL text starts
            if not first_quote_found:
                first_quote_found = True
                beginning = i + 1
            # Otherwise slice the current line and record the URL part
            else:
                url = website_name + a_href_line[beginning:i]
                id = url.rpartition('/')[2].partition('?')[0]
                number = -1
                for j in id.split('-'):
                    if j.isdigit():
                        number = int(j)
                        break
                if number in download_list:
                    print('adding episode: ' + str(number))
                    video_list.insert(0, Video.Video(url, id, number))
                break
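
As a point of comparison, the quoted href value that get_url2 isolates by hand can also be pulled with a regular expression. This standalone sketch uses a hypothetical sample line; the real caller additionally relies on the module-level globals website_name, download_list, and video_list, which are not shown in the example.

import re

# Hypothetical input line of the kind the scraper feeds to get_url2.
a_href_line = '<a href="/watch/some-show-episode-12?ref=home">Episode 12</a>'
match = re.search(r'href="([^"]*)"', a_href_line)
if match:
    url_part = match.group(1)  # the same slice get_url2 builds character by character
    print(url_part)            # -> /watch/some-show-episode-12?ref=home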
Example #4
    def run(self, vid):

        print("Processing: " + str(vid))
        if not os.path.exists(vid):
            raise IOError("Video does not exist at specified path")
            
        if self.args.threaded:

            #load tensorflow session and model if in threaded mode to keep thread safe
            if self.args.tensorflow:
                import tensorflow as tf
                print("Loading Tensorflow model")
                self.sess=tf.Session()
                tf.saved_model.loader.load(self.sess,[tf.saved_model.tag_constants.SERVING], self.args.path_to_model)
                print("Complete")
    
            else:
                self.sess = None

        else:
            self.sess = None
        
        #Create Video Class
        self.video_instance=Video.Video(vid,args=self.args,tensorflow_session=self.sess)
        
        #close tensorflow session to keep thread safe
        if self.args.threaded:
            if self.args.tensorflow:
                self.sess.close()
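
As an aside, the load/use/close pattern above is easier to get right with a context manager. A minimal sketch, assuming TensorFlow 1.x and a hypothetical SavedModel directory:

import tensorflow as tf  # TensorFlow 1.x API, as in the example

def run_with_model(model_dir, vid):
    # The session closes automatically when the block exits, which keeps
    # per-thread usage safe without an explicit close() call.
    with tf.Session() as sess:
        tf.saved_model.loader.load(
            sess, [tf.saved_model.tag_constants.SERVING], model_dir)
        # ... process `vid` with the loaded model here ...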
Example #5
    def __init__(self, *, w: int, h: int, file_name: str, scaled_w: int = 100, stretch: float = 1, gradient: typing.Union[int, str] = 0):
        self.w = w
        self.h = h

        self.aspect_ratio = self.w/self.h
        self.stretch = stretch

        self.sw = scaled_w
        self.sh = int(math.ceil(self.aspect_ratio*self.sw))

        self.sw = math.ceil(self.sw*self.stretch)

        self.file_name = file_name
        self.video = Video(w=self.w, h=self.h, file_name=self.file_name)
        self.video_input = self.video.video_input.video
        self.fps = self.video.fps

        self.process = None

        self.frames = None
        self.viewer = None

        if isinstance(gradient, int):
            if 0 <= gradient < len(gradients):
                self.grad = gradients[gradient]
            else:
                self.grad = gradients[0]
        else:
            self.grad = gradient
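
To make the scaling arithmetic concrete, here is the same computation on hypothetical numbers: a 1920x1080 source scaled to a width of 100 with the default stretch of 1. Note the class derives the scaled height from aspect_ratio * scaled_w, presumably so the stretch factor can compensate for non-square output cells.

import math

w, h, scaled_w, stretch = 1920, 1080, 100, 1.0  # hypothetical inputs

aspect_ratio = w / h                          # 1.777...
sh = int(math.ceil(aspect_ratio * scaled_w))  # 178, as computed in __init__
sw = math.ceil(scaled_w * stretch)            # 100
print(sw, sh)                                 # -> 100 178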
Example #6
def dodajT(app, url, value=None):

    if value is None:
        provjera = provjeriURL(url)
    else:
        provjera = value

    br = 0

    if provjera == 'video':
        video = Video(url)

        tekst = video.title

        app.lista.insert(app.lista.size(), tekst)
        app.inp.delete(0, "end")
        ## update the status
        updateStatus(app, tekst)
        ## add to the dictionary
        app.mainDict[tekst] = video
    elif provjera == 'playlist':
        ## parse the HTML and run the add procedure for each link
        lista = izradiListu(url)
        for i in lista:
            dodajT(app, i, 'video')

    elif provjera == 'playlist_dorada':  ## fix up the link and recurse as if the original playlist link had been submitted
        dodajT(app, "http://www.youtube.com/playlist?" + url.split('&')[1],
               'playlist')
    else:
        updateStatus(app, 'The entered URL is not valid!')
        app.inp.delete(0, "end")
Example #7
    def count_frame(self):
        """This function only works after the filename has been defined."""
        if self.use_image:
            print("Not implemented yet")
        else:
            for vid in self.name:
                video = Video(vid)
                num_fr, fps = video.get_frame_count()
                self.num_frames.append(num_fr)
Example #8
    def get_random_setup(self, num_apps, stream_fps):

        apps = []

        video = Video.Video(stream_fps)

        for i in range(num_apps):
            app = self.get_random_app()
            apps.append(app)

        return Setup(apps, video)
Example #9
    def get_random_setup(self, num_apps, stream_fps):

        budget = random.choice(self.budget_options)
        apps = []

        video = Video.Video(stream_fps)

        for i in range(num_apps):
            app = self.get_random_app()
            apps.append(app)

        return Setup(apps, budget, video)
Example #10
def main():
	"""The main program."""

	# Initialize Pygame

	pygame.init()

	# First we set some stuff up.

	prefs = Preferences.Preferences()
	prefs.loadPreferences()
	video = Video.Video(prefs)
	video.prepareVideo()

	carObject = Car.Car()
	groundObject = Map.Map()

	groundObject.loadMapData(test_map.t_d, test_map.t_w, test_map.t_h)
	groundObject.setupObjects()

	carObject.setXYPosition(8.0, 9.0)

	carDir = 0.0

	while 1:

		for event in pygame.event.get():
			if event.type == pygame.QUIT:
				sys.exit()

		carDir += 1.0
		if (carDir >= 360.0):
			carDir -= 360.0
		carObject.setAngle(carDir)

		video.prepareNewFrame()
		video.setupCamera()
		video.setupLights()

		# Drawing happens here

		glLoadIdentity()

#		drawReference(0.0, 0.0, 0.0, 1.0)

		groundObject.draw()
		carObject.draw()

		video.finishNewFrame()

		pygame.time.wait(33)
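
As an aside, pygame.time.wait(33) caps the loop at roughly 30 frames per second; pygame's Clock is the more common way to express that cap. A sketch, not part of the original:

import pygame

clock = pygame.time.Clock()
# Inside the main loop, instead of pygame.time.wait(33):
clock.tick(30)  # sleeps just long enough to hold the loop near 30 fps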
Example #11
def main():
    """The main program."""

    # Initialize Pygame

    pygame.init()

    # First we set some stuff up.

    prefs = Preferences.Preferences()
    prefs.loadPreferences()
    video = Video.Video(prefs)
    video.prepareVideo()

    carObject = Car.Car()
    groundObject = Ground.Ground()

    carDir = 0.0

    while 1:

        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                sys.exit()

        carDir += 1.0
        if (carDir >= 360.0):
            carDir -= 360.0
        carObject.setAngle(carDir)

        video.prepareNewFrame()
        video.setupCamera()
        video.setupLights()

        # Drawing happens here

        glLoadIdentity()

        drawReference(0.0, 0.0, 0.0, 1.0)

        video.drawSimulationObject(groundObject)
        video.drawSimulationObject(carObject)

        video.finishNewFrame()

        pygame.time.wait(33)
Example #12
def MotionMeerkat():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-gcs_uri',
        help='The Google Cloud Storage URI of the video.',
        default='gs://api-project-773889352370-ml/Videos/jantest.mp4')
    args = parser.parse_args()

    #create instance
    video_instance = Video.Video(args.gcs_uri)

    #send to google for labels
    video_instance.label()

    #download file to play locally
    video_instance.download()

    #write out the video
    video_instance.write()
Example #13
def run(path, keep, write, view, vidpath=""):

    #create instance
    video_instance = Video.Video(path=path,
                                 vidpath=vidpath,
                                 keep=keep,
                                 write=write,
                                 view=view)

    #send to google for labels
    video_instance.label()

    #download file to play locally
    video_instance.download()

    #show video with annotations
    video_instance.show()

    #cleanup video staging file
    video_instance.cleanup()
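
A hypothetical invocation of run(); the GCS-style path and local file name are placeholders, since the example does not show what Video.Video accepts.

run(path="gs://my-bucket/videos/test.mp4",
    keep=False, write=True, view=False, vidpath="local_copy.mp4")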
Example #14
    def parse(self,filename):
        inputfile = open(filename, 'r')
        firstline = inputfile.readline()
        param = firstline[:-1].split(' ')
        num_videos, num_endpoints, num_descriptions, num_caches, capacity = int(param[0]), int(param[1]), int(param[2]), int(param[3]), int(param[4])
        videoline = inputfile.readline()
        videos = videoline[:-1].split(' ')

        for size in videos:
            vid = Video(int(size))
            self.videoList.append(vid)

        for i in range(num_caches):
            cache = Cache(capacity)
            self.cacheList.append(cache)

        for endpoint in range(num_endpoints):
            ep_line = inputfile.readline()[:-1]
            ep_line = ep_line.split(' ')
            datacenter_latency = int(ep_line[0])
            cache_count = int(ep_line[1])
            ep = EndPoint(datacenter_latency)
            for cache in range(cache_count):
                cache_line = inputfile.readline()[:-1]
                cache_line = cache_line.split(' ')
                cache_id = int(cache_line[0])
                cache_latency = int(cache_line[1])
                ep.addCache(cache_id, cache_latency)

            self.endPointList.append(ep)


        for i in range(num_descriptions):
            request_line = inputfile.readline()[:-1]
            request_line = request_line.split(' ')
            id_vid, id_ep, num= int(request_line[0]), int(request_line[1]), int(request_line[2])
            req = Request(id_vid,id_ep,num)
            self.requestList.append(req)
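
To illustrate the input format parse() expects, here is a hand-built sample of the first two lines (all values are hypothetical):

# Hypothetical two-line header in the shape parse() expects:
# videos, endpoints, request descriptions, caches, cache capacity; then video sizes.
sample = "5 2 4 3 100\n50 50 80 30 110\n"
firstline, videoline = sample.splitlines()
num_videos, num_endpoints, num_descriptions, num_caches, capacity = (
    int(x) for x in firstline.split(' '))
video_sizes = [int(s) for s in videoline.split(' ')]
print(num_caches, capacity, video_sizes)  # -> 3 100 [50, 50, 80, 30, 110]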
Example #15
    def letGoButton(self):

        SpeechDuration = (self.durationInMinutes.get() * 60 +
                          self.durationInSeconds.get())

        if (SpeechDuration != "" and self.title.get() != ""
                and self.location != ""):
            name = re.compile(r'^[a-zA-Z][a-zA-Z ]*$')
            if name.match(self.title.get()):
                self.root.destroy()
                from Files.datastore import datastore
                import Video
                datastore.name = self.enter
                datastore.title = self.title.get()
                Video.Video(self.location, SpeechDuration, self.title.get(),
                            self.name)
            else:
                messagebox.showinfo(
                    "Error Message",
                    "Please, you must write only English letters")
        else:
            messagebox.showinfo("Error Message",
                                "Please,You must Enter Video Title")
Example #16
def Dancify(source_video, target,
            source_beats=None, target_beats=None,
            synch_video_beat=0, synch_audio_beat=0,
            beat_offset = 0, leadin = None, nbeats=None,
            unfold_to_n=None,
            source_harmonic = None, source_harmonic_offset=None,
            target_harmonic = None, target_harmonic_offset=None,
            force_recompute=None, warp_type = 'quad',
            name_tag=None, name_tag_prefix=None, output_path = None,
            momentum = 0.1,
            **kwargs):
    """

    :param source_video:
    :param target:
    :param source_beats:
    :param target_beats:
    :param synch_video_beat:
    :param synch_audio_beat:
    :param beat_offset:
    :param leadin:
    :param nbeats:
    :param unfold_to_n:
    :param source_harmonic:
    :param target_harmonic:
    :param source_harmonic_offset:
    :param target_harmonic_offset:
    :param force_recompute:
    :param warp_type:
    :param name_tag:
    :param name_tag_prefix:
    :param output_path:
    :param momentum:
    :param kwargs:
    :return:
    """


    if((output_path is not None) and (not force_recompute)):
        if(os.path.exists(output_path)):
            return Video(output_path);

    if(isinstance(target, Video)):
        target_audio = target.getAudio();
    else:
        target_audio = target;


    synchaudio = synch_audio_beat;
    synchvideo = synch_video_beat;
    lead_in = leadin;
    if(lead_in is None):
        lead_in = min(synchaudio, synchvideo);
    elif(isinstance(lead_in, str) and lead_in[0]=='<'):
        # lead_in = min(synchaudio, synchvideo, int(lead_in));
        lead_in = min(synchaudio, int(lead_in));

    start_audio_beat = synchaudio-lead_in;
    start_video_beat = synchvideo-lead_in;

    if(beat_offset and beat_offset>0):
        start_audio_beat = start_audio_beat+beat_offset;
        start_video_beat = start_video_beat+beat_offset;

    print("Warping {} to {}".format(source_video.getName(), target_audio.getName()));
    bitrate = None;
    vbeats = source_beats;
    if(source_beats is None):
        vbeats = source_video.getVisualBeats();


    tbeats = target_beats;
    if(target_beats is None):
        tbeats = target_audio.getBeatEvents();

    if(start_video_beat < 0):
        if(synchvideo == 0):
            vbeats = [vbeats[0].clone()]+vbeats;
            vbeats[0].start = vbeats[0].start-(vbeats[2].start-vbeats[1].start);
        vbadd = Event.SubdivideIntervals(vbeats[:2], -start_video_beat);
        vbeats = vbadd+vbeats[2:];
        start_video_beat = 0;


    vbeats = vbeats[start_video_beat:];
    tbeats = tbeats[start_audio_beat:];

    if (source_harmonic == 'half'):
        vbeats = Event.Half(vbeats, source_harmonic_offset);
    elif (source_harmonic == 'third'):
        vbeats = Event.Third(vbeats, source_harmonic_offset);
    elif (source_harmonic == 'double'):
        vbeats = Event.Double(vbeats);

    if (target_harmonic == 'half'):
        tbeats = Event.Half(tbeats, target_harmonic_offset);
    elif (target_harmonic == 'third'):
        tbeats = Event.Third(tbeats, target_harmonic_offset);
    elif (target_harmonic == 'double'):
        tbeats = Event.Double(tbeats);


    if(nbeats):
        print("Rendering {} beats of result".format(nbeats))
        if(len(vbeats)>nbeats):
            vbeats = vbeats[:nbeats];
            print(len(vbeats))


    if(unfold_to_n):
        vbeats = Event.UnfoldToN(vbeats, unfold_to_n, momentum=momentum);

    if (len(tbeats) > len(vbeats)):
        tbeats = tbeats[:len(vbeats)];

    if(warp_type == 'weight'):
        vbeats = source_video.visualBeatsFromEvents(vbeats);

    if(name_tag is None):
        name_tag = warp_type+'_sab_'+str(start_audio_beat)+'_svb_'+str(start_video_beat);
    if(name_tag_prefix is not None):
        name_tag = name_tag+name_tag_prefix;

    warp_args = dict(target=target_audio,
                     source_events=vbeats,
                     target_events = tbeats,
                     warp_type=warp_type,
                     force_recompute=force_recompute,
                     name_tag = name_tag)
    if(bitrate):
        warp_args.update(dict(bitrate=bitrate));

    warp_args.update(kwargs);
    warped_result = source_video.getWarped(**warp_args);

    if(output_path):
        final_output_path = output_path;
        if(os.path.isfile(final_output_path)):
            output_filename = os.path.basename(output_path);
            name_parts = os.path.splitext(output_filename);
            output_filename_base = name_parts[0];
            output_directory_path = os.path.dirname(output_path);
            if (output_directory_path == ''):
                output_directory_path = '.'
            output_ext = name_parts[1];
            ntry = 1;
            tryname = output_filename_base+ '_' + str(ntry);
            while (os.path.isfile(os.path.join(output_directory_path, tryname+output_ext)) and ntry<100):
                ntry = ntry+1;
                tryname = output_filename_base + '_' + str(ntry);

            final_output_path = os.path.join(output_directory_path, tryname + output_ext);
        shutil.copy2(src=warped_result.getPath(), dst=final_output_path);
        n_frames_total = warped_result.num_frames_total;
        warp_used = warped_result.getInfo('warp_used');
        warped_result_final = Video(path = final_output_path, num_frames_total=n_frames_total);
        warped_result_final.setInfo(label='warp_used', value=warp_used);
        os.remove(warped_result.getPath())
        warped_result = warped_result_final;
    return warped_result;



    def getVBSegments(self,source_video,
                      source_beats = None,
                      search_tempo=None,
                      search_window=None,
                      max_height=None,
                      beat_limit=None,
                      n_return=None,
                      unary_weight=None,
                      binary_weight=None,
                      break_on_cuts=None,
                      peak_vars=None):

        source = source_video;
        if(source_beats is None):
            if (peak_vars is not None):
                vbeats = source.getFeature('simple_visual_beats', **peak_vars);
            else:
                vbeats = source.getFeature('simple_visual_beats');
        else:
            vbeats = source_beats;

        #

        if (search_tempo is not None):
            tempo = search_tempo;
            beat_time = np.true_divide(60.0, tempo);
            clips = VisualBeat.PullOptimalPaths_Basic(vbeats, target_period=beat_time, unary_weight=unary_weight,
                                                      binary_weight=binary_weight, break_on_cuts=break_on_cuts,
                                                      window_size=search_window);
        else:
            clips = VisualBeat.PullOptimalPaths_Autocor(vbeats, unary_weight=unary_weight, binary_weight=binary_weight,
                                                        break_on_cuts=break_on_cuts, window_size=search_window);

        if (beat_limit is None):
            beat_limit = 2;

        print("There were {} candidates".format(len(vbeats)));
        nclips = 0;
        segments = [];

        for S in clips:
            if (len(S) > beat_limit):
                nclips = nclips + 1;
                segments.append(S);

        if (n_return is not None):
            segments.sort(key=len, reverse=True);
            segments = segments[:n_return];
        return segments;
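
For orientation, a hypothetical call to Dancify; the file names and the Video constructor arguments are assumptions based on how the function uses them, not confirmed by the snippet.

source = Video('dancer.mp4')                # hypothetical source clip
result = Dancify(source_video=source,
                 target=Video('song.mp4'),  # audio is extracted via getAudio()
                 synch_video_beat=4, synch_audio_beat=8,
                 nbeats=64, output_path='dancer_on_song.mp4')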
Example #17
def main():
    """The main program."""

    print ""

    # Initialize Pygame

    print "Starting Pygame..."

    pygame.init()

    # First we set some stuff up.

    print "Initializing stuff..."

    prefs = Preferences.Preferences()
    prefs.loadPreferences()
    video = Video.Video(prefs)
    video.prepareVideo()

    simulationObject = Simulation.Simulation()

    carObject = Car.Car()
    groundObject = Map.Map()

    print "Unpickleing dictionaries..."

    nodeFile = open("Objects/Map/node_list.dat", "r")
    distFile = open("Objects/Map/node_distances.dat", "r")

    nodeDict = pickle.load(nodeFile)
    distDict = pickle.load(distFile)

    nodeFile.close()
    distFile.close()

    print "initializing more stuff..."

    groundObject.loadMapData(test_map.t_d, test_map.t_w, test_map.t_h,
                             nodeDict, distDict)
    groundObject.setupObjects()

    simulationObject.setMap(groundObject)
    simulationObject.addObject(carObject)

    print "Preparing stuff for the car..."

    carObject.setXYPosition(2.25, 0.25)
    carObject.setCurrentNode("0200TL")
    carObject.setVelocity(2.0)
    carObject.getNextTarget()

    # Now we do things

    print "Preparing lists for drawing paths..."

    (points, lines) = VideoRoutines.drawPathsPrep(nodeDict, distDict)

    print "Starting main loop..."

    while 1:

        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                sys.exit()

        simulationObject.update(33.0 / 1000.0)

        video.prepareNewFrame()
        video.setupCamera()
        video.setupLights()

        # Drawing happens here

        glLoadIdentity()

        #		groundObject.draw()
        #		carObject.draw()

        simulationObject.draw()

        glLoadIdentity()

        #		VideoRoutines.drawPaths(points, lines, 20.0, 1.0, 3.0)
        VideoRoutines.drawSpecialPath(carObject.getPath(), nodeDict, 20.0, 1.0,
                                      3.0)

        video.finishNewFrame()

        pygame.time.wait(33)

    print "Exiting..."
    print ""
Example #18
from Video import *
import cv2

if __name__ == '__main__':
    vide = Video("path of the video to modify")
    im = cv2.imread("path of the image to load", cv2.IMREAD_COLOR)
    # takes the start second, end second, row position, column position, and the image;
    # note that as many images as desired can be added to modify the video
    vide.anadirModificacion(4, 8, 60, 60, im)
    vide.anadirModificacion(15, 20, 120, 120, im)
    # takes the path of the song to load
    vide.setMusica("mrdm.mp3")
    # saves the modified video
    vide.guardarVideoModificado("./plytchingon.mp4")
Example #19
def main():
    """The main program."""

    # Initialize Pygame

    print "Starting Pygame..."

    pygame.init()

    # First we set some stuff up.

    print "Initializing stuff..."

    prefs = Preferences.Preferences()
    prefs.loadPreferences()
    video = Video.Video(prefs)
    video.prepareVideo()

    carObject = Car.Car()
    groundObject = Map.Map()

    groundObject.loadMapData(test_map.t_d, test_map.t_w, test_map.t_h)
    groundObject.setupObjects()

    carObject.setXYPosition(8.0, 9.0)

    carDir = 0.0

    print "Unpickleing dictionaries..."

    nodeFile = open("Objects/Map/node_list.dat", "r")
    distFile = open("Objects/Map/node_distances.dat", "r")

    nodeDict = pickle.load(nodeFile)
    distDict = pickle.load(distFile)

    nodeFile.close()
    distFile.close()

    # Now we do things

    print "Preparing lists for drawing paths..."

    (points, lines) = VideoRoutines.drawPathsPrep(nodeDict, distDict)

    print "Starting main loop..."

    while 1:

        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                sys.exit()

        carDir += 1.0
        if (carDir >= 360.0):
            carDir -= 360.0
        carObject.setAngle(carDir)

        video.prepareNewFrame()
        video.setupCamera()
        video.setupLights()

        # Drawing happens here

        glLoadIdentity()

        groundObject.draw()
        carObject.draw()

        glLoadIdentity()

        VideoRoutines.drawPaths(points, lines, 20.0, 1.0, 3.0)

        video.finishNewFrame()

        pygame.time.wait(33)
Example #20
    def record_and_send(self):
        self.video = Video(self.vidPath, self.length)
        self.video.taking_video()
        self.email(self.receiver, self.video.completePath, self.video.vidList)
Example #21
def Dancefer(source_video, target,
             synch_video_beat=0, synch_audio_beat=0,
             beat_offset = 0, leadin = None, nbeats=None,
             source_harmonic = None, target_harmonic = None, source_harmonic_offset=None, target_harmonic_offset=None,
             force_recompute=None, warp_type = 'quad',
             name_tag=None, name_tag_prefix=None, output_path = None,
             **kwargs):
    """

    :param source_video: video to warp
    :param target: music to warp to
    :param synch_video_beat: integer specifying a beat (as in the nth beat) from the video to synchronize with synch_audio_beat
    :param synch_audio_beat: integer specifying a beat (as in the nth beat) from the video to synchronize with synch_video_beat
    :param beat_offset: Lets you offset which beats you want to render. This is mostly for testing different parts of an output.
    :param leadin: how many beats before the synch beats to render
    :param nbeats: lets you restrict output to rendering n beats
    :param source_harmonic: can be None, 'half', or 'double'. 'half' will use every other beat, which you can offset with source_harmonic_offset. 'double' will add an additional beat between every consecutive beat. update - added 'third' for waltzes.
    :param target_harmonic: can be None, 'half', or 'double'. 'half' will use every other beat, which you can offset with source_harmonic_offset. 'double' will add an additional beat between every consecutive beat. update - added 'third' for waltzes.
    :param source_harmonic_offset: optional offset for harmonic
    :param target_harmonic_offset: optional offset for harmonic
    :param force_recompute:
    :param warp_type:
    :param name_tag:
    :param name_tag_prefix:
    :param output_path:
    :param kwargs:
    :return:
    """


    if((output_path is not None) and (not force_recompute)):
        if(os.path.exists(output_path)):
            return Video(output_path);

    if(isinstance(target, Video)):
        target_audio = target.getAudio();
    else:
        target_audio = target;


    synchaudio = synch_audio_beat;
    synchvideo = synch_video_beat;
    lead_in = leadin;
    if(lead_in is None):
        lead_in = min(synchaudio, synchvideo);
    elif(isinstance(lead_in, str) and lead_in[0]=='<'):
        # lead_in = min(synchaudio, synchvideo, int(lead_in));
        lead_in = min(synchaudio, int(lead_in));

    start_audio_beat = synchaudio-lead_in;
    start_video_beat = synchvideo-lead_in;

    if(beat_offset and beat_offset>0):
        start_audio_beat = start_audio_beat+beat_offset;
        start_video_beat = start_video_beat+beat_offset;

    print("Warping {} to {}".format(source_video.getName(), target_audio.getName()));
    bitrate = None;
    vbeats = source_video.audio.getBeatEvents()
    tbeats = target_audio.getBeatEvents()

    if(start_video_beat < 0):
        if(synchvideo == 0):
            vbeats = [vbeats[0].clone()]+vbeats;
            vbeats[0].start = vbeats[0].start-(vbeats[2].start-vbeats[1].start);
        vbadd = Event.SubdivideIntervals(vbeats[:2], -start_video_beat);
        vbeats = vbadd+vbeats[2:];
        start_video_beat = 0;


    vbeats = vbeats[start_video_beat:];
    tbeats = tbeats[start_audio_beat:];

    if(source_harmonic=='half'):
        vbeats = Event.Half(vbeats, source_harmonic_offset);
    elif (source_harmonic == 'third'):
        vbeats = Event.Third(vbeats, source_harmonic_offset);
    elif(source_harmonic == 'double'):
        vbeats = Event.Double(vbeats);

    if (target_harmonic == 'half'):
        tbeats = Event.Half(tbeats, target_harmonic_offset);
    elif (target_harmonic == 'third'):
        tbeats = Event.Third(tbeats, target_harmonic_offset);
    elif (target_harmonic == 'double'):
        tbeats = Event.Double(tbeats);


    if(nbeats):
        print("Rendering {} beats of result".format(nbeats))
        if(len(vbeats)>nbeats):
            vbeats = vbeats[:nbeats];
            print(len(vbeats))
        if(len(tbeats)>nbeats):
            tbeats = tbeats[:nbeats];
            print(len(tbeats))
    else:
        if(vbeats[-1].start<source_video.getDuration()):
            print(tbeats)
            print("length of tbeats is: {}".format(len(tbeats)));
            print("start_video_beat: {}, start_audio_beat: {}".format(start_video_beat, start_audio_beat))
            newbeat = vbeats[-1].clone();
            deltatime = source_video.getDuration()-newbeat.start;
            newbeat.start = source_video.getDuration();
            target_newbeat = tbeats[-1].clone();
            target_newbeat.start = min(target_newbeat.start+deltatime, target_audio.getDuration());
            tbeats.append(target_newbeat);

    if(warp_type == 'weight'):
        vbeats = source_video.visualBeatsFromEvents(vbeats);

    if(name_tag is None):
        name_tag = warp_type+'_sab_'+str(start_audio_beat)+'_svb_'+str(start_video_beat);
    if(name_tag_prefix is not None):
        name_tag = name_tag+name_tag_prefix;

    warp_args = dict(target=target_audio,
                     source_events=vbeats,
                     target_events = tbeats,
                     warp_type=warp_type,
                     force_recompute=force_recompute,
                     name_tag = name_tag)
    if(bitrate):
        warp_args.update(dict(bitrate=bitrate));

    warp_args.update(kwargs);
    warped_result = source_video.getWarped(**warp_args);

    if(output_path):
        final_output_path = output_path;
        if(os.path.isfile(final_output_path)):
            output_filename = os.path.basename(output_path);
            name_parts = os.path.splitext(output_filename);
            output_filename_base = name_parts[0];
            output_directory_path = os.path.dirname(output_path);
            if (output_directory_path == ''):
                output_directory_path = '.'
            output_ext = name_parts[1];
            ntry = 1;
            tryname = output_filename_base+ '_' + str(ntry);
            while (os.path.isfile(os.path.join(output_directory_path, tryname+output_ext)) and ntry<100):
                ntry = ntry+1;
                tryname = output_filename_base + '_' + str(ntry);

            final_output_path = os.path.join(output_directory_path, tryname + output_ext);
        shutil.copy2(src=warped_result.getPath(), dst=final_output_path);
        n_frames_total = warped_result.num_frames_total;
        warp_used = warped_result.getInfo('warp_used');
        warped_result_final = Video(path = final_output_path, num_frames_total=n_frames_total);
        warped_result_final.setInfo(label='warp_used', value=warp_used);
        os.remove(warped_result.getPath())
        warped_result = warped_result_final;
    return warped_result;
Example #22
    def run(self):
        for vid in self.queue:
            video_instance = Video.Video(vid, self.args)
            video_instance.analyze()
            video_instance.clip()
            video_instance.write()
Example #23
def Dancify(source_video,
            target,
            source_beats=None,
            target_beats=None,
            synch_video_beat=0,
            synch_audio_beat=0,
            beat_offset=0,
            leadin=None,
            nbeats=None,
            unfold_to_n=None,
            source_harmonic=None,
            source_harmonic_offset=None,
            target_harmonic=None,
            target_harmonic_offset=None,
            force_recompute=None,
            warp_type="quad",
            name_tag=None,
            name_tag_prefix=None,
            output_path=None,
            momentum=0.1,
            **kwargs):
    """

    :param source_video:
    :param target:
    :param source_beats:
    :param target_beats:
    :param synch_video_beat:
    :param synch_audio_beat:
    :param beat_offset:
    :param leadin:
    :param nbeats:
    :param unfold_to_n:
    :param source_harmonic:
    :param target_harmonic:
    :param source_harmonic_offset:
    :param target_harmonic_offset:
    :param force_recompute:
    :param warp_type:
    :param name_tag:
    :param name_tag_prefix:
    :param output_path:
    :param momentum:
    :param kwargs:
    :return:
    """

    if (output_path is not None) and (not force_recompute):
        if os.path.exists(output_path):
            return Video(output_path)

    if isinstance(target, Video):
        target_audio = target.getAudio()
    else:
        target_audio = target

    synchaudio = synch_audio_beat
    synchvideo = synch_video_beat
    lead_in = leadin
    if lead_in is None:
        lead_in = min(synchaudio, synchvideo)
    elif isinstance(lead_in, str) and lead_in[0] == "<":
        # lead_in = min(synchaudio, synchvideo, int(lead_in))
        lead_in = min(synchaudio, int(lead_in))

    start_audio_beat = synchaudio - lead_in
    start_video_beat = synchvideo - lead_in

    if beat_offset and beat_offset > 0:
        start_audio_beat = start_audio_beat + beat_offset
        start_video_beat = start_video_beat + beat_offset

    print("Warping {} to {}".format(source_video.getName(),
                                    target_audio.getName()))
    bitrate = None
    vbeats = source_beats
    if source_beats is None:
        vbeats = source_video.getVisualBeats()

    tbeats = target_beats
    if target_beats is None:
        tbeats = target_audio.getBeatEvents()

    if start_video_beat < 0:
        if synchvideo == 0:
            vbeats = [vbeats[0].clone()] + vbeats
            vbeats[0].start = vbeats[0].start - (vbeats[2].start -
                                                 vbeats[1].start)
        vbadd = Event.SubdivideIntervals(vbeats[:2], -start_video_beat)
        vbeats = vbadd + vbeats[2:]
        start_video_beat = 0

    vbeats = vbeats[start_video_beat:]
    tbeats = tbeats[start_audio_beat:]

    if source_harmonic == "half":
        vbeats = Event.Half(vbeats, source_harmonic_offset)
    elif source_harmonic == "third":
        vbeats = Event.Third(vbeats, source_harmonic_offset)
    elif source_harmonic == "double":
        vbeats = Event.Double(vbeats)

    if target_harmonic == "half":
        tbeats = Event.Half(tbeats, target_harmonic_offset)
    elif target_harmonic == "third":
        tbeats = Event.Third(tbeats, target_harmonic_offset)
    elif target_harmonic == "double":
        tbeats = Event.Double(tbeats)

    if nbeats:
        print("Rendering {} beats of result".format(nbeats))
        if len(vbeats) > nbeats:
            vbeats = vbeats[:nbeats]
            print(len(vbeats))

    if unfold_to_n:
        vbeats = Event.UnfoldToN(vbeats, unfold_to_n, momentum=momentum)

    if len(tbeats) > len(vbeats):
        tbeats = tbeats[:len(vbeats)]

    if warp_type is "weight":
        vbeats = source_video.visualBeatsFromEvents(vbeats)

    if name_tag is None:
        name_tag = (warp_type + "_sab_" + str(start_audio_beat) + "_svb_" +
                    str(start_video_beat))
    if name_tag_prefix is not None:
        name_tag = name_tag + name_tag_prefix

    warp_args = dict(
        target=target_audio,
        source_events=vbeats,
        target_events=tbeats,
        warp_type=warp_type,
        force_recompute=force_recompute,
        name_tag=name_tag,
    )
    if bitrate:
        warp_args.update(dict(bitrate=bitrate))

    warp_args.update(kwargs)
    warped_result = source_video.getWarped(**warp_args)

    if output_path:
        final_output_path = output_path
        if os.path.isfile(final_output_path):
            output_filename = os.path.basename(output_path)
            name_parts = os.path.splitext(output_filename)
            output_filename_base = name_parts[0]
            output_directory_path = os.path.dirname(output_path)
            if output_directory_path == "":
                output_directory_path = "."
            output_ext = name_parts[1]
            ntry = 1
            tryname = output_filename_base + "_" + str(ntry)
            while (os.path.isfile(
                    os.path.join(output_directory_path, tryname + output_ext))
                   and ntry < 100):
                ntry = ntry + 1
                tryname = output_filename_base + "_" + str(ntry)

            final_output_path = os.path.join(output_directory_path,
                                             tryname + output_ext)
        shutil.copy2(src=warped_result.getPath(), dst=final_output_path)
        n_frames_total = warped_result.num_frames_total
        warp_used = warped_result.getInfo("warp_used")
        warped_result_final = Video(path=final_output_path,
                                    num_frames_total=n_frames_total)
        warped_result_final.setInfo(label="warp_used", value=warp_used)
        os.remove(warped_result.getPath())
        warped_result = warped_result_final
    return warped_result

    def getVBSegments(
        self,
        source_video,
        source_beats=None,
        search_tempo=None,
        search_window=None,
        max_height=None,
        beat_limit=None,
        n_return=None,
        unary_weight=None,
        binary_weight=None,
        break_on_cuts=None,
        peak_vars=None,
    ):

        source = source_video
        if source_beats is None:
            if peak_vars is not None:
                vbeats = source.getFeature("simple_visual_beats", **peak_vars)
            else:
                vbeats = source.getFeature("simple_visual_beats")
        else:
            vbeats = source_beats

        #

        if search_tempo is not None:
            tempo = search_tempo
            beat_time = np.true_divide(60.0, tempo)
            clips = VisualBeat.PullOptimalPaths_Basic(
                vbeats,
                target_period=beat_time,
                unary_weight=unary_weight,
                binary_weight=binary_weight,
                break_on_cuts=break_on_cuts,
                window_size=search_window,
            )
        else:
            clips = VisualBeat.PullOptimalPaths_Autocor(
                vbeats,
                unary_weight=unary_weight,
                binary_weight=binary_weight,
                break_on_cuts=break_on_cuts,
                window_size=search_window,
            )

        if beat_limit is None:
            beat_limit = 2

        print("There were {} candidates".format(len(vbeats)))
        nclips = 0
        segments = []

        for S in clips:
            if len(S) > beat_limit:
                nclips = nclips + 1
                segments.append(S)

        if n_return is not None:
            segments.sort(key=len, reverse=True)
            segments = segments[:n_return]
        return segments
Example #24
def Dancefer(source_video,
             target,
             synch_video_beat=0,
             synch_audio_beat=0,
             beat_offset=0,
             leadin=None,
             nbeats=None,
             source_harmonic=None,
             target_harmonic=None,
             source_harmonic_offset=None,
             target_harmonic_offset=None,
             force_recompute=None,
             warp_type="quad",
             name_tag=None,
             name_tag_prefix=None,
             output_path=None,
             **kwargs):
    """

    :param source_video: video to warp
    :param target: music to warp to
    :param synch_video_beat: integer specifying a beat (as in the nth beat) from the video to synchronize with synch_audio_beat
    :param synch_audio_beat: integer specifying a beat (as in the nth beat) from the video to synchronize with synch_video_beat
    :param beat_offset: Lets you offset which beats you want to render. This is mostly for testing different parts of an output.
    :param leadin: how many beats before the synch beats to render
    :param nbeats: lets you restrict output to rendering n beats
    :param source_harmonic: can be None, 'half', or 'double'. 'half' will use every other beat, which you can offset with source_harmonic_offset. 'double' will add an additional beat between every consecutive beat. update - added 'third' for waltzes.
    :param target_harmonic: can be None, 'half', or 'double'. 'half' will use every other beat, which you can offset with source_harmonic_offset. 'double' will add an additional beat between every consecutive beat. update - added 'third' for waltzes.
    :param source_harmonic_offset: optional offset for harmonic
    :param target_harmonic_offset: optional offset for harmonic
    :param force_recompute:
    :param warp_type:
    :param name_tag:
    :param name_tag_prefix:
    :param output_path:
    :param kwargs:
    :return:
    """

    if (output_path is not None) and (not force_recompute):
        if os.path.exists(output_path):
            return Video(output_path)

    if isinstance(target, Video):
        target_audio = target.getAudio()
    else:
        target_audio = target

    synchaudio = synch_audio_beat
    synchvideo = synch_video_beat
    lead_in = leadin
    if lead_in is None:
        lead_in = min(synchaudio, synchvideo)
    elif isinstance(lead_in, str) and lead_in[0] == "<":
        # lead_in = min(synchaudio, synchvideo, int(lead_in))
        lead_in = min(synchaudio, int(lead_in))

    start_audio_beat = synchaudio - lead_in
    start_video_beat = synchvideo - lead_in

    if beat_offset and beat_offset > 0:
        start_audio_beat = start_audio_beat + beat_offset
        start_video_beat = start_video_beat + beat_offset

    print("Warping {} to {}".format(source_video.getName(),
                                    target_audio.getName()))
    bitrate = None
    vbeats = source_video.audio.getBeatEvents()
    tbeats = target_audio.getBeatEvents()

    if start_video_beat < 0:
        if synchvideo == 0:
            vbeats = [vbeats[0].clone()] + vbeats
            vbeats[0].start = vbeats[0].start - (vbeats[2].start -
                                                 vbeats[1].start)
        vbadd = Event.SubdivideIntervals(vbeats[:2], -start_video_beat)
        vbeats = vbadd + vbeats[2:]
        start_video_beat = 0

    vbeats = vbeats[start_video_beat:]
    tbeats = tbeats[start_audio_beat:]

    if source_harmonic == "half":
        vbeats = Event.Half(vbeats, source_harmonic_offset)
    elif source_harmonic == "third":
        vbeats = Event.Third(vbeats, source_harmonic_offset)
    elif source_harmonic == "double":
        vbeats = Event.Double(vbeats)

    if target_harmonic == "half":
        tbeats = Event.Half(tbeats, target_harmonic_offset)
    elif target_harmonic == "third":
        tbeats = Event.Third(tbeats, target_harmonic_offset)
    elif target_harmonic == "double":
        tbeats = Event.Double(tbeats)

    if nbeats:
        print("Rendering {} beats of result".format(nbeats))
        if len(vbeats) > nbeats:
            vbeats = vbeats[:nbeats]
            print(len(vbeats))
        if len(tbeats) > nbeats:
            tbeats = tbeats[:nbeats]
            print(len(tbeats))
    else:
        if vbeats[-1].start < source_video.getDuration():
            print(tbeats)
            print("length of tbeats is: {}".format(len(tbeats)))
            print("start_video_beat: {}, start_audio_beat: {}".format(
                start_video_beat, start_audio_beat))
            newbeat = vbeats[-1].clone()
            deltatime = source_video.getDuration() - newbeat.start
            newbeat.start = source_video.getDuration()
            target_newbeat = tbeats[-1].clone()
            target_newbeat.start = min(target_newbeat.start + deltatime,
                                       target_audio.getDuration())
            tbeats.append(target_newbeat)

    if warp_type is "weight":
        vbeats = source_video.visualBeatsFromEvents(vbeats)

    if name_tag is None:
        name_tag = (warp_type + "_sab_" + str(start_audio_beat) + "_svb_" +
                    str(start_video_beat))
    if name_tag_prefix is not None:
        name_tag = name_tag + name_tag_prefix

    warp_args = dict(
        target=target_audio,
        source_events=vbeats,
        target_events=tbeats,
        warp_type=warp_type,
        force_recompute=force_recompute,
        name_tag=name_tag,
    )
    if bitrate:
        warp_args.update(dict(bitrate=bitrate))

    warp_args.update(kwargs)
    warped_result = source_video.getWarped(**warp_args)

    if output_path:
        final_output_path = output_path
        if os.path.isfile(final_output_path):
            output_filename = os.path.basename(output_path)
            name_parts = os.path.splitext(output_filename)
            output_filename_base = name_parts[0]
            output_directory_path = os.path.dirname(output_path)
            if output_directory_path == "":
                output_directory_path = "."
            output_ext = name_parts[1]
            ntry = 1
            tryname = output_filename_base + "_" + str(ntry)
            while (os.path.isfile(
                    os.path.join(output_directory_path, tryname + output_ext))
                   and ntry < 100):
                ntry = ntry + 1
                tryname = output_filename_base + "_" + str(ntry)

            final_output_path = os.path.join(output_directory_path,
                                             tryname + output_ext)
        shutil.copy2(src=warped_result.getPath(), dst=final_output_path)
        n_frames_total = warped_result.num_frames_total
        warp_used = warped_result.getInfo("warp_used")
        warped_result_final = Video(path=final_output_path,
                                    num_frames_total=n_frames_total)
        warped_result_final.setInfo(label="warp_used", value=warp_used)
        os.remove(warped_result.getPath())
        warped_result = warped_result_final
    return warped_result
Example #25
    def __init__(self, window, window_title, video_source=0):
        '''Class initializer.'''
        self.window = window
        self.window.title(window_title)

        BUTTON_FONT_STYLE = tkFont.Font(self.window,
                                        family="Helvetica",
                                        size=10)
        BUTTON_PADX = (3, 3)
        BUTTON_PADY = (5, 20)

        self.timer = 1
        self.already_started = False
        self._elapsedtime = self._start = 0.0
        self.folder_to_compare = ["--"]

        self.vid = Video(video_source)

        self.vid.add_to_lower('red', (166, 84, 141))
        self.vid.add_to_upper('red', (186, 255, 255))

        self.vid.add_to_lower('green', (25, 189, 118))
        self.vid.add_to_upper('green', (95, 255, 255))

        self.take_number = tk.Label(self.window,
                                    text="Tomada número 1",
                                    font=tkFont.Font(family="Helvetica",
                                                     size=15))
        self.take_number.pack(anchor=tk.CENTER)
        self.counter = tk.Label(self.window,
                                text="Timer: 00:00:00",
                                font=BUTTON_FONT_STYLE)
        self.counter.pack(anchor=tk.CENTER, padx=BUTTON_PADX, pady=BUTTON_PADY)

        self.canvas = tk.Canvas(window,
                                width=self.vid.width,
                                height=self.vid.height)
        self.canvas.pack(anchor=tk.CENTER)

        take_snapshot = tk.Button(self.window,
                                  text="Iniciar",
                                  command=self.start_monitoring,
                                  font=BUTTON_FONT_STYLE)
        take_snapshot.pack(side=tk.LEFT, padx=BUTTON_PADX, pady=BUTTON_PADY)

        reset = tk.Button(self.window,
                          text="Reset",
                          command=self.reset_monitoring,
                          font=BUTTON_FONT_STYLE)
        reset.pack(side=tk.LEFT, padx=BUTTON_PADX, pady=BUTTON_PADY)

        show_takes = tk.Button(self.window,
                               text="Comparar",
                               command=self.show_takes,
                               font=BUTTON_FONT_STYLE)
        show_takes.pack(side=tk.LEFT, padx=BUTTON_PADX, pady=BUTTON_PADY)

        self.path_to_compare = tk.StringVar(self.window)

        self.drop_menu = tk.OptionMenu(self.window, self.path_to_compare,
                                       *self.folder_to_compare)
        self.drop_menu.config(width=15)
        self.drop_menu.pack(side=tk.LEFT, padx=BUTTON_PADX, pady=BUTTON_PADY)

        self.update_list()
        self.update()
        self.window.mainloop()
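
A hypothetical launcher for the class this __init__ belongs to; the class name App is an assumption, and note that the constructor blocks by calling mainloop() itself.

import tkinter as tk

App(tk.Tk(), "Video monitor", video_source=0)  # `App` is a placeholder name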
Example #26
import numpy as np

from Image import *
from Webcam import *
from Video import *

from config import *

ex = Image(ASSETS_PATH_IMAGE, "Output")
ex.show()

ex = Video(ASSETS_PATH_VIDEO, "Output")
ex.show()

ex = Webcam(
    "Output", {
        "width": WIDTH_WEBCAM_VALUE,
        "height": HEIGHT_WEBCAM_VALUE,
        "brightness": BRIGHTNESS_WEBCAM_VALUE
    })
ex.show()
'''
img = cv2.imread("assets/pug.jpg")
kernel = np.ones((5, 5), np.uint8)

imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
imgBlur = cv2.GaussianBlur(imgGray, (7, 7), 0)
imgCanny = cv2.Canny(img, 150, 200)
imgDilation = cv2.dilate(imgCanny, kernel, iterations=1)
imgEroded = cv2.erode(imgDilation, kernel, iterations=1)
'''
Example #27
D = 4
M = 3
buffer_size = 5
delta = 5
gamma = 2
t_0 = delta / 10
wait_time = t_0
bandwidth_error = 0.10
sizes = np.array([i for i in range(M)]) * 10
v_coeff = 1
values = np.array([i for i in range(M)]) * 10

min_band = 0.1
max_band = 10

video = Video(N, delta, D, values, sizes, buffer_size)
bandwidth = Bandwidth(min_band, max_band, delta=t_0 / 1000, error_rate=bandwidth_error)
headMovements = HeadMoves(N, D)

# bola3d = Bola3d(video, gamma, v_coeff)
# ddp = DDP(video,buffer_size, bandwidth, gamma, t_0)
# ddp_online = DDPOnline(video,buffer_size, bandwidth, gamma, t_0)

print("Calculating optimal offline")
# ddp.train(headMovements.get_all_probs())
# optimal_offline  = ddp.get_optimal_reward()