Ejemplo n.º 1
0
    def averageRGBChannel(self, colorChannel, reduceFrameBy=100):
        """
        Create a track based on the track parameters and the average red,
        green or blue color of the video.

        colorChannel :   channel to average: 0 = blue, 1 = green, 2 = red
        reduceFrameBy :  shrink factor applied to every frame; with the
                            default of 100 a 1000x1000 frame becomes 10x10

        Raises:
            IOError: if the video capture could not be opened.
        """

        # Init vars
        cap = self.videoCap(self.videoName)
        notes = []

        imagesCounter = 0
        imagesCounterTot = 0
        notesCounter = 0
        currentTrack = Track(None, None, None)

        # Fail fast: without this guard, totNbImgInClip / everyNImages /
        # noteDuration would stay unbound and the tqdm() call below (or the
        # final flush) would raise NameError instead of a clear error.
        if not cap.isOpened():
            raise IOError("Could not open video: {}".format(self.videoName))

        # Bootstrap the track parameters from the first frame.
        ret, frame = cap.read()
        imagesCounterTot += 1
        imagesCounter += 1

        averageChannelNorm = self.__averageRGBChoiceOneFrame(
            frame, colorChannel, reduceFrameBy) / 255

        notesNbPerBloc, noteDuration, totNbImgInClip, everyNImages, currentTrack = self.__computeTrack(
            currentTrack, averageChannelNorm)

        pbar = tqdm(total=totNbImgInClip)
        pbar.update(1)
        # For each frame in the video
        while cap.isOpened():
            ret, frame = cap.read()
            imagesCounterTot += 1
            imagesCounter += 1
            pbar.update(1)

            if frame is None:
                # End of stream.
                break

            # Only one frame out of everyNImages contributes a note.
            if imagesCounter % everyNImages == 0:
                # Average channel value is in [0 ; 255]; /4 maps it to a
                # note value in [0 ; 63].
                note = int(
                    self.__averageRGBChoiceOneFrame(
                        frame, colorChannel, reduceFrameBy) / 4)

                notesCounter += 1
                notes.append(
                    currentTrack.createNoteVolTuple(note, self.volume))

                # Test if one bloc can be done
                if notesCounter % notesNbPerBloc == 0:
                    currentTrack.addBlocInfo(noteDuration, self.tempo)
                    currentTrack.addNotes(notes)
                    notes = []
                    notesCounter = 0
                    imagesCounter = 0

                    # Re-seed the track parameters for the next bloc.
                    if cap.isOpened():
                        self.__resetTrackParams(self.num, self.instru,
                                                self.blocDuration)
                        ret, frame = cap.read()
                        imagesCounter += 1

                        averageChannelNorm = self.__averageRGBChoiceOneFrame(
                            frame, colorChannel, reduceFrameBy) / 255

                        notesNbPerBloc, noteDuration, totNbImgInClip, everyNImages, currentTrack = self.__computeTrack(
                            currentTrack, averageChannelNorm)

        # Release resources the original version leaked.
        pbar.close()
        cap.release()

        # Flush the notes of a partially filled last bloc.
        if len(notes) > 0:
            currentTrack.addBlocInfo(noteDuration, self.tempo)
            currentTrack.addNotes(notes)

        self.tracks.append(currentTrack)
        self.__resetTrackParams()
Ejemplo n.º 2
0
    def exemple(self, reduceFrameBy=100):
        """
        Template method: copy it and fill in the two TODOs below to build a
        track from your own per-frame algorithm.

        reduceFrameBy :  shrink factor applied to every frame; with the
                            default of 100 a 1000x1000 frame becomes 10x10

        NOTE(review): if the capture fails to open, notesNbPerBloc /
        noteDuration / totNbImgInClip / everyNImages are never assigned and
        the tqdm() call below raises NameError — assumes the video is always
        readable; confirm with callers.
        """

        # Init vars
        cap = self.videoCap(self.videoName)
        notes = []

        imagesCounter = 0
        imagesCounterTot = 0
        notesCounter = 0
        currentTrack = Track(None, None, None)

        # Bootstrap the track parameters from the first frame.
        if (cap.isOpened()):
            ret, frame = cap.read()
            imagesCounterTot += 1
            imagesCounter += 1

            # TODO : Prepare and give a normed value with your algo, based on one frame
            normedValue = 0

            notesNbPerBloc, noteDuration, totNbImgInClip, everyNImages, currentTrack = self.__computeTrack(
                currentTrack, normedValue)

        pbar = tqdm(total=totNbImgInClip)
        pbar.update(1)
        # For each frame in the video
        while (cap.isOpened()):
            ret, frame = cap.read()
            imagesCounterTot += 1
            imagesCounter += 1
            pbar.update(1)

            if frame is None:
                # End of stream.
                break
            else:
                # Only one frame out of everyNImages contributes a note.
                if imagesCounter % everyNImages == 0:
                    # TODO : Prepare and give a note, the value of the note should be an int and between [0 ; ~60-70]
                    note = 64

                    notesCounter += 1
                    notes.append(
                        currentTrack.createNoteVolTuple(note, self.volume))

                    # Test if one bloc can be done
                    if notesCounter % notesNbPerBloc == 0:
                        currentTrack.addBlocInfo(noteDuration, self.tempo)
                        currentTrack.addNotes(notes)
                        notes = []
                        notesCounter = 0
                        imagesCounter = 0

                        # Re-seed the track parameters for the next bloc.
                        if (cap.isOpened()):
                            self.__resetTrackParams(self.num, self.instru,
                                                    self.blocDuration)
                            ret, frame = cap.read()
                            imagesCounter += 1

                            # TODO : Prepare and give a normed value with your algo, based on one frame - could be computed like the previous one
                            normedValue = 0

                            notesNbPerBloc, noteDuration, totNbImgInClip, everyNImages, currentTrack = self.__computeTrack(
                                currentTrack, normedValue)

        # Flush the notes of a partially filled last bloc.
        if len(notes) > 0:
            currentTrack.addBlocInfo(noteDuration, self.tempo)
            currentTrack.addNotes(notes)

        self.tracks.append(currentTrack)
        self.__resetTrackParams()
Ejemplo n.º 3
0
    def convolution(self, factor, reduceFrameBy=100, kernel=None):
        """
        Create a track based on the track parameters and a convolution kernel
        applied to every frame of the video.

        factor :         applied on the note; should be big if the video is
                            generally smooth, low if it is generally edgy
        reduceFrameBy :  shrink factor applied to every frame; with the
                            default of 100 a 1000x1000 frame becomes 10x10
        kernel :         2D array specifying the kernel to apply on every
                            frame; defaults to the 3x3 identity kernel

        Raises:
            IOError: if the video capture could not be opened.
        """
        # None sentinel instead of a mutable list default, which would be
        # shared (and possibly mutated) across calls.
        if kernel is None:
            kernel = [[0, 0, 0], [0, 1, 0], [0, 0, 0]]

        # Init vars
        cap = self.videoCap(self.videoName)
        notes = []

        imagesCounter = 0
        imagesCounterTot = 0
        notesCounter = 0
        currentTrack = Track(None, None, None)

        # Fail fast: without this guard, totNbImgInClip / everyNImages /
        # noteDuration would stay unbound and the tqdm() call below would
        # raise NameError instead of a clear error.
        if not cap.isOpened():
            raise IOError("Could not open video: {}".format(self.videoName))

        # Bootstrap the track parameters from the first frame.
        ret, frame = cap.read()
        imagesCounterTot += 1
        imagesCounter += 1

        normedValue = self.__applyKernelToFrame(frame, reduceFrameBy,
                                                kernel, factor) / 255
        # Clamp to [0 ; 1] — the kernel response may over/undershoot.
        normedValue = min(max(normedValue, 0), 1)

        notesNbPerBloc, noteDuration, totNbImgInClip, everyNImages, currentTrack = self.__computeTrack(
            currentTrack, normedValue)

        pbar = tqdm(total=totNbImgInClip)
        pbar.update(1)
        # For each frame in the video
        while cap.isOpened():
            ret, frame = cap.read()
            imagesCounterTot += 1
            imagesCounter += 1
            pbar.update(1)

            if frame is None:
                # End of stream.
                break

            # Only one frame out of everyNImages contributes a note.
            if imagesCounter % everyNImages == 0:
                # Kernel response is in [0 ; 255]; /4 maps it to [0 ; 63].
                note = int(
                    self.__applyKernelToFrame(frame, reduceFrameBy, kernel,
                                              factor) / 4)

                notesCounter += 1
                notes.append(
                    currentTrack.createNoteVolTuple(note, self.volume))

                # Test if one bloc can be done
                if notesCounter % notesNbPerBloc == 0:
                    currentTrack.addBlocInfo(noteDuration, self.tempo)
                    currentTrack.addNotes(notes)
                    notes = []
                    notesCounter = 0
                    imagesCounter = 0

                    # Re-seed the track parameters for the next bloc.
                    if cap.isOpened():
                        self.__resetTrackParams(self.num, self.instru,
                                                self.blocDuration)
                        ret, frame = cap.read()
                        imagesCounter += 1

                        normedValue = self.__applyKernelToFrame(
                            frame, reduceFrameBy, kernel, factor) / 255
                        normedValue = min(max(normedValue, 0), 1)

                        notesNbPerBloc, noteDuration, totNbImgInClip, everyNImages, currentTrack = self.__computeTrack(
                            currentTrack, normedValue)

        # Release resources the original version leaked.
        pbar.close()
        cap.release()

        # Flush the notes of a partially filled last bloc.
        if len(notes) > 0:
            currentTrack.addBlocInfo(noteDuration, self.tempo)
            currentTrack.addNotes(notes)

        self.tracks.append(currentTrack)
        self.__resetTrackParams()
Ejemplo n.º 4
0
    def diffBetween2Images(self, factor, th=127, maxNote=64):
        """
        Create a track based on the track parameters and the differences
        between two consecutive frames of the video.

        factor :   applied on the note; should be big if the video is
                      generally quiet, low if it is generally agitated
        th :       the threshold [0 ; 255]
        maxNote :  max value that can be returned for a note

        Raises:
            IOError: if the video capture could not deliver the frames
                needed to bootstrap the track parameters.
        """

        # Init vars
        cap = self.videoCap(self.videoName)
        notes = []

        imagesCounter = 0
        imagesCounterTot = 0
        notesCounter = 0
        currentTrack = Track(None, None, None)

        previousNbOfDiff = -1

        # Fail fast: without this guard, totNbImgInClip / everyNImages /
        # noteDuration would stay unbound and the tqdm() call below would
        # raise NameError instead of a clear error.
        if not cap.isOpened():
            raise IOError("Could not open video: {}".format(self.videoName))

        # Bootstrap: consume three frames and run the diff twice, then
        # derive the track parameters from the second (warmed-up) diff.
        ret, frame = cap.read()

        if not cap.isOpened():
            raise IOError("Video too short: {}".format(self.videoName))
        previousFrame = frame
        ret, frame = cap.read()

        previousNbOfDiff, note = self.__diffBetween2Images(
            maxNote, factor, previousNbOfDiff, previousFrame, frame,
            th)

        if not cap.isOpened():
            raise IOError("Video too short: {}".format(self.videoName))
        previousFrame = frame
        ret, frame = cap.read()
        imagesCounterTot += 3
        imagesCounter += 3

        previousNbOfDiff, note = self.__diffBetween2Images(
            maxNote, factor, previousNbOfDiff, previousFrame,
            frame, th)
        normedValue = note / maxNote

        notesNbPerBloc, noteDuration, totNbImgInClip, everyNImages, currentTrack = self.__computeTrack(
            currentTrack, normedValue)

        pbar = tqdm(total=totNbImgInClip)
        pbar.update(3)
        # For each frame in the video
        while cap.isOpened():
            # BUGFIX: roll the frame window forward. The original never
            # updated previousFrame inside the loop, so every diff compared
            # against the same stale bootstrap frame instead of the
            # previous one — contradicting this method's documented purpose.
            previousFrame = frame
            ret, frame = cap.read()
            imagesCounterTot += 1
            imagesCounter += 1
            pbar.update(1)

            if frame is None:
                # End of stream.
                break

            # Only one frame out of everyNImages contributes a note.
            if imagesCounter % everyNImages == 0:
                # Uncomment line bellow to create image
                #self.__diffBetween2Images_display(previousFrame, frame, th, imagesCounterTot)
                previousNbOfDiff, note = self.__diffBetween2Images(
                    maxNote, factor, previousNbOfDiff, previousFrame,
                    frame, th)

                notesCounter += 1
                notes.append(
                    currentTrack.createNoteVolTuple(note, self.volume))

                # Test if one bloc can be done
                if notesCounter % notesNbPerBloc == 0:
                    currentTrack.addBlocInfo(noteDuration, self.tempo)
                    currentTrack.addNotes(notes)
                    notes = []
                    notesCounter = 0
                    imagesCounter = 0

                    # Re-seed the track parameters for the next bloc.
                    if cap.isOpened():
                        self.__resetTrackParams(self.num, self.instru,
                                                self.blocDuration)
                        previousFrame = frame
                        ret, frame = cap.read()
                        imagesCounterTot += 1
                        imagesCounter += 1
                        pbar.update(1)

                        previousNbOfDiff, note = self.__diffBetween2Images(
                            maxNote, factor, previousNbOfDiff,
                            previousFrame, frame, th)
                        normedValue = note / maxNote

                        notesNbPerBloc, noteDuration, totNbImgInClip, everyNImages, currentTrack = self.__computeTrack(
                            currentTrack, normedValue)

        # Release resources the original version leaked.
        pbar.close()
        cap.release()

        # Flush the notes of a partially filled last bloc.
        if len(notes) > 0:
            currentTrack.addBlocInfo(noteDuration, self.tempo)
            currentTrack.addNotes(notes)

        self.tracks.append(currentTrack)
        self.__resetTrackParams()