Example #1
# Assumed context for this excerpt: `FFmpeg` is the project's ffmpy-style wrapper,
# and `_download_file_list`, `_get_audio`, `FilterChain`, `ReplicateAudioFilter`,
# `ConcatFilter`, `TrimAudioFilter`, `FadeOutAudioFilter` and `AUDIO_FADE_OUT_T`
# come from the surrounding module.
import datetime
import math
import os
import re
import time
import uuid
from collections import OrderedDict


def concat_videos(file_list, outdir=None, ffmpeg='ffmpeg', audio=True):
    # (`list`, `dir` and `file` shadowed Python builtins in the original; renamed)
    out_dir = outdir if outdir else os.path.dirname(os.path.realpath(__file__))
    videos = _download_file_list(file_list, out_dir)
    if not videos:
        return None

    # write the concat demuxer file list
    file_name = os.path.normpath(os.path.join(out_dir, str(uuid.uuid4())))
    with open(file_name, 'w') as f:
        for video in videos:
            f.write("file '" + video + "'\n")

    # concatenate the videos
    output = os.path.normpath(os.path.join(out_dir, "video.mp4"))
    ff = FFmpeg(
        executable=ffmpeg,
        global_options=["-y", "-f", "concat", "-safe", "0",
                        "-protocol_whitelist", "file,http,https,tcp,tls"],
        inputs={file_name: None},
        outputs={output: "-c copy"})
    # print(ff.cmd)
    # assumes the wrapper returns ffmpeg's log, where "time=" progress lines appear
    out = ff.run()

    # if background audio is requested, get the movie duration and a matching track
    if audio:
        # total duration of the concatenated movie, from the last "time=" log entry
        length = time.strptime(
            re.findall(r"(?<=time=)[0-9.:]+", out)[-1], "%H:%M:%S.%f")
        length_t = datetime.timedelta(hours=length.tm_hour,
                                      minutes=length.tm_min,
                                      seconds=length.tm_sec).total_seconds()
        inputs = OrderedDict([(output, None)])
        applied_filters = ["[0:v]null[video]"]
        audio_track = _get_audio(length_t, out_dir)
        # build the audio filter chain: loop the track, trim to length, fade out
        audioseq = FilterChain([
            ReplicateAudioFilter(
                repetitions=int(math.ceil(length_t / float(audio_track[1])))),
            ConcatFilter(is_video=False, outputtag="caf"),
            TrimAudioFilter(length=length_t),
            FadeOutAudioFilter(start=length_t - AUDIO_FADE_OUT_T,
                               length=AUDIO_FADE_OUT_T,
                               outstreamprefix="audio")
        ])
        applied_filters += audioseq.generate(["1:a"])[0]
        # add the audio track to the inputs collection
        inputs.update({audio_track[0]: None})

        # mux the video with the audio track
        output = os.path.normpath(os.path.join(out_dir, "videoa.mp4"))
        ff = FFmpeg(
            executable=ffmpeg,
            global_options=["-y"],
            inputs=inputs,
            outputs={output: '-filter_complex "' + ";".join(applied_filters) +
                             '" -map "[video]" -map "[audio]"'})
        # print(ff.cmd)
        ff.run()

    return output
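A hypothetical invocation of the function above; the manifest name and output directory are illustrative, since `_download_file_list` (which consumes them) is not shown in this excerpt:

# Hypothetical call; "videos.txt" and the output directory are illustrative only.
result = concat_videos("videos.txt", outdir="/tmp/clips", audio=False)
if result:
    print("concatenated video written to", result)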
Example #2
def initFilter():
    global deffs, chain, fs
    chain = FilterChain()
    chain._filters.append(
        Filter(FilterType.LShelving, LOW_EQ, 0, 1, enabled=True))
    # chain._filters.append(Filter(FilterType.HShelving, deffs[4], 0, 1, enabled = True))
    # chain._filters.append(Filter(FilterType.Peak, deffs[0], 0, 1, enabled = True))
    chain._filters.append(Filter(FilterType.Peak, HIGH_EQ, 0, 1, enabled=True))
    # chain._filters.append(Filter(FilterType.LPButter, deffs[3], 0, 1, enabled = True))
    # chain._filters.append(Filter(FilterType.HPButter, deffs[3], 0, 1, enabled = True))
    chain.reset()
Example #3
File: gen.py Project: ralic/splat-1
    def __init__(self, frag=None, filters=None):
        """The ``frag`` argument must be a :py:class:`splat.data.Fragment`
        instance.  If `None`, a default empty fragment will be automatically
        created (2 channels, 48kHz).

        A chain of ``filters`` can also be initialised here from a list of
        filter functions; it is stored internally as a
        :py:class:`splat.filters.FilterChain` object and can be altered later
        via :py:attr:`splat.gen.Generator.filters`.
        """
        self.frag = frag
        self._filter_chain = FilterChain(filters)
        self._levels = 1.0
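A minimal usage sketch based only on this docstring; the `double_it` filter below is a hypothetical placeholder, not part of splat's API:

# Sketch only: `double_it` stands in for a real splat filter function.
from splat.gen import Generator

def double_it(frag, *args):
    pass  # a real filter would transform the fragment in place

gen = Generator(filters=[double_it])  # frag=None -> default empty 2-channel 48kHz fragment
gen.filters = [double_it]             # the setter (Example #6 below) rewraps it in a FilterChain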
Example #4
class ToneControl:
    def __init__(self):
        self.LOW_EQ_FREQ = 80.0
        self.HIGH_EQ_FREQ = 8000.0
        self.HIGH_EQ = (2 * self.HIGH_EQ_FREQ) / gv.SAMPLERATE
        self.LOW_EQ = (2 * self.LOW_EQ_FREQ) / gv.SAMPLERATE

        # built from a list of pairs so insertion order is kept on Python < 3.7 too
        self.filterTypes = OrderedDict([
            (FilterType.LPButter, 'Low Pass (Flat)'),
            (FilterType.LPBrickwall, 'Low Pass (Brickwall)'),
            (FilterType.HPButter, 'High Pass (Flat)'),
            (FilterType.HPBrickwall, 'High Pass (Brickwall)'),
            (FilterType.LShelving, 'Low Shelf'),
            (FilterType.HShelving, 'High Shelf'),
            (FilterType.Peak, 'Peak'),
        ])

        self.eps = 1e-7
        self.TYPE = 1
        self.F = 2
        self.G = 3
        self.Q = 4
        self.deffs = [80, 1000, 3000, 5000, 15000]

        self.chain = None
        self.initFilter()

    def initFilter(self):
        self.chain = FilterChain()
        self.chain._filters.append(
            Filter(FilterType.LShelving, self.LOW_EQ, 0, 1, enabled=True))
        # self.chain._filters.append(Filter(FilterType.HShelving, deffs[4], 0, 1, enabled = True))
        # self.chain._filters.append(Filter(FilterType.Peak, deffs[0], 0, 1, enabled = True))
        self.chain._filters.append(
            Filter(FilterType.Peak, self.HIGH_EQ, 0, 1, enabled=True))
        # self.chain._filters.append(Filter(FilterType.LPButter, deffs[3], 0, 1, enabled = True))
        # self.chain._filters.append(Filter(FilterType.HPButter, deffs[3], 0, 1, enabled = True))
        self.chain.reset()

    def updateFilter(self, i, fc, g, Q):
        oldf = self.chain._filters[i]
        ftype = oldf._type  # `type` shadowed the builtin in the original
        # print(oldf._type, oldf._fc, oldf._g, oldf._Q)

        # fc_val = fc * 2 / fs
        # print(fc_val, g, Q)

        f = Filter(ftype, fc, g, Q)
        self.chain.updateFilt(i, f)
        # chain.changeFilt(i, ftype, fc, g, Q)
        self.chain.reset()
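The `(2 * f) / gv.SAMPLERATE` expressions above normalise a frequency in Hz so that the Nyquist frequency maps to 1.0, the convention scipy-style filter design expects. A standalone sketch with an illustrative sample rate:

# Minimal sketch of ToneControl's frequency normalisation; 44100 Hz is illustrative.
SAMPLERATE = 44100
for hz in (80.0, 8000.0):  # the LOW_EQ_FREQ / HIGH_EQ_FREQ defaults above
    print(hz, '->', round(2 * hz / SAMPLERATE, 5))  # Nyquist maps to 1.0
# 80.0 -> 0.00363
# 8000.0 -> 0.36281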
Example #5
# Assumed context for this excerpt: `FFmpeg` is the project's ffmpy-style wrapper,
# the *Filter classes, `_get_audio` and `BatchMode` are project-local, and the
# upper-case constants (FPS, MAX_ZOOM, TRANSITION_T, AUDIO_FADE_OUT_T,
# OUTPUT_VIDEO_WIDTH/HEIGHT) are module-level settings.
import math
from collections import OrderedDict


def _make(images, scene_duration, outdir, ffmpeg, width, height, audio, effect,
          transition, batch_mode):
    # (`dir` shadowed the builtin in the original; renamed to `outdir`)
    # exit if no images were found
    if not images:
        return None

    scene_duration_f = scene_duration * FPS
    # force even dimensions (libx264 requires them); -2 lets ffmpeg derive the
    # missing side, and the OUTPUT_VIDEO_* defaults apply when neither is given
    w = width // 2 * 2 if width is not None else -2 if height is not None else OUTPUT_VIDEO_WIDTH
    h = height // 2 * 2 if height is not None else -2 if width is not None else OUTPUT_VIDEO_HEIGHT

    # map animation key -> (filter, whether the first slide must be preserved)
    animations = {
        "zoompan": (CombiningFilter([
            ZoompanEffectFilter(maxzoom=MAX_ZOOM, frames=scene_duration_f),
            ImageSlideFilter(duration=scene_duration, width=w, height=h)
        ], outstreamprefix="zpaf"), False),
        "fadeinout": (CombiningFilter([
            FadeTransitionFilter(transition_duration=TRANSITION_T,
                                 total_duration=scene_duration),
            ImageSlideFilter(duration=scene_duration, width=w, height=h)
        ], outstreamprefix="faf"), False),
        "zoompanfadeinout": (CombiningFilter([
            ZoompanEffectFilter(maxzoom=MAX_ZOOM, frames=scene_duration_f),
            FadeTransitionFilter(transition_duration=TRANSITION_T,
                                 total_duration=scene_duration),
            ImageSlideFilter(duration=scene_duration, width=w, height=h)
        ], outstreamprefix="zpfaf"), False),
        "slidein": (FilterChain([
            ImageSlideFilter(duration=scene_duration, width=w, height=h),
            SlideTransitionFilter(
                transition_duration=TRANSITION_T,
                preserve_first=batch_mode != BatchMode.non_initial_batch)
        ]), True),
        "zoompanslidein": (ZoompanSlideInTransitionFilter(
            transition_duration=TRANSITION_T,
            total_duration=scene_duration,
            fps=FPS,
            width=w,
            height=h,
            maxzoom=MAX_ZOOM,
            preserve_first=batch_mode != BatchMode.non_initial_batch), True)
    }
    animationkey = (effect if effect else "") + (transition if transition else "")
    animation = animations.get(animationkey)

    # determine whether a transition is requested and how to interpret the inputs list
    preserve_first_slide = animation[1] if animation else False
    if batch_mode != BatchMode.non_initial_batch:
        slides = images
        length_t = scene_duration * len(slides)
    elif preserve_first_slide:
        slides = images
        length_t = scene_duration * (len(slides) - 1)
    else:
        slides = images[1:]
        length_t = scene_duration * len(slides)

    inputs = OrderedDict([(img, "-loop 1") for img in slides])

    # create the video filter chain
    videoseq = FilterChain()
    if animation:
        videoseq.append(animation[0])
    else:
        videoseq.append(
            ImageSlideFilter(duration=scene_duration, width=w, height=h))
    videoseq.append(ConcatFilter(is_video=True, outputtag="video"))
    applied_filters = videoseq.generate(
        ["%d:v" % i for i in range(len(inputs))])[0]

    # load an audio track if requested
    if audio:
        audio_track = _get_audio(length_t, outdir)
        # build the audio filter chain: loop the track, trim to length, fade out
        audioseq = FilterChain([
            ReplicateAudioFilter(
                repetitions=int(math.ceil(length_t / float(audio_track[1])))),
            ConcatFilter(is_video=False, outputtag="caf"),
            TrimAudioFilter(length=length_t),
            FadeOutAudioFilter(start=length_t - AUDIO_FADE_OUT_T,
                               length=AUDIO_FADE_OUT_T,
                               outstreamprefix="audio")
        ])
        applied_filters += audioseq.generate(["%d:a" % len(inputs)])[0]
        # add the audio track to the inputs collection
        inputs.update({audio_track[0]: None})

    # build the video
    output = "video.mp4"
    output = outdir + "/" + output if outdir else output
    ff = FFmpeg(executable=ffmpeg,
                global_options=["-y"],
                inputs=inputs,
                outputs={
                    output:
                    '-filter_complex "' + ";".join(applied_filters) +
                    '" -map "[video]"' +
                    (' -map "[audio]"' if audio else '') +
                    " -c:v libx264 -pix_fmt yuvj420p -q:v 1"
                })
    # print(ff.cmd)
    ff.run()
    return output
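The video examples above assume a small filtergraph-builder contract: each filter object maps a list of input stream labels to ffmpeg filter_complex lines plus its output labels, and FilterChain.generate threads the filters together. A hypothetical minimal version, purely to illustrate the shape of that contract (none of these internals are confirmed by the excerpts beyond their call sites):

# Hypothetical sketch of the FilterChain contract implied by the call sites above.
class NullVideoFilter:
    """Wraps each input stream in a pass-through null filter."""
    def generate(self, inputs):
        lines, outputs = [], []
        for n, label in enumerate(inputs):
            out = "v%d" % n
            lines.append("[%s]null[%s]" % (label, out))
            outputs.append(out)
        return lines, outputs

class FilterChain:
    def __init__(self, filters=None):
        self._filters = list(filters) if filters else []
    def append(self, f):
        self._filters.append(f)
    def generate(self, inputs):
        lines = []
        for f in self._filters:  # each filter consumes the previous filter's outputs
            new_lines, inputs = f.generate(inputs)
            lines += new_lines
        return lines, inputs

chain = FilterChain([NullVideoFilter()])
print(chain.generate(["0:v", "1:v"])[0])
# ['[0:v]null[v0]', '[1:v]null[v1]']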
Example #6
File: gen.py Project: ralic/splat-1
    # Setter for Generator.filters: assigning a list of filter functions
    # rebuilds the internal FilterChain (decorator assumed from context).
    @filters.setter
    def filters(self, filters):
        self._filter_chain = FilterChain(filters)
Example #7
class MainWindow(QWidget):
    def __init__(self, *args):
        QWidget.__init__(self, *args)

        self.setFixedSize(1000,500)
        self.setWindowTitle('EQ')
        self.show()

        layout = QVBoxLayout(self)

        #--------- track controls -----------
        open_btn = QPushButton('Load file')
        open_btn.clicked.connect(self.onOpenBtnClick)
        self.path_label = QLabel('')
        self.loop_box = QCheckBox('Loop')
        play_btn = QPushButton('Play')
        play_btn.clicked.connect(self.onPlayBtnClick)
        stop_btn = QPushButton('Stop')
        stop_btn.clicked.connect(self.onStopBtnClick)
        save_btn = QPushButton('Apply EQ and save')
        save_btn.clicked.connect(self.onSaveBtnClick)

        trackctrl_layout = QHBoxLayout()
        trackctrl_layout.addWidget(open_btn)
        trackctrl_layout.addWidget(self.path_label)       
        trackctrl_layout.addWidget(play_btn)
        trackctrl_layout.addWidget(stop_btn)
        trackctrl_layout.addWidget(self.loop_box)
        trackctrl_layout.addSpacing(50)
        trackctrl_layout.addWidget(save_btn)        
        layout.addLayout(trackctrl_layout)

        #--------- plot ------------
        
        self.plotwin = PlotWin(self)
        layout.addWidget(self.plotwin)

        #--------- filter controls ----------
        sub_layout = QHBoxLayout()
        labels_layout = QVBoxLayout()
        labels_layout.addWidget(QLabel('Filter type'))
        labels_layout.addWidget(QLabel('Cutoff/Center'))
        labels_layout.addWidget(QLabel('Gain'))
        labels_layout.addWidget(QLabel('Q or Slope'))
        sub_layout.addLayout(labels_layout)

        self.nodes = []
        deffs = [100, 1000, 3000, 5000, 15000]
        for i in range(0,5):
            filter_list = QComboBox()
            filter_list.addItems(list(filterTypes.values()))
            if i == 0:
                filter_list.setCurrentIndex(FilterType.HPBrickwall)
            elif i == 4:
                filter_list.setCurrentIndex(FilterType.LPBrickwall)
            else:
                filter_list.setCurrentIndex(FilterType.Peak)

            checkbox = QCheckBox('On')
            freq_txt = QLineEdit(str(deffs[i]))
            freq_txt.setValidator(QIntValidator(self.plotwin.xaxis.min,
                                               self.plotwin.xaxis.max, self))
            gain_txt = QLineEdit('0')
            gain_txt.setValidator(QDoubleValidator(-12, 12, 1, self))
            q_slider = QSlider(Qt.Horizontal)
            node = NodeLayout(i, self)
            node.addControls(checkbox, filter_list, freq_txt, gain_txt, q_slider)
            node.setControlsEnabled(False)
            self.nodes.append(node)
            sub_layout.addLayout(node)
            
            node.enabled.connect(self.onFilterEnableChange)
            node.updated.connect(self.paramChanged)

        layout.addLayout(sub_layout)
        #------------------------------------
        self.setLayout(layout)

        #----------- Filters ----------------
        self.chain = FilterChain()
        deffs = [fc * 2 / fs for fc in deffs]
        self.chain._filters.append(Filter(FilterType.HPBrickwall, deffs[0], enabled=False))
        self.chain._filters.append(Filter(FilterType.Peak, deffs[1], enabled=False))
        self.chain._filters.append(Filter(FilterType.Peak, deffs[2], enabled=False))
        self.chain._filters.append(Filter(FilterType.Peak, deffs[3], enabled=False))
        self.chain._filters.append(Filter(FilterType.LPBrickwall, deffs[4], enabled=False))
        self.updateChainTF()
        self.plotwin.updateHandles()

        self.stream = None
        self.wf = None

    @Slot()
    def onOpenBtnClick(self):
        dialog = QFileDialog(self)
        dialog.setFileMode(QFileDialog.ExistingFile)
        dialog.setNameFilter('Audio (*.wav)')
        if dialog.exec_():
            file_name = dialog.selectedFiles()[0]
            self.path_label.setText(file_name)
            self.wf = wave.open(file_name, 'rb')
            self.openStream()

    @Slot()
    def onPlayBtnClick(self):
        if self.stream:
            # wave's tell() counts frames, so compare with getnframes() alone
            # (the original multiplied by getnchannels(), which never matched for stereo)
            if self.wf.tell() >= self.wf.getnframes():
                self.wf.rewind()
                self.openStream()
            else:
                self.stream.start_stream()


    @Slot()
    def onStopBtnClick(self):
        if self.stream:
            self.stream.stop_stream()

    @Slot()
    def onSaveBtnClick(self):
        dialog = QFileDialog(self)
        dialog.setFileMode(QFileDialog.AnyFile)
        dialog.setNameFilter('Audio (*.wav)')
        if dialog.exec_():
            file_name = dialog.selectedFiles()[0] + '.wav'
            ww = wave.open(file_name, 'wb')
            wf = self.wf
            ww.setframerate(wf.getframerate())
            ww.setsampwidth(wf.getsampwidth())
            ww.setnchannels(wf.getnchannels())
            self.wf.rewind()
            self.chain.reset()

            data = wf.readframes(wf.getnframes())
            s = self.chain.filter(pcmToFloat(byteToPCM(data, wf.getsampwidth())))
            ww.writeframes(bytes(floatToPCM(s)))

    @Slot()
    def onFilterEnableChange(self, i):        
        enabled = self.nodes[i].ctrls[0].isChecked()
        if enabled:
            ftype = self.nodes[i].ctrls[1].currentIndex()
            self.updateControls(i, ftype)
            self.adjustSliderRange(i, ftype)
            self.updateSliderLabel(i)
        else:
            self.nodes[i].setControlsEnabled(False)
        
        self.chain.setFiltEnabled(i, enabled)
        self.plotwin.updateHandles() 
        self.updateChainTF()

    @Slot()
    def paramChanged(self, i, param, val):
        self.updateFilter(i, param, val)       
        self.updateChainTF()
        self.plotwin.updateHandles() 

    @Slot()
    def focusChanged(self, old, new):
        if new is not None:
            for node in self.nodes:
                if node.indexOf(new) != -1:
                    self.plotwin.focused = node.index
                    self.plotwin.update()
           
    def updateControls(self, i, ftype):
        node = self.nodes[i]
        node.setControlsEnabled(True)
        if ftype in (FilterType.LPBrickwall, FilterType.HPBrickwall):
            node.setControlEnabled(3, False)
            node.setControlEnabled(4, False)
        elif ftype in (FilterType.LPButter, FilterType.HPButter):
            node.setControlEnabled(3, False)

        
    def updateFilter(self, i, param, val):
        oldf = self.chain._filters[i]
        ftype = oldf._type  # `type` shadowed the builtin in the original
        fc = oldf._fc
        g = oldf._g
        Q = oldf._Q

        if param == Params.TYPE:
            ftype = val
            Q = 1
        elif param == Params.F:
            fc = int(self.nodes[i].ctrls[2].text()) * 2 / fs
        elif param == Params.G:
            g = float(self.nodes[i].ctrls[3].text())
        elif param == Params.Q:
            if ftype in (FilterType.LPButter, FilterType.HPButter):
                Q = val
            elif ftype == FilterType.Peak:
                Q = val / 10
            elif ftype in (FilterType.LShelving, FilterType.HShelving):
                # the original tested `type == LShelving or HShelving`,
                # whose second operand made the condition always true
                Q = val / 100

        self.chain.updateFilt(i, Filter(ftype, fc, g, Q))
        if param == Params.TYPE:
            self.updateControls(i, ftype)
            self.adjustSliderRange(i, ftype)

        self.updateSliderLabel(i)

    def adjustSliderRange(self, index, ftype):
        slider = self.nodes[index].ctrls[4]
        if not slider.isEnabled():
            return
        Q = self.chain._filters[index]._Q
        if ftype in (FilterType.HPButter, FilterType.LPButter):
            slider.setRange(1, 3)
            slider.setValue(Q)
        elif ftype == FilterType.Peak:
            slider.setRange(1, 300)
            slider.setValue(Q * 10)
        elif ftype in (FilterType.LShelving, FilterType.HShelving):
            slider.setRange(10, 100)
            slider.setValue(Q * 100)
    
    def updateSliderLabel(self, index):
        slider = self.nodes[index].ctrls[4]
        if not slider.isEnabled():
            return
        ftype = self.chain._filters[index]._type
        Q = self.chain._filters[index]._Q
        if ftype in (FilterType.HPButter, FilterType.LPButter):
            # maps the 1..3 slider positions to 12/24/48 dB per octave
            text = str(2 ** Q * 6) + ' dB/oct'
        else:
            text = str(Q)

        self.nodes[index].slider_label.setText(text)
    
    def openStream(self):
        wf = self.wf
        frate = wf.getframerate()
        sampw = wf.getsampwidth()
        nchan = wf.getnchannels()

        def callback(in_data, frame_count, time_info, status):
            data = wf.readframes(frame_count)
            if isinstance(data, str):  # readframes returns bytes; guard kept from the original
                data = str.encode(data)
            if len(data) < frame_count * sampw * nchan:
                if self.loop_box.isChecked():
                    wf.rewind()
                    data = b''.join([
                        data,
                        wf.readframes(frame_count - int(len(data) / (sampw * nchan)))
                    ])
                    self.chain.reset()
                elif len(data) == 0:
                    return data, pyaudio.paComplete

            filtered = self.chain.filter(pcmToFloat(byteToPCM(data, sampw)))
            self.plotwin.updateSpectrum(np.fft.rfft(filtered))

            return bytes(floatToPCM(filtered)), pyaudio.paContinue

        # np.int was removed in NumPy 1.24; the builtin int() does the same here
        chunk_size = int(frate / self.plotwin.refresh_rate)
        self.stream = pya.open(format=pya.get_format_from_width(sampw),
                               channels=nchan,
                               rate=frate,
                               frames_per_buffer=chunk_size,
                               output=True,
                               stream_callback=callback)

        self.chain.reset()

    def updateChainTF(self):
        # evaluate the chain's transfer function and redraw the magnitude curve
        w, H = sosfreqz(self.chain.sos(), self.plotwin.wor)
        self.plotwin.TFcurv.setData(w * 0.5 / np.pi * fs,
                                    20 * np.log10(np.abs(H) + eps))
        self.plotwin.update()
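`updateChainTF` evaluates the cascade with scipy's `sosfreqz` and plots magnitude in dB. A self-contained sketch of the same pattern, substituting a stock Butterworth design for the app's `FilterChain.sos()` (which these excerpts don't define):

# Standalone frequency-response computation in the style of updateChainTF above.
# scipy.signal.butter/sosfreqz are real APIs; fs and the filter spec are illustrative.
import numpy as np
from scipy.signal import butter, sosfreqz

fs = 44100
eps = 1e-7  # floor under the magnitude so log10 never sees zero, as in the excerpt
sos = butter(4, 1000, btype='low', fs=fs, output='sos')
w, H = sosfreqz(sos, worN=2048)
freqs_hz = w * 0.5 / np.pi * fs               # rad/sample -> Hz, same conversion as above
magnitude_db = 20 * np.log10(np.abs(H) + eps)
print(round(magnitude_db[0], 3))              # ~0.0 dB at DC, inside the passband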
Example #8
def scrape(data):
    us_apps_filter = Filter(
        field='app_store_url',
        op=operator.contains,
        value='/us/'
    )
    filter_chain = FilterChain()
    filter_chain.add_filter(us_apps_filter)

    us_apps = filter_chain.filter(data)

    gathered_data = gather_data(us_apps)

    spanish_and_tagalog_filter = ListContainedinListFilter(
        field='languages',
        op=operator.contains,
        value=[u'Spanish', u'Tagalog']
    )

    filter_chain = FilterChain()

    filter_chain.add_filter(spanish_and_tagalog_filter)

    spanish_and_tagalog_data = filter_chain.filter(gathered_data)

    insta_in_name_filter = CaseInsensitiveStringFilter(
        field='name',
        op=operator.contains,
        value='insta'
    )

    filter_chain = FilterChain()

    filter_chain.add_filter(insta_in_name_filter)

    insta_in_name_data = filter_chain.filter(gathered_data)

    filtered_data = {
        'apps_in_spanish_and_tagalog': [_d.get('app_identifier') for _d in spanish_and_tagalog_data],
        'apps_with_insta_in_name': [_d.get('app_identifier') for _d in insta_in_name_data]
    }
    write_json_to_file(filtered_data, 'filtered_apps.json')
    write_json_to_file(gathered_data, 'apps.json')
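The chain here filters plain records rather than audio: each `Filter` pairs a field name with an operator and a comparison value, and `FilterChain.filter` keeps the matching records. A hypothetical minimal implementation consistent with these call sites (the real classes, including the `ListContainedinListFilter` and `CaseInsensitiveStringFilter` variants, are not shown in the excerpt):

# Hypothetical sketch of the record-filtering Filter/FilterChain used above.
import operator

class Filter:
    def __init__(self, field, op, value):
        self.field, self.op, self.value = field, op, value
    def matches(self, record):
        return self.op(record.get(self.field, ''), self.value)

class FilterChain:
    def __init__(self):
        self._filters = []
    def add_filter(self, f):
        self._filters.append(f)
    def filter(self, data):
        return [r for r in data if all(f.matches(r) for f in self._filters)]

us_apps_filter = Filter(field='app_store_url', op=operator.contains, value='/us/')
chain = FilterChain()
chain.add_filter(us_apps_filter)
print(chain.filter([{'app_store_url': 'https://apps.apple.com/us/app/x'},
                    {'app_store_url': 'https://apps.apple.com/de/app/y'}]))
# [{'app_store_url': 'https://apps.apple.com/us/app/x'}]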