Example #1
import math
import sys

from pydub import AudioSegment

# ROOT_PATH and convert() are defined elsewhere in this example's module.


def main():
    filename = sys.argv[1]
    model = sys.argv[2]

    sound = AudioSegment.from_file(filename)
    length_in_milliseconds = len(sound)
    sound = sound.set_frame_rate(16000)
    sound = sound.apply_gain(-18 - sound.dBFS)

    # Trying to normalize to 85 dB.
    # Note: when REPLAYGAIN_REFERENCE_LOUDNESS is not provided, the *_GAIN tags are
    # interpreted relative to an assumed target of -18 dBFS.
    max_db = 20 * math.log10(sound.max)  # peak level in dB; the gain was already applied above

    print("The file was normalized so now it has {} db as peak value".format(
        round(max_db), 2))

    silence = AudioSegment.silent(duration=5000)
    sound = silence + sound + silence
    sound.export(filename, format="wav")

    netA_path = "{}trained_model/{}/generator_ab.npz".format(ROOT_PATH, model)
    netB_path = "{}trained_model/{}/generator_ba.npz".format(ROOT_PATH, model)

    convert(netA_path, netB_path, filename)

    sound = AudioSegment.from_file(filename)

    sound = sound[len(silence):-len(silence)]

    padding_audio = AudioSegment.silent(duration=length_in_milliseconds -
                                        len(sound))
    sound = sound + padding_audio

    sound.export(filename, format="wav")
Example #2
    def test_success(self):
        """
        successful scenario
        """
        import convertor, StringIO
        io = StringIO.StringIO()
        args = ["convertor.py", "sample.py", "--reports=n", "--output-format=parseable"]
        expected_result = "%s/sample.py:6:0: [C] More than one statement on a single line\n" % PROJECT_FOLDER
        convertor.convert(args, io)
        result = io.getvalue()
        self.assertEqual(expected_result, result)
Example #3
    def test_error(self):
        import convertor, StringIO
        help_text = "This is help test"
        convertor.HELP_TEXT = help_text
        original_parse_module_name = convertor.parse_module_name
        convertor.parse_module_name = MainTest.parse_module_name_mock
        io = StringIO.StringIO()
        convertor.convert([], io)
        result = io.getvalue()
        convertor.parse_module_name = original_parse_module_name
        expected_result = "Error: exception\n" + help_text
        self.assertEqual(expected_result, result)
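
The test above swaps in a mock for convertor.parse_module_name by hand and restores it afterwards; if convertor.convert() raised, the patch would leak into later tests. A minimal sketch of the same swap with the restore guarded by try/finally (same names as in the example):

    original_parse_module_name = convertor.parse_module_name
    convertor.parse_module_name = MainTest.parse_module_name_mock
    try:
        io = StringIO.StringIO()
        convertor.convert([], io)
        result = io.getvalue()
    finally:
        # runs even if convert() raises, so later tests see the real function
        convertor.parse_module_name = original_parse_module_name
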
Example #4
    def test_success(self):
        """
        successful scenario
        """
        import convertor, StringIO

        io = StringIO.StringIO()
        args = ["convertor.py", "sample.py", "--reports=n"]
        expected_result = """************* Module pylint_pycharm.sample
%s/sample.py:6:6: [C0321(multiple-statements), ] More than one statement on a single line
""" % PROJECT_FOLDER
        convertor.convert(args, io)
        result = io.getvalue()
        self.assertEqual(expected_result, result)
Example #5
import pandas as pd

# readfile(), CategorizeData(), path_leaf(), Clustering_new(), WritCsv(),
# Difrentiate_Labled_Data() and convert() are defined elsewhere in this example's module.


def main():
    print("start to clean data")
    # load the data files
    Csv_Dics = readfile()
    Lists_Labeled_data = {}
    i = 0
    for key in Csv_Dics.keys():
        print(key)
        data_binded = CategorizeData(Csv_Dics[key], 0)
        name_of_the_file = path_leaf(key)
        print(name_of_the_file)
        path = "/Users/rezakhoshkangini/Documents/Drexel_Documents/Work/Mat-Code/newExperiment_Trento/Sections/Sections_new_features/Labeled/"
        # clustering
        data_labeled, data_confid = Clustering_new(data_binded, i, name_of_the_file)
        # data_labeled = Culstring_Players(data_binded)
        WritCsv(data_confid, path + 'Conf_' + name_of_the_file)
        tmp_container = pd.DataFrame(data_labeled)
        Lists_Labeled_data[i] = tmp_container  # collect all the labeled data into a dict
        # write the labeled data to a CSV file
        WritCsv(data_labeled, path + 'Result_' + name_of_the_file)
        i += 1

    # Sub_Machine()  # classification step (disabled)
    Difrentiate_Labled_Data(Lists_Labeled_data)
    # convert the CSV file into ARFF
    Converted_Data_arff = convert()
Example #6
    def post(self):
        newtext = ""
        text = self.request.get('text')
        if text:
            newtext = convert(str(text))

        self.render("convert.html", text=newtext)
Example #7
# bot, VALUTE_DICT, RESULT_DICT, cbr_currencies, set_cur_to(), convert(),
# update_state() and START are defined elsewhere in this example's module.
def final_handler(callback_query):
    message = callback_query.message
    data = callback_query.data

    VALUTE_DICT['cur_to'] = data

    set_cur_to(cbr_currencies[data])

    result = convert(int(RESULT_DICT["amount"]), RESULT_DICT["cur_from"],
                     RESULT_DICT["cur_to"])

    bot.send_message(
        message.chat.id,
        # "<amount> <cur_from> to <cur_to>: <result> <cur_to>"
        text=f'{RESULT_DICT["amount"]} {VALUTE_DICT["cur_from"]} в'
        f' {VALUTE_DICT["cur_to"]}:\n'
        f'{result} {VALUTE_DICT["cur_to"]}')

    bot.send_message(
        message.chat.id,
        # "The conversion is finished; send me any message to start a new one"
        text='Перевод завершен, для нового перевода отправь мне любое сообщение'
    )

    update_state(message, START)
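
This handler has the shape of a pyTelegramBotAPI (telebot) callback-query handler. If that is the library in use, registration would typically look like the sketch below (the filter shown is hypothetical; the real code may gate it on the bot's state instead):

    # Hypothetical registration, assuming the telebot library
    @bot.callback_query_handler(func=lambda call: True)
    def final_handler(callback_query):
        ...
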
Example #8
import math

from pydub import AudioSegment

# renderizeVoice(), convert() and ROOT_PATH are defined elsewhere in this example's module.


def main(**kwargs):

    if "lyrics" in kwargs:
        lyrics = kwargs['lyrics']
    else:
        lyrics = "oo "

    if "notes" in kwargs:
        notes = list(map(int, kwargs['notes'].split(",")))
    else:
        notes = [0]

    if "dur" in kwargs:
        durations = list(map(float, kwargs['dur'].split(",")))
    else:
        durations = [1]

    if 'file' in kwargs:
        filename = kwargs['file']
    else:
        filename = 'output.wav'

    if 'tempo' in kwargs:
        tempo = kwargs['tempo']
    else:
        tempo = 100

    print(kwargs)
    if 'lang' in kwargs:
        languageCode = kwargs['lang']
    else:
        languageCode = "es"

    if 'scale' in kwargs:
        scale = list(map(int, kwargs['scale'].split(",")))
    else:
        scale = [0, 2, 4, 5, 7, 9, 11]  # Major Scale

    if 'root' in kwargs:
        root_note = int(kwargs['root'])
    else:
        root_note = 0  # C is the root note

    if 'octave' in kwargs:
        octave = int(kwargs['octave'])
    else:
        octave = 5  # default octave

    if 'model' in kwargs:
        renderizeVoice(filename, lyrics, notes, durations, tempo, scale,
                       root_note, octave, languageCode)

        model = kwargs['model']

        sound = AudioSegment.from_file(filename)
        length_in_milliseconds = len(sound)

        silence = AudioSegment.silent(duration=5000)

        sound = silence + sound + silence

        sound = sound.set_frame_rate(16000)
        sound = sound.apply_gain(-18 - sound.dBFS)

        # Trying to normalize to 85 dB.
        # Note: when REPLAYGAIN_REFERENCE_LOUDNESS is not provided, the *_GAIN tags are
        # interpreted relative to an assumed target of -18 dBFS.
        max_db = 20 * math.log10(sound.max)  # peak level in dB; the gain was already applied above

        print(
            "The file was normalized so now it has {} dB as peak value".format(
                round(max_db, 2)))
        sound.export(filename, format="wav")

        netA_path = "{}trained_model/{}/generator_ab.npz".format(
            ROOT_PATH, model)
        netB_path = "{}trained_model/{}/generator_ba.npz".format(
            ROOT_PATH, model)

        convert(netA_path, netB_path, filename)

        sound = AudioSegment.from_file(filename)

        sound = sound[len(silence):-len(silence)]

        padding_audio = AudioSegment.silent(duration=length_in_milliseconds -
                                            len(sound))
        sound = sound + padding_audio

        sound.export(filename, format="wav")
    else:
        renderizeVoice(filename, lyrics, notes, durations, tempo, scale,
                       root_note, octave, languageCode)
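
All of the keyword arguments above are optional. A hypothetical call that exercises the non-model path (file name and values are made up) might look like:

    # Hypothetical invocation; add model="..." only when a trained voice-conversion model exists
    main(lyrics="la la la",
         notes="0,2,4",
         dur="0.5,0.5,1.0",
         file="song.wav",
         tempo=120,
         lang="es",
         scale="0,2,4,5,7,9,11",
         root="0",
         octave="5")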