Example #1
0
def motorStage3():
    """Run motor stage 3: drive the tray backward while running roller stage 2.

    Launches both motor routines in separate processes so they run
    concurrently, then blocks until both have finished.

    Returns:
        tuple: the two joined Process objects (tray, roller).
    """
    stage3_1 = mp(target=trayBackward)
    stage3_2 = mp(target=rollerStage2)
    stage3_1.start()
    stage3_2.start()
    # Both processes run concurrently; wait for both to complete.
    stage3_1.join()
    stage3_2.join()
    # BUG FIX: the original `return stage3` referenced an undefined name and
    # always raised NameError; return the finished processes instead.
    return stage3_1, stage3_2
Example #2
0
def motorStage2():
    """Run motor stage 2: drive the tray forward while running roller stage 1.

    Launches both motor routines in separate processes so they run
    concurrently, then blocks until both have finished.

    Returns:
        tuple: the two joined Process objects (tray, roller).
    """
    stage2_1 = mp(target=trayForward)
    stage2_2 = mp(target=rollerStage1)
    stage2_1.start()
    stage2_2.start()
    # Both processes run concurrently; wait for both to complete.
    stage2_1.join()
    stage2_2.join()
    # BUG FIX: the original `return stage2` referenced an undefined name and
    # always raised NameError; return the finished processes instead.
    return stage2_1, stage2_2
Example #3
0
def _convert_audio_and_split_sentences(extracted_dir, data_set, dest_dir, dest_dir2):
    """Split .stm transcripts under *data_set* and convert the matching .sph audio.

    Walks ``extracted_dir/data_set`` for .stm transcript files, dispatches one
    `_check_and_save_wav` job per transcript line to a worker pool, then pairs
    up the resulting .wav / .txt files by shared basename.

    Args:
        extracted_dir: root directory of the extracted corpus.
        data_set: sub-directory to scan for .stm/.sph pairs.
        dest_dir: sub-directory (under extracted_dir) for output .wav files.
        dest_dir2: sub-directory (under extracted_dir) for output .txt files.

    Returns:
        pandas.DataFrame with columns ["wav_filename", "txt_filename"].
    """
    source_dir = os.path.join(extracted_dir, data_set)
    target_dir = os.path.join(extracted_dir, dest_dir)
    target_txt_dir = os.path.join(extracted_dir, dest_dir2)
    if not os.path.exists(target_dir):
        os.makedirs(target_dir)
    if not os.path.exists(target_txt_dir):
        os.makedirs(target_txt_dir)
    # NOTE(review): `mp` is called like multiprocessing.Pool here (8 workers);
    # confirm the import alias.
    processers = mp(8)
    checkClass = phoneDict('cmudict-0.7b')
    # Regex that splits one .stm transcript line into its fields.
    res = r"(.*)\s+(\d+)\s+(.*)\s+(.*)\s+(.*)\s+(<.*>)\s+(.*)"
    for root, dirnames, filenames in os.walk(source_dir):
        print(root, dirnames, len(filenames))
        for filename in fnmatch.filter(filenames, '*.stm'):
            trans_filename = os.path.join(root, filename)
            # Audio file shares the transcript's path, with an .sph suffix.
            sph_filename = trans_filename.replace('stm', 'sph')
            # BUG FIX: the original leaked the transcript file handle via
            # open(...).readlines(); a `with` block closes it deterministically.
            with open(trans_filename, 'r') as trans_file:
                lines = trans_file.readlines()
            for index, line in enumerate(lines):
                processers.apply_async(_check_and_save_wav, args=(sph_filename, target_dir, target_txt_dir, checkClass, line, res, index))
    processers.close()
    processers.join()
    files = []
    # Pair up outputs by basename: only keep names that produced BOTH a wav
    # and a txt file.
    wav_filenames = [i[:-4] for i in os.listdir(target_dir)]
    txt_filenames = [i[:-4] for i in os.listdir(target_txt_dir)]
    file_names = list(set(wav_filenames) & set(txt_filenames))
    for file_name in file_names:
        files.append((os.path.abspath(os.path.join(target_dir, file_name+".wav")), os.path.abspath(os.path.join(target_txt_dir, file_name+".txt"))))
    return pandas.DataFrame(data=files, columns=["wav_filename", "txt_filename"])
Example #4
0
    def _provision_obor(nodes=nodes):
        """Provision every node via fab, retrying the whole batch on failure.

        Runs one `fab ... update` subprocess per (node, hostdir) pair in
        parallel, sums their exit codes, and retries the full batch until
        all succeed — at most three attempts.
        """
        log_green('running _provision_obor')
        local('chmod 600 nixos-vagrant-configs/vagrant.priv')

        count = 1
        # BUG FIX: the original `while True or count > 3:` is always True
        # (short-circuit on `True`), so the three-attempt cap never took
        # effect and a persistently failing provision looped forever.
        while count <= 3:
            jobs = []
            for node, hostdir in nodes:
                jobs.append(
                    mp(target=local,
                       args=("fab -i nixos-vagrant-configs/vagrant.priv " +
                             "-H %s update:" % node +
                             "host_dir=%s," % hostdir + "rsync='yes'," +
                             "nix_gc='no'," + "nix_release='18.09'," +
                             "switch='no'" + "> log/`date '+%Y%m%d%H%M%S'`." +
                             "%s.provision.log 2>&1" % node, )))
            for job in jobs:
                job.start()

            # A zero sum means every node provisioned successfully.
            exit_code = 0
            for job in jobs:
                job.join()
                exit_code = exit_code + job.exitcode

            if exit_code == 0:
                break
            count = count + 1

        log_green('_provision_obor completed')
Example #5
0
def clean():
    """Destroy all VMs: Railtrack via its helper, the rest via `vagrant destroy`."""
    log_green('running clean')

    destroy_cmd = ("vagrant destroy -f "
                   "> log/`date '+%Y%m%d%H%M%S'`.vagrant.destroy.log 2>&1")
    jobs = [
        mp(target=destroy_railtrack),
        mp(target=local, args=(destroy_cmd, )),
    ]
    for proc in jobs:
        proc.start()

    # Wait for both teardown processes and accumulate their exit codes.
    total = 0
    for proc in jobs:
        proc.join()
        total += proc.exitcode

    if total != 0:
        raise Exception('clean failed')
    log_green('running clean completed')
Example #6
0
def _preprocess_data(data_dir, output_dir):
    """Convert every (txt, mp3) pair under *data_dir* into numbered wav/txt files.

    Walks *data_dir* for .txt transcripts, dispatches one `_convert_audio`
    job per transcript to a 32-worker pool, then writes a headerless CSV
    pairing the wav and txt outputs that both exist.
    """
    print("Splitting transcriptions and convert wav...")
    pool = mp(32)

    target_wav_dir = os.path.join(output_dir, "synthesis-wav")
    target_txt_dir = os.path.join(output_dir, "synthesis-txt")
    for directory in (target_wav_dir, target_txt_dir):
        if not os.path.exists(directory):
            os.makedirs(directory)

    # Number outputs sequentially (000001.wav / 000001.txt, ...).
    sample_count = 0
    for root, dir_names, file_names in os.walk(data_dir):
        print(root, dir_names, len(file_names))
        for txt_name in fnmatch.filter(file_names, '*.txt'):
            sample_count += 1
            stem = str(sample_count).zfill(6)
            txt_path = os.path.join(root, txt_name)
            mp3_path = os.path.abspath(
                os.path.join(root, txt_name[:-4] + '.mp3'))
            wav_out = os.path.join(target_wav_dir, stem + '.wav')
            txt_out = os.path.join(target_txt_dir, stem + '.txt')
            pool.apply_async(_convert_audio,
                             args=(txt_path, mp3_path, wav_out, txt_out))
    pool.close()
    pool.join()

    # Keep only stems for which BOTH the wav and the txt were produced.
    wav_stems = {name[:-4] for name in os.listdir(target_wav_dir)}
    txt_stems = {name[:-4] for name in os.listdir(target_txt_dir)}
    pairs = [
        (os.path.abspath(os.path.join(target_wav_dir, stem + ".wav")),
         os.path.abspath(os.path.join(target_txt_dir, stem + ".txt")))
        for stem in wav_stems & txt_stems
    ]
    # Write the manifest to disk as a headerless CSV.
    data_df = pandas.DataFrame(data=pairs,
                               columns=["wav_filename", "txt_filename"])
    data_df.to_csv(os.path.join(output_dir, "synthesis.csv"),
                   index=False,
                   header=False)
Example #7
0
def _preprocess_data(data_dir, output_dir):
    """Split sentences listed in text.csv and convert their audio to wav/txt pairs.

    Reads *data_dir*/text.csv (filename, text, score rows), dispatches one
    `_convert_audio_and_split_sentences` job per row to a 4-worker pool,
    then writes a headerless WEB.csv pairing the wav and txt outputs that
    both exist.
    """
    print("Splitting transcriptions and convert wav...")
    pool = mp(4)
    texts_df = pandas.read_table(os.path.join(data_dir, 'text.csv'),
                                 sep=',',
                                 names=['filename', 'text', 'score'])
    checker = phoneDict('cmudict-0.7b')
    target_dir = os.path.join(output_dir, "wav")
    target_txt_dir = os.path.join(output_dir, "txt")
    for directory in (target_dir, target_txt_dir):
        if not os.path.exists(directory):
            os.makedirs(directory)
    for row_idx in range(len(texts_df)):
        row = texts_df.iloc[row_idx, :]
        # row[0] is the source filename, row[1] the transcript text.
        pool.apply_async(_convert_audio_and_split_sentences,
                         args=(data_dir, target_dir, target_txt_dir,
                               checker, row[0], row[1]))
    pool.close()
    pool.join()
    # Keep only stems for which BOTH the wav and the txt were produced.
    wav_stems = {name[:-4] for name in os.listdir(target_dir)}
    txt_stems = {name[:-4] for name in os.listdir(target_txt_dir)}
    pairs = [
        (os.path.abspath(os.path.join(target_dir, stem + ".wav")),
         os.path.abspath(os.path.join(target_txt_dir, stem + ".txt")))
        for stem in wav_stems & txt_stems
    ]
    # Write the manifest to disk as a headerless CSV.
    data_df = pandas.DataFrame(data=pairs,
                               columns=["wav_filename", "txt_filename"])
    data_df.to_csv(os.path.join(output_dir, "WEB.csv"),
                   index=False,
                   header=False)
Example #8
0
                    netaddr.IPNetwork(item2)
                ]
                for subnet in subnets:
                    # find indices to insert start and end addresses
                    first = bisect.bisect_left(ranges, subnet.first)
                    last = bisect.bisect_right(ranges, subnet.last)
                    # check the overlap conditions and return if one is met
                    if first == last == 1 and item[1] == item2[1]:
                        item_result = item
                        v_subnet.remove(item2)
                    if first == 0 and last == 2 and item[1] == item2[1]:
                        item_result = item2
            result.append(item_result)
    return (result)


# Load the active full-table routes and the VRF routes, both exported as
# JSON dumps from the router.
with open("inet.0_active-full.json", "r") as f:
    inet0 = json.load(f)['Rows']
with open("vrf_active.json", "r") as f:
    itepremium = json.load(f)

# NOTE(review): `mp` is called like multiprocessing.Pool here (20 workers) —
# confirm the import alias.
pool = mp(20)
# Run the overlap check over every VRF entry. The map() return value is
# discarded, so `subnets_overlap` is apparently expected to populate the
# module-level `result` used below.
# NOTE(review): pool workers run in separate processes and do not share
# globals with the parent, so worker-side mutations of `result` would be
# lost — verify how `result` is actually filled.
pool.map(subnets_overlap, itepremium)
pool.close()
pool.join()

# Persist whatever accumulated in `result`.
with open("result.json", "w") as f:
    json.dump(result, f)

    # print(subnet[0] + " : " + subnet[1] + " | " +  g_subnet[0] + " : " + g_subnet[1] +  " have overlapping")
Example #9
0
# TODO: Write alert parameter function
#       Automate code storage
#       Dockerise

if __name__ == '__main__':

    # YAML config path, relative to this script's directory.
    configLocation = './../config.yaml'
    config = app.createConfigFrom(configLocation)

    # If there isn't a Spotify authorisation code yet, obtain one: spin up a
    # local callback server in a child process and open the auth URL in the
    # user's browser.
    # NOTE(review): `== None` would conventionally be `is None`; code left
    # unchanged here.
    if config['app']['spotify']['code'] == None:
        handWrapped = app.App(config)
        srv = mp(target=server.Server(handWrapped, route.Handler).spinUp,
                 args=(
                     'localhost',
                     9898,
                 ))
        srv.start()
        print("server running")
        # new=2 asks the browser to open the auth request in a new tab.
        webbrowser.open(auth.buildSpotifyInitialAuthRequest(handWrapped),
                        new=2)

    # Wait until the code has been stored in the config
    # FIXME: currently storage of code isn't automatic
    print('waiting for code ...')
    while config['app']['spotify']['code'] == None:
        # Poll: re-read the config from disk once a second until the code
        # appears.
        config = app.createConfigFrom(configLocation)
        sleep(1)

    # Get the initial set of tokens for authorisation if None exist
Example #10
0
    args = vars(parser.parse_args())
    input_txt_dir = args["txt_dir"]  # the txt sub-directory inside the input folder
    dict_file = args["dict_file"]
    failed_txt_dir = os.path.join(args["output"], 'fix-failed-txt')  # failed fixes are written under <output>/fix-failed-txt
    if not os.path.isdir(failed_txt_dir):
        os.makedirs(failed_txt_dir)
    # failed_wav_dir = os.path.join(args["output"], 'failed-wav')
    # if not os.path.isdir(failed_wav_dir):
    #     os.makedirs(failed_wav_dir)

    dict_class = phoneDict(dict_file)  # load the phoneme dictionary
    print("test word: %s : %s " % ('hello',dict_class.txtToDict2("今天。")))  # sanity-check the dictionary lookup
    input_files = os.listdir(input_txt_dir)
    data_num = 0
    total_num = len(input_files)
    # NOTE(review): `mp` is called like multiprocessing.Pool here (32
    # workers) — confirm the import alias.
    processers = mp(32)
    for index,file_name in enumerate(input_files):
        # break
        # print("Processing: %s" % (file_name))
        lines = open(os.path.join(input_txt_dir, file_name)).readlines()
        # wavfile_path = os.path.join(input_wav_dir, file_name[:-4]+'.wav')
        txtfile_path = os.path.join(input_txt_dir, file_name)
        # output_wav_file = os.path.join(output_wav_dir, file_name[:-4]+'.wav') 
        failed_txt_file = os.path.join(failed_txt_dir, file_name)
        line = lines[0].strip()
        # NOTE(review): str.decode() only exists on Python 2 (or on bytes);
        # under Python 3 this always raises and the file is skipped —
        # verify the intended interpreter version.
        try:
            line = line.decode('utf-8', 'ignore').strip()
        except:
            print("failed decode %s " % (file_name))
            continue
        print(line)
from multiprocessing import Process as mp

def send_text_strings(filepath):
	"""Replay a trace-dump file line by line to the global `client` connection.

	Each line is converted from single- to double-quoted JSON, parsed, and
	sent, with a random 0.5-3s pause between lines. Ctrl-C stops the replay.
	"""
	try:
		with open(filepath, 'r') as trace_hist:
			for record in trace_hist:
				payload = re.sub("'", '"', record)
				client.send(json.loads(payload))
				print(payload)
				time.sleep(random.uniform(0.5, 3))
	except KeyboardInterrupt:
		print("User interrupted")

if __name__ == "__main__":
	# Expect exactly two trace-dump file paths as CLI arguments.
	if len(sys.argv) < 3:
		raise TypeError("Usage: './proxy_clients.py trace_dump_file_1_path.txt trace_dump_file_2_path.txt'")
	client = connection('localhost', 5432)
	try:
		client.connect('localhost', 6666)
	except ConnectionRefusedError:
		# BUG FIX: the original message printed sys.argv[1] (a trace-file
		# path) as the host and carried a stray trailing quote; report the
		# address actually dialed above.
		print("No server found running at localhost:6666")
		sys.exit()
	except:
		# Any other failure is unexpected: log and re-raise.
		print("Failed to connect to server")
		raise
	path1 = str(sys.argv[1])
	path2 = str(sys.argv[2])
	# Replay both trace files concurrently over the shared connection.
	proc1 = mp(target=send_text_strings, args=(path1,))
	proc2 = mp(target=send_text_strings, args=(path2,))
	proc1.start()
	proc2.start()
		
Example #12
0
        default='output')
    args = vars(parser.parse_args())
    input_dir = os.path.join(os.getcwd(), args["input"])
    output_dir = os.path.join(os.getcwd(), args["output"])
    output_wav_dir = os.path.join(output_dir, 'wav')
    output_txt_dir = os.path.join(output_dir, 'txt')
    if not os.path.isdir(output_dir):
        os.makedirs(output_dir)
    if not os.path.isdir(output_wav_dir):
        os.makedirs(output_wav_dir)
    if not os.path.isdir(output_txt_dir):
        os.makedirs(output_txt_dir)

    print input_dir
    file_num = 0
    processers = mp(8)
    for root, root_dir_names, root_file_names in os.walk(input_dir):
        # print root, root_dir_names, len(root_file_names)
        for filename in fnmatch.filter(root_file_names, '*.txt'):
            print("Processing: %s " % (filename.decode('gbk')))
            file_path = os.path.join(root, filename)
            try:
                fin = open(file_path, 'r')
                lines = fin.readlines()
                fin.close()
            except:
                print("Failed open %s" % (filename.decode('gbk')))
                continue
            output_wav_dir = os.path.join(output_dir, filename, 'wav')
            output_txt_dir = os.path.join(output_dir, filename, 'txt')
            if not os.path.isdir(output_wav_dir):
Example #13
0
def jenkins_build(mesos_masters=[
    ('[email protected]', 'nixos-vagrant-configs/mesos-zk-01'),
    ('[email protected]', 'nixos-vagrant-configs/mesos-zk-02'),
    ('[email protected]', 'nixos-vagrant-configs/mesos-zk-03'),
],
                  mesos_slaves=[('[email protected]',
                                 'nixos-vagrant-configs/slave')],
                  cleanup=True):
    """Run a jenkins build.

    Spins up Railtrack and the mesos cluster in parallel, provisions every
    node, runs the acceptance tests, and (optionally) destroys all VMs.

    Args:
        mesos_masters: (ssh-target, config-dir) pairs for the master nodes.
            Note: default lists are shared across calls but never mutated here.
        mesos_slaves: (ssh-target, config-dir) pairs for the slave nodes.
        cleanup: destroy all VMs at the end when truthy
            (True/'yes'/'y'/'Y'/'YES') — both on success and on failure.
    """
    nodes = mesos_masters + mesos_slaves

    # clean previous build logs
    local('rm -f log/*')

    def _provision_obor(nodes=nodes):
        # Provision every node via fab in parallel, retrying the whole
        # batch until all exit codes are zero — at most three attempts.
        log_green('running _provision_obor')
        local('chmod 600 nixos-vagrant-configs/vagrant.priv')

        count = 1
        # BUG FIX: the original `while True or count > 3:` is always True
        # (short-circuit on `True`), so the three-attempt cap never took
        # effect and a persistently failing provision looped forever.
        while count <= 3:
            jobs = []
            for node, hostdir in nodes:
                jobs.append(
                    mp(target=local,
                       args=("fab -i nixos-vagrant-configs/vagrant.priv " +
                             "-H %s update:" % node +
                             "host_dir=%s," % hostdir + "rsync='yes'," +
                             "nix_gc='no'," + "nix_release='18.09'," +
                             "switch='no'" + "> log/`date '+%Y%m%d%H%M%S'`." +
                             "%s.provision.log 2>&1" % node, )))
            for job in jobs:
                job.start()

            # A zero sum means every node provisioned successfully.
            exit_code = 0
            for job in jobs:
                job.join()
                exit_code = exit_code + job.exitcode

            if exit_code == 0:
                break
            count = count + 1

        log_green('_provision_obor completed')

    def _test_obor(mesos_masters=mesos_masters, mesos_slaves=mesos_slaves):
        # Run the acceptance tests sequentially against every node.
        log_green('running _test_obor')

        obor_env = [
            "eval `ssh-agent`",
            "ssh-add $PWD/nixos-vagrant-configs/*.priv",
        ]
        # local() doesn't support most context managers
        # so let's bake a local environment file and consume as a prefix()
        with open('shell_env', 'w') as shell_env:
            for line in obor_env:
                shell_env.write(line + '\n')
        local('chmod +x shell_env')

        local('chmod 600 nixos-vagrant-configs/vagrant.priv')
        with settings(shell='/run/current-system/sw/bin/bash -l -c'):
            with prefix(". ./shell_env"):  # pylint: disable=not-context-manager

                for target, _ in mesos_masters:
                    local(
                        "fab -i nixos-vagrant-configs/vagrant.priv " +
                        "-H {} acceptance_tests_mesos_master ".format(target) +
                        "> log/`date '+%Y%m%d%H%M%S'`."
                        "{}.test_obor.log 2>&1".format(target))

                for target, _ in mesos_slaves:
                    local(
                        "fab -i nixos-vagrant-configs/vagrant.priv " +
                        "-H {} acceptance_tests_mesos_slave ".format(target) +
                        "> log/`date '+%Y%m%d%H%M%S'`."
                        "{}.test_obor.log 2>&1".format(target))

        log_green('_test_obor completed')

    def _flow1():
        # spin up and provision the Cluster
        local('vagrant up')
        sleep(45)  # allow VMs to boot up
        _provision_obor()

    def _flow2():
        # spin up Railtrack, which is required for OBOR
        spin_up_railtrack()
        sleep(45)  # allow VMs to boot up
        provision_railtrack()

    try:
        # Run both flows in parallel — the cluster and Railtrack are
        # independent until provisioning completes.
        p_flow1 = mp(target=_flow1)
        p_flow2 = mp(target=_flow2)

        p_flow1.start()
        p_flow2.start()

        p_flow1.join()
        p_flow2.join()

        # reload after initial provision
        local('vagrant reload')

        sleep(240)  # allow the start services

        # test all the things
        _test_obor()

        # and now destroy Railtrack and mesos VMs
        if cleanup in [True, 'yes', 'y', 'Y', 'YES']:
            clean()
    except:  # noqa: E722 pylint: disable=bare-except
        # Deliberate catch-all boundary: log, clean up if requested, abort.
        log_red("jenkins_build() FAILED, aborting...")
        if cleanup in [True, 'yes', 'y', 'Y', 'YES']:
            clean()
        sys.exit(1)