コード例 #1
0
ファイル: circuit.py プロジェクト: ShangChuanhe/AxLS
    def __init__(self, rtl, tech, saif=""):
        '''
        Build the circuit representation: synthesize the RTL with yosys
        and parse the resulting netlist into an XML tree using the given
        technology library.

        Parameters
        ----------
        rtl : string
            path to the rtl file
        tech : string
            path to the technology file
        saif : string
            path to the saif file
        '''
        self.rtl_file = rtl
        self.tech_file = tech
        # The top module is assumed to be named after the verilog file.
        self.topmodule = rtl.split('/')[-1].replace(".v", "")
        self.netl_file = synthesis(rtl, tech, self.topmodule, 'yosys')

        # Pull the useful netlist attributes up onto the circuit object.
        parsed = Netlist(self.netl_file, Technology(tech))
        self.netl_root = parsed.root
        self.inputs = parsed.circuit_inputs
        self.outputs = parsed.circuit_outputs
        self.raw_inputs = parsed.raw_inputs
        self.raw_outputs = parsed.raw_outputs
        self.raw_parameters = parsed.raw_parameters

        if saif != "":
            self.saif_parser(saif)

        self.output_folder = path.dirname(path.abspath(rtl))
コード例 #2
0
ファイル: program.py プロジェクト: SunnyBlueCat/MyDisser
def handle_data():
    """Flask view: turn submitted text into a synthesized audio piece.

    Reads `text` and `temp` (tempo) from the posted form, tokenizes the
    text, looks up an audio fragment per word, synthesizes the fragments
    into one piece written to static/output.wav, and renders index.html
    on success or error.html when fewer than two signals were found.
    """
    start_time = datetime.now()
    print(start_time)
    # Read the data submitted through the form.
    text = request.form['text']
    temp = int(request.form['temp'])

    # Parse the text into a list of words.
    # NOTE(review): the returned tokensCount is never used below;
    # uniqueWords = len(words) is passed to the template instead --
    # confirm which count the template is supposed to show.
    words, tokensCount = textProcessing.parseText(text)
    print(words)

    # Find the audio files corresponding to the word list.
    melody, found_words = getData.findMelody(words)
    print(found_words)
    # Process the audio files; returns a list of signals.
    y = musicProcessing.musicProc(melody, temp)

    if len(y) >= 2:
        # Synthesize the final piece from the individual signals.
        y_music = synthesis.synthesis(y, temp)
        # NOTE(review): librosa.output.write_wav was removed in librosa
        # 0.8; this code requires librosa < 0.8 (or soundfile instead).
        librosa.output.write_wav("static/output.wav", y_music, sr=22050)

        # Despite the name, this is the total number of parsed words
        # unless parseText already deduplicates -- TODO confirm.
        uniqueWords = len(words)
        # print(melody)
        print(datetime.now() - start_time)
        return render_template('index.html',
                               title='Home',
                               tokens=words,
                               text=text,
                               tokensCount=uniqueWords,
                               uniqueWords=uniqueWords)
    else:
        print(datetime.now() - start_time)
        return render_template('error.html', title='Error')
コード例 #3
0
    def notify_fu(type, result, already_notify):
        """Persist and broadcast items of `result` not yet notified.

        `already_notify` is a set shared with the caller and updated in
        place (via |=) so later calls skip items notified here. `type`
        (name kept for interface compatibility although it shadows the
        builtin) labels the check that produced `result`, a mapping of
        target -> data.
        """
        need_notify = set(result) - already_notify
        already_notify |= set(result)
        if need_notify:
            rtm_save_item({e: result[e] for e in need_notify}, type)
            # Human-readable summary; fixed the 'tagrgets' typo.
            msg = '%s targets:\n%s' % (type, ' '.join(need_notify))

            # Write the new items to a uniquely named per-day file and
            # append everything notified so far to all_notify.txt.
            tempname = '%s_%s_%s.txt' % (u_time_now_filename(),
                                         random.randint(1, 5), type)
            temppath = 'data/%s' % u_day_befor(0)
            u_mk_dir(temppath)
            u_write_to_file('%s/%s' % (temppath, tempname), need_notify)
            all_notify = 'data/%s/all_notify.txt' % u_day_befor(0)
            u_write_to_file(all_notify, already_notify)
            synthesis(all_notify, u_day_befor(0))
            synthesis('%s/%s' % (temppath, tempname), u_day_befor(0))
コード例 #4
0
def main():
    """LSTM-based voice conversion of one SF1 utterance to TF1.

    Loads speaker F0 statistics, analyses the source waveform, converts
    F0, loads the LSTM-predicted mel-cepstrum, synthesizes the converted
    waveform and writes it to data\\test\\<id>_lstm2.wav.
    """
    # Speaker F0 statistics. `[()]` reads the whole dataset and replaces
    # the `.value` attribute removed in h5py 3.x; `with` closes the files.
    # Raw strings avoid the invalid escape sequences (e.g. "\s") the
    # original literals relied on; the path values are unchanged.
    with h5py.File(r"data\stats\SF1.h5", mode='r') as orgstats_h5:
        sf0 = orgstats_h5['f0stats'][()]

    with h5py.File(r"data\stats\TF1.h5", mode='r') as tarstats_h5:
        tf0 = tarstats_h5['f0stats'][()]
    print(tf0)

    f = "100001"
    # Location of the source waveform.
    wavf = r"data\wav\SF1\100001.wav"

    print("wave")
    print(wavf)
    fs, x = wavfile.read(wavf)
    # np.float was removed from NumPy; builtin float gives the same float64.
    x = x.astype(float)

    # analyze F0, mcep, and ap
    f0, spc, ap = analyze(x)
    # Reference mel-cepstrum of the source utterance.
    with h5py.File('data/h5/SF1/100001.h5', 'r') as f1:
        mcep = np.array(f1.get('mcep')).astype('double')
    print(np.shape(mcep))

    # convert F0
    cvf0 = convert(f0, sf0, tf0)

    # Mel-cepstrum predicted by the LSTM model.
    with h5py.File('predict_lstm2.h5', 'r') as f1:
        predicted = np.array(f1.get('predict')).astype('double')
    print(np.shape(predicted))

    # Only the first 704 frames are used, presumably to match the length
    # of the analysed utterance -- TODO confirm.
    cmcep = predicted[0:704]

    wav = synthesis(cvf0,
                    cmcep,
                    ap,
                    r=mcep,
                    alpha=0.41,
                    )
    wavpath = os.path.join("data\\test", f + '_lstm2.wav')

    # Clip to the int16 range before writing the PCM file.
    wav = np.clip(wav, -32768, 32767)
    wavfile.write(wavpath, fs, wav.astype(np.int16))
    print(wavpath)
コード例 #5
0
def main():
    """GMM-based voice conversion of one SF1 utterance to TF1.

    Loads speaker F0/GV statistics and a trained GMM, analyses the
    source waveform, converts mel-cepstrum and F0, applies the GV
    postfilter and writes the result to data/test/<id>_gmm.wav.
    """
    # Speaker statistics. `[()]` reads the whole dataset and replaces the
    # `.value` attribute removed in h5py 3.x; `with` closes the files.
    with h5py.File("data/stats/SF1.h5", mode='r') as os_h5:
        sf0 = os_h5['f0stats'][()]
    with h5py.File("data/model/cvgv.h5", mode='r') as cgv_h5:
        cvgvs = cgv_h5['cvgv'][()]
    with h5py.File("data/stats/TF1.h5", mode='r') as ts_h5:
        tf0 = ts_h5['f0stats'][()]
        tgv_h5 = ts_h5['gv'][()]
    print(tf0)

    # Trained mel-cepstrum GMM converter.
    mpath = 'model/GMM.pkl'
    mcepgmm = GMMConvertor(n_mix=32, covtype="full")
    param = joblib.load(mpath)
    mcepgmm.open_from_param(param)

    f = "100001"
    # Location of the source waveform.
    wavf = "data/wav/SF1/100001.wav"

    print("wave")
    print(wavf)
    fs, x = wavfile.read(wavf)
    # np.float was removed from NumPy; builtin float gives the same float64.
    x = x.astype(float)

    # analyze F0, mcep, and ap
    f0, spc, ap = analyze(x)
    mcep = pysptk.sp2mc(spc, 24, 0.410)

    # Convert everything but the 0th (power) coefficient, then restore it.
    mcep_0th = mcep[:, 0]
    cvmcep_wopow = mcepgmm.convert(static_delta(mcep[:, 1:]), cvtype="mlpg")
    cvmcep = np.c_[mcep_0th, cvmcep_wopow]

    # convert F0
    cvf0 = convert(f0, sf0, tf0)
    # GV postfilter compensates over-smoothing of the converted mcep.
    cvmwGV = mcepgv.postfilter(cvmcep, tgv_h5, cvgvstats=cvgvs, startdim=1)

    wav = synthesis(
        cvf0,
        cvmwGV,
        ap,
        r=mcep,
        alpha=0.41,
    )
    wavpath = os.path.join("data/test", f + '_gmm.wav')

    # Clip to the int16 range before writing the PCM file.
    wav = np.clip(wav, -32768, 32767)
    wavfile.write(wavpath, fs, wav.astype(np.int16))
    print(wavpath)
コード例 #6
0
def run(texs):
    """Synthesize each texture at every configured window size.

    Parameters
    ----------
    texs : dict
        Maps texture name -> source texture image.

    Returns
    -------
    dict
        Maps texture name -> list of synthesized images, one per window
        size in data.WIN_SIZES.
    """
    new_texs = {}
    orig_start = time.perf_counter()
    # The outer enumerate index was unused and has been dropped.
    for tex_name in texs:
        new_imgs = []
        for j, size in enumerate(data.WIN_SIZES):
            print(f"Starting synthesis for {tex_name} w={size}")
            start = time.perf_counter()
            new_img = s.synthesis(texs[tex_name], data.NEW_IMG_SIZE, size,
                                  data.GAUSS_MASKS[j])
            print(f"Finished synthesis in {time.perf_counter() - start}s")
            new_imgs.append(new_img)
        new_texs[tex_name] = new_imgs
    print(f"Final end time of {time.perf_counter() - orig_start}s")
    return new_texs
コード例 #7
0
ファイル: main.py プロジェクト: shun60s/Python-WORLD-Win10
 def decode(self, dat: dict) -> dict:
     '''
     Combine the WORLD components (F0, spectrogram and aperiodicity)
     held in `dat` into a sound signal.

     :param dat: dictionary containing the WORLD components
     :return: the same dictionary with the synthesized speech under 'out'
     '''
     print('enter decode')
     if dat['is_requiem']:
         # Requiem-style synthesis needs seed signals for this sample rate.
         print('call synthesisRequiem')
         y = synthesisRequiem(dat, dat, get_seeds_signals(dat['fs']))
     else:
         print('call synthesis')
         y = synthesis(dat, dat)
     # Rescale only when the waveform would clip outside [-1, 1].
     peak = np.max(np.abs(y))
     if peak > 1.0:
         logging.info('rescaling waveform')
         y /= peak
     dat['out'] = y
     return dat
コード例 #8
0
def run(texs, show_progress):
    """(dict of name:list of imgs) => dict of name:list of synthesized imgs
    Top level of the synthesis algorithm, calls lower functions to do heavy lifting.
    Returns synthesized images at different window sizes to be sent through plot()
    """
    new_texs = {}
    orig_start = time.perf_counter()
    # The outer enumerate index was unused and has been dropped; dict
    # iteration order is preserved.
    for tex_name in texs:
        new_imgs = []
        for j, size in enumerate(data.WIN_SIZES):
            print(f"Starting synthesis for {tex_name} w={size}")
            start = time.perf_counter()
            new_img = s.synthesis(texs[tex_name]["gray"],
                                  texs[tex_name]["clr"], data.NEW_IMG_SIZE,
                                  size, data.GAUSS_MASKS[j], show_progress)
            print(f"Finished synthesis in {time.perf_counter() - start}s")
            new_imgs.append(new_img)
        new_texs[tex_name] = new_imgs
    print(f"Final end time of {time.perf_counter() - orig_start}s")
    return new_texs
コード例 #9
0
#Opening Socrates outputs
    wb.open_new(
        os.path.join(
            projectDir, 'logical', 'Schematic', 'schematic_Connections_' +
            designName + '_' + design_version + '.pdf'))
    wb.open_new(
        os.path.join(projectDir, 'logical', designName, 'verilog',
                     designName + '.v'))
    wb.open_new(
        os.path.join(
            projectDir, 'arm.com-' + projectName + '-' + designName +
            '_design-' + design_version + '.xml'))
    wb.open_new(
        os.path.join(
            projectDir, 'arm.com-' + projectName + '-' + designName + '-' +
            design_version + '.xml'))

# STEP 7: SoC Synthesis
# ==============================================================================
if args.synthesis_req == "True":
    # args.synthesis_req arrives as a string flag, hence the literal
    # comparison against "True" rather than a boolean test.
    with open(args.design) as f:
        designJson = json.load(f)
    # Generate the synthesis collateral for the SoC from the design JSON.
    synthesis(designJson, designName, socVerilogDir, m0_module_name,
              m0_instance_name, synthDir, args.filelist, args.platform)
    # bleach_synth presumably cleans previous synthesis outputs before a
    # fresh run -- confirm against the Makefile in synthDir.
    subprocess.check_call(["make", "bleach_synth"], cwd=synthDir)
    subprocess.check_call(["make", "synth"], cwd=synthDir)

# STEP 8: Run chip level Cadre Flow
# ==============================================================================
コード例 #10
0
def main():
    """GMM-based voice conversion of one SF1 utterance to TF1 (Windows paths).

    Loads speaker F0/GV statistics and a trained GMM, analyses the
    source waveform, converts mel-cepstrum and F0, applies the GV
    postfilter and writes the result to data\\test\\<id>_gmm.wav.
    """
    # Speaker statistics. `[()]` reads the whole dataset and replaces the
    # `.value` attribute removed in h5py 3.x; `with` closes the files.
    # Raw strings avoid the invalid escape sequences (e.g. "\s") the
    # original literals relied on; the path values are unchanged.
    with h5py.File(r"data\stats\SF1.h5", mode='r') as orgstats_h5:
        sf0 = orgstats_h5['f0stats'][()]
    with h5py.File(r"data\model\cvgv.h5", mode='r') as cvgvstats_h5:
        cvgvstats = cvgvstats_h5['cvgv'][()]
    with h5py.File(r"data\stats\TF1.h5", mode='r') as tarstats_h5:
        tf0 = tarstats_h5['f0stats'][()]
        targvstats = tarstats_h5['gv'][()]
    print(tf0)

    # Trained mel-cepstrum GMM converter.
    mcepgmmpath = os.path.join(r"data\model", 'GMM.pkl')
    mcepgmm = GMMConvertor(n_mix=32,
                           covtype="full"
                           )
    param = joblib.load(mcepgmmpath)
    mcepgmm.open_from_param(param)

    f = "100001"
    # Location of the source waveform.
    wavf = r"data\wav\SF1\100001.wav"

    print("wave")
    print(wavf)
    fs, x = wavfile.read(wavf)
    # np.float was removed from NumPy; builtin float gives the same float64.
    x = x.astype(float)

    # analyze F0, mcep, and ap
    f0, spc, ap = analyze(x)
    mcep = pysptk.sp2mc(spc, 24, 0.410)

    # Convert everything but the 0th (power) coefficient, then restore it.
    mcep_0th = mcep[:, 0]
    cvmcep_wopow = mcepgmm.convert(static_delta(mcep[:, 1:]),
                                   cvtype="mlpg")
    cvmcep = np.c_[mcep_0th, cvmcep_wopow]

    # Convert F0 and apply the GV postfilter to the converted mcep.
    cvf0 = convert(f0, sf0, tf0)
    cvmcep_wGV = mcepgv.postfilter(cvmcep,
                                   targvstats,
                                   cvgvstats=cvgvstats,
                                   startdim=1)

    wav = synthesis(cvf0,
                    cvmcep_wGV,
                    ap,
                    r=mcep,
                    alpha=0.41,
                    )
    wavpath = os.path.join("data\\test", f + '_gmm.wav')

    # Clip to the int16 range before writing the PCM file.
    wav = np.clip(wav, -32768, 32767)
    wavfile.write(wavpath, fs, wav.astype(np.int16))
    print(wavpath)
コード例 #11
0
def main():
    """Command-line entry point for neural-style synthesis.

    Parses CLI options, loads and rescales the content/style images,
    normalizes the style blend weights, prepares the initial guess,
    then runs synthesis() and saves checkpoint/final images.
    """
    parser = build_parser()
    options = parser.parse_args()

    if not os.path.isfile(options.network):
        parser.error(
            "Network %s does not exist. (Did you forget to download it?)" %
            options.network)

    content_image = imread(options.content)
    style_images = [imread(style) for style in options.styles]

    # Optionally resize the content image to the requested width while
    # preserving its aspect ratio.
    # NOTE(review): scipy.misc.imresize was removed in SciPy 1.3; this
    # code requires an older SciPy (or porting to PIL/skimage).
    width = options.width
    if width is not None:
        new_shape = (int(
            math.floor(
                float(content_image.shape[0]) / content_image.shape[1] *
                width)), width)
        content_image = scipy.misc.imresize(content_image, new_shape)
    target_shape = content_image.shape
    # Scale each style image relative to the content image width.
    for i in range(len(style_images)):
        style_scale = STYLE_SCALE
        if options.style_scales is not None:
            style_scale = options.style_scales[i]
        style_images[i] = scipy.misc.imresize(
            style_images[i],
            style_scale * target_shape[1] / style_images[i].shape[1])

    # Normalize the style blend weights so they sum to 1.
    style_blend_weights = options.style_blend_weights
    if style_blend_weights is None:
        # default is equal weights
        style_blend_weights = [1.0 / len(style_images) for _ in style_images]
    else:
        total_blend_weight = sum(style_blend_weights)
        style_blend_weights = [
            weight / total_blend_weight for weight in style_blend_weights
        ]

    initial = options.initial
    if initial is not None:
        initial = scipy.misc.imresize(imread(initial), content_image.shape[:2])
        # Initial guess is specified, but not noiseblend - no noise should be blended
        if options.initial_noiseblend is None:
            options.initial_noiseblend = 0.0
    else:
        # Neither initial, nor noiseblend is provided, falling back to random generated initial guess
        if options.initial_noiseblend is None:
            options.initial_noiseblend = 1.0
        if options.initial_noiseblend < 1.0:
            initial = content_image

    if options.checkpoint_output and "%s" not in options.checkpoint_output:
        parser.error("To save intermediate images, the checkpoint output "
                     "parameter must contain `%s` (e.g. `foo%s.jpg`)")

    # synthesis() yields (iteration, image) pairs; iteration is None for
    # the final image (see the branch below that writes options.output).
    for iteration, image in synthesis(
            network=options.network,
            initial=initial,
            initial_noiseblend=options.initial_noiseblend,
            content=content_image,
            styles=style_images,
            iterations=options.iterations,
            content_weight=options.content_weight,
            content_weight_blend=options.content_weight_blend,
            style_weight=options.style_weight,
            style_layer_weight_exp=options.style_layer_weight_exp,
            style_blend_weights=style_blend_weights,
            tv_weight=options.tv_weight,
            learning_rate=options.learning_rate,
    ):
        output_file = None
        combined_rgb = image
        if iteration is not None:
            # Intermediate checkpoint: substitute the iteration number
            # into the user-supplied filename pattern.
            if options.checkpoint_output:
                output_file = options.checkpoint_output % iteration
        else:
            output_file = options.output
        if output_file:
            imsave(output_file, combined_rgb)
コード例 #12
0
from open import openWave
from synthesis import synthesis
import sys

name = "yamaoka"

# Scratch driver: sweep the synthesis ratio from 0.0 to 1.0 in 21 steps.
if __name__ == '__main__':
    for step in range(21):
        synthesis(step / 20, name)
コード例 #13
0
def rtm(debug=False):
    """Real-time market monitoring loop.

    Repeatedly runs a battery of checks (attention, macd, acti, doctor,
    yuzen), notifies newly flagged targets via notify_fu, and exits
    after 15:00, persisting the accumulated notification list and daily
    stats.

    :param debug: currently unused except in the commented-out itchat
        code below -- kept for interface compatibility.
    """
    # Set of targets already notified today; mutated in place by notify_fu.
    already_notify = rtm_get_already_notified()

    # import itchat
    # itchat.auto_login(hotReload=True)
    # groups = itchat.get_chatrooms(update=True)
    # target_group = ''
    # nickname = '三语股票测试'
    # # nickname = '三语股票测试' if debug else '三语股票'
    # for group in groups:
    #     if group['NickName'] == nickname:
    #         target_group = group['UserName']
    #         break

    def notify_fu(type, result, already_notify):
        # Only handle targets not seen before; the shared set is updated
        # in place so later calls skip them.
        need_notify = set(result) - already_notify
        already_notify |= set(result)
        if need_notify:
            rtm_save_item({e: result[e] for e in need_notify}, type)
            # NOTE(review): 'tagrgets' is a typo for 'targets'; msg is
            # currently unused because the itchat send below is disabled.
            msg = '%s tagrgets:\n%s' % (type, ' '.join(need_notify))

            # Write the new items to a uniquely named per-day file and
            # append everything notified so far to all_notify.txt.
            tempname = '%s_%s_%s.txt' % (u_time_now_filename(),
                                         random.randint(1, 5), type)
            temppath = 'data/%s' % u_day_befor(0)
            u_mk_dir(temppath)
            u_write_to_file('%s/%s' % (temppath, tempname), need_notify)
            all_notify = 'data/%s/all_notify.txt' % u_day_befor(0)
            u_write_to_file(all_notify, already_notify)
            synthesis(all_notify, u_day_befor(0))
            synthesis('%s/%s' % (temppath, tempname), u_day_befor(0))
            # itchat.send(msg, toUserName=target_group)
            # u_itchat_send_file('data/dgn/%s/dgn_%s' % (u_day_befor(0),tempname), toUserName=target_group)

    print('enter while')
    while True:

        # Each check returns the flagged targets (falsy when none).
        result = check_attention()
        if result:
            synthesis(u_create_path_by_system('t_attention_check.txt'))
        notify_fu('attention', result, already_notify)

        result = dh_check_macd()
        if result:
            synthesis(u_create_path_by_system('t_macd_check.txt'))
        notify_fu('macd', result, already_notify)

        result = check_acti()
        if result:
            synthesis(u_create_path_by_system('t_acti_check.txt'))
        notify_fu('acti', result, already_notify)

        result = Fundamental.check_doctor()
        if result:
            synthesis(u_create_path_by_system('t_doctor_check.txt'))
        notify_fu('doctor', result, already_notify)

        result = Fundamental.check_yuzen()
        if result:
            synthesis(u_create_path_by_system('t_yuzen_check.txt'))
        notify_fu('yuzen', result, already_notify)

        # Stop for the day at or after 15:00.
        now = datetime.datetime.now()
        if now.hour >= 15:
            break
        # Throttle during 11:30-13:00 and before 09:26.
        # NOTE(review): outside these windows the loop re-runs the checks
        # back to back with no sleep -- confirm that is intended.
        if (now.hour == 11 and now.minute >= 30) or (
                now.hour >= 12 and now.hour < 13) or (now.hour == 9
                                                      and now.minute < 26):
            time.sleep(5)
        continue

    # Persist the full notified set and the daily stats before exit.
    u_write_to_file(u_create_path_by_system('t_notified.txt'), already_notify)
    synthesis(u_create_path_by_system('t_notified.txt'))
    # u_itchat_send_file('data/dgn/dgn_t_notified.txt', toUserName=target_group)
    daily_stat_rtm()

    print('safe quit')