Example #1
def get_context_list_of_a_phone(filepath, target_phone):
    yaml_file = Utility.yaml_load(filepath)
    print 'Target phone : {}'.format(target_phone)
    for event in yaml_file['context_events']:
        name = event['name']
        args = event['value_getter']['event_feature']['args']
        # The second argument, when present, identifies the feature type.
        arg_type = None
        if len(args) > 1:
            arg_type = args[1]['args'][0]

        if arg_type == 'entity':
            d = '{}'.format(args[0]).split(',')
            for ph in d:
                if "'{}'".format(target_phone) in ph:
                    # Keep only the index between the square brackets.
                    ph = ph.split('[')[-1].split(']')[0]
                    if 'begin' in name and ph == '1':
                        print name, ph
    def get_data_with_missing_values(self, 
        num_sampling, subtract_typical_contour, feature_name=None, 
        delta=False, deltadelta=False):

        # Resample the raw contour onto a fixed-length grid.
        x = np.linspace(0, len(self.raw_data), num=num_sampling)
        Y = np.interp(x, np.arange(len(self.raw_data)), self.raw_data)

        data = Y

        if feature_name is not None:
            training_data = np.interp(
                x, np.arange(len(self.training_feature[feature_name])),
                self.training_feature[feature_name])
            data = training_data

        # Negative lf0 marks unvoiced frames; treat them as missing values.
        data[Y < 0] = np.nan

        if subtract_typical_contour:
            # Subtract the tone's typical (average) contour, leaving the residual.
            typical_tone_path = '/home/h1/decha/Dropbox/Inter_speech_2016/Syllable_object/Typical_contour/50dims/tone_{}.pickle'.format(self.tone)
            typical_tone_obj = Utility.load_obj(typical_tone_path)
            data = data - typical_tone_obj

        if delta:
            # Append first (and optionally second) numerical derivatives.
            y_delta = np.gradient(data)
            if deltadelta:
                y_delta_delta = np.gradient(y_delta)
                y_delta = np.append(y_delta, y_delta_delta)
            data = np.append(data, y_delta)

        return np.array(data)
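
The resampling step above uses np.interp to map a variable-length contour onto a fixed-length grid. A minimal standalone sketch of the same idea, with a made-up five-point contour:

import numpy as np

raw = np.array([5.0, 5.2, 5.1, 4.9, 5.0])          # toy lf0 contour
x = np.linspace(0, len(raw), num=10)                # fixed-length sample grid
resampled = np.interp(x, np.arange(len(raw)), raw)  # 10 evenly spaced values
print len(resampled)                                # 10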
def find_min_y(db_all):

    # Track the smallest observed F0 (in Hz) above the 150 Hz floor.
    min_y = 600

    for syl in Utility.load_obj(db_all):

        if len(syl['raw_lf0']) == 0:
            continue

        r = np.array(syl['raw_lf0'])
        r[r < 0] = np.nan

        # lf0 is log-scaled F0, so exp() recovers Hz.
        if min(np.exp(r)) < 150:
            continue

        if min(np.exp(r)) < min_y:
            min_y = min(np.exp(r))

    print 'min y = ', min_y
def load_mono(mono_file):

    m = []

    for line in Utility.read_file_line_by_line(mono_file):
        spl = line.split(' ')

        # The third column of a mono label line is the phone name.
        ph = Utility.trim(spl[2])
        m.append(ph)

    return m
def run_data_processor(db_file):

    db = Utility.load_obj(db_file)

    real = []
    Y = []

    for syl in db:

        # Label '2' is mapped to class 1.
        if syl['stress'] == '2':
            real.append(1)
        elif syl['stress'] in ['0', '1']:
            real.append(int(syl['stress']))
        else:
            # Unexpected label: report it, then keep its integer value.
            print syl['stress']
            real.append(int(syl['stress']))

        Y.append(
            syl['TF']['intepolate151_normalize_by_preprocessing.normalize']
            ['data'])

    real = np.array(real)
    Y = np.array(Y)

    return (Y, real)
def plot(data, inverselengthscale, labels, name_out_file, title):

    x = data[:, 0]
    y = data[:, 2]

    # Labels must be a numpy array so boolean masks and fancy indexing work below.
    labels = np.array(labels)

    print set(labels)

    # Subsample at most 1000 points to keep the scatter readable.
    if len(labels) > 1000:
        ind = np.random.choice(len(labels), 1000)
        x = x[ind]
        y = y[ind]
        labels = labels[ind]

    colors = Utility.get_color_map(len(set(labels)))

    plt.clf()
    # One scatter call per class so each label gets its own colour and legend entry.
    for idx, s in enumerate(sorted(set(labels))):
        plt.scatter(x[labels == s], y[labels == s], c=colors[idx], label=s)
    plt.legend()
    plt.title(title)

    plt.savefig(name_out_file)
def get_training_object(train_list, feature):
    syl_list = []
    for t in train_list:
        syl_obj = Utility.load_obj(t)
        syl_list += syl_obj.syllables_list
    syllable_management_object = SyllableDatabaseManagement(syllable_list=syl_list)
    Y, names, tone, stress, syllable_short_long_type, syllable_position, phoneme, syllable_type = syllable_management_object.get_GP_LVM_training_data(
        feature_key=feature,
        dur_position=[1, 2],
        delta_bool=True,
        delta2_bool=True,
        num_sampling=50)

    # Map the string stress labels onto binary classes.
    stress = np.array(stress)
    stress[stress == 'Stress'] = 1
    stress[stress == 'Unstress'] = 0

    label_feature = stress
    alldata = ClassificationDataSet(len(Y[0]), 1, nb_classes=len(set(label_feature)))
    for idx, yyy in enumerate(Y):
        alldata.addSample(yyy, label_feature[idx])

    # PyBrain expects one-hot class targets for training.
    alldata._convertToOneOfMany()

    return alldata
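
For context, a minimal sketch of the PyBrain ClassificationDataSet workflow used above, with made-up two-dimensional samples; _convertToOneOfMany() turns the integer class column into one-hot target vectors:

from pybrain.datasets import ClassificationDataSet

ds = ClassificationDataSet(2, 1, nb_classes=2)  # 2 inputs, 1 target column
ds.addSample((0.1, 0.9), 1)
ds.addSample((0.8, 0.2), 0)
ds._convertToOneOfMany()                        # targets become one-hot rows
print ds['target']                              # e.g. [[0 1], [1 0]]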
Example #8
def get_last_syllable(filepath, name):

    # sil-sil+sil/A:X-X+X/S:l-x+z^/B:x-x+3/C:x_x-x_x+1_1/D:x-x+2/E:x-x+1/F:x_x-x_x+2_1/G:x_35_23/H:x-x+45

    global out

    pattern = re.compile(r"""(?P<start>.+)\s(?P<end>.+)\s.+\-(?P<curphone>.+)\+.+/A:.+/C:.+\-(?P<cur_position_syl>.+)_.+\+.+/D:.+/F:.+\-.+_(?P<cur_num_syl>.+)\+.+/G:.+""",re.VERBOSE)

    count = 0

    for line in Utility.read_file_line_by_line(filepath):

        count = count + 1

        match = re.match(pattern, line)
        if match:
            cur_position_syl = match.group('cur_position_syl')
            cur_num_syl = match.group('cur_num_syl')

            iden = '{}_{}'.format(name, count)

            # 'x' marks an undefined position (e.g. pause/silence labels).
            if cur_position_syl == 'x':
                continue

            # Last syllable of a word that has more than one syllable.
            if (cur_position_syl == cur_num_syl) and (cur_num_syl != '1'):
                out.append(iden)
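
The full-context regex is easier to follow on a single label line. A minimal sketch with a hypothetical line (times and context values are invented; group meanings are inferred from how the function compares them):

import re

pattern = re.compile(r"""(?P<start>.+)\s(?P<end>.+)\s.+\-(?P<curphone>.+)\+.+/A:.+/C:.+\-(?P<cur_position_syl>.+)_.+\+.+/D:.+/F:.+\-.+_(?P<cur_num_syl>.+)\+.+/G:.+""", re.VERBOSE)

line = '0 50000 a-r+iy/A:1-2+3/S:l-x+z^/B:x-x+3/C:x_x-2_3+1_1/D:x-x+2/E:x-x+1/F:x_x-1_2+2_1/G:x_35_23/H:x-x+45'

m = re.match(pattern, line)
print m.group('curphone')          # 'r'
print m.group('cur_position_syl')  # '2'
print m.group('cur_num_syl')       # '2': equal to the position, so this is a last syllable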
def run_nn_train_tone_classification(trndata, tstdata, outpath, name, fold):

    print 'run_nn_train_tone_classification'

    # One hidden tanh layer with 20 units; output size matches the one-hot classes.
    fnn = buildNetwork(trndata.indim, 20, trndata.outdim, hiddenclass=TanhLayer, bias=True)

    trainer = BackpropTrainer(fnn, dataset=trndata, weightdecay=0.006)

    acc = 0.0
    tag = 'No acc'
    real_obj = []
    predicted_obj = []

    class_recog = 0

    for i in range(100):
        trainer.trainEpochs(1)
        trnresult = percentError(trainer.testOnClassData(),
                                 trndata['class'])
        tstresult = percentError(trainer.testOnClassData(
            dataset=tstdata), tstdata['class'])
        print "epoch: %4d" % trainer.totalepochs, \
              "  train error: %5.2f%%" % trnresult, \
              "  test error: %5.2f%%" % tstresult
        predicted = np.array(trainer.testOnClassData(dataset=tstdata))
        real = np.array(tstdata['class'][:, 0])
        # Keep the epoch that recognises the largest number of distinct tones.
        if len(set(predicted)) >= class_recog:
            class_recog = len(set(predicted))
            print 'Tone num : {}'.format(set(predicted))
            tag = "Add acc when epoch: %4d" % trainer.totalepochs
            real_obj = real
            predicted_obj = predicted
            acc = accuracy_score(real, predicted)

    Utility.save_obj(real_obj, '{}/{}_fold_{}_real.npy'.format(outpath, name, fold))
    Utility.save_obj(predicted_obj, '{}/{}_fold_{}_predicted.npy'.format(outpath, name, fold))
    print tag
    print 'Accuracy : {}'.format(acc)
    print 'Precision : {}'.format(precision_score(real_obj, predicted_obj, average=None))
    print 'Recall : {}'.format(recall_score(real_obj, predicted_obj, average=None))
    print 'F-1 : {}'.format(f1_score(real_obj, predicted_obj, average=None))
    return acc
Example #10
def is_finish(rmse_list):

    if len(rmse_list) < 2:
        print 'Finished: no more input'
        return True

    # Sort by RMSE (index 0) and compare the two best entries.
    Utility.sort_by_index(rmse_list, 0)
    print rmse_list

    first = rmse_list[0]
    second = rmse_list[1]

    # Converged when every paired value differs by at most the module-level threshold.
    for a, b in zip(first, second):
        if abs(a - b) > threshold:
            return False

    return True
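
A quick sketch of the convergence check, assuming a module-level threshold of 0.01; each entry is a list whose first element is the RMSE used for sorting:

threshold = 0.01
print is_finish([[0.500], [0.504]])  # True : best two RMSEs differ by <= 0.01
print is_finish([[0.500], [0.600]])  # False: gap still exceeds the threshold
print is_finish([[0.42]])            # True : fewer than two candidates remain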
Example #11
def run_export_input_sensitivity(system_paths, tones):

    for path in system_paths:
        for tone in tones:
            target_path = '{}/Tone_{}/'.format(path, tone)

            model = Utility.load_obj('{}/GP_model.npy'.format(target_path))
            # ARD relevance (inverse lengthscale) of each latent dimension.
            input_sensitivity = model.input_sensitivity()

            # Posterior mean of the latent positions.
            data = model.X.mean
            data_object = np.array(data)

            input_sensitive_path = '{}/input_sentivity.npy'.format(target_path)
            data_object_path = '{}/data_object.npy'.format(target_path)

            Utility.save_obj(input_sensitivity, input_sensitive_path)
            Utility.save_obj(data_object, data_object_path)
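
model.input_sensitivity() is the GPy call that exposes the ARD relevance of each input dimension. A toy sketch with a plain GPRegression model (the code above loads a trained model from disk instead):

import numpy as np
import GPy

X = np.random.randn(100, 3)
Y = np.sin(X[:, :1]) + 0.05 * np.random.randn(100, 1)

model = GPy.models.GPRegression(X, Y, GPy.kern.RBF(3, ARD=True))
model.optimize()
print model.input_sensitivity()  # one relevance score per input dimension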
Example #12
def set_pre_suc():
    tones = ['01234']

    name_list_path = '/home/h1/decha/Dropbox/python_workspace/Inter_speech_2016/playground/list_file_for_preceeding_suceeding/list_gpr_file/'

    for t in tones:
        path = '/home/h1/decha/Dropbox/Inter_speech_2016/Syllable_object/mix_object/current_version/all_vowel_type/syllable_object_{}.pickle'.format(
            t)
        print path

        syl_management = Utility.load_obj(path)
        for syl in syl_management.syllables_list:
            if 'manual' in syl.name_index: continue

            name = syl.name_index.split('_')
            file_tar = '{}/{}/{}.lab'.format(name_list_path, name[2][0],
                                             name[2])
            list_file = Utility.read_file_line_by_line(file_tar)
            for idx, l in enumerate(list_file):
                f = Utility.trim(l)
                if f == syl.name_index:
                    # print '--------------------'
                    preceeding = Utility.trim(list_file[idx - 1])
                    # print f
                    succeeding = Utility.trim(list_file[idx + 1])
                    # print '--------------------'
                    syl.set_preceeding_succeeding_name_index(
                        preceeding, succeeding)

            # sys.exit()

        Utility.save_obj(syl_management, path)
Example #13
def get_j_set(db_file, out_file, sort_list_out_file):

    j_set_db = []
    j_set_sort_list = []

    db = Utility.load_obj(db_file)
    for syl in db:
        if 'j' in syl['id']:
            j_set_db.append(syl)
            # Sum the phone durations, skipping the first entry.
            dur = 0
            for idx, d in enumerate(syl['dur']):
                if idx == 0:
                    continue
                dur = dur + d

            # The accumulated duration (not the last phone) goes into the sort list.
            j_set_sort_list.append((syl['id'], dur, syl['stress']))

    # Sort by total duration.
    Utility.sort_by_index(j_set_sort_list, 1)
    print j_set_sort_list

    Utility.save_obj(j_set_sort_list, sort_list_out_file)
    Utility.save_obj(j_set_db, out_file)
def check(v, t, obj_path):
    print obj_path
    obj = Utility.load_obj(obj_path)
    print '------------------------------------------'
    print v, t
    for syl in obj.syllables_list:
        print syl.get_vowel_length_type(), syl.tone

def run(syllable_object, main_out_path):
    vowel_type = ['v', 'vv', 'vn', 'vvn', 'vsg', 'vvsg']
    tones = ['0', '1', '2', '3', '4']

    syllable_lists = dict()
    for v in vowel_type:
        for t in tones:
            syllable_lists['{}_{}'.format(v, t)] = []

    for syl in syllable_object.syllables_list:
        syllable_lists['{}_{}'.format(syl.get_vowel_length_type(),
                                      syl.tone)].append(syl)

    for s in syllable_lists:
        spl = s.split('_')
        outpath = '{}/{}/{}/'.format(main_out_path, spl[0], spl[1])
        Utility.make_directory(outpath)
        plot_syllable(syllable_lists[s], outpath)
Example #16
def cal_lf0(config):

    base_path = config['base_path']
    label_path = config['label_path']
    name = config['name']
    outfilepath = config['outfilepath']
    var_path = config['var_path']
    syllable_base_path = config['syllable_base_path']
    syllable_var_path = config['syllable_var_path']

    #----------Syllable level--------#

    dur_list, names = PoGUtility.gen_dur_and_name_list(label_path, name)

    # Precomputed syllable-level DCT mean and covariance.
    syl_mean = np.load('{}/mean.npy'.format(syllable_base_path))
    syl_cov = np.load('{}/cov.npy'.format(syllable_base_path))

    print syl_cov

    var = np.load('{}'.format(syllable_var_path))
    # Keep only the diagonal of the variance matrix.
    vv = []
    for i, v in enumerate(var):
        vv.append(v[i])
    syl_var = np.array(vv)

    # Reconstruct each syllable's lf0 segment from its DCT coefficients.
    o = []
    for data_dct, dur in zip(syl_mean, dur_list):
        i_dct = PoGUtility.generate_inverse_DCT(data_dct, dur)
        o = o + i_dct

    o = np.array(o)
    # Mask implausibly small lf0 values as unvoiced.
    o[o < 3] = np.nan
    print o.shape

    org = Utility.read_lf0_into_ascii(
        '/work/w2/decha/Data/GPR_speccom_data/data_before_remove_silence/lf0/tsc/sd/j/{}.lf0'
        .format(name))

    org[org < 0] = np.nan

    # The original file still contains leading silence, so shift the
    # synthesized contour to align the two curves.
    diff = len(org) - len(o)

    plt.plot(np.arange(len(o)) + diff, o, label='syn')
    plt.plot(range(len(org)), org, label='org')

    plt.legend()
    plt.savefig('./{}_dct_16_test.eps'.format(name))

    sys.exit()
def hmm_frame_to_mono_label(dur_path, mono_path, out_path):

    for dur_file in Utility.list_file(dur_path):

        if 'dur' not in dur_file:
            continue

        base = Utility.get_basefilename(dur_file)

        dur = '{}/{}'.format(dur_path, dur_file)
        dur_list = get_dir_list_HMM(dur)

        mono = '{}/{}.lab'.format(mono_path, base)
        mono_list = load_mono(mono)

        out_file = '{}/{}.lab'.format(out_path, base)

        # Report files where frame durations and mono labels disagree.
        if len(dur_list) != len(mono_list):
            print base

        start = 0

        out = []

        for idx, d in enumerate(dur_list):
            # Convert durations (seconds) to HTK's 100 ns time units,
            # hence the factor of 10,000,000.
            o = '{}\t{}\t{}'.format(int(start),
                                    int(start + (dur_list[idx][0] * 10000000)),
                                    mono_list[idx])
            out.append(o)

            start = start + (dur_list[idx][0] * 10000000)
        Utility.write_to_file_line_by_line(out_file, out)

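
A quick check of the time arithmetic above, assuming a hypothetical 25 ms phone:

dur_seconds = 0.025
print int(dur_seconds * 10000000)  # 250000, i.e. the label line '0\t250000\tph'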
def run_gen_mono(utt_set):

    set_path = '{}/{}/'.format(utterance_path, utt_set)

    set_syllable_base_path = '{}/{}/'.format(syllable_base, utt_set)

    out_set_path = '{}/{}/'.format(output_path, utt_set)
    Utility.make_directory(out_set_path)

    # Utterances are numbered 01..50 within each set.
    for i in xrange(1, 51):
        utt_file = Utility.yaml_load('{}/tscsd{}{}.utt.yaml'.format(
            set_path, utt_set, Utility.fill_zero(i, 2)))

        out_file = '{}/tscsd{}{}.lab'.format(out_set_path, utt_set,
                                             Utility.fill_zero(i, 2))

        # Collect the stress markers by walking the utterance structure.
        stress_list = []
        recursion(utt_file, stress_list)

        syllable_time_label = Utility.read_file_line_by_line(
            '{}/tscsd{}{}.lab'.format(set_syllable_base_path, utt_set,
                                      Utility.fill_zero(i, 2)))
        # Report utterances where label and stress counts disagree.
        if len(syllable_time_label) != len(stress_list):
            print utt_set, i

        out = []
        for idx, line in enumerate(syllable_time_label):
            o = '{}::{}'.format(
                Utility.trim(line).replace('-', '_').replace('+', '_'),
                stress_list[idx])
            out.append(o)

        Utility.write_to_file_line_by_line(out_file, out)
def find_config(name):

    # config_setting is a module-level list of lines: "<name> <n_cluster> <n_neighbor>".
    for line in config_setting:
        spl = Utility.trim(line).split(' ')

        if name == spl[0]:
            print spl
            n_cluster = spl[1]
            n_neighbor = spl[2]
            return (n_cluster, n_neighbor)
def check_result(object_list_path):
    for obj_path in object_list_path:

        syl_object = Utility.load_obj(obj_path)
        syl_list = syl_object.syllables_list

        for syl in syl_list:
            print syl.get_phone_duration()

Example #21
def run_add_and_non_duration(out_base, vowel, d, tone, feature_name, num_dims):

    for n in num_dims:

        print '------------Start-----------'

        print 'Vowel : {}, Delta : {}, Tone : {}'.format(vowel, d, tone)

        base_path = '{}/{}/input_dims_{}/delta-{}_delta-delta-{}/BGP_LVM_Tone_{}/'.format(
            out_base, vowel, n, d[0], d[1], tone)
        syllable_object = '{}/{}/syllable_object_{}_fold/syllable_object_{}_3-fold_'.format(
            object_base, vowel, tone, tone)
        outpath = '{}/{}/input_dims_{}/delta-{}_delta-delta-{}/BGP_LVM_Tone_{}/feed_forword_ann/{}/'.format(
            out_base, vowel, n, d[0], d[1], tone, feature_name)

        print 'ANN outpath : {}'.format(outpath)
        print 'Syllable object : {}'.format(syllable_object)

        Utility.make_directory(outpath)
        # With duration features (dur_position=[1, 2]).
        run(syllable_object, fold, outpath, feature, [1, 2], d[0], d[1],
            base_path)
        print '------------End-----------'

        print '------------Start-----------'

        print 'Vowel : No_duration_{}, Delta : {}, Tone : {}'.format(
            vowel, d, tone)

        base_path = '{}/{}/input_dims_{}/delta-{}_delta-delta-{}/BGP_LVM_Tone_{}/'.format(
            out_base, 'No_duration_' + vowel, n, d[0], d[1], tone)
        syllable_object = '{}/{}/syllable_object_{}_fold/syllable_object_{}_3-fold_'.format(
            object_base, vowel, tone, tone)
        outpath = '{}/{}/input_dims_{}/delta-{}_delta-delta-{}/BGP_LVM_Tone_{}/feed_forword_ann/{}/'.format(
            out_base, 'No_duration_' + vowel, n, d[0], d[1], tone,
            feature_name)

        print 'ANN outpath : {}'.format(outpath)
        print 'Syllable object : {}'.format(syllable_object)

        Utility.make_directory(outpath)
        # Same run without duration features (empty dur_position).
        run(syllable_object, fold, outpath, feature, [], d[0], d[1], base_path)

        print '------------End-----------'
Example #22
def lf0_distortion_syn_is_gpr_format(org_path, syn_path):

    lf0_true_list = []
    lf0_pred_list = []

    for base in Utility.list_file(org_path):

        if base.startswith('.'):
            continue

        # Load the original lf0 contour.
        original_file = os.path.join(org_path, base)
        original_vector = np.loadtxt(Utility.read_lf0_into_ascii(original_file))

        # Load the synthesized lf0 contour (stored as a numpy array).
        synthesis_file = '{}/{}.npy'.format(syn_path, Utility.get_basefilename(base))
        synthesis_vector = np.load(synthesis_file)
        synthesis_vector = synthesis_vector.reshape(len(synthesis_vector))

        # Replace NaNs/non-positive values with the module-level UNDEF_VALUE marker.
        synthesis_vector = np.nan_to_num(synthesis_vector)
        synthesis_vector[np.where(synthesis_vector <= 0.0)] = UNDEF_VALUE

        # Compare only frames that are voiced in both contours.
        for lf0_original, lf0_synthesis in zip(original_vector, synthesis_vector):
            if lf0_original == UNDEF_VALUE:
                continue
            if lf0_synthesis == UNDEF_VALUE:
                continue

            lf0_true_list.append(lf0_original)
            lf0_pred_list.append(lf0_synthesis)

    # RMSE on log F0, scaled by 1200/ln(2) to express the error in cents.
    rmse = np.sqrt(sklearn.metrics.mean_squared_error(lf0_true_list, lf0_pred_list)) * 1200 / np.log(2)
    print('LF0 RMSE: {:f} in {} frames'.format(rmse, len(lf0_true_list)))
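
The 1200 / ln(2) factor converts an RMSE on natural-log F0 into musical cents (1200 cents per octave). A quick sanity check with two made-up frames one semitone apart:

import numpy as np

f0_a = 200.0
f0_b = 200.0 * 2 ** (1.0 / 12)           # one semitone above f0_a
diff = abs(np.log(f0_b) - np.log(f0_a))  # difference in natural-log F0
print diff * 1200 / np.log(2)            # ~100.0 cents, as expected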
def check_result():

    path = '/home/h1/decha/Dropbox/Inter_speech_2016/Syllable_object/Tonal_object/remove_all_silence_file//vvvsg//syllable_object_01234.pickle'

    obj = Utility.load_obj(path)
    for syl in obj.syllables_list:
        if syl.name_index == 'tscsd_manual_g37_14':
            print syl.name_index, syl.stress_manual

Example #24
def cal_F1(tones, vowel, m, base, types):

    real = np.array( [] )
    predicted = np.array( [] )

    # Pool per-fold predictions across all vowels and tones.
    for v in vowel:
        for t in tones:
            path = '{}/{}/input_dims_10/delta-True_delta-delta-True/BGP_LVM_Tone_{}/feed_forword_ann/{}/'.format(base, v, t, m)
            for fold in range(3):
                real_obj = Utility.load_obj('{}/{}_data_fold_{}_real.npy'.format(path, types, fold))
                real = np.append(real, real_obj)

                predicted_obj = Utility.load_obj('{}/{}_data_fold_{}_predicted.npy'.format(path, types, fold))
                predicted = np.append(predicted, predicted_obj)

    print tones, m
    print f1_score(real, predicted, average=None)
def build_db(db_file):

    global db

    d = Utility.load_obj(db_file)
    db = db + d

def get_prediction_result(base_method_path, db_file):

    pred = Utility.load_obj('{}/prediction_labels.pkl'.format(base_method_path))
    real = run_data_processor(db_file)

    acc, f1 = cal_result(real, pred)

    return (pred, real, acc, f1)

def gen_data(db_file, name_out_path):

    out = []

    debug = False

    for syl in Utility.load_obj(db_file):

        # Resample to 50 points and append gradient (delta) features.
        y = Syllable.get_normalize_gradient_interpolation(syl['raw_lf0'],
                                                          50,
                                                          syl['dur'],
                                                          debug=debug)
        # Skip syllables whose contour could not be interpolated.
        if y is None:
            print syl['id']
            continue

        syl['TF'] = dict()

        inte_data = dict()
        inte_data['data'] = y
        inte_data['description'] = (
            'Raw lf0 (first 50 values + delta + delta-delta) plus duration '
            'in frames (the last element). Unvoiced frames are interpolated '
            'by a third-order polynomial.')

        syl['TF']['intepolate151'] = inte_data

        out.append(syl)

    Utility.save_obj(out, name_out_path)

def read_mos(data_folder):

    all_score = dict()
    c_all_score = dict()
    count = dict()

    for score_file in Utility.list_file(data_folder):
        if score_file.startswith('.'):
            continue
        if 'mos' not in score_file:
            continue

        score = Utility.load_json('{}/{}'.format(data_folder, score_file))

        # Accumulate the score sum, count, and raw values per system k.
        for s in score:
            for k in score[s]:
                if k in all_score:
                    all_score[k] = all_score[k] + float(score[s][k])
                    count[k] = count[k] + 1
                    c_all_score[k].append(float(score[s][k]))
                else:
                    all_score[k] = float(score[s][k])
                    count[k] = 1
                    c_all_score[k] = [float(score[s][k])]

    print '-----------------------------------------'

    for k in all_score:
        print k, all_score[k]
        print count[k]

        print all_score[k] / count[k]
        print 'Mean :', np.average(c_all_score[k]), 'Var :', np.var(
            c_all_score[k])
def draw_figure(db_name, name_out_path):
    db = Utility.load_obj(db_name)

    for syl in db:
        lf0 = syl['raw_lf0']
        durs = syl['dur']
        n = syl['id']

        plot(lf0, durs, n, name_out_path)
Example #30
def cal_lf0(config):

    base_path = config['base_path']
    label_path = config['label_path']
    name = config['name']
    outfilepath = config['outfilepath']
    var_path = config['var_path']
    syllable_base_path = config['syllable_base_path']
    syllable_var_path = config['syllable_var_path']
    original = config['original']
    koriyama_gen = config['koriyama_gen']
    figure_path = config['figure_path']
    ph_in_syl_object_path = config['phone_in_syllable_object_path']
    stress = config['stress']
    original_vuv = config['original_vuv']

    p_in_s_file = Utility.load_obj(ph_in_syl_object_path)

    vuv = np.load('{}/class.npy'.format(config['vuv_path']))

    # `mask` is a module-level setting choosing which V/UV decision to use.
    if mask == 'original':
        vuv = original_vuv

    #--------Frame-------#

    lf0_mean = np.load('{}/mean.npy'.format(base_path))
    lf0_cov = np.load('{}/cov.npy'.format(base_path))

    var = np.load('{}'.format(var_path))

    # Pad or trim the voiced/unvoiced mask to match the covariance length.
    if len(lf0_cov) > len(vuv):
        for i in range(len(lf0_cov) - len(vuv)):
            vuv = np.append(vuv, -1)
    elif len(lf0_cov) < len(vuv):
        vuv = vuv[0:len(lf0_cov)]

    lf0_var = np.sum(var, axis=0)

    # Static, delta, and delta-delta mean columns.
    lf0_mean = np.array([lf0_mean[:, 0], lf0_mean[:, 1], lf0_mean[:, 2]])
    lf0_w = PoGUtility.generate_W_for_GPR_generate_features(len(lf0_cov), vuv)

    # Build and solve the normal equations frame_A * lf0 = frame_B
    # (alpha is a module-level weight).
    frame_B = alpha * PoGUtility.cal_sum_of_mean_part(lf0_var, lf0_w, lf0_cov,
                                                      lf0_mean)
    frame_A = alpha * PoGUtility.cal_sum_of_weight_part(
        lf0_var, lf0_w, lf0_cov)

    L = linalg.cholesky(frame_A, lower=True)
    lf0 = linalg.cho_solve((L, True), frame_B)

    # Mask implausibly small values as unvoiced.
    lf0[lf0 < 1] = np.nan

    np.save(outfilepath, lf0)
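
The final solve is a standard Cholesky factorization of a symmetric positive-definite system. A self-contained sketch with a toy system standing in for frame_A and frame_B:

import numpy as np
from scipy import linalg

A = np.array([[4.0, 1.0],
              [1.0, 3.0]])  # symmetric positive-definite, like frame_A
B = np.array([1.0, 2.0])

L = linalg.cholesky(A, lower=True)  # A = L * L.T
x = linalg.cho_solve((L, True), B)  # solves A x = B

print np.allclose(A.dot(x), B)      # True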