def _extract_class_features(sample_list, src_dir, label, prefix, spec_subdir):
    """Extract mel/MFCC/chroma features for one class of audio files.

    Every feature frame is stored as a ``(label, frame)`` tuple; the three
    lists are pickled via ``p_dump`` as ``<prefix>_mel`` / ``<prefix>_mfcc``
    / ``<prefix>_chroma``, and a spectrogram PNG is written under
    ``data_dir/spec_subdir``.

    Returns the number of files skipped because ``tools.get_feature`` raised
    ``audioop.error`` (presumably 24-bit audio -- TODO confirm).
    """
    mels = []
    mfccs = []
    chromas = []
    skipped = 0
    for fname in tqdm(sample_list):
        try:
            mel, mfcc, chroma = tools.get_feature(os.path.join(src_dir, fname))
            mels += [(label, frame) for frame in mel]
            mfccs += [(label, frame) for frame in mfcc]
            chromas += [(label, frame) for frame in chroma]
            # NOTE(review): assumes a 3-character extension ("x.mp3" ->
            # "x.png"); kept to match the original naming scheme.
            tools.make_spec(
                os.path.join(src_dir, fname),
                os.path.join(data_dir, spec_subdir, fname[:-3] + "png"))
        except audioop.error:
            skipped += 1
    p_dump(mels, prefix + "_mel")
    p_dump(mfccs, prefix + "_mfcc")
    p_dump(chromas, prefix + "_chroma")
    return skipped


def make_train():
    """Build the pickled training feature sets for both sample classes.

    Positive files are labelled 0, negative files 1.  Library warnings
    (e.g. from audio decoding) are suppressed for the whole run; files the
    feature extractor cannot decode are counted and reported at the end.
    """
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        print("start positive music feature extract")
        hi_res = _extract_class_features(
            positive_sample, posi_dir, 0, "posi", "posi_spectro")
        print("start negative music feature extract")
        hi_res += _extract_class_features(
            negative_sample, nega_dir, 1, "nega", "nega_spectro")
    if hi_res:
        print("{} files are not used.it may be 24bit.".format(hi_res))
def get_features_values_by_sampling(graph):
    """Return a (1000, 7) matrix of feature vectors from randomized graphs.

    Each row is produced by copying *graph*, randomizing the copy with
    ``tools.create_random_graph``, and extracting its 7-dim feature vector.
    """
    n_samples = 1000
    feature_matrix = np.zeros((n_samples, 7))
    for row in range(n_samples):
        randomized = tools.create_random_graph(graph.copy())
        feature_matrix[row, :] = tools.get_feature(randomized)
    return feature_matrix
def _random_action_seed(g, player):
    """Pick a seed node for *player* using a uniformly random strategy.

    Strategies: 0 = degree, 1 = weight, 2 = blocking (blocking needs the
    player number).  A return of -1 marks an illegal action; fall back to
    the degree strategy in that case, exactly as the original did.
    """
    choice = np.random.randint(3)
    if choice == 0:
        seed = actions.action_degree(g)
    elif choice == 1:
        seed = actions.action_weight(g)
    else:
        seed = actions.action_blocking(g, player)
    if seed == -1:
        print("Illegal action")
        seed = actions.action_degree(g)
    return seed


def get_features_values_by_competition(graph):
    """Collect per-turn feature vectors from simulated two-player games.

    Plays ``run.feature_iterations`` games on copies of *graph*.  On every
    turn (while more than one free node remains) the current 7-dim feature
    vector is recorded, both players choose and activate a random-strategy
    seed, and influence diffuses via ``tools.diffuse``.

    Returns the (n_turns, 7) matrix of recorded feature rows.
    """
    iterations = run.feature_iterations
    # 1600 rows per game is assumed to upper-bound the turns per game --
    # TODO confirm; overflowing it would raise IndexError.
    features = np.zeros((iterations * 1600, 7))
    counter = 0
    for _ in range(iterations):
        g = graph.copy()
        while len(g.graph['free']) > 1:
            features[counter, :] = tools.get_feature(g)
            counter += 1
            # both players act, then activated influence spreads
            tools.activate_node(g, _random_action_seed(g, 1), 1)
            tools.activate_node(g, _random_action_seed(g, 2), 2)
            a1, a2 = tools.diffuse(g)
            for n in a1:
                tools.activate_node(g, n, 1)
            for n in a2:
                tools.activate_node(g, n, 2)
    return features[0:counter, :]
def param_coord_mapping_Ver2(sampling=200):
    """Sweep each facial modifier and record the resulting mesh feature.

    For every (feature name, modifier path) pair in the table, the modifier
    is swept over ``sampling`` values in [-1, 1]; after each step the
    feature is computed from the selected human's mesh and the
    ``[params, coords]`` pair is saved to
    ``<base_path>/mapping_ver2/<feature>.npy``.
    """
    from get_indices import finished_modify
    from tools import get_feature
    from core import G
    import humanmodifier
    # NOTE(review): `indices` is loaded but never used below -- confirm the
    # load can be removed (kept so a missing file still fails fast).
    with open("/home/loaias/Workspace/Python/MakeHuman/log/parts/indices",
              'r') as f:
        indices = pickle.load(f)
    mapping = [
        ("eye_width", ["eyes/l-eye-push1-in|out", "eyes/l-eye-push2-in|out"]),
        ("eye_height", "eyes/l-eye-height2-min|max"),
        ("eye_distance", "eyes/l-eye-move-in|out"),
        ("face_width", "head/head-scale-horiz-less|more"),
        ("face_height", "chin/chin-height-min|max"),
        ("nose_width", "nose/nose-scale-horiz-incr|decr"),
        ("forehead_to_nose", "nose/nose-scale-vert-incr|decr"),
        ("nose_to_mouth", "mouth/mouth-trans-up|down")
    ]
    h = G.app.selectedHuman
    params = np.linspace(-1, 1, sampling)
    for feature_name, modifier_path in mapping:
        coord = []
        for param in params:
            # Fix: the original tested `len(el[1]) == 2`, which would also
            # match any two-character modifier path; test the type instead.
            if isinstance(modifier_path, list):
                # Paired modifiers are driven in opposite directions.
                modifier = h.getModifier(modifier_path[0])
                humanmodifier.ModifierAction(
                    modifier, 0, param, finished_modify).do()
                modifier = h.getModifier(modifier_path[1])
                humanmodifier.ModifierAction(
                    modifier, 0, -param, finished_modify).do()
            else:
                modifier = h.getModifier(modifier_path)
                humanmodifier.ModifierAction(
                    modifier, 0, param, finished_modify).do()
            mesh = h.mesh.__dict__["coord"]
            coord.append(get_feature(feature_name, mesh))
        result = [params, coord]
        np.save("%s/mapping_ver2/%s.npy" % (base_path, feature_name), result)
    raw_input("Parameter-Coordinate Mapping Complete.")
def main(m_list):
    """Score each audio file in *m_list* with the five trained models.

    Loads the mel / MFCC / chroma / raw-waveform ("law") / spectrogram
    models from ``model_dir`` via chainer's ``serializers.load_hdf5``, then
    for every file appends ``(path, [five score vectors])`` to the returned
    candidate list.  Files raising ``audioop.error`` (unsupported sample
    width) are silently skipped.  Library warnings are suppressed for the
    whole loop.
    """
    mel_model = models.Mel()
    mfcc_model = models.Mfcc()
    chroma_model = models.Chroma()
    spec_model = models.Spectro()
    law_model = models.Law()
    serializers.load_hdf5(os.path.join(model_dir, "my_mel.model"), mel_model)
    serializers.load_hdf5(os.path.join(model_dir, "my_mfcc.model"), mfcc_model)
    serializers.load_hdf5(os.path.join(model_dir, "my_chroma.model"), chroma_model)
    serializers.load_hdf5(os.path.join(model_dir, "my_law.model"), law_model)
    serializers.load_hdf5(os.path.join(model_dir, "my_spec.model"), spec_model)
    candidate = []
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        for i in tqdm(m_list):
            try:
                p_n = []
                #print "begin",i,"({0}/{1})".format(num+1,len(m_list))
                mel, mfcc, chroma = tools.get_feature(i)
                # .mean(0) averages per-frame predictions into one vector
                p_n.append(predict_mmc(mel_model, mel).mean(0))
                #tools.fprint("mel done ")
                p_n.append(predict_mmc(mfcc_model, mfcc).mean(0))
                #tools.fprint("mfcc done ")
                p_n.append(predict_mmc(chroma_model, chroma).mean(0))
                #tools.fprint("chroma done")
                p_n.append(predict_law(law_model, i).mean(0))
                #tools.fprint("law done ")
                p_n.append(predict_spec(spec_model, i))
                #tools.fprint("spec done ")
                candidate.append((i, p_n))
                #tools.fprint("end \n")
            except audioop.error:
                # unsupported sample width (e.g. 24-bit); skip this file
                pass
    return candidate
def main(m_list):
    """Score each audio file in *m_list* with the five trained models.

    Loads the mel / MFCC / chroma / raw-waveform / spectrogram models from
    ``model_dir`` and returns a list of ``(path, [five score vectors])``
    tuples.  Files raising ``audioop.error`` are skipped; library warnings
    are suppressed for the whole loop.
    """
    mel_net = models.Mel()
    mfcc_net = models.Mfcc()
    chroma_net = models.Chroma()
    spec_net = models.Spectro()
    law_net = models.Law()
    # restore each network's weights from its HDF5 snapshot
    for model_file, net in (("my_mel.model", mel_net),
                            ("my_mfcc.model", mfcc_net),
                            ("my_chroma.model", chroma_net),
                            ("my_law.model", law_net),
                            ("my_spec.model", spec_net)):
        serializers.load_hdf5(os.path.join(model_dir, model_file), net)
    candidate = []
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        for track in tqdm(m_list):
            try:
                mel, mfcc, chroma = tools.get_feature(track)
                # .mean(0) folds per-frame predictions into one vector
                scores = [
                    predict_mmc(mel_net, mel).mean(0),
                    predict_mmc(mfcc_net, mfcc).mean(0),
                    predict_mmc(chroma_net, chroma).mean(0),
                    predict_law(law_net, track).mean(0),
                    predict_spec(spec_net, track),
                ]
                candidate.append((track, scores))
            except audioop.error:
                # unsupported sample width; skip this file
                pass
    return candidate
def param_coord_mapping_Ver3(sampling=20):
    """Map each modifier parameter to its mesh feature, one .npy per entry.

    Every modifier in the table is swept over ``sampling`` values in
    [-1, 1]; each (parameter, feature) pair is recorded and the full list
    is saved to ``<base_path>/mapping_ver3/<feature>.npy``.
    """
    from get_indices import finished_modify, get_feature
    from core import G
    import humanmodifier
    mapping = [
        ("eye-out", "eyes/l-eye-push1-in|out"),
        ("eye-in", "eyes/l-eye-push2-in|out"),
        ("eye-height", "eyes/l-eye-height2-min|max"),
        ("eye-distance", "eyes/l-eye-move-in|out"),
        ("face-width", "head/head-scale-horiz-less|more"),
        ("face-height", "head/head-scale-vert-more|less"),
        ("nose-width", "nose/nose-scale-horiz-incr|decr"),
        ("nose-apex", "nose/nose-scale-vert-incr|decr"),
        ("mouth-middle", "mouth/mouth-trans-up|down")
    ]
    human = G.app.selectedHuman
    sweep = np.linspace(-1, 1, sampling)
    for feature_name, modifier_path in mapping:
        parameter_feature_pairs = []
        for value in sweep:
            mod = human.getModifier(modifier_path)
            humanmodifier.ModifierAction(mod, 0, value, finished_modify).do()
            mesh_coords = human.mesh.__dict__["coord"]
            parameter_feature_pairs.append(
                (value, get_feature(feature_name, mesh_coords)))
        np.save("%s/mapping_ver3/%s.npy" % (base_path, feature_name),
                parameter_feature_pairs)
    print("Parameter-Coordinate Mapping Complete.")
def make_train():
    """Extract and pickle training features for both sample classes.

    Positive files (label 0) and negative files (label 1) each yield
    mel / MFCC / chroma feature frames stored as ``(label, frame)`` tuples
    and pickled via ``p_dump``; a spectrogram PNG is also written per file.
    Files raising ``audioop.error`` are counted and reported at the end
    (presumably 24-bit audio -- TODO confirm).
    """
    #posi_tempo=[]
    print "start positive music feature extract"
    posi_mel = []
    posi_chroma = []
    posi_mfcc = []
    hi_res = 0  # count of files skipped due to audioop.error
    #posi_tempo=[]
    #nega_tempo=[]
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        for i in tqdm(positive_sample):
            #tools.make_vec(os.path.join(posi_dir,i),0,(posi_mel,posi_mfcc,posi_chroma))
            try:
                mel, mfcc, chroma = tools.get_feature(os.path.join(
                    posi_dir, i))
                # tag every frame with class label 0 (positive)
                mel = [(0, j) for j in mel]
                mfcc = [(0, j) for j in mfcc]
                chroma = [(0, j) for j in chroma]
                posi_mel += mel
                posi_mfcc += mfcc
                posi_chroma += chroma
                # write spectrogram PNG; assumes a 3-character extension
                tools.make_spec(
                    os.path.join(posi_dir, i),
                    os.path.join(data_dir, "posi_spectro", i[:-3] + "png"))
            except audioop.error:
                hi_res += 1
        p_dump(posi_mel, "posi_mel")
        p_dump(posi_mfcc, "posi_mfcc")
        p_dump(posi_chroma, "posi_chroma")
        # free the positive lists before processing negatives (memory)
        del posi_mel
        del posi_mfcc
        del posi_chroma
        # NOTE(review): the negative pass is kept inside the warnings
        # context -- confirm against the original indentation.
        print "start negative music feature extract"
        nega_mel = []
        nega_chroma = []
        nega_mfcc = []
        for i in tqdm(negative_sample):
            #tools.make_vec(os.path.join(nega_dir,i),0,(nega_mel,nega_mfcc,nega_chroma))
            try:
                mel, mfcc, chroma = tools.get_feature(os.path.join(
                    nega_dir, i))
                # tag every frame with class label 1 (negative)
                mel = [(1, j) for j in mel]
                mfcc = [(1, j) for j in mfcc]
                chroma = [(1, j) for j in chroma]
                nega_mel += mel
                nega_mfcc += mfcc
                nega_chroma += chroma
                tools.make_spec(
                    os.path.join(nega_dir, i),
                    os.path.join(data_dir, "nega_spectro", i[:-3] + "png"))
            except audioop.error:
                hi_res += 1
        p_dump(nega_mel, "nega_mel")
        p_dump(nega_mfcc, "nega_mfcc")
        p_dump(nega_chroma, "nega_chroma")
    if hi_res:
        print "{} files are not used.it may be 24bit.".format(hi_res)
knn = KNeighborsClassifier(n_neighbors=neighbors) knn.fit(train_data, train_label) # predict_labe = knn.predict(test_data) score = knn.score(test_data, test_label) print("精度为: ", score) # tools.tsen_plot("KNeighbor",features,labels) return score if __name__ == "__main__": simi_path = "E:/Program Files/workspace/report_sheng/report_similarity/" features, labels, words, feature_names = tools.get_feature(simi_path) labels_list = [] for i in range(len(labels)): labels_list.append(lable_dict[labels[i]]) word_matrix, word_names = tools.get_count_vect(words) score_dict = {} for num in trange(10, 1137): #使用卡方分布从词频特征抽取特征 new_feature = tools.select_feature(word_matrix.toarray(), labels_list, num) for neibhor in range(3, 32): #多项式贝叶斯分类