def runQueryAnalysis(audioTransName, hashprintTransName, testName):
    """Run hashprint queries for every artist/song under a test set and save the analysis.

    For each artist directory found in the hashprint database folder, loads that
    artist's hashprint DB, queries it with every song hashprint from the test
    folder, and hands the per-artist results to ``ra.ResultAnalyzer`` which
    persists them (``saveAll`` + ``saveToCommonCSV``).

    Parameters
    ----------
    audioTransName : str
        Root directory of this audio transformation.
    hashprintTransName : str
        Sub-directory (under the root) holding the per-artist hashprint DBs.
    testName : str
        Sub-directory (under the root) holding the per-artist test hashprints.
    """
    print("[runQueryAnalysis] start analysis of:",
          audioTransName, hashprintTransName, testName)
    testPath = os.path.join(audioTransName, testName)
    dbsPath = os.path.join(audioTransName, hashprintTransName)
    resultDict = dict()
    # Fix: reuse dbsPath here — the original re-joined the same two components
    # a second time inside the loop header.
    for artist in cu.listdir(dbsPath):
        print("[runQueryAnalysis] start artist:", artist)
        artistResult = dict()
        hashprintDB = cu.loadHashprintDB(os.path.join(dbsPath, artist))
        for songHashprint in cu.listdir(os.path.join(testPath, artist)):
            songHashprintPath = os.path.join(testPath, artist, songHashprint)
            artistResult[songHashprint] = hp.runHashprintQuery(songHashprintPath, hashprintDB)
        resultDict[artist] = artistResult
    analysis = ra.ResultAnalyzer(resultDict, audioTransName, hashprintTransName, testName)
    analysis.saveAll()
    analysis.saveToCommonCSV()
def runTesting(used_dataset, dataset_code):
    """Run the classifier test suite on *used_dataset* and analyse the results.

    Writes classifier metrics to ``classifiers_metrics<dataset_code>`` via
    ``tc.mainTestClassifier`` and then produces the analysis file
    ``results<dataset_code>`` via ``resa.ResultAnalysis``.

    Parameters
    ----------
    used_dataset :
        Dataset object/handle forwarded to ``tc.mainTestClassifier``.
    dataset_code : str
        Suffix appended to both the metrics and results file names.
    """
    verbose = False
    multiplier = 5
    classifiers = 4
    metric_file = "classifiers_metrics"
    analysis_file = "results"
    print("Loading...")
    # NOTE(review): fixed sleep — presumably gives Spark executors time to
    # register before they are counted below; confirm against deployment.
    time.sleep(10)
    # Fix: variable was misspelled "num_instaces". The driver appears in the
    # executor memory status, hence the -1.
    num_instances = sc._jsc.sc().getExecutorMemoryStatus().size() - 1
    print("Instances online: " + str(num_instances))
    mainTime = time.time()
    tc.mainTestClassifier(destination_file=metric_file + dataset_code,
                          verbose=verbose,
                          multiplier=multiplier,
                          used_dataset=used_dataset)
    print("Test eseguiti in " + str(time.time() - mainTime) + " secondi")
    resa.ResultAnalysis(source_file=metric_file + dataset_code,
                        destination_file=analysis_file + dataset_code,
                        classifiers=classifiers)
    print("Analisi ultimate. File Results pronto")
def calculater_result(model, testdates, testys):
    """Predict every sample in *testdates* and print sensitivity/specificity.

    For each test sample, converts the model's bit-vector prediction and the
    corresponding target (``testys[i][0]``) to numbers via ``bit2num``,
    collects them, and prints ``ResultAnalysis.SenAndSpe(targets, predictions)``.

    Parameters
    ----------
    model :
        Trained model exposing ``predict(sample)``.
    testdates : sequence
        Test input samples (one per prediction).
    testys : sequence
        Test targets; element ``[0]`` of each entry is the bit-encoded label.
    """
    ty = []
    pv = []
    # Fix: the original used Python 2 print statements, which are a
    # SyntaxError under Python 3 — the rest of the file uses print().
    print(len(testdates))
    # Iterate the two sequences in lockstep instead of range(len(...)).
    for sample, target_bits in zip(testdates, testys):
        pre = bit2num(model.predict(sample))
        pv.append(pre)
        tar = bit2num(target_bits[0])
        ty.append(tar)
        print(pre, tar)
    print(ResultAnalysis.SenAndSpe(ty, pv))
# NOTE(review): incomplete fragment — this code continues a training loop whose
# header (and the epoch loop supplying `k`, plus `earlystop`, `max_val_acc`,
# `model_file`, `X_test`/`y_test`, `total_test_acc`, `tdg`) is outside this view.
break
# Log the early-stopping counters. The stray ']' is part of the original
# runtime message string and is deliberately left untouched.
logger.info('_count = {},_max_accuray]={} '.format(earlystop._count,earlystop._max_accuray))
#testing
logger.info('testing model')
del bl_cnn # deletes the existing model
# Reload the model from disk — presumably the best checkpoint saved during
# training; confirm what writes model_file.
bl_cnn = load_model(model_file)
score, acc = bl_cnn.evaluate(X_test, y_test, batch_size=50, verbose=1, sample_weight=None)
logger.info('max_val_acc :{}'.format(max_val_acc))
logger.info('Test score:{}'.format( score))
logger.info('Test accuracy: {}'.format(acc))
# Accumulate this run's test accuracy for the aggregate stats logged below.
total_test_acc.append(acc)
#get predict_y
predictions = bl_cnn.predict(X_test)
logger.info('predictions[0:10] = {}'.format(predictions[0:10]))
# argmax over axis 1 turns per-class scores into integer class labels.
predict_y = np.ndarray.flatten(np.argmax(predictions, axis= 1 ))
logger.info('predict_y[0:10]={}'.format(predict_y[0:10]))
#export result
# Invert the tokenizer's word_index (word -> id) into an id -> word mapping
# so predictions can be written out as readable text.
index_word = dict([(kv[1],kv[0])for kv in tdg.word_index.items()])
ResultAnalysis.comp_predict_lable(X = X_test,y=y_test,y_predict = predict_y, index2word=index_word,label2name={0:'negtive',1:'positive'}, file_to_save='./chinese_zi_epoch_{}.txt'.format(k))
# NOTE(review): the aggregate stats below look like they belong after the
# enclosing epoch loop; original indentation was lost, so this is unverified.
logger.info('total_test_acc={}'.format(total_test_acc))
logger.info('mean acc = {}'.format(sum(total_test_acc) / len(total_test_acc)))
logger.info('max acc = {}'.format( max(total_test_acc)))
print(X_test.shape[0], 'test samples') # 转换为one_hot类型 #Y_test = np_utils.to_categorical(y_test, nb_classes) Y_test =ones.one_hot_ten(y_test,nb_classes) print('one-hot-test:',Y_test) # 构建模型 model = Sequential() model=load_model('Cnn200.h5') result=model.predict(X_test) print(result) listOne=result[0:28] #listThree=result[28:] oneResult=analysisType.resultType(listOne) #threeResult=analysisType.resultType(listThree) print('oneResult=',oneResult) #print('threeResult=',threeResult) cutPic.cutLoginPicByPicType(oneResult) ##结果分类 # print('total=',len(result)) # # nb_type=2 #辨识图片种类 # num=len(result)/nb_type # count=0