# Reset indices so positional iteration lines up with the dataframes
trainX_text = trainX_text.reset_index(drop=True)
testX_text = testX_text.reset_index(drop=True)

# Load the text model and predict on the train and test splits
train_data = trainX_text
test_data = testX_text

text_results_train = []
for i in range(len(train_data)):
    result = predict_text(bert_model, train_data[i])
    text_results_train.append(result)

text_results_test = []
for i in range(len(test_data)):
    result = predict_text(bert_model, test_data[i])
    text_results_test.append(result)

# Flatten the per-sample prediction lists into single arrays
text_results_train_cat = np.concatenate(text_results_train, axis=0)
text_results_test_cat = np.concatenate(text_results_test, axis=0)

# Bin the continuous scores into discrete classes at the 2.86 / 3.59 cut points
text_results_train_cat = split(text_results_train_cat, 2.86, 3.59)
text_results_test_cat = split(text_results_test_cat, 2.86, 3.59)
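# `split` is defined elsewhere in the notebook; below is a minimal sketch of a
# binning helper consistent with how it is called above, assuming the text
# model emits one continuous score per sample and the two cut points
# (2.86, 3.59) separate three ordinal classes. The body is an assumption,
# not the notebook's actual code.
import numpy as np

def split(scores, low, high):
    # 0 for score < low, 1 for low <= score < high, 2 for score >= high
    return np.digitize(np.asarray(scores), bins=[low, high])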
# Invert the label-to-index map so predicted indices decode back to labels
rev = {v: k for k, v in index_map.items()}

##### audio
# Convert the audio file into scaled log-mel features
audio = audio2wave(file)
audio_X = prepare_data_librosa(audio, features='logmel', scaled=True)
# Stack the base models' outputs and classify with the audio meta-learner
stackedX_test = stacked_dataset(all_models, audio_X)
audio_pred = audio_logistic.predict(stackedX_test)
print('Audio prediction:', [rev[item] for item in audio_pred])

##### text
text_pred = predict_text(bert_model, text)
print('Text prediction:', [rev[item] for item in np.array([np.argmax(text_pred)])])

##### ensemble
# Combine text and audio class probabilities into one feature vector,
# then classify with the final meta-learner
ensemble_text_test = softmax(text_pred)
ensemble_audio_test = audio_logistic.predict_proba(stackedX_test)
stack_test = np.dstack((ensemble_text_test, ensemble_audio_test))
stack_test = stack_test.reshape((stack_test.shape[0], stack_test.shape[1] * stack_test.shape[2]))
ensemble_pred = final_logistic.predict(stack_test)
print('Ensemble prediction:', [rev[item] for item in ensemble_pred])

# Map fine-grained emotion labels to coarse polarity categories
def cate(emo):
    if emo == 'hap':
        i = 'pos'
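# `stacked_dataset` is also defined elsewhere; below is a minimal sketch of
# the standard stacking construction it appears to implement, assuming each
# base model in `all_models` returns per-class probabilities of shape
# (n_samples, n_classes). The body is a presumed implementation, mirroring
# the dstack-and-reshape pattern the ensemble step above already uses.
import numpy as np

def stacked_dataset(members, inputX):
    # Stack each base model's class probabilities along a new third axis,
    # then flatten to (n_samples, n_models * n_classes) meta-features.
    stackX = None
    for model in members:
        yhat = model.predict(inputX)
        stackX = yhat if stackX is None else np.dstack((stackX, yhat))
    return stackX.reshape((stackX.shape[0], stackX.shape[1] * stackX.shape[2]))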