import os

import numpy
from pydub import AudioSegment
from keras.models import Sequential, model_from_json
# Conv2D is named Convolution2D on older Keras 1.x releases
from keras.layers import Dense, Dropout, Flatten, LSTM, Conv2D, MaxPooling2D
from keras.callbacks import EarlyStopping


def trans_audio_2_wav(audio_path):
    # Convert an audio file under Data/ into a mono wav under Data/wav/.
    audio_format = audio_path.split('.')[-1]
    song = AudioSegment.from_file(audio_path, audio_format)
    song = song.set_channels(1)  # downmix to mono
    out_path = audio_path.replace('Data/', 'Data/wav/')
    # Swap only the trailing extension; str.replace on the whole path could
    # also rewrite a directory name that happens to contain the format string.
    out_path = out_path[:-len(audio_format)] + 'wav'
    if_no_create_it(out_path)
    song.export(out_path, 'wav')
    return 0
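
# `if_no_create_it` is used throughout this file but not defined in this
# section. This is a minimal sketch of what it presumably does (create the
# parent directory of a path when it is missing); the real helper may differ.
def if_no_create_it(file_path):
    dir_path = os.path.dirname(file_path)
    if dir_path and not os.path.exists(dir_path):
        os.makedirs(dir_path)  # create intermediate directories as needed
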
def build_cnn_model(onDataset='dataset.h5', featureX='X', targetY='Y',
                    sub_combine_list=None):
    if sub_combine_list is None:  # avoid a mutable default argument
        sub_combine_list = []
    onDataset_name = onDataset.split('/')[-1].split('.')[0]
    X_train, X_test, Y_train, Y_test, X_val, Y_val = load_train_test_data(
        onDataset, featureX, targetY)
    print 'len X_train:', len(X_train)
    model_save_path = 'models/cnn_' + onDataset_name + '_model.json'
    model_weights_save_path = model_save_path.replace('.json', '.h5')
    print 'weights:', model_weights_save_path
    if LOAD_MODEL_FLAG and os.path.isfile(model_save_path) \
            and os.path.isfile(model_weights_save_path):
        print model_save_path
        model = load_model(model_save_path)
        model = load_model_weights(model_weights_save_path, model)
    else:
        print 'create model'
        model = Sequential()
        # Keras 1-style convolution parameters: nb_filter, nb_row, nb_col
        nb_filter_1, nb_row_1, nb_col_1, drop_rate = 63, 1, 10, 0.2
        model.add(Conv2D(nb_filter_1, nb_row_1, nb_col_1,
                         input_shape=(1, 2582, 63), activation='relu'))
        # output shape: [?, nb_row_1, 2582 + 1 - nb_col_1, nb_filter_1]
        model.add(Dropout(drop_rate))
        model.add(MaxPooling2D(pool_size=(1, 20)))
        # output shape: [?, 1, (2582 + 1 - nb_col_1) / 20, nb_filter_1]
        model.add(Dropout(drop_rate))
        model.add(Conv2D(63, 1, 8, activation='relu'))
        model.add(Dropout(drop_rate))
        model.add(MaxPooling2D(pool_size=(1, 20)))
        model.add(Dropout(drop_rate))
        model.add(Flatten())
        model.add(Dense(200, activation='relu'))
        model.add(Dropout(drop_rate))
        model.add(Dense(50, activation='relu'))
        model.add(Dropout(drop_rate))
        model.add(Dense(20, init='normal', activation='sigmoid'))
        # metric choices: accuracy / mae / fmeasure / precision / recall
        model.compile(loss='categorical_crossentropy', optimizer='adam',
                      metrics=['accuracy', 'precision', 'recall', 'fmeasure'])
    # train model
    print 'train...'
    earlyStopping = EarlyStopping(monitor='val_acc', patience=100)
    print numpy.shape(X_test)
    model.fit(X_train, Y_train, verbose=2, shuffle=True,
              callbacks=[earlyStopping], nb_epoch=EPOCH,
              validation_data=(X_val, Y_val), batch_size=BATCH_SIZE)
    if_no_create_it(model_save_path)
    save_model(model, model_save_path)
    if_no_create_it(model_weights_save_path)
    save_model_weights(model, model_weights_save_path)
    loss_and_metrics = model.evaluate(X_test, Y_test, verbose=2,
                                      batch_size=BATCH_SIZE)
    print '==============='
    print 'loss_and_metrics:', loss_and_metrics
    return loss_and_metrics[1]  # accuracy
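
# `save_model` / `load_model` / `save_model_weights` / `load_model_weights`
# are project helpers that are not shown in this section. A plausible sketch,
# assuming they wrap Keras's JSON-architecture and HDF5-weight serialization
# (the same pattern main() below uses inline); the names carry a _sketch
# suffix to mark them as hypothetical:
def save_model_sketch(model, model_save_path):
    with open(model_save_path, 'w') as f:
        f.write(model.to_json())  # architecture only, no weights


def load_model_sketch(model_save_path):
    with open(model_save_path, 'r') as f:
        return model_from_json(f.read())  # model still needs compile()


def save_model_weights_sketch(model, weights_path):
    model.save_weights(weights_path)  # HDF5 file with per-layer weights


def load_model_weights_sketch(weights_path, model):
    model.load_weights(weights_path)
    return model
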
def build_lstm_model(onDataset='dataset.h5', featureX='X', targetY='Y'):
    onDataset_name = onDataset.split('/')[-1].split('.')[0]
    X_train, X_test, Y_train, Y_test, X_val, Y_val = load_train_test_data(
        onDataset, featureX, targetY)
    print 'len X_train:', len(X_train)
    INPUT_DIM = len(X_train[0][0])  # feature dimension of one time step
    FEATURE_DIM = INPUT_DIM
    model_save_path = 'models/lstm_' + featureX + '_' + onDataset_name + '_model.json'
    if_no_create_it(model_save_path)
    model_weights_save_path = model_save_path.replace('.json', '.h5')
    if_no_create_it(model_weights_save_path)
    print 'weights:', model_weights_save_path
    if LOAD_MODEL_FLAG and os.path.isfile(model_save_path) \
            and os.path.isfile(model_weights_save_path):
        print model_save_path
        model = load_model(model_save_path)
        model = load_model_weights(model_weights_save_path, model)
    else:
        # create model
        model = Sequential()
        model.add(LSTM(500, input_shape=(TIME_SIZE, FEATURE_DIM)))
        model.add(Dropout(0.5))
        model.add(Dense(3, init='normal', activation='softmax'))
        # compile model; metric choices: accuracy / mae / fmeasure /
        # precision / recall
        model.compile(loss='categorical_crossentropy', optimizer='adam',
                      metrics=['accuracy', 'precision', 'recall', 'fmeasure'])
    # train model
    print 'train...'
    earlyStopping = EarlyStopping(monitor='val_loss', patience=5, verbose=0,
                                  min_delta=0.02)
    model.fit(X_train, Y_train, verbose=2, shuffle=True,
              callbacks=[earlyStopping], nb_epoch=EPOCH,
              validation_data=(X_val, Y_val))
    save_model(model, model_save_path)
    save_model_weights(model, model_weights_save_path)
    loss_and_metrics = model.evaluate(X_test, Y_test, verbose=0)
    print '==============='
    print 'loss_and_metrics:', loss_and_metrics
    return loss_and_metrics[1]  # accuracy
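
# `load_train_test_data` is assumed to read the feature array `featureX` and
# the target array `targetY` out of the HDF5 dataset and split them into
# train / validation / test sets. A hypothetical sketch with h5py; the key
# layout and the 80/10/10 split are assumptions, not the project's actual
# implementation:
import h5py


def load_train_test_data_sketch(onDataset, featureX, targetY):
    with h5py.File(onDataset, 'r') as f:
        X = numpy.array(f[featureX])
        Y = numpy.array(f[targetY])
    n_train = int(len(X) * 0.8)
    n_val = int(len(X) * 0.9)
    # return order matches the callers above:
    # X_train, X_test, Y_train, Y_test, X_val, Y_val
    return (X[:n_train], X[n_val:], Y[:n_train], Y[n_val:],
            X[n_train:n_val], Y[n_train:n_val])
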
def main():
    model_path = 'kerasModel/dnn.model'
    if_no_create_it(model_path)
    retrain = 1
    if retrain != 1 and os.path.isfile(model_path):
        # load the architecture from JSON and rebuild the model
        json_file = open(model_path, 'r')
        loaded_model_json = json_file.read()
        json_file.close()
        model = model_from_json(loaded_model_json)
        # load the weights into the rebuilt model
        model.load_weights(model_path.replace('.model', '.weights'))
        print 'Loaded model from disk'
    else:
        model = create_baseline()
        print model.summary()
        callbacks = [EarlyStopping(monitor='val_loss', patience=2, verbose=0)]
        model.fit(trainX, trainY, batch_size=100, nb_epoch=10,
                  validation_data=(validX, validY), callbacks=callbacks,
                  verbose=1)
    if retrain == 1 or not os.path.isfile(model_path):
        # serialize the architecture to JSON
        model_json = model.to_json()
        with open(model_path, 'w') as json_file:
            json_file.write(model_json)
        # serialize the weights to HDF5
        model.save_weights(model_path.replace('.model', '.weights'))
        print 'Saved model to disk'
    pre_testY = model.predict_classes(testX)
    print '\nacc:', get_acc(pre_testY, testY)
    return 0
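
# `get_acc`, `create_baseline`, and the trainX/validX/testX globals used by
# main() live elsewhere in the repo. A minimal sketch of `get_acc`, assuming
# `testY` holds one-hot labels (as the categorical_crossentropy models above
# suggest):
def get_acc_sketch(pre_testY, testY):
    true_classes = numpy.argmax(testY, axis=1)  # one-hot -> class index
    return numpy.mean(numpy.array(pre_testY) == true_classes)


if __name__ == '__main__':
    main()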