def xception_multi_period_score_fusion(dataset, config):
    """Fuse per-period Xception predictions by summing class-score matrices.

    For each of the `iteration` train/test splits, loads the weights trained
    for every period, predicts class probabilities on that period's saved
    test split, sums the probability matrices across periods, and scores the
    fused argmax labels against the ground-truth labels.

    Args:
        dataset: dataset name, used to build weight / index / file-list paths.
        config: configuration passed to the model constructor and data loader.

    Returns:
        -1 if the test labels disagree between periods; otherwise None
        (the collected performance dicts are handed to plot_result).
    """
    result = []
    for i in range(iteration):
        pre_list = []
        y_test_list = []
        for period in ['R1', 'R3', 'R4', 'R5', 'R6']:
            model = BaseModel.Xception_Model(parallels=4, config=config)
            # NOTE: the 'itreation' typo is kept on purpose — it must match
            # the filenames written by the training function.
            model.load_weights('xception_img_{}_itreation-{}-{}.hdf5'.format(
                dataset, i, period))
            file_list = np.loadtxt('{}-{}_file_list.txt'.format(
                dataset, period))
            img_x_list, y_list = utils.data_loader_for_xception_model(
                file_list=file_list, config=config)
            x = np.array(img_x_list)
            y = np.array(y_list)
            # Remap raw labels to class ids via the id-map table
            # (column 0 = raw label, column 1 = class id).
            id_map = np.loadtxt(dataset + '_id.txt')
            for index, d in enumerate(y):
                for label in id_map:
                    if d == label[0]:
                        y[index] = label[1]
            y_one_hot = to_categorical(y)
            # Reuse the exact test split saved during training.
            test_index = np.load(
                '{}_iteration_{}_img_{}_xception_test_index.npy'.format(
                    dataset, i, period))
            img_x_test = x[test_index]
            y_test = y_one_hot[test_index]
            y_test_list.append(y_test)
            pre = model.predict(img_x_test)
            pre_list.append(pre)
        pre_final_arr = np.array(pre_list)  # (periods, samples, classes)
        # BUGFIX: sum scores across periods (axis=0), not across the class
        # axis (axis=2) — fusion needs a (samples, classes) score matrix so
        # that argmax per row yields one predicted class per test sample.
        pre_final = np.sum(pre_final_arr, axis=0)
        pre_final_label = [np.argmax(d) for d in pre_final]
        # BUGFIX: the original loop ran to len(y_test_list) inclusive
        # (IndexError on the last step), shadowed the outer `i`, and used
        # `!=` on numpy arrays (ambiguous truth value). Compare neighbours
        # with np.array_equal instead.
        for j in range(1, len(y_test_list)):
            if not np.array_equal(y_test_list[j - 1], y_test_list[j]):
                print("The test label of different period should be the same")
                return -1
        y_test_label = [np.argmax(d) for d in y_test_list[0]]
        performance = get_performance(pre_final_label, y_test_label)
        result.append(performance)
    plot_result(result)
def xception_model_training_and_test(img_x_list, y_list, config):
    """Train and evaluate an Xception model over `iteration` stratified splits.

    For each iteration: makes a 70/30 stratified split (saving the index
    arrays to .npy so the score-fusion step can reuse them), trains with
    learning-rate reduction and best-weight checkpointing, reloads the best
    weights into a fresh model, evaluates on the held-out split, and dumps
    per-iteration metrics to a JSON file.

    Args:
        img_x_list: list of image arrays (model inputs).
        y_list: raw labels; remapped to class ids via '<dataset>_id.txt'.
        config: dict with at least 'dataset' ('period' too for soybean).

    Side effects:
        Writes index .npy files, weight .hdf5 checkpoints and per-iteration
        JSON result files; plots the collected results at the end.
    """
    x = np.array(img_x_list)
    y = np.array(y_list)
    dataset = config['dataset']
    # Soybean images are organised by growth period; other datasets reuse
    # the dataset name as the period tag in output filenames.
    if dataset == 'soybean':
        period = config['period']
    else:
        period = dataset
    # Remap raw labels to class ids (id-map column 0 -> column 1).
    id_map = np.loadtxt(dataset + '_id.txt')
    for idx, d in enumerate(y):
        for label in id_map:
            if d == label[0]:
                y[idx] = label[1]
    y_one_hot = to_categorical(y)
    lr_adjust = ReduceLROnPlateau(monitor='val_loss', factor=0.5,
                                  patience=5, min_lr=1e-6)
    result = []
    for i in range(iteration):
        index = np.arange(len(y))
        # Only the index splits are used; the label splits were unused
        # locals in the original and are discarded here.
        X_train_index, X_test_index, _, _ = train_test_split(
            index, y, test_size=0.3, random_state=i, shuffle=True, stratify=y)
        print(len(X_train_index))
        print(len(X_test_index))
        # Persist the split so the fusion step can evaluate on the same
        # test samples.
        np.save(
            '{}_iteration_{}_img_{}_xception_train_index.npy'.format(
                dataset, i, period), X_train_index)
        np.save(
            '{}_iteration_{}_img_{}_xception_test_index.npy'.format(
                dataset, i, period), X_test_index)
        X_train = x[X_train_index]
        X_test = x[X_test_index]
        y_train = y_one_hot[X_train_index]
        y_test = y_one_hot[X_test_index]
        # NOTE: the 'itreation' typo in the filename is kept on purpose —
        # the fusion function loads this exact name.
        save_best_weight = ModelCheckpoint(
            'xception_img_{}_itreation-{}-{}.hdf5'.format(dataset, i, period),
            monitor='val_loss', verbose=1, save_best_only=True, mode='auto',
            save_weights_only=True)
        # You can change `parallels` to create a multi_gpu_model if more
        # than one GPU is available.
        model = BaseModel.Xception_Model(parallels=1, config=config)
        # Set a smaller batch_size if GPU memory is limited.
        model.fit(X_train, y_train, batch_size=32, epochs=100,
                  validation_split=0.1,
                  callbacks=[lr_adjust, save_best_weight])
        K.clear_session()
        # Reload the checkpointed best weights into a fresh model for testing.
        model2 = BaseModel.Xception_Model(parallels=1, config=config)
        model2.load_weights('xception_img_{}_itreation-{}-{}.hdf5'.format(
            dataset, i, period))
        score = model2.evaluate(X_test, y_test)
        print(score)
        pre_final = model2.predict(X_test, batch_size=128)
        y_test_label = np.array([np.argmax(d) for d in y_test])
        y_pre_label = np.array([np.argmax(d) for d in pre_final])
        performance = get_performance(y_pre_label, y_test_label)
        performance['test_loss'] = score[0]
        performance['test_acc'] = score[1]
        K.clear_session()
        result.append(performance)
        json_str = json.dumps(performance, indent=4)
        with open(
                '{}_xception-iteration-{}-{}-result.json'.format(
                    dataset, i, period), 'w') as json_file:
            json_file.write(json_str)
    plot_result(result)