def test_single_h5(FLAGS, h5_weights_path):
    """Evaluate a single .h5 weights file and write an accuracy report next to it."""
    if not os.path.isfile(h5_weights_path):
        print('%s is not a h5 weights file path' % h5_weights_path)
        return
    optimizer = adam(lr=FLAGS.learning_rate, clipnorm=0.001)
    objective = 'categorical_crossentropy'
    metrics = ['accuracy']
    model = model_fn(FLAGS, objective, optimizer, metrics)
    # load the weights file that was validated above
    load_weights(model, h5_weights_path)
    img_names, test_data, test_labels = load_test_data(FLAGS)
    predictions = model.predict(test_data, verbose=0)

    right_count = 0
    error_infos = []
    for index, pred in enumerate(predictions):
        pred_label = np.argmax(pred, axis=0)
        test_label = test_labels[index]
        if pred_label == test_label:
            right_count += 1
        else:
            error_infos.append('%s, %s, %s\n' % (img_names[index], test_label, pred_label))

    accuracy = float(right_count) / len(img_names)
    print('accuracy: %s' % accuracy)
    result_file_name = os.path.join(os.path.dirname(h5_weights_path),
                                    '%s_accuracy.txt' % os.path.basename(h5_weights_path))
    with open(result_file_name, 'w') as f:
        f.write('# predict error files\n')
        f.write('####################################\n')
        f.write('file_name, true_label, pred_label\n')
        f.writelines(error_infos)
        f.write('####################################\n')
        f.write('accuracy: %s\n' % accuracy)
    print('end')
def test_single_h5(FLAGS, h5_weights_path):
    """Variant: Nadam optimizer, 1-based class ids, and per-sample debug output."""
    if not os.path.isfile(h5_weights_path):
        print('%s is not a h5 weights file path' % h5_weights_path)
        return
    optimizer = Nadam(lr=FLAGS.learning_rate, beta_1=0.9, beta_2=0.999,
                      epsilon=1e-08, schedule_decay=0.004)
    objective = 'categorical_crossentropy'
    metrics = ['accuracy']
    model = model_fn(FLAGS, objective, optimizer, metrics)
    # model = model_fn_SE_ResNet50(FLAGS, objective, optimizer, metrics)
    load_weights(model, h5_weights_path)
    img_names, test_data, test_labels = load_test_data(FLAGS)
    predictions = model.predict(test_data, verbose=0)

    right_count = 0
    error_infos = []
    print('img len: %d' % len(img_names))
    print('test_data len: %d' % len(test_data))
    print('test_labels len: %d' % len(test_labels))
    for index, pred in enumerate(predictions):
        # shift from the 0-based argmax index to the 1-based class ids used in the labels
        pred_label = np.argmax(pred, axis=0) + 1
        test_label = test_labels[index]
        if pred_label == test_label:
            print('{},{},{}'.format(img_names[index], test_label, pred_label))
            right_count += 1
        else:
            error_infos.append('%s, %s, %s\n' % (img_names[index], test_label, pred_label))

    accuracy = float(right_count) / len(img_names)
    print('accuracy: %s' % accuracy)
    result_file_name = os.path.join(os.path.dirname(h5_weights_path),
                                    '%s_accuracy.txt' % os.path.basename(h5_weights_path))
    with open(result_file_name, 'w') as f:
        f.write('# predict error files\n')
        f.write('####################################\n')
        f.write('file_name, true_label, pred_label\n')
        f.writelines(error_infos)
        f.write('####################################\n')
        f.write('accuracy: %s\n' % accuracy)
    print('end')
def test_single_h5(FLAGS, h5_weights_path):
    """Variant: run inference on unlabeled test data and write a result.csv submission file."""
    if not os.path.isfile(h5_weights_path):
        print('%s is not a h5 weights file path' % h5_weights_path)
        return
    optimizer = Nadam(lr=FLAGS.learning_rate, beta_1=0.9, beta_2=0.999,
                      epsilon=1e-08, schedule_decay=0.004)
    objective = 'categorical_crossentropy'
    metrics = ['accuracy']
    model = model_fn(FLAGS, objective, optimizer, metrics)
    load_weights(model, h5_weights_path)
    img_names, test_data = load_test_data(FLAGS)
    predictions = model.predict(test_data, verbose=0)

    test_labels = []
    for pred in predictions:
        pred_label = np.argmax(pred, axis=0)
        test_labels.append(pred_label + 1)  # back to 1-based class ids
    img_names = [x.split('/')[-1] for x in img_names]
    df = pd.DataFrame({'FileName': img_names, 'type': test_labels})
    df.to_csv('result.csv', index=False)
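# The inference-only variant above expects load_test_data(FLAGS) to return just
# (img_names, test_data), without labels. A minimal sketch of such a helper follows,
# purely for illustration: load_test_data_sketch, FLAGS.test_data_url and
# FLAGS.input_size are hypothetical names, and the scaling should be replaced with
# whatever preprocessing was used at training time. os and np are assumed to be
# imported at the top of the file, as in the functions above.
import glob
from PIL import Image

def load_test_data_sketch(FLAGS):
    img_names = sorted(glob.glob(os.path.join(FLAGS.test_data_url, '*.jpg')))
    test_data = []
    for img_path in img_names:
        img = Image.open(img_path).convert('RGB').resize((FLAGS.input_size, FLAGS.input_size))
        test_data.append(np.asarray(img, dtype=np.float32) / 255.0)
    return img_names, np.stack(test_data)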
def test_batch_h5(FLAGS):
    """Test all the .h5 weights files in the model dir."""
    optimizer = adam(lr=FLAGS.learning_rate, clipnorm=0.001)
    objective = 'categorical_crossentropy'
    metrics = ['accuracy']
    model = model_fn(FLAGS, objective, optimizer, metrics)
    img_names, test_data, test_labels = load_test_data(FLAGS)

    file_paths = mox.file.glob(os.path.join(FLAGS.eval_weights_path, '*.h5'))
    for h5_weights_path in file_paths:
        model = load_weights(model, h5_weights_path)
        predictions = model.predict(test_data, verbose=0)

        right_count = 0
        error_infos = []
        for index, pred in enumerate(predictions):
            pred_label = np.argmax(pred, axis=0)
            test_label = test_labels[index]
            if pred_label == test_label:
                right_count += 1
            else:
                error_infos.append('%s, %s, %s\n' % (img_names[index], test_label, pred_label))

        accuracy = float(right_count) / len(img_names)
        print('accuracy: %s' % accuracy)
        result_file_name = os.path.join(os.path.dirname(h5_weights_path),
                                        '%s_accuracy.txt' % os.path.basename(h5_weights_path))
        with mox.file.File(result_file_name, 'w') as f:
            f.write('# predict error files\n')
            f.write('####################################\n')
            f.write('file_name, true_label, pred_label\n')
            f.writelines(error_infos)
            f.write('####################################\n')
            f.write('accuracy: %s\n' % accuracy)
        print('accuracy result file saved as %s' % result_file_name)
    print('end')
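# Hedged usage sketch, not part of the original functions: one way to drive the two
# evaluators above is to dispatch on whether FLAGS.eval_weights_path names a single
# .h5 file or a directory of checkpoints. eval_model_sketch is a hypothetical name,
# and FLAGS is assumed to be built elsewhere in the project (e.g. via argparse).
def eval_model_sketch(FLAGS):
    if FLAGS.eval_weights_path.endswith('.h5'):
        # evaluate one checkpoint with whichever test_single_h5 variant is kept
        test_single_h5(FLAGS, FLAGS.eval_weights_path)
    else:
        # evaluate every .h5 checkpoint found in the directory
        test_batch_h5(FLAGS)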