def predict(path):
    y_true, inputs, files = get_inputs_and_trues(path)

    if config.model == config.MODEL_VGG16:
        if args.store_activations:
            import train_relativity
            util.save_activations(model, inputs, files, 'fc2')
            train_relativity.train_relativity()

        if args.check_relativity:
            af = util.get_activation_function(model, 'fc2')
            acts = util.get_activations(af, [inputs[0]])
            relativity_clf = joblib.load(config.relativity_model_path)
            predicted_relativity = relativity_clf.predict(acts)[0]
            print(relativity_clf.__classes[predicted_relativity])

    if not args.store_activations:
        out = model.predict(np.array(inputs))
        predictions = np.argmax(out, axis=1)

        for i, p in enumerate(predictions):
            recognized_class = classes_in_keras_format.keys()[
                classes_in_keras_format.values().index(p)]
            print '{} ({}) ---> {} ({})'.format(
                y_true[i], files[i].split(os.sep)[-2], p, recognized_class)

        if args.accuracy:
            print 'accuracy {}'.format(
                accuracy_score(y_true=y_true, y_pred=predictions))
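# The snippet above relies on a get_inputs_and_trues() helper that is not shown.
# Below is a minimal sketch (an assumption, not the repo's code) of what it is
# expected to do: collect image paths under `path`, load and preprocess each
# image, and take the parent directory name as the true label. The Keras helper
# names and the (224, 224) VGG16 input size are assumptions.
import os
import numpy as np
from keras.preprocessing.image import load_img, img_to_array
from keras.applications.vgg16 import preprocess_input


def get_inputs_and_trues_sketch(path, img_size=(224, 224)):
    files = []
    for root, _, names in os.walk(path):
        files += [os.path.join(root, name) for name in names
                  if name.lower().endswith(('.jpg', '.png'))]

    inputs, y_true = [], []
    for f in files:
        img = img_to_array(load_img(f, target_size=img_size))
        inputs.append(preprocess_input(np.expand_dims(img, axis=0))[0])
        y_true.append(f.split(os.sep)[-2])  # parent folder encodes the class
    return y_true, inputs, files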
def predict(path):
    files = get_files(path)
    n_files = len(files)
    print('Found {} files'.format(n_files))

    if args.novelty_detection:
        activation_function = util.get_activation_function(
            model, model_module.noveltyDetectionLayerName)
        novelty_detection_clf = joblib.load(
            config.get_novelty_detection_model_path())

    y_trues = []
    predictions = np.zeros(shape=(n_files,))
    nb_batch = int(np.ceil(n_files / float(args.batch_size)))
    for n in range(0, nb_batch):
        print('Batch {}'.format(n))
        n_from = n * args.batch_size
        n_to = min(args.batch_size * (n + 1), n_files)

        y_true, inputs = get_inputs_and_trues(files[n_from:n_to])
        y_trues += y_true

        if args.store_activations:
            util.save_activations(model, inputs, files[n_from:n_to],
                                  model_module.noveltyDetectionLayerName, n)

        if args.novelty_detection:
            activations = util.get_activations(activation_function, [inputs[0]])
            nd_preds = novelty_detection_clf.predict(activations)[0]
            print(novelty_detection_clf.__classes[nd_preds])

        if not args.store_activations:
            # Warm up the model
            if n == 0:
                print('Warming up the model')
                start = time.clock()
                model.predict(np.array([inputs[0]]))
                end = time.clock()
                print('Warming up took {} s'.format(end - start))

            # Make predictions
            start = time.clock()
            out = model.predict(np.array(inputs))
            end = time.clock()
            predictions[n_from:n_to] = np.argmax(out, axis=1)
            print('Prediction on batch {} took: {}'.format(n, end - start))

    if not args.store_activations:
        for i, p in enumerate(predictions):
            recognized_class = list(classes_in_keras_format.keys())[
                list(classes_in_keras_format.values()).index(p)]
            print('| should be {} ({}) -> predicted as {} ({})'.format(
                y_trues[i], files[i].split(os.sep)[-2], p, recognized_class))

        if args.accuracy:
            print('Accuracy {}'.format(
                accuracy_score(y_true=y_trues, y_pred=predictions)))

        if args.plot_confusion_matrix:
            cnf_matrix = confusion_matrix(y_trues, predictions)
            util.plot_confusion_matrix(cnf_matrix, config.classes, normalize=False)
            util.plot_confusion_matrix(cnf_matrix, config.classes, normalize=True)
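# Batch bookkeeping example for the loop above: with n_files == 105 and
# args.batch_size == 32, nb_batch == ceil(105 / 32.0) == 4 and the last batch
# covers indices 96..104 (9 files). The concrete numbers are only an
# illustration, not values from the repo.
import numpy as np

nb_batch = int(np.ceil(105 / float(32)))       # -> 4
last_from, last_to = 3 * 32, min(32 * 4, 105)  # -> 96, 105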
def predict(path):
    files = get_files(path)
    n_files = len(files)
    print('Found {} files'.format(n_files))

    if args.novelty_detection:
        activation_function = util.get_activation_function(
            model, model_module.noveltyDetectionLayerName)
        novelty_detection_clf = joblib.load(
            config.get_novelty_detection_model_path())

    y_trues = []
    predictions = np.zeros(shape=(n_files,))
    nb_batch = int(np.ceil(n_files / float(args.batch_size)))
    for n in range(0, nb_batch):
        print('Batch {}'.format(n))
        n_from = n * args.batch_size
        n_to = min(args.batch_size * (n + 1), n_files)

        y_true, inputs = get_inputs_and_trues(files[n_from:n_to])
        y_trues += y_true

        if args.store_activations:
            util.save_activations(model, inputs, files[n_from:n_to],
                                  model_module.noveltyDetectionLayerName, n)

        if args.novelty_detection:
            activations = util.get_activations(activation_function, [inputs[0]])
            nd_preds = novelty_detection_clf.predict(activations)[0]
            print(novelty_detection_clf.__classes[nd_preds])

        if not args.store_activations:
            # Warm up the model
            if n == 0:
                print('Warming up the model')
                start = time.clock()
                model.predict(np.array([inputs[0]]))
                end = time.clock()
                print('Warming up took {} s'.format(end - start))

            # Make predictions
            start = time.clock()
            out = model.predict(np.array(inputs))
            end = time.clock()
            predictions[n_from:n_to] = np.argmax(out, axis=1)
            print('Prediction on batch {} took: {}'.format(n, end - start))

    # Count how often each class index was predicted
    freq = {}
    for p in predictions:
        if str(p) in freq:
            freq[str(p)] += 1
        else:
            freq[str(p)] = 1  # first occurrence counts as one
    print(freq)
    """
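# The per-class tally above can also be written with collections.Counter; a
# small sketch, assuming `predictions` is the 1-D array filled in the loop.
from collections import Counter


def prediction_frequencies(predictions):
    # Counter counts every predicted class index, including its first occurrence
    return dict(Counter(str(p) for p in predictions))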
# `start` is expected to be set just before this warm-up block (not shown here)
if util.get_keras_backend_name() != 'tensorflow':
    input_shape = (1, 3,) + model_module.img_size
else:
    input_shape = (1,) + model_module.img_size + (3,)
dummy_img = np.ones(input_shape)
dummy_img = preprocess_input(dummy_img)
model.predict(dummy_img)
end = time.clock()
print('Warming up took {} s'.format(end - start))

print('Trying to load a Novelty Detector')
try:
    af = util.get_activation_function(model, model_module.noveltyDetectionLayerName)
    print('Activation function is loaded')
    novelty_detection_clf = joblib.load(
        config.get_novelty_detection_model_path())
    print('Novelty Detection classifier is loaded')
except Exception as e:
    print('Error on loading Novelty Detection classifier', e)

FILE_DOES_NOT_EXIST = '-1'
UNKNOWN_ERROR = '-2'


def handle(clientsocket):
    while 1:
        buf = clientsocket.recv(config.buffer_size)
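# For illustration: with model_module.img_size == (224, 224), the dummy warm-up
# tensor built above would be (1, 3, 224, 224) on a channels-first backend such
# as Theano and (1, 224, 224, 3) on TensorFlow. The (224, 224) size is an
# assumption; the repo's models may use a different input resolution.
import numpy as np

img_size = (224, 224)                       # assumed
channels_first_shape = (1, 3) + img_size    # -> (1, 3, 224, 224)
channels_last_shape = (1,) + img_size + (3,)  # -> (1, 224, 224, 3)
dummy = np.ones(channels_last_shape)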
def predict(
        dir,
        iter_index=0,
        augment_times=1,
        print_detail=True,
):
    """
    Run prediction on the target dataset.
    :param dir: directory containing the images to predict
    :param iter_index: index of the current iteration (used only for logging)
    :param augment_times: number of augmented copies to average over
    :param print_detail: whether to print detailed per-image predictions
    :return: prediction statistics
    """
    files = get_files(dir)
    n_files = len(files)
    class_label = dir.split(os.sep)[-2]
    print('Iter {0}, Found {1} files, class is {2}:{3}'.format(
        iter_index, n_files, class_label, labels_en[int(class_label)]))

    if args.novelty_detection:
        activation_function = util.get_activation_function(
            model, model_module.noveltyDetectionLayerName)
        novelty_detection_clf = joblib.load(
            config.get_novelty_detection_model_path())

    y_trues = []
    predictions_cat = np.zeros(shape=(n_files,))
    predictions_pro = np.zeros(shape=(n_files,))
    nb_batch = int(np.ceil(n_files / float(args.batch_size)))
    for n in range(0, nb_batch):
        if print_detail:
            print('Batch {}'.format(n))
        n_from = n * args.batch_size
        n_to = min(args.batch_size * (n + 1), n_files)

        y_true, inputs = get_inputs_and_trues(files[n_from:n_to])
        y_trues += y_true

        if args.store_activations:
            util.save_activations(model, inputs, files[n_from:n_to],
                                  model_module.noveltyDetectionLayerName, n)

        if args.novelty_detection:
            activations = util.get_activations(activation_function, [inputs[0]])
            nd_preds = novelty_detection_clf.predict(activations)[0]
            if print_detail:
                print(novelty_detection_clf.__classes[nd_preds])

        if not args.store_activations:
            # Warm up the model
            if n == 0:
                if print_detail:
                    print('Warming up the model')
                start = time.clock()
                model.predict(np.array([inputs[0]]))
                end = time.clock()
                if print_detail:
                    print('Warming up took {} s'.format(end - start))

            # Make predictions (per-batch timing is disabled below, so the
            # elapsed time printed here still refers to the warm-up pass)
            # start = time.clock()
            # out = model.predict(np.array(inputs))
            # end = time.clock()
            augmented_predictions = get_augment_predictions(inputs, augment_times)
            predictions_cat[n_from:n_to] = augmented_predictions["category"]
            predictions_pro[n_from:n_to] = augmented_predictions["probability"]
            if print_detail:
                print('Prediction on batch {} took: {} s'.format(n, end - start))

    predict_stats = {}
    predict_stats["detail"] = []
    predict_stats["summary"] = {"total": 0, "trues": 0, "falses": 0, "acc": 0}
    if not args.store_activations:
        for i, p in enumerate(predictions_cat):
            recognized_class = list(classes_in_keras_format.keys())[list(
                classes_in_keras_format.values()).index(p)]
            if print_detail:
                print(
                    '[{}:{}] should be {} ({}:{}) -> predicted as {} ({}:{}), probability:{}'
                    .format("%02d" % i, files[i].split(os.sep)[-1], y_trues[i],
                            files[i].split(os.sep)[-2],
                            labels_en[int(files[i].split(os.sep)[-2])], p,
                            recognized_class, labels_en[int(recognized_class)],
                            predictions_pro[i]))
            predict_stats["detail"].append(
                [y_trues[i], files[i].split(os.sep)[-2], p, recognized_class])
            predict_stats["summary"]["total"] += 1
            if files[i].split(os.sep)[-2] == recognized_class:
                predict_stats["summary"]["trues"] += 1
            else:
                predict_stats["summary"]["falses"] += 1
        predict_stats["summary"]["acc"] = float(
            predict_stats["summary"]["trues"]) / predict_stats["summary"]["total"]

        if args.accuracy:
            if print_detail:
                print('Accuracy {}'.format(
                    accuracy_score(y_true=y_trues, y_pred=predictions_cat)))

        if args.plot_confusion_matrix:
            cnf_matrix = confusion_matrix(y_trues, predictions_cat)
            util.plot_confusion_matrix(cnf_matrix, config.classes, normalize=False)
            util.plot_confusion_matrix(cnf_matrix, config.classes, normalize=True)

    print(predict_stats["summary"])
    return predict_stats
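# get_augment_predictions() is not shown above; this is a hypothetical sketch
# of what it could do: run the module-level `model` on `augment_times` randomly
# augmented copies of each input, average the softmax outputs, and return the
# winning class index plus its mean probability. The ImageDataGenerator
# settings and the dict keys mirror the call site but are assumptions.
import numpy as np
from keras.preprocessing.image import ImageDataGenerator

_augmenter = ImageDataGenerator(rotation_range=15, horizontal_flip=True)


def get_augment_predictions_sketch(inputs, augment_times):
    batch = np.array(inputs)
    summed = model.predict(batch)                 # un-augmented pass
    for _ in range(max(augment_times - 1, 0)):
        augmented = np.array([_augmenter.random_transform(x) for x in batch])
        summed += model.predict(augmented)
    mean = summed / float(max(augment_times, 1))
    return {"category": np.argmax(mean, axis=1),
            "probability": np.max(mean, axis=1)}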
def predict(path):
    files = get_files(path)  # list of image file paths
    n_files = len(files)  # number of images
    print('Found {} files'.format(n_files))  # print information

    if args.novelty_detection:  # not executed in this run
        activation_function = util.get_activation_function(
            model, model_module.noveltyDetectionLayerName)
        novelty_detection_clf = joblib.load(
            config.get_novelty_detection_model_path())

    y_trues = []
    predictions = np.zeros(shape=(n_files,))  # create an n_files x 1 array
    nb_batch = int(np.ceil(
        n_files / float(args.batch_size)))  # ceil: number of batches
    for n in range(0, nb_batch):
        print('Batch {}'.format(n))  # print information
        n_from = n * args.batch_size  # first index of the current batch
        n_to = min(args.batch_size * (n + 1),
                   n_files)  # end index (exclusive) of the current batch

        y_true, inputs = get_inputs_and_trues(files[n_from:n_to])
        y_trues += y_true

        if args.store_activations:  # not executed in this run
            util.save_activations(model, inputs, files[n_from:n_to],
                                  model_module.noveltyDetectionLayerName, n)

        if args.novelty_detection:  # not executed in this run
            activations = util.get_activations(activation_function, [inputs[0]])
            nd_preds = novelty_detection_clf.predict(activations)[0]
            print(novelty_detection_clf.__classes[nd_preds])

        if not args.store_activations:
            # Warm up the model
            if n == 0:
                print('Warming up the model')  # print execution information
                start = time.clock()  # record start time
                model.predict(np.array([inputs[0]]))
                end = time.clock()  # record end time
                print('Warming up took {} s'.format(
                    end - start))  # print execution time

            # Make predictions
            start = time.clock()  # record start time
            out = model.predict(np.array(inputs))  # predict!
            end = time.clock()  # record end time
            predictions[n_from:n_to] = np.argmax(
                out, axis=1)  # index of the maximum value along the class axis
            print('Prediction on batch {} took: {}'.format(
                n, end - start))  # print execution time

    if not args.store_activations:
        for i, p in enumerate(predictions):
            recognized_class = list(classes_in_keras_format.keys())[list(
                classes_in_keras_format.values()).index(p)]
            print('| should be {} ({}) -> predicted as {} ({})'.format(
                y_trues[i], files[i].split(os.sep)[-2], p, recognized_class))

        if args.accuracy:  # not executed in this run
            print('Accuracy {}'.format(
                accuracy_score(y_true=y_trues, y_pred=predictions)))

        if args.plot_confusion_matrix:  # not executed in this run
            cnf_matrix = confusion_matrix(y_trues, predictions)
            util.plot_confusion_matrix(cnf_matrix, config.classes, normalize=False)
            util.plot_confusion_matrix(cnf_matrix, config.classes, normalize=True)
from sklearn.externals import joblib

parser = argparse.ArgumentParser()
parser.add_argument('--model', type=str, default=config.MODEL_VGG16,
                    help='Base model architecture')
args = parser.parse_args()

if args.model:
    config.model = args.model

model_module = util.get_model_module()
model = model_module.load_trained()
print 'Model loaded'

try:
    print 'Loading activation function'
    af = util.get_activation_function(model, model_module.RELATIVITY_LAYER)
    print 'Loading relativity classifier'
    relativity_clf = joblib.load(config.get_relativity_model_path())
except Exception as e:
    print e

FILE_DOES_NOT_EXIST = '-1'
UNKNOWN_ERROR = '-2'


def handle(clientsocket):
    while 1:
        buf = clientsocket.recv(config.buffer_size)
        if buf == 'exit':
            return  # client terminated connection
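# A hypothetical client for the handle() loop above. It assumes the server
# expects one message per request and answers with a prediction string or one
# of the error codes (FILE_DOES_NOT_EXIST / UNKNOWN_ERROR); the host, port,
# and message format are placeholders, not values from the repo's config.
import socket


def query_server_sketch(message, host='127.0.0.1', port=4444):
    sock = socket.create_connection((host, port))
    try:
        sock.sendall(message)       # Python 2 str, matching the snippets above
        return sock.recv(1024)
    finally:
        sock.sendall('exit')        # ask the server loop to return
        sock.close()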
def predict(path):
    files = get_files(path)
    n_files = len(files)
    print('Found {} files'.format(n_files))

    if args.novelty_detection:
        activation_function = util.get_activation_function(
            model, model_module.noveltyDetectionLayerName)
        novelty_detection_clf = joblib.load(
            config.get_novelty_detection_model_path())

    range = 15  # half-size of the blurred patch (note: shadows the builtin)
    img_hard_h = 80
    img_hard_w = 80
    x_count = 0
    y_count = 0
    stride = 5
    prob_map = np.zeros([img_hard_h // stride, img_hard_w // stride])
    class_map = np.zeros([img_hard_h // stride, img_hard_w // stride],
                         dtype=np.uint8)
    n = 0
    class_index = 3
    image_index = 1
    y_true, inputs = get_inputs_and_trues(
        ["data/sorted/test/" + str(class_index) + "/" + str(image_index) + ".png"])

    for cx in np.arange(0, img_hard_h - 1, stride):
        for cy in np.arange(0, img_hard_w, stride):
            if not args.store_activations:
                # Warm up the model
                if n == 0:
                    print('Warming up the model')
                    start = time.clock()
                    model.predict(np.array([inputs[0]]))
                    end = time.clock()
                    print('Warming up took {} s'.format(end - start))
                    n = 1

                # Blur a square patch centred on (cx, cy)
                cut = inputs[0][max(cx - range, 0):min(cx + range, 80),
                                max(cy - range, 0):min(cy + range, 80), :]
                cut = gaussian_filter(cut, sigma=3)
                inputs[0][max(cx - range, 0):min(cx + range, 80),
                          max(cy - range, 0):min(cy + range, 80), :] = cut

                # Make predictions
                start = time.clock()
                out = model.predict(np.array(inputs))
                end = time.clock()
                pred_index = np.argmax(out, axis=1)
                print("<<<<<<<<<<<<<< result >>>>>>>>>>>>>>")
                print(out)
                print("predicted label is " + str(pred_index))
                print("====================================")
                prob_map[x_count, y_count] = out[0][class_index]
                class_map[x_count, y_count] = pred_index
            y_count = y_count + 1
        y_count = 0
        x_count = x_count + 1
    x_count = 0

    cmap = plt.cm.rainbow
    norm = BoundaryNorm(np.arange(-0.5, 4.5, 1), cmap.N)
    plt.subplot(1, 2, 1)
    plt.matshow(prob_map, fignum=False, cmap=plt.cm.gnuplot)
    plt.colorbar(fraction=0.046, pad=0.04)
    plt.subplot(1, 2, 2)
    plt.matshow(class_map, fignum=False, cmap=cmap, norm=norm)
    plt.colorbar(fraction=0.046, pad=0.04, ticks=np.linspace(0, 3, 4))
    plt.show()
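# For reference: with img_hard_h == img_hard_w == 80 and stride == 5, the
# sliding blur above visits a 16 x 16 grid of centres, so prob_map and
# class_map are 16 x 16; cell (x, y) holds the target-class probability and
# the predicted label after blurring the patch centred at pixel (5 * x, 5 * y).
grid_h = 80 // 5  # 16 rows (cx = 0, 5, ..., 75)
grid_w = 80 // 5  # 16 cols (cy = 0, 5, ..., 75)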