def predict(image_path, checkpoint, top_k):
    model = save_load.load_checkpoint(checkpoint)
    model.eval()
    # Invert the mapping so predicted indices can be looked up -> class labels
    model.class_to_idx = {v: k for k, v in model.class_to_idx.items()}

    image = Image.open(image_path)
    processed_img = torch.from_numpy(process_image(image))
    processed_img = processed_img.unsqueeze(0).float()

    if options.gpu_bool:
        model.to('cuda')
        processed_img = processed_img.to('cuda')
    else:
        model.to('cpu')

    with torch.no_grad():
        output = model(processed_img)

    # The network outputs log-probabilities; exponentiate and take the top K
    ps = torch.exp(output)
    top_p, top_idx = ps.topk(top_k, dim=1)

    classes = [model.class_to_idx[v] for v in top_idx.cpu().numpy()[0]]
    probs = list(top_p.cpu().numpy()[0])
    species = [cat_to_name[k] for k in classes]
    return probs, classes, species
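
# The predict() above assumes a process_image() helper that returns a NumPy
# array shaped (3, 224, 224). A minimal sketch of such a helper, assuming the
# usual torchvision/ImageNet preprocessing (the resize/crop sizes and the
# normalisation constants are conventional, not taken from this repository):
import numpy as np
from PIL import Image

def process_image(image):
    """Resize, centre-crop and normalise a PIL image for a torchvision model."""
    # Resize so the shorter side is 256 px, preserving the aspect ratio
    w, h = image.size
    if w < h:
        image = image.resize((256, int(256 * h / w)))
    else:
        image = image.resize((int(256 * w / h), 256))
    # Centre-crop to 224x224
    w, h = image.size
    left = (w - 224) // 2
    top = (h - 224) // 2
    image = image.crop((left, top, left + 224, top + 224))
    # Scale to [0, 1] and normalise with ImageNet statistics
    np_image = np.array(image) / 255.0
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    np_image = (np_image - mean) / std
    # Reorder to channels-first (C, H, W) as PyTorch expects
    return np_image.transpose((2, 0, 1))
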
def rate_image(img_path, classifier):
    landmarks, img = process_image(img_path)
    fv = feature_creator.create_feature_vec(landmarks)
    # Hotfix for a bug: prepend a zero to the feature vector
    fv.insert(0, 0)
    rating = classifier.predict([fv])[0]
    return img, rating
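
# Hypothetical call site for rate_image(); it assumes the rating classifier
# was trained and pickled elsewhere (both file names are placeholders):
import pickle

with open('models/rating_classifier.pkl', 'rb') as f:
    classifier = pickle.load(f)

img, rating = rate_image('photos/portrait.jpg', classifier)
print(f'Predicted rating: {rating}')
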
def infer(model_path, image):
    # face_locations returns (top, right, bottom, left) boxes
    face_coordinates = face_locations(image, model='cnn')
    if len(face_coordinates) == 0:
        print('Could not find any faces in the image')
        return
    model = load_model(model_path)
    # Crop each face, preprocess it, and batch the crops for a single predict call
    processed_images = np.asarray([
        process_image(image[y_min:y_max, x_min:x_max])
        for y_min, x_max, y_max, x_min in face_coordinates
    ])
    predictions = model.predict(processed_images)
    return zip(face_coordinates, np.concatenate(predictions).ravel())
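
# One possible way to consume infer()'s output: draw each detected face box
# and its score onto the image with OpenCV. The model path, image path and
# output file below are illustrative assumptions, not part of this project.
import cv2
import face_recognition

image = face_recognition.load_image_file('group_photo.jpg')  # RGB ndarray
results = infer('models/face_classifier.h5', image)
if results is not None:
    for (top, right, bottom, left), score in results:
        cv2.rectangle(image, (left, top), (right, bottom), (0, 255, 0), 2)
        cv2.putText(image, f'{score:.2f}', (left, top - 5),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 1)
    cv2.imwrite('annotated.jpg', cv2.cvtColor(image, cv2.COLOR_RGB2BGR))
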
def display(filename, model, probs, classes):
    '''Display an image along with the top x classes.'''
    labels = []
    for c in classes:
        labels.append(model.cat_to_name[model.idx_to_labels[c]])
    label_pos = np.arange(len(labels))  # unused in the console output path
    # truth_label = model.cat_to_name[truth]
    img = Image.open(filename).convert('RGB')
    torchimg = from_numpy(process_image(img))  # unused in the console output path
    # Display the image if it is not a console application
    print("\nThe flower image was classified as : " + labels[0])
    print("\nTop classifications are : " + str(labels))
    print("\nTop probabilities are : " + str(probs))
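
# When a display is available, the same result can be shown graphically. A
# hedged sketch using matplotlib (not part of the console path above; the
# function name display_with_plot is illustrative):
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image

def display_with_plot(filename, labels, probs):
    fig, (ax1, ax2) = plt.subplots(nrows=2, figsize=(5, 8))
    ax1.imshow(Image.open(filename).convert('RGB'))
    ax1.axis('off')
    ax1.set_title(labels[0])
    pos = np.arange(len(labels))
    ax2.barh(pos, probs)
    ax2.set_yticks(pos)
    ax2.set_yticklabels(labels)
    ax2.invert_yaxis()  # most probable class at the top
    plt.tight_layout()
    plt.show()
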
def predict(image_path, model, topk=5):
    '''Predict the class (or classes) of an image using a trained deep learning model.'''
    img = Image.open(image_path).convert('RGB')
    torchimg = from_numpy(process_image(img))
    model.eval()

    # Pick the device; the model itself is assumed to already be on this device
    if model.gpu_request:
        selected_device = device("cuda" if is_available() else "cpu")
    else:
        selected_device = "cpu"

    with no_grad():
        torchimg = torchimg.unsqueeze(0)            # add a batch dimension
        inputs = torchimg.to(selected_device).float()
        logps = model(inputs)                       # log-probabilities
        ps = exp(logps)
        top_p, top_class = ps.topk(topk)

    return top_p.squeeze().tolist(), top_class.squeeze().tolist()
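
# Note on the squeeze().tolist() pattern above: when topk=1, squeeze() yields
# 0-d tensors and tolist() then returns bare scalars rather than one-element
# lists. A small, purely illustrative guard a caller might add:
def as_lists(top_p, top_class):
    to_list = lambda x: x if isinstance(x, list) else [x]
    return to_list(top_p), to_list(top_class)
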
def query(window, event, values, params):
    """Queries an image for semantically similar images from the loaded database."""
    orig_db_folder = params['orig_db_folder']
    file_names = params['file_names']
    kd_tree = params['kd_tree']
    db_vectors = params['db_vectors']
    query_folder = values['-Q FOLDER-']
    query_size = values['-Q SIZE-']
    file_list = values['-FILE LIST-']

    # Bail out if the database is not loaded or the query inputs are incomplete
    if not all((orig_db_folder, file_names, kd_tree, db_vectors)):
        return None, None, None
    if query_folder == '' or query_size == '' or not file_list:
        return None, None, None

    query_path = os.path.join(query_folder, file_list[0])
    query_size = int(query_size)
    query_img = preprocessing.process_image(query_path, cropped=False, scaled=False)
    vector = wavelet_index_search.construct_feature_vector(query_img, level=3)

    # Fast query uses the k-d tree index; slow query scans the stored vectors
    if event == '-F QUERY-':
        closest = wavelet_index_search.fast_query(vector, kd_tree, query_size)
    else:
        closest = wavelet_index_search.slow_query(vector, db_vectors, query_size)

    indices = [i for (i, _) in closest]
    get_orig = lambda i: cv2.imread(
        os.path.join(orig_db_folder, file_names[i]), cv2.IMREAD_COLOR)
    orig_imgs = list(map(get_orig, indices))
    cropped = list(map(preprocessing.crop_centre_square, orig_imgs))

    # Draw the results on the Tk canvas, reusing the existing figure if there is one
    canvas = params['canvas']
    fig = params['fig']
    if fig is None:
        matplotlib.use('TkAgg')
        fig = plot(cropped)
        canvas = draw_figure(window['-CANVAS-'].TKCanvas, fig)
    else:
        plot(cropped, fig)
        canvas.draw()
    return closest, canvas, fig

def predict(image_path, model, top_k):
    '''Predict the class (or classes) of an image using a trained deep learning model.'''
    # `model` is passed in as a checkpoint path and replaced by the loaded network
    print('Start loading checkpoint...', end='')
    model = load_checkpoint(model)
    print('checkpoint loaded.')
    model.to('cuda')
    model.eval()

    print('Opening image...', end='')
    pil_image = PIL.Image.open(image_path)
    print('image opened')

    processed_image = process_image(pil_image)
    tensor = torch.from_numpy(processed_image).float()
    tensor = tensor.reshape(1, 3, 224, 224)   # add the batch dimension
    tensor = tensor.to('cuda')

    with torch.no_grad():
        probs, classes = torch.exp(model(tensor)).topk(top_k)

    probs, classes = probs.to('cpu'), classes.to('cpu')
    probs, classes = list(probs.numpy()[0]), list(classes.numpy()[0])
    return probs, [model.idx_to_class[idx] for idx in classes]

def telemetry(sid, data):
    # The current steering angle of the car
    steering_angle = data["steering_angle"]
    # The current throttle of the car
    throttle = data["throttle"]
    # The current speed of the car
    speed = data["speed"]
    # The current image from the center camera of the car
    imgString = data["image"]
    image = Image.open(BytesIO(base64.b64decode(imgString)))
    image_array = np.asarray(image)
    # img = image_array[None, :, :, :]
    # transform image
    img = process_image(image_array).reshape(-1, 16, 32, 3)
    # This model currently assumes that the features of the model are just the images. Feel free to change this.
    steering_angle = calc_angle(img)
    # The driving model currently just outputs a constant throttle. Feel free to edit this.
    throttle = .25
    print(steering_angle, throttle)
    send_control(steering_angle, throttle)
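
# telemetry() above follows the usual Udacity-simulator drive.py shape; a
# minimal sketch of the socketio wiring it assumes (the event names
# 'telemetry' and 'steer' and port 4567 come from the simulator protocol; the
# rest is a sketch, not this project's exact code):
import socketio
import eventlet
from flask import Flask

sio = socketio.Server()
app = Flask(__name__)

# telemetry() would be registered with: @sio.on('telemetry')

def send_control(steering_angle, throttle):
    sio.emit('steer',
             data={'steering_angle': str(steering_angle),
                   'throttle': str(throttle)},
             skip_sid=True)

if __name__ == '__main__':
    # Wrap the Flask app in socketio's WSGI middleware and serve on port 4567
    app = socketio.WSGIApp(sio, app)
    eventlet.wsgi.server(eventlet.listen(('', 4567)), app)
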
import numpy as np
import tensorflow as tf
import tensorflow_hub as hub
from PIL import Image

from arguments import make_argparser
from output import print_predictions
from preprocessing import process_image


def load_model(location):
    return tf.keras.models.load_model(
        location, custom_objects={"KerasLayer": hub.KerasLayer})


def predict(processed_img, model, top_k):
    # Add a batch dimension, run the model, and return the top_k values and indices
    image = np.expand_dims(processed_img, axis=0)
    prediction = model.predict(image)
    return tf.math.top_k(prediction, k=top_k, sorted=True)


if __name__ == "__main__":
    parser = make_argparser()
    args = parser.parse_args()

    pil_img = Image.open(args.input)
    numpy_img = np.asarray(pil_img)
    processed_test_image = process_image(numpy_img)

    loaded_model = load_model(args.model)
    probs, classes = predict(processed_test_image, loaded_model, args.top_k)
    print_predictions(probs, classes)
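
# A minimal sketch of the process_image() helper the script above imports from
# preprocessing; the 224x224 size and [0, 1] scaling are assumptions based on
# typical TF Hub image classifiers, not confirmed by this repository:
import numpy as np
import tensorflow as tf

def process_image(numpy_img, image_size=224):
    image = tf.convert_to_tensor(numpy_img, dtype=tf.float32)
    image = tf.image.resize(image, (image_size, image_size))
    image = image / 255.0  # scale pixel values to [0, 1]
    return image.numpy()
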
# The first call below was truncated in the original; only its last two
# arguments survive. By analogy with the detection call it extracts the
# proposal-network weights, but the missing .prototxt path is left as a gap.
proposal_weights = extract_caffe_weights(
    ...,  # truncated proposal .prototxt path
    'pretrained_models/proposal_final',
    'models/vgg16_proposal_model.npy')
detection_weights = extract_caffe_weights(
    'pretrained_models/detection_test.prototxt',
    'pretrained_models/detection_final',
    'models/vgg16_detection_model.npy')
proposal_layers = load_proposal_model(proposal_weights)
detection_layers = load_detection_model(detection_weights)

sess = tf.Session()
sess.run(tf.initialize_all_variables())

for name in image_name_list:
    im = cv2.imread(name)
    im_prec, resize_scale = process_image(im, config.proposal)
    # Stage 1: generate and filter region proposals
    deltas, scores, feats = proposal_test_model(sess, im_prec, proposal_layers)
    pred_boxes, scores = generate_proposal_boxes(feats.shape[1:3], deltas, scores,
                                                 im.shape[0:2], im_prec.shape[0:2],
                                                 config.proposal)
    pred_boxes = boxes_filter(pred_boxes, scores, config.proposal)
    # Stage 2: classify and refine the proposals, then apply NMS
    deltas, scores = detection_test_model(sess, feats, pred_boxes,
                                          detection_layers, resize_scale)
    boxes = generate_detection_boxes(deltas, pred_boxes, im.shape[0:2])
    pred_boxes, pred_scores, labels = nms_filter(boxes, scores, config.detection)
    plot_image_with_bbox(im[:, :, ::-1], pred_boxes, pred_scores, labels, classes)

from model_functions import build_model, load_checkpoint, predict
# process_image is assumed to live in model_functions as well; the original
# script used it without an explicit import
from model_functions import process_image
import argparse

parser = argparse.ArgumentParser(
    description='use a trained model to predict the class of an image')
parser.add_argument('path_to_image', help='the path of the image')
parser.add_argument('model_directory',
                    help='the directory of the model that has been trained')
parser.add_argument('--top_K', type=int, default=3,
                    help='return the top K most probable classes')
parser.add_argument('--category_names', default='',
                    help='mapping used to output the actual class names')
parser.add_argument('--gpu', action='store_true', default=False,
                    help='use the GPU for inference')
args = parser.parse_args()

model, other_info = load_checkpoint(args.model_directory)
image = process_image(args.path_to_image)
classes, probs = predict(image, model, topk=args.top_K,
                         category_name=args.category_names,
                         gpu_truth=args.gpu, other_info=other_info)
print(classes)
print(probs)
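
# One plausible way predict() could use --category_names internally: load a
# JSON mapping from class id to display name and translate the predicted
# classes (file name and keys are illustrative, not from this repository):
import json

def load_category_names(path):
    with open(path, 'r') as f:
        return json.load(f)

# e.g. names = load_category_names(args.category_names)
#      print([names[c] for c in classes])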