def run_model_on_corpus_image(checkpoint, imagenum, output_blobs):
    """Run the model saved at *checkpoint* on corpus image *imagenum*.

    Based on decaf's "imagenet" script.  Returns the model outputs for the
    requested *output_blobs*.
    """
    corpus = get_image_corpus()
    # Mean-subtract the raw image data before feeding it to the network.
    centered = corpus.get_all_images_data()[imagenum] - corpus.get_mean()
    net = get_models()[checkpoint]
    return net.predict(data=centered.astype(np.float32),
                       output_blobs=output_blobs)
def confusion_matrix(checkpoint):
    """Return the confusion matrix for *checkpoint* as JSON.

    Includes label names and up to nine sample images per
    (true class, predicted class) cell.
    """
    stats = get_model_stats_db().get_stats(checkpoint)
    # Matrix cells may be numpy scalars; coerce to plain floats for JSON.
    json_matrix = [[float(cell) for cell in row]
                   for row in stats.confusion_matrix]
    SAMPLE_IMAGE_LIMIT = 9
    samples = [[cell[:SAMPLE_IMAGE_LIMIT] for cell in row]
               for row in stats.images_by_classification]
    return jsonify({"confusionmatrix": json_matrix,
                    "labelnames": get_image_corpus().label_names,
                    "sampleimages": samples})
def predict_for_image(checkpoint, imagenum):
    """Return predictions for a particular image."""
    features = run_model_on_corpus_image(checkpoint, imagenum,
                                         ["probs_cudanet_out"])
    corpus = get_image_corpus()
    predictions = []
    # Pair each class probability with its human-readable label name.
    for label_index, prob in enumerate(features["probs_cudanet_out"][0]):
        predictions.append({"class": corpus.label_names[label_index],
                            "prob": float(prob)})
    return jsonify({"predictions": predictions})
def layer_filters_channels_image_json(checkpoints, layernames, filters, channels, imagenum):
    """Query model activations for the selected layers/filters/channels.

    Loads corpus image *imagenum* as a 1x32x32x3 float32 array and runs a
    region-selection query across the given *checkpoints*.
    """
    corpus = get_image_corpus()
    pixels = np.array(corpus.get_image(imagenum).getdata())
    # Single-image batch: (batch, height, width, channel) — presumably
    # 32x32 RGB corpus images; shape is fixed by the reshape below.
    batch = pixels.reshape(1, 32, 32, 3).astype(np.float32)
    return select_region_query(
        get_models(),
        times=checkpoints,
        layers=layernames,
        filters=filters,
        channels=channels,
        image=batch,
    )
def get_image_from_corpus(image_num):
    """Serve corpus image *image_num* as a PNG.

    Honors an optional integer ``scale`` query parameter; scaling uses
    nearest-neighbor resampling so pixels stay sharp.
    """
    image = get_image_corpus().get_image(image_num)
    scale = int(request.args.get('scale', 1))
    if scale != 1:
        width, height = image.size
        image = image.resize((width * scale, height * scale), Image.NEAREST)
    buf = StringIO()
    image.save(buf, format="PNG")
    payload = buf.getvalue()
    buf.close()
    return Response(payload, mimetype="image/png")
def get_image_from_corpus(image_num):
    """Render corpus image *image_num* as a PNG HTTP response.

    An optional ``scale`` query argument (default 1) enlarges the image by
    an integer factor using nearest-neighbor interpolation.
    """
    corpus = get_image_corpus()
    img = corpus.get_image(image_num)
    factor = int(request.args.get("scale", 1))
    if factor != 1:
        w, h = img.size
        img = img.resize((w * factor, h * factor), Image.NEAREST)
    out = StringIO()
    img.save(out, format="PNG")
    data = out.getvalue()
    out.close()
    return Response(data, mimetype="image/png")
def predict_for_image(checkpoint, imagenum):
    """Return predictions for a particular image."""
    features = run_model_on_corpus_image(
        checkpoint, imagenum, ["probs_cudanet_out"])
    probs = features["probs_cudanet_out"][0]
    labels = get_image_corpus().label_names
    # One entry per class: label name plus its (JSON-safe) probability.
    return jsonify({'predictions': [
        {'class': labels[i], 'prob': float(p)}
        for i, p in enumerate(probs)
    ]})
def confusion_matrix(checkpoint):
    """JSON endpoint: confusion matrix, label names, and sample images
    (capped at nine per matrix cell) for *checkpoint*."""
    SAMPLE_IMAGE_LIMIT = 9
    stats = get_model_stats_db().get_stats(checkpoint)
    matrix = stats.confusion_matrix
    # float() makes each cell JSON-serializable (may be a numpy scalar).
    as_floats = []
    for row in matrix:
        as_floats.append([float(value) for value in row])
    trimmed = []
    for row in stats.images_by_classification:
        trimmed.append([cell[:SAMPLE_IMAGE_LIMIT] for cell in row])
    return jsonify({
        'confusionmatrix': as_floats,
        'labelnames': get_image_corpus().label_names,
        'sampleimages': trimmed,
    })
def layer_filters_channels_image_json(checkpoints, layernames, filters, channels, imagenum):
    """Select model activation regions for one corpus image.

    The image is reshaped into a single-element float32 batch of
    32x32 RGB pixels and passed to ``select_region_query`` along with the
    requested checkpoints, layers, filters, and channels.
    """
    image = get_image_corpus().get_image(imagenum)
    data = np.array(image.getdata()).reshape(1, 32, 32, 3)
    result = select_region_query(get_models(),
                                 times=checkpoints,
                                 layers=layernames,
                                 filters=filters,
                                 channels=channels,
                                 image=data.astype(np.float32))
    return result
def image_corpus_query(query):
    """Search the image corpus and return the matches as JSON.

    TODO: limit the number of search results, or paginate them.
    """
    matches = get_image_corpus().find_images(query)
    return jsonify(dict(matches))