def get_classification(self, img):
    """Run bounding-box detection on an image and draw the results onto it."""
    try:
        with self.detection_graph.as_default():
            category_index = self.labelify()
            # Expand dimensions since the model expects images of shape [1, None, None, 3].
            img_expanded = np.expand_dims(img, axis=0)
            (boxes, scores, classes, num) = self.sess.run(
                [self.d_boxes, self.d_scores, self.d_classes, self.num_d],
                feed_dict={self.image_tensor: img_expanded})
            # Draw the detected boxes, class labels and scores onto the image in place.
            vis_util.visualize_boxes_and_labels_on_image_array(
                img,
                np.squeeze(boxes),
                np.squeeze(classes).astype(np.int32),
                np.squeeze(scores),
                category_index,
                use_normalized_coordinates=True,
                line_thickness=8)
            # The same output tensors can also be looked up by name on the graph
            # (these handles are not used further in this method).
            boxes = self.detection_graph.get_tensor_by_name('detection_boxes:0')
            scores = self.detection_graph.get_tensor_by_name('detection_scores:0')
            classes = self.detection_graph.get_tensor_by_name('detection_classes:0')
            num_detections = self.detection_graph.get_tensor_by_name('num_detections:0')
            return img
    except Exception as e:
        print("Exception during classification", e)
        return
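# A minimal, hypothetical sketch of the setup get_classification() assumes: a frozen
# TensorFlow 1.x object-detection graph loaded from `graph_path` (an assumed parameter),
# with the standard Object Detection API tensor names wired to the handles used above
# (self.image_tensor, self.d_boxes, self.d_scores, self.d_classes, self.num_d).
# This is not the original class, only one plausible way to provide those attributes.
import tensorflow as tf

class DetectorSetupSketch:
    def __init__(self, graph_path):
        self.detection_graph = tf.Graph()
        with self.detection_graph.as_default():
            graph_def = tf.GraphDef()
            with tf.gfile.GFile(graph_path, 'rb') as f:
                graph_def.ParseFromString(f.read())
            tf.import_graph_def(graph_def, name='')
        # Cache the tensor handles by their canonical names.
        self.image_tensor = self.detection_graph.get_tensor_by_name('image_tensor:0')
        self.d_boxes = self.detection_graph.get_tensor_by_name('detection_boxes:0')
        self.d_scores = self.detection_graph.get_tensor_by_name('detection_scores:0')
        self.d_classes = self.detection_graph.get_tensor_by_name('detection_classes:0')
        self.num_d = self.detection_graph.get_tensor_by_name('num_detections:0')
        self.sess = tf.Session(graph=self.detection_graph)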
def create(self):
    self._ratio = 28
    self._input_path = '../dataset/short/00001_00_0.1s.ARW'
    self._pattern = 0

    def pack_raw(raw):
        # Pack the Bayer image into 4 channels.
        im = raw.raw_image_visible.astype(np.float32)
        im = np.maximum(im - 512, 0) / (16383 - 512)  # subtract the black level
        im = np.expand_dims(im, axis=2)
        img_shape = im.shape
        H = img_shape[0]
        W = img_shape[1]
        out = np.concatenate((im[0:H:2, 0:W:2, :],
                              im[0:H:2, 1:W:2, :],
                              im[1:H:2, 1:W:2, :],
                              im[1:H:2, 0:W:2, :]), axis=2)
        return out

    # Read the raw input image.
    raw = rawpy.imread(self._input_path)
    input_full = np.expand_dims(pack_raw(raw), axis=0) * self._ratio
    input_full = np.minimum(input_full, 1.0)
    self._pattern = input_full
    return self._pattern
def load_raw(img_path):
    ratio = 300
    raw = rawpy.imread(img_path)
    input_full = np.expand_dims(pack_raw(raw), axis=0) * ratio
    input_full = np.minimum(input_full, 1.0)
    return input_full
def load_raw2(in_name, input_dir, ratio):
    img_path = os.path.join(input_dir, in_name)
    raw = rawpy.imread(img_path)
    input_full = np.expand_dims(pack_raw(raw), axis=0) * ratio
    input_full = np.minimum(input_full, 1.0)
    # # In Keras the layout is CHWN:
    # channel_fist_img = covnert_to_channel_first(input_full)
    return input_full
def load_raw(in_name, gt_dir, input_dir):
    ratio, img_path = get_ratio(in_name, gt_dir, input_dir)
    raw = rawpy.imread(img_path)
    input_full = np.expand_dims(pack_raw(raw), axis=0) * ratio
    input_full = np.minimum(input_full, 1.0)
    # # In Keras the layout is CHWN:
    # channel_fist_img = covnert_to_channel_first(input_full)
    return input_full
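# A hypothetical sketch of what get_ratio() used above might do, assuming SID-style
# (Learning-to-See-in-the-Dark) file names such as '00001_00_0.1s.ARW' where the suffix
# encodes the exposure time. The real helper may differ; this is only for illustration.
import glob
import os

def get_ratio_sketch(in_name, gt_dir, input_dir, max_ratio=300.0):
    img_path = os.path.join(input_dir, in_name)
    # Match the long-exposure ground-truth file for the same scene id (first 5 characters).
    gt_path = glob.glob(os.path.join(gt_dir, in_name[0:5] + '*.ARW'))[0]
    in_exposure = float(os.path.basename(img_path)[9:-5])   # e.g. 0.1 from '..._0.1s.ARW'
    gt_exposure = float(os.path.basename(gt_path)[9:-5])    # e.g. 10  from '..._10s.ARW'
    ratio = min(gt_exposure / in_exposure, max_ratio)
    return ratio, img_path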
def pack_raw(raw):
    # Pack the Bayer image into 4 channels.
    im = raw.raw_image_visible.astype(np.float32)
    im = np.maximum(im - 512, 0) / (16383 - 512)  # subtract the black level
    im = np.expand_dims(im, axis=2)
    img_shape = im.shape
    H = img_shape[0]
    W = img_shape[1]
    out = np.concatenate((im[0:H:2, 0:W:2, :],
                          im[0:H:2, 1:W:2, :],
                          im[1:H:2, 1:W:2, :],
                          im[1:H:2, 0:W:2, :]), axis=2)
    return out
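# A minimal, runnable sketch (not from the original code) of what pack_raw() produces.
# SimpleNamespace is a hypothetical stand-in for a rawpy object: it only provides the
# raw_image_visible attribute that pack_raw() reads; real usage would call rawpy.imread(path).
import numpy as np
from types import SimpleNamespace

fake_raw = SimpleNamespace(
    raw_image_visible=np.random.randint(512, 16383, size=(8, 8)).astype(np.uint16))
packed = pack_raw(fake_raw)
print(packed.shape)  # (4, 4, 4): half height, half width, one channel per Bayer 2x2 position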
def unit_vector(data, axis=None, out=None):
    """Return ndarray normalized by length, i.e. Euclidean norm, along axis.

    >>> v0 = np.random.random(3)
    >>> v1 = unit_vector(v0)
    >>> np.allclose(v1, v0 / np.linalg.norm(v0))
    True
    >>> v0 = np.random.rand(5, 4, 3)
    >>> v1 = unit_vector(v0, axis=-1)
    >>> v2 = v0 / np.expand_dims(np.sqrt(np.sum(v0*v0, axis=2)), 2)
    >>> np.allclose(v1, v2)
    True
    >>> v1 = unit_vector(v0, axis=1)
    >>> v2 = v0 / np.expand_dims(np.sqrt(np.sum(v0*v0, axis=1)), 1)
    >>> np.allclose(v1, v2)
    True
    >>> v1 = np.empty((5, 4, 3))
    >>> unit_vector(v0, axis=1, out=v1)
    >>> np.allclose(v1, v2)
    True
    >>> list(unit_vector([]))
    []
    >>> list(unit_vector([1]))
    [1.0]

    """
    if out is None:
        data = np.array(data, dtype=np.float64, copy=True)
        if data.ndim == 1:
            data /= np.sqrt(np.dot(data, data))
            return data
    else:
        if out is not data:
            out[:] = np.array(data, copy=False)
        data = out
    length = np.atleast_1d(np.sum(data * data, axis))
    np.sqrt(length, length)
    if axis is not None:
        length = np.expand_dims(length, axis)
    data /= length
    if out is None:
        return data
import numpy as np
import matplotlib.pyplot as plt
from keras.preprocessing import image
from keras.models import load_model
from keras.applications.vgg16 import VGG16, decode_predictions

img = image.load_img("VGG16-asphalt_crack_detector/image_cat.jpeg", target_size=(224, 224))
img = np.asarray(img)
plt.imshow(img)
img = np.expand_dims(img, axis=0)

saved_model = VGG16()
output = saved_model.predict(img)
print('Predicted:', decode_predictions(output, top=1)[0])

# Compare the raw scores of the first two ImageNet classes.
if output[0][0] > output[0][1]:
    print("dog")
else:
    print('cat')