def load_data(self, split=0.25):
    excel_file = pd.ExcelFile(self.clinical_data_path)
    df = pd.read_excel(excel_file, 'Patient Data', index_col=[0])
    df = df.rename(columns=df.iloc[0])
    training_image_path = os.path.join(self.dataset_folder, 'images')
    images_list, images, label = preprocess_images(training_image_path)

    # Collect the clinical record for every image; the patient ID is the
    # image filename without its extension.
    feature_rows = []
    for image in images_list:
        patient_id = image.split('.')[0]
        feature_rows.append(df.loc[patient_id])
    features_df = pd.DataFrame(feature_rows, columns=df.columns)
    features = process_structured_data(features_df)

    # Reserve `split` of the samples for testing and another `split` for
    # validation. Carve out the test set first, then take the validation
    # set from the remaining training data, so the splits do not overlap.
    total_size = len(images)
    valid_size = int(split * total_size)
    test_size = int(split * total_size)
    xf_train, xf_test, im_train, im_test, y_train, y_test = train_test_split(
        features, images, label, test_size=test_size, random_state=42)
    xf_train, xf_valid, im_train, im_valid, y_train, y_valid = train_test_split(
        xf_train, im_train, y_train, test_size=valid_size, random_state=42)
    return ((xf_train, xf_valid, xf_test),
            (im_train, im_valid, im_test),
            (y_train, y_valid, y_test))
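# A hedged usage sketch (not from the original source): how the three split
# tuples returned by load_data might feed a two-input Keras model that combines
# the structured clinical features with the images. The `data_loader` and
# `model` objects below are assumptions for illustration only.
(xf_train, xf_valid, xf_test), (im_train, im_valid, im_test), \
    (y_train, y_valid, y_test) = data_loader.load_data(split=0.25)
model.fit([xf_train, im_train], y_train,
          validation_data=([xf_valid, im_valid], y_valid),
          epochs=50, batch_size=32)
test_metrics = model.evaluate([xf_test, im_test], y_test)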
def start_video(self, model):
    camera = cv2.VideoCapture(0)
    while True:
        frame = camera.read()[1]
        if frame is None:
            continue
        # OpenCV delivers BGR frames; the detector expects RGB at 300 x 300.
        image_array = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        image_array = cv2.resize(image_array, (300, 300))
        image_array = preprocess_images(image_array)
        selected_boxes = predict(model, image_array, prior_boxes,
                                 frame.shape[0:2], self.num_classes,
                                 self.lower_probability_threshold,
                                 self.iou_threshold, self.background_index,
                                 self.box_scale_factors)
        if selected_boxes is not None:
            draw_video_boxes(selected_boxes, frame, self.arg_to_class,
                             self.colors, self.font)
        cv2.imshow('webcam', frame)
        # Press 'q' to stop the stream.
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    camera.release()
    cv2.destroyAllWindows()
num_ground_truth_boxes = 0
class_decoder = get_arg_to_class(class_names)
num_classes = len(class_names)
data_manager = DataManager(dataset_name, selected_classes,
                           data_prefix, image_prefix)
ground_truth_data = data_manager.load_data()
difficult_data_flags = data_manager.parser.difficult_objects
image_names = sorted(list(ground_truth_data.keys()))
# print('Number of images found:', len(image_names))
for image_name in image_names:
    ground_truth_sample = ground_truth_data[image_name]
    image_prefix = data_manager.image_prefix
    image_path = image_prefix + image_name
    image_array, original_image_size = load_image(image_path, input_shape)
    image_array = preprocess_images(image_array)
    predicted_data = predict(model, image_array, prior_boxes,
                             original_image_size, num_classes,
                             class_threshold, iou_threshold)
    ground_truth_sample = denormalize_box(ground_truth_sample,
                                          original_image_size)
    ground_truth_boxes_in_image = len(ground_truth_sample)
    # Objects flagged as 'difficult' are excluded from the ground-truth count.
    difficult_objects = difficult_data_flags[image_name]
    difficult_objects = np.asarray(difficult_objects, dtype=bool)
    num_ground_truth_boxes += np.sum(np.logical_not(difficult_objects))
    if predicted_data is None:
        # print('Zero predictions given for image:', image_name)
        continue
    # plt.imshow(original_image_array.astype('uint8'))
    # plt.show()
    # draw_image_boxes(predicted_data, original_image_array,
    #                  class_decoder, normalized=False)
# image_names = sorted(list(ground_truth_data.keys()))
image_names = list(ground_truth_data.keys())
for image_name in tqdm(image_names):
    ground_truth_sample = ground_truth_data[image_name]
    image_path = image_prefix + image_name
    rgb_image, image_size = load_image(image_path, target_size=(300, 300))

    pytorch_image = preprocess_pytorch_input(rgb_image)
    pytorch_output = pytorch_ssd(pytorch_image)
    p1 = pytorch_output[0].data.numpy()                       # bounding boxes
    p2 = softmax(np.squeeze(pytorch_output[1].data.numpy()))  # classes
    p3 = pytorch_output[2].data.numpy()                       # prior boxes
    # pytorch_detections = pytorch_output.data

    keras_image = preprocess_images(rgb_image)
    keras_image_input = np.expand_dims(keras_image, axis=0)
    keras_output = model.predict(keras_image_input)
    keras_detection = predict(model, keras_image, prior_boxes, image_size,
                              num_classes, lower_probability_threshold,
                              iou_threshold, background_index)
    keras_output = np.squeeze(keras_output)
    k1 = keras_output[:, :4]   # bounding boxes
    k2 = keras_output[:, 4:]   # classes
    k3 = prior_boxes           # prior boxes

    diff = np.abs(p1 - k1)
    diff_mask = diff < 1e-4
    all_good = np.all(diff_mask)
    print(all_good)
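# The softmax helper applied to the PyTorch class scores above is not shown in
# this excerpt; it is assumed to resemble the minimal, numerically stable NumPy
# version sketched below (applied along the class axis).
def softmax(logits, axis=-1):
    # Subtract the per-row maximum before exponentiating to avoid overflow.
    shifted = logits - np.max(logits, axis=axis, keepdims=True)
    exponentials = np.exp(shifted)
    return exponentials / np.sum(exponentials, axis=axis, keepdims=True)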