def serialize_metadata(metadata, output_path, file_name="rois"):
    """Persist a protobuf metadata message in both binary and text form.

    Creates ``output_path`` if needed, then writes ``<file_name>.bin``
    (the serialized bytes) and ``<file_name>.txt`` (the human-readable
    protobuf text representation) into it.
    """
    io_utils.create_folder(output_path)
    bin_path = os.path.join(output_path, "{}.bin".format(file_name))
    txt_path = os.path.join(output_path, "{}.txt".format(file_name))
    with open(bin_path, "wb") as bin_file:
        bin_file.write(metadata.SerializeToString())
    with open(txt_path, "w") as txt_file:
        txt_file.write(str(metadata))
def file_copy_ftp_to_local(ftp_ip, ftp_login, ftp_passwd, ftp_file_path, local_file_path):
    """Download a single file from an FTP server to a local path.

    Args:
        ftp_ip: FTP server host.
        ftp_login: FTP user name.
        ftp_passwd: FTP password.
        ftp_file_path: remote path of the file to fetch.
        local_file_path: local destination path; its parent directory is
            created when the path contains one.
    """
    if os.path.dirname(local_file_path) != "":
        io_utils.create_folder(os.path.dirname(local_file_path))
    ftp_folder = os.path.dirname(ftp_file_path)
    ftp_filename = os.path.basename(ftp_file_path)
    # Use FTP as a context manager so the control connection is closed
    # even if cwd/retrbinary raises (the original leaked the connection).
    with ftplib.FTP(ftp_ip, ftp_login, ftp_passwd) as ftp:
        ftp.cwd(ftp_folder)
        with open(local_file_path, 'wb') as f:
            ftp.retrbinary('RETR ' + ftp_filename, f.write)
def start_client(host, port, input_path, output_path):
    """Send a detection request to the gRPC inference service.

    Opens an insecure channel to ``host:port``, asks the server to process
    the images under ``input_path`` and, on success, serializes the returned
    metadata into ``output_path`` as ``rois``; otherwise an error is logged.
    """
    io_utils.create_folder(output_path)
    target = host + ":" + str(port)
    stub = inference_service_pb2_grpc.InferenceServiceStub(
        grpc.insecure_channel(target))
    request = inference_service_pb2.DetectionRequest(images_path=input_path)
    metadata = stub.process(request)
    if metadata is None:
        logger.error('An error has occurred processing folder. See server log for more details.')
    else:
        serialize_metadata(metadata, output_path, 'rois')
def create_callbacks(model, training_model, prediction_model, validation_generator, args):
    """Build the list of Keras callbacks used during training.

    Depending on ``args`` this adds: snapshot checkpointing (redirected to
    the prediction model), TensorBoard logging, traffic-signs evaluation,
    and always an LR-reduction-on-plateau callback.
    """
    callbacks = []

    if args.snapshots:
        # Create the snapshot directory up front; otherwise h5py errors
        # out after the first epoch.
        io_utils.create_folder(args.snapshot_path)
        snapshot_name = '{backbone}_{dataset_type}_{{epoch:02d}}.h5'.format(
            backbone=args.backbone, dataset_type=args.dataset_type)
        snapshot_cb = keras.callbacks.ModelCheckpoint(
            os.path.join(args.snapshot_path, snapshot_name), verbose=1)
        # Checkpoints are saved from the prediction model, not the
        # training model.
        callbacks.append(RedirectModel(snapshot_cb, prediction_model))

    tensorboard_callback = None
    if args.tensorboard_dir:
        tensorboard_callback = keras.callbacks.TensorBoard(
            log_dir=args.tensorboard_dir,
            histogram_freq=0,
            batch_size=args.batch_size,
            write_graph=True,
            write_grads=False,
            write_images=False,
            embeddings_freq=0,
            embeddings_layer_names=None,
            embeddings_metadata=None)
        callbacks.append(tensorboard_callback)

    if args.evaluation and validation_generator:
        if args.dataset_type == 'traffic_signs':
            # Evaluation also runs against the prediction model.
            ground_truth_proto_file = os.path.join(args.val_path, 'rois.bin')
            eval_cb = TrafficSignsEval(
                validation_generator, ground_truth_proto_file,
                os.path.join(args.train_path, 'rois.bin'))
            callbacks.append(RedirectModel(eval_cb, prediction_model))

    # NOTE(review): `epsilon` was renamed to `min_delta` in newer Keras
    # releases — confirm against the pinned keras version before upgrading.
    callbacks.append(keras.callbacks.ReduceLROnPlateau(
        monitor='loss', factor=0.1, patience=2, verbose=1, mode='auto',
        epsilon=0.0001, cooldown=0, min_lr=0))

    return callbacks
def predict_folder(model, input_folder, output_folder, resolutions, rois_labels,
                   score_threshold_per_class, algorithm="retinanet",
                   draw_predictions=False, max_number_of_images=None, log_level=0):
    """Run detection over every image in ``input_folder``.

    Args:
        model: detection model passed through to ``predict_one_image``.
        input_folder: directory scanned for images.
        output_folder: where annotated images are written when
            ``draw_predictions`` is True; created if missing.
        resolutions: resolutions at which each image is predicted.
        rois_labels: label set for the detected ROIs.
        score_threshold_per_class: per-class score cut-offs.
        algorithm: algorithm tag stored in the output metadata.
        draw_predictions: if True, paint boxes and save annotated images.
        max_number_of_images: optional cap on the number of images processed.
        log_level: >0 enables per-image info logging.

    Returns:
        Metadata with all predictions, via ``get_preds_in_common_format``.
    """
    logger = logging.getLogger(__name__)
    if log_level > 0:
        logger.info(
            'Prediction on directory {} is starting.'.format(input_folder))
    if output_folder is not None and not os.path.isdir(output_folder):
        io_utils.create_folder(output_folder)
    img_files_lst = io_utils.get_images_from_folder(input_folder)
    # Fixed: identity comparison with None (was `!= None`).
    if max_number_of_images is not None:
        img_files_lst = img_files_lst[:max_number_of_images]
    all_predictions = dict()
    for idx, file_name in enumerate(tqdm(img_files_lst)):
        if log_level > 0:
            logger.info('{}/{} {}'.format(idx, len(img_files_lst), file_name))
        try:
            image = apollo_python_common.image.get_bgr(file_name)
            if image is None:
                logger.warning('Image {} is corrupted'.format(file_name))
                continue
            image, image_pred = preprocess_image(image)
            merged_predictions, predictions_per_resolutions = predict_one_image(
                image_pred, model, rois_labels, resolutions,
                score_threshold_per_class, log_level)
            all_predictions[os.path.basename(file_name)] = merged_predictions
            if draw_predictions:
                # Paint predicted boxes and save the annotated image.
                paint_detections_to_image(image, merged_predictions, (0, 0, 255))
                out_file_name = os.path.join(output_folder,
                                             os.path.basename(file_name))
                cv2.imwrite(out_file_name, image)
        except Exception as err:
            # Best-effort batch: one bad image must not abort the folder.
            logger.error(err)
            print(traceback.format_exc())
    metadata = get_preds_in_common_format(all_predictions, algorithm, "")
    return metadata