def upload_detect_image(path):
    """Run detection on the image at *path*, write the annotated result under
    camera_image/, upload it to Dropbox, and return the shared link."""
    filename = get_timestamp() + ".jpg"
    local_path = "camera_image/" + filename
    detect_img(img_path=path, out_filepath=local_path)
    uploader = Uploader(DB_ACCESS_TOKEN)
    with open(local_path, "rb") as fh:
        payload = fh.read()
    return uploader.upload(payload, "/webcam/" + filename, link=True)
def main(_):
    """Entry point: merge optional YAML config overrides into the flag values,
    select the visible CUDA devices, and dispatch on FLAGS.mode."""
    flags_dict = FLAGS.flag_values_dict()
    if FLAGS.config is not None:
        # yaml is only needed when a config file was supplied.
        import yaml
        with open(FLAGS.config) as stream:
            overrides = yaml.safe_load(stream)
        # Map symbolic names from the YAML file onto the registry objects.
        if 'backbone' in overrides:
            overrides['backbone'] = BACKBONE[overrides['backbone']]
        if 'opt' in overrides:
            overrides['opt'] = OPT[overrides['opt']]
        if 'input_size' in overrides:
            size = overrides['input_size']
            if isinstance(size, str):
                overrides['input_size'] = parse_tuple(size)
            elif isinstance(size, list):
                overrides['input_size'] = [parse_tuple(s) for s in size]
            else:
                raise ValueError(
                    'Please use array or tuple to define input_size')
        if 'learning_rate' in overrides:
            # YAML may yield strings (e.g. scientific notation); force float.
            overrides['learning_rate'] = [
                float(lr) for lr in overrides['learning_rate']
            ]
        # Config-file entries take precedence over command-line flags.
        flags_dict.update(overrides)
    os.environ["CUDA_VISIBLE_DEVICES"] = FLAGS.gpus
    mode = FLAGS.mode
    if mode == MODE.TRAIN:
        tf.logging.info('Train mode')
        train(flags_dict)
    elif mode == MODE.TRAIN_BACKBONE:
        tf.logging.info('Train backbone mode')
        train_backbone(flags_dict)
    elif mode == MODE.IMAGE:
        tf.logging.info('Image detection mode')
        detect_img(YOLO(flags_dict))
    elif mode == MODE.VIDEO:
        tf.logging.info('Video detection mode')
        detect_video(YOLO(flags_dict), FLAGS.input, FLAGS.output)
    elif mode == MODE.MAP:
        tf.logging.info('Calculate test dataset map')
        calculate_map(YOLO(flags_dict), FLAGS.test_dataset)
    elif mode == MODE.SERVING:
        tf.logging.info('Export hdf5 model to serving model')
        export_serving_model(YOLO(flags_dict), FLAGS.export)
    elif mode == MODE.TFLITE:
        tf.logging.info('Export hdf5 model to tflite model')
        export_tflite_model(YOLO(flags_dict), FLAGS.export)
    elif mode == MODE.TFJS:
        tf.logging.info('Export hdf5 model to tensorflow.js model')
        export_tfjs_model(YOLO(flags_dict), FLAGS.export)
def _main():
    """Parse the command-line arguments and run either single-image or
    video detection."""
    cli_args = get_args()
    if cli_args.image:
        # Single-image mode.
        print('[i] ==> Image detection mode\n')
        detect_img(YOLO(cli_args))
    else:
        # Video (or camera) mode.
        print('[i] ==> Video detection mode\n')
        detect_video(YOLO(cli_args), cli_args.video, cli_args.output)
    print('Well done!!!')
def do_task(path_ImageToBeDetected, conn):
    """Detect objects in the requested image and send the resulting image
    path back to the client over *conn*."""
    print('before detected-- path_ImageToBeDetected: ', path_ImageToBeDetected)
    # Drop surrounding whitespace the client may have sent with the path.
    path_ImageToBeDetected = path_ImageToBeDetected.strip()
    print('after detected-- path_ImageToBeDetected: ', path_ImageToBeDetected)
    saved_path = y.detect_img(yol, path_ImageToBeDetected)
    # '开始返回' = "starting to return (the result)".
    print('开始返回')
    bytes_sent = conn.send(saved_path.encode())
    print(bytes_sent)
def main(_):
    """Entry point: restrict CUDA to the requested GPUs, then dispatch on
    FLAGS.mode (train / detect / mAP / model export)."""
    os.environ["CUDA_VISIBLE_DEVICES"] = FLAGS.gpus
    if FLAGS.mode == MODE.TRAIN:
        tf.logging.info('Train mode')
        train(FLAGS)
    elif FLAGS.mode == MODE.IMAGE:
        tf.logging.info('Image detection mode')
        detect_img(YOLO(**vars(FLAGS)))
    elif FLAGS.mode == MODE.VIDEO:
        tf.logging.info('Video detection mode')
        detect_video(YOLO(**vars(FLAGS)), FLAGS.input, FLAGS.output)
    elif FLAGS.mode == MODE.MAP:
        tf.logging.info('Calculate test dataset map')
        calculate_map(YOLO(**vars(FLAGS)), FLAGS.test_dataset)
    elif FLAGS.mode == MODE.SERVING:
        tf.logging.info('Export hdf5 model to saved model')
        export_serving_model(YOLO(**vars(FLAGS)), FLAGS.export)
    elif FLAGS.mode == MODE.TFLITE:
        tf.logging.info('Export hdf5 model to tflite model')
        export_tflite_model(YOLO(**vars(FLAGS)), FLAGS.export)
    elif FLAGS.mode == MODE.TFJS:
        # Fixed copy/paste bug: this branch previously logged
        # 'Export hdf5 model to tflite model'.
        tf.logging.info('Export hdf5 model to tensorflow.js model')
        export_tfjs_model(YOLO(**vars(FLAGS)), FLAGS.export)
parser.add_argument(
    "--test_dataset", nargs='?', type=str, default="",
    help="[Optional] Test dataset glob")

FLAGS = parser.parse_args()

if FLAGS.image:
    # Single-image detection mode; --input/--output are not used here.
    print("Image detection mode")
    if "input" in FLAGS:
        print(" Ignoring remaining command line arguments: " + FLAGS.input +
              "," + FLAGS.output)
    detect_img(YOLO(**vars(FLAGS)))
elif FLAGS.images:
    # Batch image detection over the FLAGS.input glob.
    print("Image detection mode")
    detect_imgs(YOLO(**vars(FLAGS)), FLAGS.input)
elif FLAGS.map:
    # mAP evaluation on the test dataset; --input/--output are not used here.
    print("Calculate map")
    if "input" in FLAGS:
        print(" Ignoring remaining command line arguments: " + FLAGS.input +
              "," + FLAGS.output)
    YOLO(**vars(FLAGS)).calculate_map(FLAGS.test_dataset)
def main(_):
    """CLI entry point: merge optional YAML config overrides into the parsed
    flags, configure TensorFlow (XLA/debug opt modes, visible GPUs, GPU memory
    growth), then dispatch to the action selected by FLAGS.mode."""
    flags_dict = FLAGS.flag_values_dict()
    if FLAGS.config is not None:
        # Deferred import: yaml is only needed when a config file is given.
        import yaml
        with open(FLAGS.config) as stream:
            config = yaml.safe_load(stream)
        # Translate symbolic config names into the objects they stand for.
        if 'backbone' in config:
            config['backbone'] = BACKBONE[config['backbone']]
        if 'opt' in config:
            config['opt'] = OPT[config['opt']]
        if 'input_size' in config:
            # Accept a single tuple-like string or a list of them.
            if isinstance(config['input_size'], str):
                config['input_size'] = parse_tuple(config['input_size'])
            elif isinstance(config['input_size'], list):
                config['input_size'] = [
                    parse_tuple(size) for size in config['input_size']
                ]
            else:
                raise ValueError(
                    'Please use array or tuple to define input_size')
        if 'learning_rate' in config:
            # YAML may parse entries as strings; force them to float.
            config['learning_rate'] = [
                float(lr) for lr in config['learning_rate']
            ]
        # Config-file entries override command-line flag values.
        flags_dict.update(config)
    opt = flags_dict.get('opt', None)
    if opt == OPT.XLA:
        tf.config.optimizer.set_jit(True)
    elif opt == OPT.DEBUG:
        # Deterministic + eager + verbose setup for debugging runs.
        tf.compat.v2.random.set_seed(111111)
        tf.debugging.set_log_device_placement(True)
        tf.config.experimental_run_functions_eagerly(True)
        logging.set_verbosity(logging.DEBUG)
    gpus = tf.config.experimental.list_physical_devices('GPU')
    if gpus:
        # Keep only the physical GPUs whose index appears in flags 'gpus'.
        gpu_indexs = [int(gpu.name.split(':')[-1]) for gpu in gpus]
        valid_gpu_indexs = list(
            filter(lambda gpu: gpu in flags_dict['gpus'], gpu_indexs))
        valid_gpus = [gpus[index] for index in valid_gpu_indexs]
        tf.config.experimental.set_visible_devices(valid_gpus, 'GPU')
        flags_dict['gpus'] = get_gpu_name(valid_gpus)
    # NOTE(review): 'config' is re-bound here from the YAML dict to a TF
    # ConfigProto — confusing but intentional in the original.
    config = tf.compat.v1.ConfigProto()
    config.gpu_options.allow_growth = True  # allocate GPU memory on demand
    session = InteractiveSession(config=config)
    if flags_dict['backbone'] is None:
        raise ValueError("Please select your model's backbone")
    if FLAGS.mode == MODE.TRAIN:
        log('Train mode')
        train(flags_dict)
    elif FLAGS.mode == MODE.TRAIN_BACKBONE:
        log('Train backbone mode')
        train_backbone(flags_dict)
    elif FLAGS.mode == MODE.IMAGE:
        if flags_dict['model'] is None:
            raise ValueError('Please enter your model path')
        log('Image detection mode')
        detect_img(YOLO(flags_dict))
    elif FLAGS.mode == MODE.VIDEO:
        if flags_dict['model'] is None:
            raise ValueError('Please enter your model path')
        log('Video detection mode')
        detect_video(YOLO(flags_dict), FLAGS.input, FLAGS.output)
    elif FLAGS.mode == MODE.MAP:
        if flags_dict['model'] is None:
            raise ValueError('Please enter your model path')
        log('Calculate test dataset map')
        # Zero score threshold so every detection is considered for mAP.
        flags_dict['score'] = 0.0
        calculate_map(YOLO(flags_dict), FLAGS.test_dataset)
    elif FLAGS.mode == MODE.SERVING:
        tf.disable_eager_execution()
        log('Export hdf5 model to serving model')
        export_serving_model(YOLO(flags_dict), FLAGS.export)
    elif FLAGS.mode == MODE.TFLITE:
        log('Export hdf5 model to tflite model')
        export_tflite_model(YOLO(flags_dict), FLAGS.export)
    elif FLAGS.mode == MODE.TFJS:
        log('Export hdf5 model to tensorflow.js model')
        export_tfjs_model(YOLO(flags_dict), FLAGS.export)
from yolo import YOLO, detect_video, detect_img

if __name__ == '__main__':
    # Detect objects in a single image.
    detect_img(YOLO(), 'test/person.jpg')
    # Detect objects in a video file:
    # detect_video(YOLO(), 'test/test_video.mp4', 'test/test_video_out.mp4')
    # Detect from the camera (device 0):
    # detect_video(YOLO(), video_path=0, output_path='test/test_video_out.mp4')
import selectors

# Load the YOLO model once, up front — it is reused for every request.
yol = y.YOLO()

HOST = '127.0.0.1'
PORT = 7000

with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
    s.bind((HOST, PORT))
    # FIX: listen() only needs to be called once after bind(); calling it on
    # every loop iteration (as before) was redundant.
    s.listen()
    while True:
        print('Listening Port: 7000 ....')
        conn, addr = s.accept()
        with conn:
            print('Connected by', addr)
            while True:
                path_ImageToBeDetected = conn.recv(1024)
                if not path_ImageToBeDetected:
                    break
                # Strip CR/LF a line-oriented client may append to the path.
                path_ImageToBeDetected = path_ImageToBeDetected.decode()
                path_ImageToBeDetected = path_ImageToBeDetected.replace(
                    "\r", '')
                path_ImageToBeDetected = path_ImageToBeDetected.replace(
                    "\n", '')
                print('main path_ImageToBeDetected: ', path_ImageToBeDetected)
                y.detect_img(yol, path_ImageToBeDetected)
                # Reply with the (hard-coded) output directory + file name.
                name_ImageToBeDetected = path_ImageToBeDetected.split('/')[-1]
                path_ImageDected = (r'J:/test_database/okPic/' +
                                    name_ImageToBeDetected)
                print(path_ImageDected)
                conn.sendall(path_ImageDected.encode())
                # Handle exactly one request per connection, then re-accept.
                break
def main(_):
    """CLI entry point: merge optional YAML config overrides into the parsed
    flags, restrict TensorFlow to the requested GPUs, then dispatch to the
    action selected by FLAGS.mode."""
    flags_dict = FLAGS.flag_values_dict()
    if FLAGS.config is not None:
        # Deferred import: yaml is only needed when a config file is given.
        import yaml
        with open(FLAGS.config) as stream:
            config = yaml.safe_load(stream)
        # Translate symbolic config names into the objects they stand for.
        if 'backbone' in config:
            config['backbone'] = BACKBONE[config['backbone']]
        if 'opt' in config:
            config['opt'] = OPT[config['opt']]
        if 'input_size' in config:
            # Accept a single tuple-like string or a list of them.
            if isinstance(config['input_size'], str):
                config['input_size'] = parse_tuple(config['input_size'])
            elif isinstance(config['input_size'], list):
                config['input_size'] = [
                    parse_tuple(size) for size in config['input_size']
                ]
            else:
                raise ValueError(
                    'Please use array or tuple to define input_size')
        if 'learning_rate' in config:
            # YAML may parse entries as strings; force them to float.
            config['learning_rate'] = [
                float(lr) for lr in config['learning_rate']
            ]
        # Config-file entries override command-line flag values.
        flags_dict.update(config)
    gpus = tf.config.experimental.list_physical_devices('GPU')
    print(gpus)
    if gpus:
        # Keep only the physical GPUs whose index appears in flags 'gpus'.
        gpu_indexs = [int(gpu.name.split(':')[-1]) for gpu in gpus]
        valid_gpu_indexs = list(
            filter(lambda gpu: gpu in flags_dict['gpus'], gpu_indexs))
        valid_gpus = [gpus[index] for index in valid_gpu_indexs]
        tf.config.experimental.set_visible_devices(valid_gpus, 'GPU')
        flags_dict['gpus'] = get_gpu_name(valid_gpus)
    if flags_dict['backbone'] is None:
        raise ValueError("Please select your model's backbone")
    if FLAGS.mode == MODE.TRAIN:
        log('Train mode')
        train(flags_dict)
    elif FLAGS.mode == MODE.TRAIN_BACKBONE:
        log('Train backbone mode')
        train_backbone(flags_dict)
    elif FLAGS.mode == MODE.IMAGE:
        # Graph mode for inference; eager is disabled before building YOLO.
        tf.disable_eager_execution()
        if flags_dict['model'] is None:
            raise ValueError('Please enter your model path')
        log('Image detection mode')
        detect_img(YOLO(flags_dict))
    elif FLAGS.mode == MODE.VIDEO:
        if flags_dict['model'] is None:
            raise ValueError('Please enter your model path')
        log('Video detection mode')
        detect_video(YOLO(flags_dict), FLAGS.input, FLAGS.output)
    elif FLAGS.mode == MODE.MAP:
        if flags_dict['model'] is None:
            raise ValueError('Please enter your model path')
        log('Calculate test dataset map')
        calculate_map(YOLO(flags_dict), FLAGS.test_dataset)
    elif FLAGS.mode == MODE.SERVING:
        tf.disable_eager_execution()
        log('Export hdf5 model to serving model')
        export_serving_model(YOLO(flags_dict), FLAGS.export)
    elif FLAGS.mode == MODE.TFLITE:
        log('Export hdf5 model to tflite model')
        export_tflite_model(YOLO(flags_dict), FLAGS.export)
    elif FLAGS.mode == MODE.TFJS:
        log('Export hdf5 model to tensorflow.js model')
        export_tfjs_model(YOLO(flags_dict), FLAGS.export)