Code Example #1
File: main.py  Project: ikki11190818/Disny-app
def upload_file():
    global graph
    with graph.as_default():
        if request.method == 'POST':
            if 'file' not in request.files:
                flash('No file selected')
                return redirect("index.html")
            file = request.files['file']
            if file.filename == '':
                flash('No file selected')
                return redirect("index.html")
            if file and allowed_file(file.filename):
                # Save the upload, then run YOLO detection on it
                filename = secure_filename(file.filename)
                filepath = os.path.join(upload_folder, filename)
                file.save(filepath)
                outputpath = os.path.join(output_folder, output_filename)

                detect_video(YOLO(), filepath, outputpath)

                return render_template("index.html",
                                       textmassage="Detection result",
                                       video_path=outputpath)

        return render_template("index.html",
                               textmassage="Sample video",
                               video_path='static/outputs/demo.mp4')
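The global graph / graph.as_default() pattern above is the usual workaround for running a Keras/TF1 model inside Flask request handlers, which may execute on a different thread than the one that built the model. A minimal sketch of the module-level setup it assumes (an illustration, not code from the project):

import tensorflow as tf

# Capture the default graph once at import time so request handlers
# can re-enter it later with "with graph.as_default():".
graph = tf.get_default_graph()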
Code Example #2
def main(_):
    flags_dict = FLAGS.flag_values_dict()
    if FLAGS.config is not None:
        import yaml
        with open(FLAGS.config) as stream:
            config = yaml.safe_load(stream)
            if 'backbone' in config:
                config['backbone'] = BACKBONE[config['backbone']]
            if 'opt' in config:
                config['opt'] = OPT[config['opt']]
            if 'input_size' in config:
                if isinstance(config['input_size'], str):
                    config['input_size'] = parse_tuple(config['input_size'])
                elif isinstance(config['input_size'], list):
                    config['input_size'] = [
                        parse_tuple(size) for size in config['input_size']
                    ]
                else:
                    raise ValueError(
                        'Please use array or tuple to define input_size')
            if 'learning_rate' in config:
                config['learning_rate'] = [
                    float(lr) for lr in config['learning_rate']
                ]
            flags_dict.update(config)

    os.environ["CUDA_VISIBLE_DEVICES"] = FLAGS.gpus
    if FLAGS.mode == MODE.TRAIN:
        tf.logging.info('Train mode')
        train(flags_dict)
    elif FLAGS.mode == MODE.TRAIN_BACKBONE:
        tf.logging.info('Train backbone mode')
        train_backbone(flags_dict)
    elif FLAGS.mode == MODE.IMAGE:
        tf.logging.info('Image detection mode')
        detect_img(YOLO(flags_dict))
    elif FLAGS.mode == MODE.VIDEO:
        tf.logging.info('Video detection mode')
        detect_video(YOLO(flags_dict), FLAGS.input, FLAGS.output)
    elif FLAGS.mode == MODE.MAP:
        tf.logging.info('Calculate test dataset map')
        calculate_map(YOLO(flags_dict), FLAGS.test_dataset)
    elif FLAGS.mode == MODE.SERVING:
        tf.logging.info('Export hdf5 model to serving model')
        export_serving_model(YOLO(flags_dict), FLAGS.export)
    elif FLAGS.mode == MODE.TFLITE:
        tf.logging.info('Export hdf5 model to tflite model')
        export_tflite_model(YOLO(flags_dict), FLAGS.export)
    elif FLAGS.mode == MODE.TFJS:
        tf.logging.info('Export hdf5 model to tensorflow.js model')
        export_tfjs_model(YOLO(flags_dict), FLAGS.export)
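The --config branch above only rewrites a handful of keys after yaml.safe_load. For illustration, a config it could consume, shown here through Python (the backbone/opt values are hypothetical; valid names depend on the project's BACKBONE and OPT enums):

import yaml

config = yaml.safe_load('''
backbone: MobilenetV2         # hypothetical; must be a key of the BACKBONE mapping
opt: XLA                      # hypothetical; must be a key of the OPT mapping
input_size: (416, 416)        # kept as a string and parsed by parse_tuple above
learning_rate: [1e-3, 1e-4]   # each entry is cast to float above
''')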
Code Example #3
def train_test_video(yolo):
    global FLAGS
    if FLAGS.live:
        # Live mode: treat the flag as a camera index and record to a timestamped file
        src_video = int(FLAGS.live)
        timestr = time.strftime("%Y%m%d-%H%M%S")
        outfile = '/Users/pdevine/live_recording{}{}'.format(timestr, '.mp4')
    else:
        # File mode: write the processed copy next to the source video
        src_video = FLAGS.video
        path, video_name = os.path.split(src_video)
        base_name = video_name.split('.')[0]
        outfile = path + '/{}_processed{}'.format(base_name, '.mp4')
    print('saving video to file: {}'.format(outfile))
    detect_video(yolo, src_video, outfile)
    yolo.close_session()
Code Example #4
File: yoloface_gpu.py  Project: arnoldjair/yoloface
def _main():
    # Get the arguments
    args = get_args()

    if args.image:
        # Image detection mode
        print('[i] ==> Image detection mode\n')
        detect_img(YOLO(args))
    else:
        print('[i] ==> Video detection mode\n')
        # Call the detect_video method here
        detect_video(YOLO(args), args.video, args.output)

    print('Well done!!!')
Code Example #5
def instantiate():
    FLAGS = args

    if FLAGS.image:

        print("Image detection mode")

        if "input" in FLAGS:
            print(" Ignoring remaining command line arguments: " +
                  FLAGS.input + "," + FLAGS.output)

        detect_img(YOLO(**vars(FLAGS)))

    elif "input" in FLAGS:

        detect_video(YOLO(**vars(FLAGS)), FLAGS.input, FLAGS.output)

    else:

        print(
            "Must specify at least video_input_path.  See usage with --help.")
Code Example #6
def main(_):
    os.environ["CUDA_VISIBLE_DEVICES"] = FLAGS.gpus
    if FLAGS.mode == MODE.TRAIN:
        tf.logging.info('Train mode')
        train(FLAGS)
    elif FLAGS.mode == MODE.IMAGE:
        tf.logging.info('Image detection mode')
        detect_img(YOLO(**vars(FLAGS)))
    elif FLAGS.mode == MODE.VIDEO:
        tf.logging.info('Video detection mode')
        detect_video(YOLO(**vars(FLAGS)), FLAGS.input, FLAGS.output)
    elif FLAGS.mode == MODE.MAP:
        tf.logging.info('Calculate test dataset map')
        calculate_map(YOLO(**vars(FLAGS)), FLAGS.test_dataset)
    elif FLAGS.mode == MODE.SERVING:
        tf.logging.info('Export hdf5 model to saved model')
        export_serving_model(YOLO(**vars(FLAGS)), FLAGS.export)
    elif FLAGS.mode == MODE.TFLITE:
        tf.logging.info('Export hdf5 model to tflite model')
        export_tflite_model(YOLO(**vars(FLAGS)), FLAGS.export)
    elif FLAGS.mode == MODE.TFJS:
        tf.logging.info('Export hdf5 model to tensorflow.js model')
        export_tfjs_model(YOLO(**vars(FLAGS)), FLAGS.export)
Code Example #7
def create_model(train_weight_final, anchors_path, yolo_classname, vpath,
                 timecode):

    #create the model
    score = 0.25
    num_gpu = 1
    yolo = y.YOLO(
        **{
            "model_path": train_weight_final,
            "anchors_path": anchors_path,
            "classes_path": yolo_classname,
            "score": score,
            "gpu_num": num_gpu,
            "model_image_size": (416, 416),
        })

    # create the dataframe
    df = pd.DataFrame(columns=[
        "cut_nb",
        "frame_ID",
        "xmin",
        "ymin",
        "xmax",
        "ymax",
        "label",
        "confidence",
    ])

    onoff = False
    cut_nb = 0
    index = 0

    # Brute force over the timecode JSON entries

    for i in timecode:
        if onoff:
            end = i['time']
            end = end['secondes']
            onoff = False
            cut_nb += 1
            videotoclips(vpath, start, end, cut_nb)
            # labels to draw on images
            with open(yolo_classname, "r") as class_file:
                input_labels = [
                    line.rstrip("\n") for line in class_file.readlines()
                ]
            print("Found {} input labels: {} ...".format(
                len(input_labels), input_labels))

            df, index, width, height = y.detect_video(
                yolo,
                "res/img/video_" + str(cut_nb) + ".mp4",
                df,
                cut_nb,
                index,
                #output_path="res/img/video_detect" + str(cut_nb) + ".mp4",
                Video_on=False)

            os.remove("res/img/video_" + str(cut_nb) + ".mp4")

        if i['category'] == 'CAMERA':
            if i['description'] == 'BAD_camera vert':
                start = i['time']
                start = start['secondes']
                onoff = True

    yolo.close_session()

    df = transform_df(df, width, height)

    return df
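The brute-force loop above only reads three fields from each timecode entry. A sketch of the expected structure, inferred from those accesses (field values other than the ones the loop checks are illustrative):

timecode = [
    {'category': 'CAMERA', 'description': 'BAD_camera vert',
     'time': {'secondes': 12.0}},    # opens a cut: start time is stored, onoff is set
    {'category': 'CAMERA', 'description': 'BAD_camera rouge',
     'time': {'secondes': 18.5}},    # the very next entry closes the cut and triggers detection
]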
Code Example #8
File: yolo_video.py  Project: Sanster/keras-yolo3
from yolo import YOLO
from yolo import detect_video

if __name__ == '__main__':
    video_path = 'path2your-video'  # unused here: the 0 below selects the default webcam
    detect_video(YOLO(), 0)
Code Example #9
from yolo import YOLO
from yolo import detect_video

if __name__ == '__main__':
    video_path = '../test_video.mp4'
    detect_video(YOLO(), video_path)
Code Example #10
import sys

if len(sys.argv) < 2:
    print("Usage: $ python {0} [video_path] [output_path(optional)]".format(
        sys.argv[0]))
    sys.exit()

from yolo import YOLO
from yolo import detect_video

if __name__ == '__main__':
    video_path = sys.argv[1]
    if len(sys.argv) > 2:
        output_path = sys.argv[2]
        detect_video(YOLO(), video_path, output_path)
    else:
        detect_video(YOLO(), video_path)
Code Example #11
import sys
import os
from timeit import default_timer as timer
import argparse
from yolo import YOLO, detect_video
from PIL import Image
import tensorflow as tf
import cv2

config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)

FLAGS = {'image': False, 'input': 'V2.MP4', 'output': ''}

if __name__ == '__main__':
    image = False
    input_video = 'V2.MP4'
    output = ''

    # FLAGS is a plain dict, so unpack it for YOLO and use key access for the paths
    detect_video(YOLO(**FLAGS), FLAGS['input'], FLAGS['output'])
Code Example #12
                        nargs='?',
                        type=str,
                        required=False,
                        default='./path2your_video',
                        help="Video input path")

    parser.add_argument("--output",
                        nargs='?',
                        type=str,
                        default="",
                        help="[Optional] Video output path")

    FLAGS = parser.parse_args()

    if FLAGS.image:
        """
        Image detection mode, disregard any remaining command line arguments
        """
        print("Image detection mode")
        if "input" in FLAGS:
            print(" Ignoring remaining command line arguments: " +
                  FLAGS.input + "," + FLAGS.output)
        detect_img(YOLO(**vars(FLAGS)))
    elif "input" in FLAGS:
        yolo = YOLO(**vars(FLAGS))

        detect_video(yolo, FLAGS.input, FLAGS.output)
    else:
        print(
            "Must specify at least video_input_path.  See usage with --help.")
Code Example #13
                        default='./path2your_video',
                        help="Video input path")

    parser.add_argument("--output",
                        nargs='?',
                        type=str,
                        default="",
                        help="[Optional] Video output path")

    parser.add_argument("--frame_path",
                        nargs='?',
                        type=str,
                        default="",
                        help="Frame save path")
    FLAGS = parser.parse_args()

    if FLAGS.image:
        """
        Image detection mode, disregard any remaining command line arguments
        """
        print("Image detection mode")
        if "input" in FLAGS:
            print(" Ignoring remaining command line arguments: " +
                  FLAGS.input + "," + FLAGS.output)
        detect_img(YOLO(**vars(FLAGS)))
    elif "input" in FLAGS:
        detect_video(YOLO(**vars(FLAGS)), FLAGS.input, FLAGS.output,
                     FLAGS.frame_path)
    else:
        print(
            "Must specify at least video_input_path.  See usage with --help.")
Code Example #14
def instantiate():
    # class YOLO defines the default value, so suppress any default here
    parser = argparse.ArgumentParser(argument_default=argparse.SUPPRESS)
    '''
    Command line options
    '''
    parser.add_argument('--model',
                        type=str,
                        help='path to model weight file, default ' +
                        YOLO.get_defaults("model_path"))

    parser.add_argument('--anchors',
                        type=str,
                        help='path to anchor definitions, default ' +
                        YOLO.get_defaults("anchors_path"))

    parser.add_argument('--classes',
                        type=str,
                        help='path to class definitions, default ' +
                        YOLO.get_defaults("classes_path"))

    parser.add_argument('--gpu_num',
                        type=int,
                        help='Number of GPU to use, default ' +
                        str(YOLO.get_defaults("gpu_num")))

    parser.add_argument(
        '--image',
        default=False,
        action="store_true",
        help='Image detection mode, will ignore all positional arguments')
    '''
    Command line positional arguments -- for video detection mode
    '''
    parser.add_argument("--input",
                        nargs='?',
                        type=str,
                        required=False,
                        default='./path2your_video',
                        help="Video input path")

    parser.add_argument("--output",
                        nargs='?',
                        type=str,
                        default="",
                        help="[Optional] Video output path")

    FLAGS = parser.parse_args()

    if FLAGS.image:
        """
        Image detection mode, disregard any remaining command line arguments
        """
        print("Image detection mode")
        if "input" in FLAGS:
            print(" Ignoring remaining command line arguments: " +
                  FLAGS.input + "," + FLAGS.output)
        detect_img(YOLO(**vars(FLAGS)))
    elif "input" in FLAGS:
        detect_video(YOLO(**vars(FLAGS)), FLAGS.input, FLAGS.output)
    else:
        print(
            "Must specify at least video_input_path.  See usage with --help.")
Code Example #15
File: Detector.py  Project: mmaaz60/Yolov3
                        img_path.rstrip('\n')
                    ] + single_prediction + [x_size, y_size]],
                                 columns=[
                                     'image', 'image_path', 'xmin', 'ymin',
                                     'xmax', 'ymax', 'label', 'confidence',
                                     'x_size', 'y_size'
                                 ]))
        end = timer()
        print('Processed {} images in {:.1f}sec - {:.1f}FPS'.format(
            len(input_image_paths), end - start,
            len(input_image_paths) / (end - start)))
        out_df.to_csv(args.box, index=False)

    # This is for videos
    if input_video_paths:
        print('Found {} input videos: {} ...'.format(
            len(input_video_paths),
            [os.path.basename(f) for f in input_video_paths[:5]]))
        start = timer()
        for i, vid_path in enumerate(input_video_paths):
            output_path = os.path.join(
                args.output,
                os.path.basename(vid_path).replace('.', args.postfix + '.'))
            detect_video(yolo, vid_path, output_path=output_path)

        end = timer()
        print('Processed {} videos in {:.1f}sec'.format(
            len(input_video_paths), end - start))
    # Close the current yolo session
    yolo.close_session()
Code Example #16
                        help="[Optional] Video output path")

    FLAGS = parser.parse_args()

    if FLAGS.image:
        """
        Image detection mode, disregard any remaining command line arguments
        """
        print("Image detection mode")
        if "input" in FLAGS:
            print(" Ignoring remaining command line arguments: " +
                  FLAGS.input + "," + FLAGS.output)
        detect_img(YOLO(**vars(FLAGS)))

    elif "input" in FLAGS:
        status = detect_video(YOLO(**vars(FLAGS)), FLAGS.input, FLAGS.output,
                              FLAGS.file_path)
        print(status)
        exists = os.path.isfile(
            processed_directory +
            str(os.path.splitext(os.path.basename(FLAGS.output))[0]) + '.ts')
        if exists:
            print("File already exists. Deleting.")
            os.remove(
                processed_directory +
                str(os.path.splitext(os.path.basename(FLAGS.output))[0]) +
                '.ts')
        os.system('sudo ffmpeg -i ' + str(FLAGS.output) + ' -vcodec libx264 ' +
                  processed_directory +
                  str(os.path.splitext(os.path.basename(FLAGS.output))[0]) +
                  '.ts')
        print("File Converted")
Code Example #17
               FLAGS.video + "," + FLAGS.output)
     img_path = os.path.expanduser('~/') + FLAGS.image
     detect_img(YOLO(**vars(FLAGS)), img_path)
 elif "video" in FLAGS:
     video_path = os.path.expanduser('~/') + FLAGS.video
     if not FLAGS.tracking:
         print("Not tracking", video_path)
         if os.path.isfile("detection.json"):
             if messagebox.askokcancel(
                     'Attention',
                     'Please remove the previous file and re-run detection'
             ):
                 os.rename("detection.json",
                           time.strftime("%H_%M_%S") + "detection.json")
                 print("performing yolo detection on video")
                 detect_video(YOLO(**vars(FLAGS)), video_path, output_path)
         else:
             print("performing yolo detection on video")
             detect_video(YOLO(**vars(FLAGS)), video_path, output_path)
     else:  # tracking
         try:
             os.stat("detection.json")
         except:
             "Please perform video detection first"
         det_path = os.path.join(output_path, 'res.txt')
         print("Detection result will be written to: ", det_path)
         detfile = open(det_path, 'w')  # Initialize the file
         detfile.close()
         tracking(video_path, det_path, FLAGS.sigma_l, FLAGS.sigma_h,
                  FLAGS.sigma_iou, FLAGS.t_min)
 else:
Code Example #18
File: yolo_video.py  Project: ant1pink/keras-yolo3
        '--image', default=False, action="store_true",
        help='Image detection mode, will ignore all positional arguments'
    )
    '''
    Command line positional arguments -- for video detection mode
    '''
    parser.add_argument(
        "--input", nargs='?', type=str, required=False, default='./path2your_video',
        help="Video input path"
    )

    parser.add_argument(
        "--output", nargs='?', type=str, default="",
        help="[Optional] Video output path"
    )

    FLAGS = parser.parse_args()

    if FLAGS.image:
        """
        Image detection mode, disregard any remaining command line arguments
        """
        print("Image detection mode")
        if "input" in FLAGS:
            print(" Ignoring remaining command line arguments: " + FLAGS.input + "," + FLAGS.output)
        detect_img(YOLO(**vars(FLAGS)))
    elif "input" in FLAGS:
        detect_video(YOLO(**vars(FLAGS)), FLAGS.input, FLAGS.output)
    else:
        print("Must specify at least video_input_path.  See usage with --help.")
Code Example #19
File: yolo_video.py  Project: LQQQQQQQQQQ/AI
    def open_video(self):

        video_path = filedialog.askopenfilename(title='Open a single file',
                                                filetypes=[("Video files", "*.mp4")],
                                                initialdir='F:/')

        detect_video(self.yolo, video_path)
Code Example #20
File: video.py  Project: Jichen66/Master_Thesis
import sys, os
import argparse
from yolo import YOLO, detect_video
from PIL import Image
os.environ["CUDA_VISIBLE_DEVICES"] = ""

####### Note: check which weight file yolo.py loads before running #########

if __name__ == '__main__':
    detect_video(YOLO(),
                 "/home/smart/Desktop/dashcam/2014_1122_122434_035A.mp4")

    #2014_1122_122434_035A
    #detect_video(YOLO(), "/home/smart/2.mp4")
    #detect_video(YOLO(), "/home/smart/pp.avi")
    #detect_video(YOLO(),"/home/smart/Desktop/dashcam/2014_1122_121545_034AA.mp4")
Code Example #21
    '''
    parser.add_argument("--input",
                        nargs='?',
                        type=str,
                        required=False,
                        default='./path2your_video',
                        help="Video input path")

    parser.add_argument("--output",
                        nargs='?',
                        type=str,
                        default="",
                        help="[Optional] Video output path")

    options = parser.parse_args()

    if options.image:
        """
        Image detection mode, disregard any remaining command line arguments
        """
        print("Image detection mode")
        if "input" in options:
            print(" Ignoring remaining command line arguments: " +
                  options.input + "," + options.output)
        detect_img(YOLO(**vars(options)))
    elif "input" in options:
        detect_video(YOLO(**vars(options)), options.input, options.output)
    else:
        print(
            "Must specify at least video_input_path.  See usage with --help.")
Code Example #22
File: yolo_video.py  Project: youshyee/animal_project
    parser.add_argument("--output",
                        nargs='?',
                        type=str,
                        default="",
                        help="[Optional] Video output path")

    FLAGS = parser.parse_args()

    if FLAGS.image:
        """
        Image detection mode, disregard any remaining command line arguments
        """
        print("Image detection mode")
        if "input" in FLAGS:
            print(" Ignoring remaining command line arguments: " +
                  FLAGS.input + "," + FLAGS.output)
        detect_img(YOLO(**vars(FLAGS)))
    elif "input" in FLAGS:
        check_dir(FLAGS.output)
        print(FLAGS.input)
        for filename in os.listdir(FLAGS.input):
            videopath = os.path.join(FLAGS.input, filename)
            outputpath = os.path.join(FLAGS.output, 'processed_' + filename)
            bbox_list = detect_video(YOLO(**vars(FLAGS)), videopath,
                                     outputpath)
            findbestway(videopath, outputpath, bbox_list)
    else:
        print(
            "Must specify at least video_input_path.  See usage with --help.")
Code Example #23
    parser.add_argument("--output",
                        nargs='?',
                        type=str,
                        default="",
                        help="[Optional] Video output path")

    parser.add_argument("--conf",
                        type=float,
                        default=0.3,
                        dest="score",
                        help="[Optional] Confidence score")

    FLAGS = parser.parse_args()

    if FLAGS.image:
        """
        Image detection mode, disregard any remaining command line arguments
        """
        print("Image detection mode")
        if "input" in FLAGS:
            print(" Ignoring remaining command line arguments: " +
                  FLAGS.input + "," + FLAGS.output)
        detect_img(YOLO(**vars(FLAGS)))
    elif "input" in FLAGS:
        detect_video(YOLO(**vars(FLAGS)),
                     video_path=FLAGS.input,
                     output_path=FLAGS.output)
    else:
        print(
            "Must specify at least video_input_path.  See usage with --help.")
Code Example #24
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2020/2/13 21:46
# @Author  : codingchaozhang
from nets.yolo3 import yolo_body
from keras.layers import Input
from yolo import YOLO, detect_video
from PIL import Image

yolo = YOLO()

while True:

    try:
        # detect_video needs a source; 0 (the default webcam) is assumed here
        detect_video(yolo, 0)
        break
    except Exception:
        print('Open Error! Try again!')
        continue

yolo.close_session()
Code Example #25
File: yolo_video.py  Project: SpikeKing/keras-yolo3
import sys

if len(sys.argv) < 2:
    print("Usage: $ python {0} [video_path] [output_path(optional)]".format(sys.argv[0]))
    sys.exit()

from yolo import YOLO
from yolo import detect_video

if __name__ == '__main__':
    video_path = sys.argv[1]
    if len(sys.argv) > 2:
        output_path = sys.argv[2]
        detect_video(YOLO(), video_path, output_path)
    else:
        detect_video(YOLO(), video_path)
Code Example #26
import sys
sys.path.append('keras-yolo3')

from yolo import YOLO, detect_video

model = YOLO(model_path='./models/v4-12k-adam1e3-train10/ep039-loss16.249-val_loss15.892.h5',
             anchors_path='./keras-yolo3/model_data/yolo_anchors.txt',
             classes_path='./classes-yolo-format.txt',
             score=0.01)

# Pretrained YOLO model
# yolo_model = YOLO(model_path='../../../data/yolov3/yolov3-320.h5',
#                   anchors_path='./keras-yolo3/model_data/yolo_anchors.txt',
#                   classes_path='./keras-yolo3/model_data/coco_classes.txt')

detect_video(model, 0)
Code Example #27
    parser.add_argument("--file_name",
                        nargs='?',
                        type=str,
                        default="",
                        help="Path of result file")

    parser.add_argument('--round_num',
                        type=int,
                        help='Number of detection round ')

    FLAGS = parser.parse_args()

    if FLAGS.image:
        """
        Image detection mode, disregard any remaining command line arguments
        """
        print("Image detection mode")
        if "input" in FLAGS:
            print(" Ignoring remaining command line arguments: " +
                  FLAGS.input + "," + FLAGS.output)
        detect_img(YOLO(**vars(FLAGS)), FLAGS.round_num, FLAGS.file_name)
    elif "input" in FLAGS:
        print(FLAGS.round_num)
        detect_video(YOLO(**vars(FLAGS)), FLAGS.input, FLAGS.output,
                     FLAGS.file_name, FLAGS.round_num)
        print('END')
    else:
        print(
            "Must specify at least video_input_path.  See usage with --help.")
Code Example #28
File: main.py  Project: Kayuse88/mobilenetv2-yolov3
def main(_):
    flags_dict = FLAGS.flag_values_dict()
    if FLAGS.config is not None:
        import yaml
        with open(FLAGS.config) as stream:
            config = yaml.safe_load(stream)
            if 'backbone' in config:
                config['backbone'] = BACKBONE[config['backbone']]
            if 'opt' in config:
                config['opt'] = OPT[config['opt']]
            if 'input_size' in config:
                if isinstance(config['input_size'], str):
                    config['input_size'] = parse_tuple(config['input_size'])
                elif isinstance(config['input_size'], list):
                    config['input_size'] = [
                        parse_tuple(size) for size in config['input_size']
                    ]
                else:
                    raise ValueError(
                        'Please use array or tuple to define input_size')
            if 'learning_rate' in config:
                config['learning_rate'] = [
                    float(lr) for lr in config['learning_rate']
                ]
            flags_dict.update(config)

    opt = flags_dict.get('opt', None)
    if opt == OPT.XLA:
        tf.config.optimizer.set_jit(True)
    elif opt == OPT.DEBUG:
        tf.compat.v2.random.set_seed(111111)
        tf.debugging.set_log_device_placement(True)
        tf.config.experimental_run_functions_eagerly(True)
        logging.set_verbosity(logging.DEBUG)

    gpus = tf.config.experimental.list_physical_devices('GPU')
    if gpus:
        gpu_indexs = [int(gpu.name.split(':')[-1]) for gpu in gpus]
        valid_gpu_indexs = list(
            filter(lambda gpu: gpu in flags_dict['gpus'], gpu_indexs))
        valid_gpus = [gpus[index] for index in valid_gpu_indexs]
        tf.config.experimental.set_visible_devices(valid_gpus, 'GPU')
        flags_dict['gpus'] = get_gpu_name(valid_gpus)
        config = tf.compat.v1.ConfigProto()
        config.gpu_options.allow_growth = True
        session = InteractiveSession(config=config)
    if flags_dict['backbone'] is None:
        raise ValueError("Please select your model's backbone")
    if FLAGS.mode == MODE.TRAIN:
        log('Train mode')
        train(flags_dict)
    elif FLAGS.mode == MODE.TRAIN_BACKBONE:
        log('Train backbone mode')
        train_backbone(flags_dict)
    elif FLAGS.mode == MODE.IMAGE:
        if flags_dict['model'] is None:
            raise ValueError('Please enter your model path')
        log('Image detection mode')
        detect_img(YOLO(flags_dict))
    elif FLAGS.mode == MODE.VIDEO:
        if flags_dict['model'] is None:
            raise ValueError('Please enter your model path')
        log('Video detection mode')
        detect_video(YOLO(flags_dict), FLAGS.input, FLAGS.output)
    elif FLAGS.mode == MODE.MAP:
        if flags_dict['model'] is None:
            raise ValueError('Please enter your model path')
        log('Calculate test dataset map')
        flags_dict['score'] = 0.0
        calculate_map(YOLO(flags_dict), FLAGS.test_dataset)
    elif FLAGS.mode == MODE.SERVING:
        tf.disable_eager_execution()
        log('Export hdf5 model to serving model')
        export_serving_model(YOLO(flags_dict), FLAGS.export)
    elif FLAGS.mode == MODE.TFLITE:
        log('Export hdf5 model to tflite model')
        export_tflite_model(YOLO(flags_dict), FLAGS.export)
    elif FLAGS.mode == MODE.TFJS:
        log('Export hdf5 model to tensorflow.js model')
        export_tfjs_model(YOLO(flags_dict), FLAGS.export)
Code Example #29
                        nargs='?',
                        type=str,
                        required=False,
                        default='./path2your_video',
                        help="Video input path")
    parser.add_argument("--camera",
                        nargs='?',
                        type=str,
                        required=False,
                        help="init camera capture")

    parser.add_argument("--output",
                        nargs='?',
                        type=str,
                        default="resultvideos.mp4",
                        help="[Optional] Video output path")

    FLAGS = parser.parse_args()

    if FLAGS.image:
        """
        Image detection mode, disregard any remaining command line arguments
        """
        print("Image detection mode")
        if "input" in FLAGS:
            print(" Ignoring remaining command line arguments: " +
                  FLAGS.input + "," + FLAGS.output)
        detect_img(YOLO(**vars(FLAGS)))
    elif "input" in FLAGS:
        detect_video(YOLO(**vars(FLAGS)), FLAGS.input, 'resultvideos.mp4')
Code Example #30
File: yolo_video.py  Project: RovisLab/GOL
    '''
    Command line positional arguments -- for video detection mode
    '''
    parser.add_argument(
        "--input", nargs='?', type=str, required=False, default='./path2your_video',
        help="Video input path"
    )

    parser.add_argument(
        "--output", nargs='?', type=str, default="",
        help="[Optional] Video output path"
    )

    FLAGS = parser.parse_args()

    # NOTE: this hard-coded call and quit() short-circuit the script,
    # so the argument handling below is never reached.
    detect_video(YOLO(**vars(FLAGS)), "kitti.avi", FLAGS.output)
    quit()

    if FLAGS.image:
        """
        Image detection mode, disregard any remaining command line arguments
        """
        print("Image detection mode")
        if "input" in FLAGS:
            print(" Ignoring remaining command line arguments: " + FLAGS.input + "," + FLAGS.output)
        detect_img(YOLO(**vars(FLAGS)), img_filename=input("Image filename: "))
    elif "input" in FLAGS:
        detect_video(YOLO(**vars(FLAGS)), FLAGS.input, FLAGS.output)
    else:
        print("Must specify at least video_input_path.  See usage with --help.")
Code Example #31
    '''
    parser.add_argument("--input",
                        nargs='?',
                        type=str,
                        required=False,
                        default='./1.avi',
                        help="Video input path")

    parser.add_argument("--output",
                        nargs='?',
                        type=str,
                        default="",
                        help="[Optional] Video output path")

    FLAGS = parser.parse_args()

    if FLAGS.image:
        """
        Image detection mode, disregard any remaining command line arguments
        """
        print("Image detection mode")
        if "input" in FLAGS:
            print(" Ignoring remaining command line arguments: " +
                  FLAGS.input + "," + FLAGS.output)
        detect_img(YOLO(**vars(FLAGS)))
    elif "input" in FLAGS:
        detect_video(YOLO(**vars(FLAGS)), FLAGS.input, FLAGS.output)
    else:
        print(
            "Must specify at least video_input_path.  See usage with --help.")
Code Example #32
#python yolo_video.py --input car.mp4 --output car.avi
#python yolo_video.py --input test_data/akiha.mp4
#python yolo_video.py --image
import sys
import argparse
from yolo import YOLO, detect_video
from PIL import Image

#detect_video(YOLO(), 0 ,"saved_cam.avi")

detect_video(YOLO(), "http://192.168.43.232:8080/video", "saved_cam.mp4")
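For reference, every example above calls the detect_video helper shipped with keras-yolo3-style repositories. A minimal self-contained sketch of the usual call, based on qqwweee/keras-yolo3 and its forks (forks change the signature, so treat the extra parameters seen in some examples above as fork-specific and check the yolo.py you actually use):

from yolo import YOLO, detect_video

if __name__ == '__main__':
    # Typical signature: detect_video(yolo, video_path, output_path="")
    # video_path may be a file path, a stream URL, or an integer camera index;
    # a non-empty output_path writes the annotated video to disk.
    detect_video(YOLO(), 'input.mp4', 'output.mp4')  # hypothetical paths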