Example #1
def main():
    args = get_args()
    config = json.load(open(args.config, "r"))
    weights = args.weights
    # handle configs from versions < 0.5.7 that store input_size as a single int
    try:
        input_size = config['model']['input_size'][:]
    except TypeError:
        input_size = [
            config['model']['input_size'], config['model']['input_size']
        ]
    yolo = create_yolo(config['model']['architecture'],
                       config['model']['labels'], input_size,
                       config['model']['anchors'])
    yolo.load_weights(weights)
    threshold = args.threshold

    cap = cv2.VideoCapture(0)
    video_idx = 0
    writing_video = False
    writer = None
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        frame, vizframe = prep_image(frame, input_size[0])

        print(frame.shape)
        height, width = frame.shape[:2]
        prediction_time, boxes, probs = yolo.predict(frame[np.newaxis, ...],
                                                     height, width,
                                                     float(threshold))
        labels = np.argmax(probs, axis=1) if len(probs) > 0 else []
        n_copy = draw_boxes(vizframe, boxes, probs, config['model']['labels'])
        n_c_v = to_bgr(n_copy)
        if writing_video:
            writer.write(n_c_v)
        cv2.imshow("annotated", n_c_v)
        ky = cv2.waitKey(1) & 0xFF
        if ky == ord('s'):
            if not writing_video:
                fcc = cv2.VideoWriter_fourcc(*'VP90')  # VP9; 'MJPG' is a common fallback codec
                writer = cv2.VideoWriter(f'output-{video_idx}.avi', fcc, 25.0,
                                         (224, 224))  # size must match the frames written below
                print(f" START writing output-{video_idx}.avi")
                writing_video = True
            else:
                print(f" STOP writing output-{video_idx}.avi")
                writer.release()
                writing_video = False
                video_idx += 1
        if ky == ord('q'):
            if writing_video:
                writer.release()
            break

    cap.release()
    cv2.destroyAllWindows()
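
get_args is not shown in this example; below is a minimal sketch using argparse, assuming the --config, --weights, and --threshold options implied by the attribute accesses above (flag names and defaults are assumptions, not the project's CLI).

import argparse

def get_args():
    # Hypothetical parser; attribute names match args.config, args.weights,
    # and args.threshold used in main() above.
    parser = argparse.ArgumentParser(
        description="Run webcam detection with a trained model")
    parser.add_argument("--config", required=True,
                        help="path to the JSON config file")
    parser.add_argument("--weights", required=True,
                        help="path to the trained weights file")
    parser.add_argument("--threshold", type=float, default=0.3,
                        help="detection confidence threshold")
    return parser.parse_args()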
Example #2
def train_from_config(config, project_folder):
    # added for compatibility with < 0.5.7 versions
    try:
        input_size = config['model']['input_size'][:]
    except TypeError:
        input_size = [
            config['model']['input_size'], config['model']['input_size']
        ]

    # Create the converter
    converter = Converter(config['converter']['type'],
                          config['model']['architecture'], input_size,
                          config['train']['valid_image_folder'])

    #  Detector
    if config['model']['type'] == 'Detector':
        if config['train']['is_only_detect']:
            labels = ["object"]
        else:
            if config['model']['labels']:
                labels = config['model']['labels']
            else:
                labels = get_object_labels(
                    config['train']['train_annot_folder'])
        print("labels: ", labels)
        print("training on: ", config['train']['train_image_folder'],
              "and validating on: ", config['train']['train_annot_folder'])
        # 1. Construct the model
        yolo = create_yolo(
            config['model']['architecture'], labels, input_size,
            config['model']['anchors'], config['model']['coord_scale'],
            config['model']['class_scale'], config['model']['object_scale'],
            config['model']['no_object_scale'], config['weights']['backend'])

        # 2. Load the pretrained weights (if any)
        yolo.load_weights(config['weights']['full'], by_name=True)

        # 3. actual training
        model_layers, model_path = yolo.train(
            config['train']['train_image_folder'],
            config['train']['train_annot_folder'],
            config['train']['actual_epoch'], project_folder,
            config["train"]["batch_size"], config["train"]["augumentation"],
            config['train']['learning_rate'], config['train']['train_times'],
            config['train']['valid_times'],
            config['train']['valid_image_folder'],
            config['train']['valid_annot_folder'],
            config['train']['first_trainable_layer'],
            config['train']['valid_metric'])
    # 4 Convert the model
    converter.convert_model(model_path, model_layers,
                            config['train']['valid_image_folder'])
    return model_path
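
A minimal config skeleton covering only the keys this Detector training path reads is sketched below; the values (architecture, anchors, paths, hyperparameters) are illustrative assumptions, not the project's defaults.

# Illustrative config; only the keys mirror what train_from_config accesses above.
example_config = {
    "model": {
        "type": "Detector",
        "architecture": "MobileNet7_5",          # placeholder backbone name
        "input_size": [224, 224],
        "labels": [],
        "anchors": [0.57, 0.68, 1.87, 2.06, 3.34, 5.47, 7.88, 3.53, 9.77, 9.17],
        "coord_scale": 1.0,
        "class_scale": 1.0,
        "object_scale": 5.0,
        "no_object_scale": 1.0,
    },
    "weights": {"full": "", "backend": None},
    "train": {
        "is_only_detect": False,
        "train_image_folder": "data/train/images",
        "train_annot_folder": "data/train/annotations",
        "valid_image_folder": "data/valid/images",
        "valid_annot_folder": "data/valid/annotations",
        "actual_epoch": 50,
        "batch_size": 8,
        "augumentation": True,                    # key spelling matches the code above
        "learning_rate": 1e-4,
        "train_times": 1,
        "valid_times": 1,
        "first_trainable_layer": "",
        "valid_metric": "mAP",
    },
    "converter": {"type": ["tflite"]},
}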
Example #3
def setup_inference(config,
                    weights,
                    threshold=0.3,
                    path=None,
                    dataset="testing"):
    # added for compatibility with < 0.5.7 versions
    try:
        input_size = config['model']['input_size'][:]
    except TypeError:
        input_size = [
            config['model']['input_size'], config['model']['input_size']
        ]
    # make a directory to save inference results
    dirname = os.path.join(os.path.dirname(weights), 'Inference_results')
    if os.path.isdir(dirname):
        print("Folder {} already exists. Image files in this directory may be overwritten."
              .format(dirname))
    else:
        print("Folder {} created.".format(dirname))
        os.makedirs(dirname)

    if config['model']['type'] == 'Detector':
        # 2. create yolo instance & predict
        yolo = create_yolo(config['model']['architecture'],
                           config['model']['labels'], input_size,
                           config['model']['anchors'])
        yolo.load_weights(weights)

        # 3. read image
        if dataset == 'testing':
            print("the dataset used for testing is:",
                  config['test']['test_image_folder'],
                  " the annotations are: ",
                  config['test']['test_label_folder'])
            # added testing directly in configuration
            annotations = parse_annotation(
                config['test']['test_label_folder'],
                config['test']['test_image_folder'],
                config['model']['labels'],
                is_only_detect=config['train']['is_only_detect'])
        else:
            print("the dataset used for testing is:",
                  config['train']['valid_image_folder'],
                  " the annotations are: ",
                  config['train']['valid_annot_folder'])
            annotations = parse_annotation(
                config['train']['valid_annot_folder'],
                config['train']['valid_image_folder'],
                config['model']['labels'],
                is_only_detect=config['train']['is_only_detect'])

        n_true_positives = 0
        n_truth = 0
        n_pred = 0
        inference_time = []
        for i in range(len(annotations)):
            img_path = annotations.fname(i)
            img_fname = os.path.basename(img_path)
            true_boxes = annotations.boxes(i)
            true_labels = annotations.code_labels(i)

            orig_image, input_image = prepare_image(img_path, yolo)
            height, width = orig_image.shape[:2]
            prediction_time, boxes, probs = yolo.predict(
                input_image, height, width, float(threshold))
            inference_time.append(prediction_time)
            labels = np.argmax(probs, axis=1) if len(probs) > 0 else []
            # 4. save detection result
            orig_image = draw_scaled_boxes(orig_image, boxes, probs,
                                           config['model']['labels'])
            output_path = os.path.join(dirname, os.path.split(img_fname)[-1])
            cv2.imwrite(output_path, orig_image)
            print("{}-boxes are detected. {} saved.".format(
                len(boxes), output_path))
            show_image(output_path)
            n_true_positives += count_true_positives(boxes, true_boxes, labels,
                                                     true_labels)
            n_truth += len(true_boxes)
            n_pred += len(boxes)
        print(calc_score(n_true_positives, n_truth, n_pred))
        if len(inference_time) > 1:
            print("Average prediction time:{} ms".format(
                sum(inference_time[1:]) / len(inference_time[1:])))
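
calc_score itself is not shown in these examples; the hedged reconstruction below works from the accumulated counts, assuming it reports the usual precision/recall/F1 (the real function may format or weight the numbers differently).

def calc_score(n_true_positives, n_truth, n_pred):
    # Precision: fraction of predicted boxes that match a ground-truth box.
    precision = n_true_positives / n_pred if n_pred else 0.0
    # Recall: fraction of ground-truth boxes that were detected.
    recall = n_true_positives / n_truth if n_truth else 0.0
    f1 = (2 * precision * recall / (precision + recall)
          if (precision + recall) else 0.0)
    return "Precision: {:.3f}  Recall: {:.3f}  F1: {:.3f}".format(
        precision, recall, f1)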
Example #4
def train_from_config(config, project_folder):
    # added for compatibility with < 0.5.7 versions
    try:
        input_size = config['model']['input_size'][:]
    except TypeError:
        input_size = [
            config['model']['input_size'], config['model']['input_size']
        ]

    # Create the converter
    converter = Converter(config['converter']['type'],
                          config['model']['architecture'], input_size,
                          config['train']['valid_image_folder'])

    #  Segmentation network
    if config['model']['type'] == 'SegNet':
        print('Segmentation')
        # 1. Construct the model
        segnet = create_segnet(config['model']['architecture'], input_size,
                               config['model']['n_classes'],
                               config['weights']['backend'])
        # 2. Load the pretrained weights (if any)
        segnet.load_weights(config['weights']['full'], by_name=True)
        # 3. actual training
        model_layers, model_path = segnet.train(
            config['train']['train_image_folder'],
            config['train']['train_annot_folder'],
            config['train']['actual_epoch'], project_folder,
            config["train"]["batch_size"], config["train"]["augumentation"],
            config['train']['learning_rate'], config['train']['train_times'],
            config['train']['valid_times'],
            config['train']['valid_image_folder'],
            config['train']['valid_annot_folder'],
            config['train']['first_trainable_layer'],
            config['train']['ignore_zero_class'],
            config['train']['valid_metric'])

    #  Classifier
    if config['model']['type'] == 'Classifier':
        print('Classifier')
        if config['model']['labels']:
            labels = config['model']['labels']
        else:
            labels = get_labels(config['train']['train_image_folder'])
        # 1. Construct the model
        classifier = create_classifier(config['model']['architecture'], labels,
                                       input_size,
                                       config['model']['fully-connected'],
                                       config['model']['dropout'],
                                       config['weights']['backend'],
                                       config['weights']['save_bottleneck'])
        # 2. Load the pretrained weights (if any)
        classifier.load_weights(config['weights']['full'], by_name=True)

        # 3. actual training
        model_layers, model_path = classifier.train(
            config['train']['train_image_folder'],
            config['train']['actual_epoch'], project_folder,
            config["train"]["batch_size"], config["train"]["augumentation"],
            config['train']['learning_rate'], config['train']['train_times'],
            config['train']['valid_times'],
            config['train']['valid_image_folder'],
            config['train']['first_trainable_layer'],
            config['train']['valid_metric'])

    #  Detector
    if config['model']['type'] == 'Detector':
        if config['train']['is_only_detect']:
            labels = ["object"]
        else:
            if config['model']['labels']:
                labels = config['model']['labels']
            else:
                labels = get_object_labels(
                    config['train']['train_annot_folder'])
        print(labels)

        # 1. Construct the model
        yolo = create_yolo(
            config['model']['architecture'], labels, input_size,
            config['model']['anchors'], config['model']['coord_scale'],
            config['model']['class_scale'], config['model']['object_scale'],
            config['model']['no_object_scale'], config['weights']['backend'])

        # 2. Load the pretrained weights (if any)
        yolo.load_weights(config['weights']['full'], by_name=True)

        # 3. actual training
        model_layers, model_path = yolo.train(
            config['train']['train_image_folder'],
            config['train']['train_annot_folder'],
            config['train']['actual_epoch'], project_folder,
            config["train"]["batch_size"], config["train"]["augumentation"],
            config['train']['learning_rate'], config['train']['train_times'],
            config['train']['valid_times'],
            config['train']['valid_image_folder'],
            config['train']['valid_annot_folder'],
            config['train']['first_trainable_layer'],
            config['train']['valid_metric'])
    # 4 Convert the model
    converter.convert_model(model_path, model_layers,
                            config['train']['valid_image_folder'])
    return model_path
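
As a usage sketch, train_from_config would typically be driven by loading the JSON config and choosing a project folder; both paths below are placeholders, not files from the project.

import json

# Hypothetical driver for train_from_config; paths are placeholders.
with open("configs/detector.json") as config_file:
    config = json.load(config_file)

model_path = train_from_config(config, project_folder="projects/detector")
print("Trained model saved to:", model_path)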
Example #5
def setup_inference(config, weights, threshold=0.3, path=None):
    # added for compatibility with < 0.5.7 versions
    try:
        input_size = config['model']['input_size'][:]
    except TypeError:
        input_size = [
            config['model']['input_size'], config['model']['input_size']
        ]

    # make a directory to save inference results
    dirname = os.path.join(os.path.dirname(weights), 'Inference_results')
    if os.path.isdir(dirname):
        print("Folder {} already exists. Image files in this directory may be overwritten."
              .format(dirname))
    else:
        print("Folder {} created.".format(dirname))
        os.makedirs(dirname)

    if config['model']['type'] == 'SegNet':
        print('Segmentation')
        # 1. Construct the model
        segnet = create_segnet(config['model']['architecture'], input_size,
                               config['model']['n_classes'])
        # 2. Load the pretrained weights (if any)
        segnet.load_weights(weights)
        predict_multiple(segnet._network,
                         inp_dir=config['train']['valid_image_folder'],
                         out_dir=dirname,
                         overlay_img=True)
        print(
            evaluate(segnet._network,
                     inp_images_dir=config['train']['valid_image_folder'],
                     annotations_dir=config['train']['valid_annot_folder']))


    if config['model']['type'] == 'Classifier':
        print('Classifier')
        if config['model']['labels']:
            labels = config['model']['labels']
        else:
            labels = get_labels(config['train']['train_image_folder'])
        # 1. Construct the model
        classifier = create_classifier(config['model']['architecture'], labels,
                                       input_size,
                                       config['model']['fully-connected'],
                                       config['model']['dropout'])
        # 2. Load the pretrained weights (if any)
        classifier.load_weights(weights)
        font = cv2.FONT_HERSHEY_SIMPLEX
        valid_image_folder = config['train']['valid_image_folder']
        image_files_list = glob.glob(valid_image_folder + '/**/*.jpg',
                                     recursive=True)
        inference_time = []
        for filename in image_files_list:
            output_path = os.path.join(dirname, os.path.basename(filename))
            orig_image, input_image = prepare_image(filename, classifier)
            prediction_time, img_class, prob = classifier.predict(input_image)
            inference_time.append(prediction_time)
            print(orig_image.shape)
            cv2.putText(orig_image, "{}:{:.2f}".format(img_class[0], prob[0]),
                        (10, 30), font, orig_image.shape[1] / 700, (0, 0, 255),
                        2, True)
            cv2.imwrite(output_path, orig_image)
            show_image(output_path)
            print("{}:{}".format(img_class[0], prob[0]))
        if len(inference_time) > 1:
            print("Average prediction time: {} ms".format(
                sum(inference_time[1:]) / len(inference_time[1:])))

    if config['model']['type'] == 'Detector':
        # 2. create yolo instance & predict
        yolo = create_yolo(config['model']['architecture'],
                           config['model']['labels'],
                           input_size,
                           config['model']['anchors'])
        yolo.load_weights(weights)

        # 3. read image
        annotations = parse_annotation(config['train']['valid_annot_folder'],
                                       config['train']['valid_image_folder'],
                                       config['model']['labels'],
                                       is_only_detect=config['train']['is_only_detect'])

        n_true_positives = 0
        n_truth = 0
        n_pred = 0
        inference_time = []
        for i in range(len(annotations)):
            img_path = annotations.fname(i)
            img_fname = os.path.basename(img_path)
            true_boxes = annotations.boxes(i)
            true_labels = annotations.code_labels(i)

            orig_image, input_image = prepare_image(img_path, yolo)
            height, width = orig_image.shape[:2]
            prediction_time, boxes, probs = yolo.predict(
                input_image, height, width, float(threshold))
            inference_time.append(prediction_time)
            labels = np.argmax(probs, axis=1) if len(probs) > 0 else []
            # 4. save detection result
            orig_image = draw_scaled_boxes(orig_image, boxes, probs,
                                           config['model']['labels'])
            output_path = os.path.join(dirname, os.path.split(img_fname)[-1])
            cv2.imwrite(output_path, orig_image)
            print("{} boxes detected. {} saved.".format(
                len(boxes), output_path))
            show_image(output_path)
            n_true_positives += count_true_positives(boxes, true_boxes, labels,
                                                     true_labels)
            n_truth += len(true_boxes)
            n_pred += len(boxes)
        print(calc_score(n_true_positives, n_truth, n_pred))
        if len(inference_time) > 1:
            print("Average prediction time: {} ms".format(
                sum(inference_time[1:]) / len(inference_time[1:])))
Example #6
def setup_evaluation(config, weights, threshold=0.3, path=None):
    try:
        matplotlib.use('TkAgg')
    except Exception:
        # fall back to the default backend if TkAgg is unavailable
        pass
    # added for compatibility with < 0.5.7 versions
    try:
        input_size = config['model']['input_size'][:]
    except TypeError:
        input_size = [
            config['model']['input_size'], config['model']['input_size']
        ]
    # make a directory to save evaluation results
    dirname = os.path.join(os.path.dirname(weights), 'Evaluation_results')
    if os.path.isdir(dirname):
        print("Folder {} already exists. Image files in this directory may be overwritten."
              .format(dirname))
    else:
        print("Folder {} created.".format(dirname))
        os.makedirs(dirname)

    if config['model']['type'] == 'SegNet':
        print('Segmentation')
        # 1. Construct the model
        segnet = create_segnet(config['model']['architecture'], input_size,
                               config['model']['n_classes'])
        # 2. Load the pretrained weights (if any)
        segnet.load_weights(weights)
        for filename in os.listdir(config['train']['valid_image_folder']):
            filepath = os.path.join(config['train']['valid_image_folder'],
                                    filename)
            orig_image, input_image = prepare_image(filepath, segnet)
            output_path = os.path.join(dirname, os.path.basename(filename))
            predict(model=segnet._network,
                    inp=input_image,
                    image=orig_image,
                    out_fname=output_path)
            #show_image(output_path)

    if config['model']['type'] == 'Classifier':
        print('Classifier')
        if config['model']['labels']:
            labels = config['model']['labels']
        else:
            labels = get_labels(config['train']['train_image_folder'])
        # 1.Construct the model
        classifier = create_classifier(config['model']['architecture'], labels,
                                       input_size,
                                       config['model']['fully-connected'],
                                       config['model']['dropout'])
        # 2. Load the pretrained weights (if any)
        classifier.load_weights(weights)
        font = cv2.FONT_HERSHEY_SIMPLEX
        valid_image_folder = config['train']['valid_image_folder']
        image_files_list = glob.glob(valid_image_folder + '/**/*.jpg',
                                     recursive=True)
        inference_time = []
        for filename in image_files_list:
            output_path = os.path.join(dirname, os.path.basename(filename))
            orig_image, input_image = prepare_image(filename, classifier)
            prediction_time, img_class, prob = classifier.predict(input_image)
            inference_time.append(prediction_time)

            # label shape and colorization
            text = "{}:{:.2f}".format(img_class[0], prob[0])
            background_color = (70, 120, 70)  # grayish-green background for the text
            text_color = (255, 255, 255)  # white text

            size = cv2.getTextSize(text, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1)[0]
            left = 10
            top = 30 - size[1]
            right = left + size[0]
            bottom = top + size[1]

            # set up the colored rectangle background for text
            cv2.rectangle(orig_image, (left - 1, top - 5),
                          (right + 1, bottom + 1), background_color, -1)
            # set up text
            cv2.putText(orig_image, text, (10, 30), cv2.FONT_HERSHEY_SIMPLEX,
                        0.5, text_color, 1)
            cv2.imwrite(output_path, orig_image)
            #show_image(output_path)
            print("{}:{}".format(img_class[0], prob[0]))
        if len(inference_time) > 1:
            print("Average prediction time:{} ms".format(
                sum(inference_time[1:]) / len(inference_time[1:])))

    if config['model']['type'] == 'Detector':
        # 2. create yolo instance & predict
        yolo = create_yolo(config['model']['architecture'],
                           config['model']['labels'], input_size,
                           config['model']['anchors'])
        yolo.load_weights(weights)

        # 3. read image
        annotations = parse_annotation(
            config['train']['valid_annot_folder'],
            config['train']['valid_image_folder'],
            config['model']['labels'],
            is_only_detect=config['train']['is_only_detect'])

        n_true_positives = 0
        n_truth = 0
        n_pred = 0
        inference_time = []
        for i in range(len(annotations)):
            img_path = annotations.fname(i)
            img_fname = os.path.basename(img_path)
            true_boxes = annotations.boxes(i)
            true_labels = annotations.code_labels(i)

            orig_image, input_image = prepare_image(img_path, yolo)
            height, width = orig_image.shape[:2]
            prediction_time, boxes, probs = yolo.predict(
                input_image, height, width, float(threshold))
            inference_time.append(prediction_time)
            labels = np.argmax(probs, axis=1) if len(probs) > 0 else []
            # 4. save detection result
            orig_image = draw_boxes(orig_image, boxes, probs,
                                    config['model']['labels'])
            output_path = os.path.join(dirname, os.path.split(img_fname)[-1])
            cv2.imwrite(output_path, orig_image)
            print("{}-boxes are detected. {} saved.".format(
                len(boxes), output_path))
            #show_image(output_path)
            n_true_positives += count_true_positives(boxes, true_boxes, labels,
                                                     true_labels)
            n_truth += len(true_boxes)
            n_pred += len(boxes)
        print(calc_score(n_true_positives, n_truth, n_pred))
        if len(inference_time) > 1:
            print("Average prediction time:{} ms".format(
                sum(inference_time[1:]) / len(inference_time[1:])))
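
The label overlay in the Classifier branch above (filled rectangle plus text) could be factored into a reusable helper; this sketch reproduces the same drawing calls under a hypothetical name.

import cv2

def draw_label(image, text, org=(10, 30),
               background_color=(70, 120, 70), text_color=(255, 255, 255)):
    # Hypothetical helper mirroring the overlay logic in setup_evaluation:
    # draw 'text' at 'org' on top of a filled background rectangle.
    size, _ = cv2.getTextSize(text, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1)
    left, top = org[0], org[1] - size[1]
    right, bottom = left + size[0], top + size[1]
    cv2.rectangle(image, (left - 1, top - 5), (right + 1, bottom + 1),
                  background_color, -1)
    cv2.putText(image, text, org, cv2.FONT_HERSHEY_SIMPLEX, 0.5, text_color, 1)
    return image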