Example #1
def evaluation_gt():

    #read labels path
    gt_path = '/home/alupotto/data/autel/new_labels'
    gt_list = glob.glob("{}/*.txt".format(gt_path))

    #dictionary of ground-truth counts per class
    classes = load_classes('data/autel.names')
    dict_gt = dict.fromkeys(classes, 0)

    for gt_file in gt_list:

        with open(gt_file) as f_gt:
            gt_lines = f_gt.readlines()

        #clean \n & convert to a list
        gt_lines = [line.strip('\n').split(' ') for line in gt_lines]
    #convert the class id from string to int
        gt_lines = [int(gt_line[0]) for gt_line in gt_lines]

        for class_id in gt_lines:
            class_name = classes[class_id]

            if class_name in dict_gt:
                dict_gt[class_name] += 1

            else:
                dict_gt[class_name] = 1

    with open('pickles/dict_gt.pkl', 'wb') as gt_pkl:
        pickle.dump(dict_gt, gt_pkl, protocol=pickle.HIGHEST_PROTOCOL)

    return dict_gt
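
load_classes itself is not part of these snippets; a minimal sketch of what it is assumed to do (read one class name per line from a .names file, as in the common YOLOv3 utilities):

def load_classes(path):
    # read one class name per line, dropping empty lines
    with open(path, 'r') as fp:
        names = [line.strip() for line in fp if line.strip()]
    return names
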
Example #2
def check_resolutions(imgs_path):

    #file_resolutions = open('files_check/resolution.txt', 'w')

    #load resolution class names
    classes = load_classes('data/autel.resolutions')
    #per-resolution image counts
    dict_res_img = dict.fromkeys(classes, 0)
    #per-resolution list of video ids
    dict_res_videos = {k: [] for k in classes}

    #load images
    img_files = sorted(glob.glob('%s**/*.jpg' % imgs_path))

    #load labels
    label_files = sorted(glob.glob('%s**/*.xml' % imgs_path))

    #folder id used to name the output files
    folder_id = imgs_path.split('/')[-2]

    #img_bad_res = open("../files_check/frames_bad_{}.txt".format(folder_id), 'w')

    for img_path in img_files:

        #check if image has label
        if img_path.replace('.jpg', '.xml') in label_files:
            #video id
            video_id = img_path.split('/')[-1].split('_')[-2]

            #extract image shape
            img = cv2.imread(img_path)
            h_img, w_img, _ = img.shape

            #resolution class id
            cls_id = resolution_id(w_img, h_img)
            class_name = classes[cls_id]

            if video_id not in dict_res_videos[class_name]:
                dict_res_videos[class_name].append(video_id)

            #num images dictionary
            if class_name in dict_res_img:
                dict_res_img[class_name] += 1

            else:
                dict_res_img[class_name] = 1

    #pickle resolution images
    with open('pickles/dict_res_img_{}.pkl'.format(folder_id),
              'wb') as res_pkl:
        pickle.dump(dict_res_img, res_pkl)

    #pickle resolution videos
    with open('pickles/dict_res_videos_{}.pkl'.format(folder_id),
              'wb') as res_pkl:
        pickle.dump(dict_res_videos, res_pkl)

    return dict_res_img, dict_res_videos
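
resolution_id is not shown either; it is assumed to map an image's (width, height) to an index into the names loaded from data/autel.resolutions. A purely hypothetical sketch:

def resolution_id(w_img, h_img):
    # hypothetical mapping from (width, height) to a resolution-class index;
    # the real resolution list lives in data/autel.resolutions
    known = {(1280, 720): 0, (1920, 1080): 1, (3840, 2160): 2}
    return known.get((w_img, h_img), len(known))
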
Example #3
def create_labels_yolo(images_path, labels_path):

    #load classes autel
    classes = load_classes('data/autel.names')

    #load images
    img_files = sorted(glob.glob('%s/*.jpg' % images_path))

    #load labels
    label_files = sorted(glob.glob('%s/*.xml' % images_path))

    for img_path in img_files:
        video_id = img_path.split('/')[-1].split('_')[-2]

        if img_path.replace('.jpg', '.xml') in label_files:

            # xml path
            xml_path = img_path.replace('.jpg', '.xml')

            # root xml
            root = ET.parse(xml_path)

            # image shape as recorded in the xml label
            h_xml = int(root.find('size').find('height').text)
            w_xml = int(root.find('size').find('width').text)

            # extract image shape
            img = cv2.imread(img_path)
            h_img, w_img, _ = img.shape

            #check resolution objects
            if check_resolution(h_img, w_img, video_id):

                #label file name
                label_id = img_path.replace('.jpg', '.txt').split('/')[-1]
                #log which label file is being written
                print(labels_path + label_id)
                #open the output label file
                gt_file = open(labels_path + label_id, 'w')

                for obj in root.findall('object'):
                    obj_name = obj.find('name').text

                    if obj_name in classes:
                        #parse xml
                        bndbox = obj.find('bndbox')
                        x0 = float(bndbox.find('xmin').text)
                        y0 = float(bndbox.find('ymin').text)
                        x1 = float(bndbox.find('xmax').text)
                        y1 = float(bndbox.find('ymax').text)
                        #prepare coords for conversion
                        obj_id = classes.index(obj_name)
                        bbox_xml = (x0, x1, y0, y1)
                        #write one line per object: class id followed by the box coords
                        gt_file.write(
                            str(obj_id) + " " +
                            " ".join([str(coord)
                                      for coord in bbox_xml]) + '\n')

                gt_file.close()
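
The (x0, x1, y0, y1) ordering matches the classic VOC-to-YOLO conversion helper, which this snippet stops short of calling; a hedged sketch of that conversion (the function name convert_to_yolo is illustrative):

def convert_to_yolo(size, box):
    # size: (width, height) of the image; box: (xmin, xmax, ymin, ymax)
    dw, dh = 1.0 / size[0], 1.0 / size[1]
    x = (box[0] + box[1]) / 2.0 * dw  # normalized box center x
    y = (box[2] + box[3]) / 2.0 * dh  # normalized box center y
    w = (box[1] - box[0]) * dw        # normalized box width
    h = (box[3] - box[2]) * dh        # normalized box height
    return x, y, w, h
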
Example #4
def check_labels(imgs_path):

    #load classes autel
    classes = load_classes('data/autel.names')

    #dictionary count classes
    dict_gt = dict.fromkeys(classes, 0)

    #dictionary count resolution
    dict_resolution = {}

    #load images
    img_files = sorted(glob.glob('%s/*.jpg' % imgs_path))

    #load labels
    label_files = sorted(glob.glob('%s/*.xml' % imgs_path))

    #img with bad resolution frames
    folder_id = imgs_path.split('/')[-2]
    img_bad_res = open("files_check/frames_bad_{}.txt".format(folder_id), 'w')

    for img_path in img_files:
        # print(dict_gt)
        #check if image has label
        if img_path.replace('.jpg', '.xml') in label_files:
            #xml path
            xml_path = img_path.replace('.jpg', '.xml')

            #root xml
            root = ET.parse(xml_path)

            #check real image shape with shape of the label
            h_xml = int(root.find('size').find('height').text)
            w_xml = int(root.find('size').find('width').text)

            #extract image shape
            img = cv2.imread(img_path)
            h_img, w_img, ch_img = img.shape

            #check xml & jpg are equal

            if check_size_objects((h_img, w_img), (h_xml, w_xml), root):

                for obj in root.findall('object'):
                    obj_name = obj.find('name').text
                    #count objects
                    if obj_name in dict_gt:
                        dict_gt[obj_name] += 1
                    else:
                        dict_gt[obj_name] = 1

    with open('pickles/dict_{}.pkl'.format(folder_id), 'wb') as gt_pkl:
        pickle.dump(dict_gt, gt_pkl)
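
check_size_objects is another helper that is not included; judging from the call site it receives the decoded image shape, the shape stored in the xml, and the xml root. A hypothetical sketch of such a consistency check:

def check_size_objects(img_shape, xml_shape, root):
    # hypothetical: the label is only trusted if the xml size matches the
    # decoded image and every bounding box lies inside the image
    if img_shape != xml_shape:
        return False
    h, w = img_shape
    for obj in root.findall('object'):
        bndbox = obj.find('bndbox')
        if float(bndbox.find('xmax').text) > w or \
           float(bndbox.find('ymax').text) > h:
            return False
    return True
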
Example #5
def histogram_resolutions_img(dict_res_img=None):

    if dict_res_img is None:
        with open('pickles/dict_res_img_images.pkl', 'rb') as gt_pkl:
            dict_res_img = pickle.load(gt_pkl)

    classes = load_classes('data/autel.resolutions')
    window_title = "img/resolution"
    plot_title = "num images per resolution"
    x_label = "num images"
    output_path = "output/resolution_img_08072018.png"
    to_show = True
    plot_color = 'royalblue'
    draw_plot_func(dict_res_img, len(classes), window_title, plot_title,
                   x_label, output_path, to_show, plot_color, "")
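
histogram_resolutions_img and the other histogram helpers in these examples call draw_plot_func with the same nine positional arguments; its implementation is not part of the snippets. A rough sketch of a compatible bar-chart helper (the real one, from the mAP plotting utilities, also annotates each bar with its value):

import matplotlib.pyplot as plt

def draw_plot_func(dictionary, n_classes, window_title, plot_title,
                   x_label, output_path, to_show, plot_color, true_p_bar):
    # rough stand-in: horizontal bar chart of the dictionary values,
    # sorted ascending; true_p_bar ("" at every call site here) is ignored
    items = sorted(dictionary.items(), key=lambda kv: kv[1])
    names = [name for name, _ in items]
    values = [value for _, value in items]
    plt.figure(window_title, figsize=(8, max(n_classes, 1) * 0.4 + 1))
    plt.barh(range(len(names)), values, color=plot_color)
    plt.yticks(range(len(names)), names)
    plt.title(plot_title)
    plt.xlabel(x_label)
    plt.tight_layout()
    plt.savefig(output_path)
    if to_show:
        plt.show()
    plt.close()
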
Example #6
def check_devset():
    # load classes with background
    classes = load_classes('data/autel_background.names')

    with open('/home/alupotto/data/autel_08102018/autel_devset.txt',
              'r') as file_dev:
        list_paths = file_dev.readlines()

    lbls_path = [
        img_path.replace('images', 'labels_yolo_withbackground').replace(
            '.jpg', '.txt').strip('\n') for img_path in list_paths
    ]

    dev_dict = count_classes(lbls_path, classes)

    print(dev_dict)
Example #7
def histogram_resolutions_videos(dict_res_video=None):

    if dict_res_video is None:
        with open('pickles/dict_res_videos_images.pkl', 'rb') as gt_pkl:
            dict_res_video = pickle.load(gt_pkl)

    #replace each resolution's list of video ids with its count
    for key, values in dict_res_video.items():
        dict_res_video[key] = len(values)

    classes = load_classes('data/autel.resolutions')

    window_title = "ground truth"
    plot_title = "number videos per resolution"
    x_label = "num videos"
    output_path = "output/resolution_videos_08072018.png"
    to_show = True
    plot_color = 'royalblue'
    draw_plot_func(dict_res_video, len(classes), window_title, plot_title,
                   x_label, output_path, to_show, plot_color, "")
Example #8
def histogram_classes_gt(dict_gt=None):

    if dict_gt is None:
        with open('pickles/dict_gt.pkl', 'rb') as gt_pkl:
            dict_gt = pickle.load(gt_pkl)

    classes = load_classes('data/autel.names')
    gt_path = '/home/alupotto/data/autel/new_labels'
    gt_list = glob.glob("{}/*.txt".format(gt_path))

    window_title = "ground truth"
    plot_title = "Ground-Truth\n"
    plot_title += "(" + str(len(gt_list)) + " files and " + str(
        len(classes)) + " classes)"
    x_label = "number ground truth objects"
    output_path = "output/ground_truth.png"
    to_show = False
    plot_color = 'forestgreen'
    draw_plot_func(dict_gt, len(classes), window_title, plot_title, x_label,
                   output_path, to_show, plot_color, "")
    return dict_gt
Example #9
def histogram_classes_AP(dict_classes=None):

    if dict_classes is None:
        with open('pickles/dict_classes.pkl', 'rb') as cls_pkl:
            dict_classes = pickle.load(cls_pkl)

    classes = load_classes('data/autel.names')
    for key, value in dict_classes.items():
        dict_classes[key] = np.mean(value)

    mAP = dict_classes['mAP']
    dict_classes.pop('mAP')

    window_title = "AP per class"
    plot_title = "mAP = {0:.2f}%".format(mAP * 100)
    x_label = "Average Precision"
    output_path = "output/classes_AP.png"
    to_show = True
    plot_color = 'royalblue'
    draw_plot_func(dict_classes, len(classes), window_title, plot_title,
                   x_label, output_path, to_show, plot_color, "")
    return dict_classes
Example #10
def histogram_classes_gt(imgs_path, dict_gt=None):

    if dict_gt is None:
        with open('pickles/dict_part1_autel.pkl', 'rb') as gt_pkl:
            dict_gt = pickle.load(gt_pkl)

    print(dict_gt)

    classes = load_classes('data/autel.names')
    # load images
    img_files = sorted(glob.glob('%s/*.jpg' % imgs_path))
    print(len(img_files))
    window_title = "ground truth"
    plot_title = "Ground-Truth\n"
    plot_title += "(" + str(len(img_files)) + " files and " + str(
        len(classes)) + " classes)"
    x_label = "number ground truth objects"
    output_path = "output/ground_truth_08072018.png"
    to_show = True
    plot_color = 'forestgreen'
    draw_plot_func(dict_gt, len(classes), window_title, plot_title, x_label,
                   output_path, to_show, plot_color, "")
Example #11
def create_dev_set(labels_path, per_cent):
    #load classes with background
    classes = load_classes('data/autel_background.names')
    #file_paths
    file_paths = open(
        os.path.join(os.path.dirname(labels_path.rstrip('/')),
                     'autel_devset.txt'), 'w')
    #load labels
    label_files = sorted(glob.glob('%s/*.txt' % labels_path))
    #reuse cached class counts if the pickle exists, otherwise count them
    if os.path.isfile('pickles/dict_gt.pkl'):
        with open('pickles/dict_gt.pkl', 'rb') as gt_pkl:
            dict_classes = pickle.load(gt_pkl)
    else:
        dict_classes = count_classes(label_files, classes)
    #apply per_cent to classes in dict
    for key, value in dict_classes.items():
        dict_classes[key] = int(value * per_cent)

    #shuffling
    seed(23)
    shuffle(label_files)
    check_dict = {}

    for label in label_files:
        with open(label, 'r') as lbl_file:
            lines = lbl_file.readlines()

        id_classes = [int(line.split(' ')[0]) for line in lines]

        for cls in id_classes:
            dict_classes[classes[cls]] -= 1

        if all(value > 0 for value in dict_classes.values()):
            img_path = label.replace('labels_yolo_withbackground',
                                     'images').replace('.txt', '.jpg')
            file_paths.write(img_path + '\n')

    file_paths.close()
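
count_classes (used here and in check_devset above) is assumed to tally how often each class id occurs across a list of YOLO-format .txt label files; a minimal sketch under that assumption:

def count_classes(label_files, classes):
    # tally class occurrences over YOLO txt labels: "<class_id> <coords...>" per line
    counts = dict.fromkeys(classes, 0)
    for path in label_files:
        with open(path, 'r') as lbl:
            for line in lbl:
                if line.strip():
                    counts[classes[int(line.split(' ')[0])]] += 1
    return counts
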
Example #12
def evaluation_classes(gt_path, pred_path):

    MIN_OVERLAP = 0.5
    ROOT_PATH = '/home/alupotto/data/autel/'

    #AP: 0.1667(0.0556)
    #AP: 0.85

    #read labels path
    #gt_path = '/home/alupotto/data/autel/new_labels'
    gt_list = glob.glob("{}/*.txt".format(gt_path))
    gt_list.sort()

    #paths predicted
    #pred_path = '/home/alupotto/data/autel/new_predicted'
    #pred_path = '/home/alupotto/data/autel/experiments/predicted_2420'

    APs = []

    #declare dictionary classes
    classes = load_classes('data/autel.names')
    AP_classes = dict.fromkeys(classes)
    AP_classes = {k: [] for k in AP_classes}

    for gt_file in gt_list:

        #read ground-truth lines
        with open(gt_file) as f_gt:
            gt_lines = f_gt.readlines()

        #clean \n & convert to a list
        gt_lines = [line.strip('\n').split(' ') for line in gt_lines]
        #convert strings to floats
        gt_lines = [list(map(float, gt_line)) for gt_line in gt_lines]

        #extract gt_id
        gt_id = gt_file.split('/')[-1]
        pred_file = os.path.join(pred_path, gt_id)

        #read prediction
        with open(pred_file) as f_pred:
            f_pred = f_pred.readlines()

        #strip \n and convert to a list
        pred_lines = [line.strip('\n').split(' ') for line in f_pred]
        #convert strings to floats
        pred_lines = [list(map(float, pred_line)) for pred_line in pred_lines]

        gt_lines = np.array(gt_lines)
        bbox_gt = gt_lines[:, 1:]

        AP_temp = {}
        correct = []
        detected = []

        if len(pred_lines) == 0:
            if len(gt_lines) != 0:
                #no detections for this image: every ground-truth object is a miss
                tpos_non = len(gt_lines) * [0]
                for idx, (cls_gt, _, _, _, _) in enumerate(gt_lines):
                    cls_name = classes[int(cls_gt)]
                    tpos_non[idx] = int(0)
                    APs.append(0)
                    AP_temp[cls_name] = tpos_non

                continue

        tpos = len(pred_lines) * [0]

        for idx, (cls_pred, conf, x1, y1, x2, y2) in enumerate(pred_lines):

            bbox_pred = np.array((x1, y1, x2, y2))
            bbox_pred = bbox_pred.reshape((1, bbox_pred.shape[0]))

            iou = bbox_iou(bbox_pred, bbox_gt)
            # extract index of largest overlap
            best_i = np.argmax(iou)
            cls_name = classes[int(cls_pred)]
            if iou[best_i] > MIN_OVERLAP and cls_pred == gt_lines[
                    best_i, 0] and best_i not in detected:

                correct.append(1)
                detected.append(best_i)
                tpos[idx] = int(1)
                AP_temp[cls_name] = tpos

            else:
                correct.append(0)
                tpos[idx] = int(0)

                AP_temp[cls_name] = tpos

        for cls, value in AP_temp.items():

            true_positives = np.array(value)
            false_positives = 1 - true_positives

            false_positives = np.cumsum(false_positives)
            true_positives = np.cumsum(true_positives)

            recall = true_positives / len(gt_lines) if len(
                gt_lines) else true_positives
            precision = true_positives / np.maximum(
                true_positives + false_positives,
                np.finfo(np.float64).eps)
            AP = compute_ap(recall, precision)
            APs.append(AP)

            if cls in AP_classes:
                AP_classes[cls].append(AP)
            else:
                AP_classes[cls] = AP

        print("Sample [%d/%d] AP: %.4f mAP: %.4f" %
              (len(APs), len(gt_list), AP, np.mean(APs)))
    AP_classes['mAP'] = np.mean(APs)
    with open('pickles/dict_classes.pkl', 'wb') as cls_pkl:
        pickle.dump(AP_classes, cls_pkl, protocol=pickle.HIGHEST_PROTOCOL)

    return AP_classes
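
compute_ap and bbox_iou come from the evaluation utilities and are not reproduced here. For reference, a common VOC-style compute_ap (integrate the precision envelope over recall), assuming both arguments are 1-D numpy arrays:

import numpy as np

def compute_ap(recall, precision):
    # append sentinels, then make precision monotonically decreasing from the right
    mrec = np.concatenate(([0.0], recall, [1.0]))
    mpre = np.concatenate(([0.0], precision, [0.0]))
    for i in range(mpre.size - 1, 0, -1):
        mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
    # area under the curve, summed where recall changes
    idx = np.where(mrec[1:] != mrec[:-1])[0]
    return np.sum((mrec[idx + 1] - mrec[idx]) * mpre[idx + 1])
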
Example #13
import cv2

config_path = 'config/yolov3.cfg'
weights_path = 'config/yolov3.weights'
class_path = 'config/coco.names'

img_size = 416
conf_thres = 0.8
nms_thres = 0.4

# Load model and weights
model = Darknet(config_path, img_size=img_size)
model.load_weights(weights_path)
model.cuda()
model.eval()
classes = load_classes(class_path)
Tensor = torch.cuda.FloatTensor


def detect_image(img):
    # Scale and pad image
    ratio = min(img_size / img.size[0], img_size / img.size[1])
    imw = round(img.size[0] * ratio)
    imh = round(img.size[1] * ratio)
    img_transforms = transforms.Compose([
        transforms.Resize((imh, imw)),
        transforms.Pad((max(int((imh - imw) / 2), 0),
                        max(int((imw - imh) / 2), 0),
                        max(int((imh - imw) / 2), 0),
                        max(int((imw - imh) / 2), 0)),
                       (128, 128, 128)),
    ])
Example #14
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--epochs", type=int, default=20, help="number of epochs")
parser.add_argument("--batch_size", type=int, default=16, help="size of each image batch")
parser.add_argument("--n_cpu", type=int, default=0, help="number of cpu threads used during batch generation")
parser.add_argument("--is_cuda", type=bool, default=True, help="whether to use cuda")
parser.add_argument("--class_path", type=str, default="data/fish.names", help="path to class label file")
parser.add_argument("--data_config_path", type=str, default="data/fish.data", help="path to data config file")
parser.add_argument("--model_config_path", type=str, default="config/fish.cfg", help="path to model config file")
parser.add_argument("--weights_path", type=str, default="config/yolov3.weights", help="path to pre-trained weight file")
parser.add_argument("--checkpoint_interval", type=int, default=1, help="interval between saving model weights")
parser.add_argument("--checkpoint_dir", type=str, default="checkpoints", help="directory where model checkpoints are saved")

args = parser.parse_args()
#print(args)

# Load class names and data configuration.
classes = load_classes(args.class_path)
data_cfg = parse_data_config(args.data_config_path)
train_path = data_cfg["train"]

# Load hyperparameters.
hyperparams = parse_model_config(args.model_config_path)[0]
learning_rate = float(hyperparams["learning_rate"])
momentum = float(hyperparams["momentum"])
decay = float(hyperparams["decay"])
burn_in = int(hyperparams["burn_in"])


###################################################################################

# Create and initialize the model.
model = Darknet(args.model_config_path)