Code example #1
import json
import os

from utils import parse_annotation  # assumed import path, as in the later examples

def create_json_data(voc_path, output_path):
    '''
    Build JSON files listing image paths and their parsed annotations.
        voc_path: path to the VOC dataset folder
        output_path: folder in which to save the JSON files
    '''
    # Train split
    train_images = list()
    train_objects = list()
    
    with open(os.path.join(voc_path, "ImageSets/Main/trainval.txt")) as f:
        ids = f.read().splitlines()
    
    for img_id in ids:  # avoid shadowing the builtin id()
        # Parse this image's objects from its XML annotation
        object_path = os.path.join(voc_path, "Annotations", img_id + ".xml")
        objects = parse_annotation(object_path)
        if len(objects) == 0:
            continue
        train_objects.append(objects)

        image_path = os.path.join(voc_path, "JPEGImages", img_id + ".jpg")
        train_images.append(image_path)
        
    assert len(train_objects) == len(train_images)
    
    #Save to file
    with open(os.path.join(output_path, "TRAIN_images.json"), "w") as j:
        json.dump(train_images, j)
    
    with open(os.path.join(output_path, "TRAIN_objects.json"), "w") as j:
        json.dump(train_objects, j)
    
    #Test
    test_images = list()
    test_objects = list()
    
    with open(os.path.join(voc_path, "ImageSets/Main/test.txt")) as f:
        ids = f.read().splitlines()
        
    for img_id in ids:
        object_path = os.path.join(voc_path, "Annotations", img_id + ".xml")
        objects = parse_annotation(object_path)
        if len(objects) == 0:
            continue
        test_objects.append(objects)
        test_image_path = os.path.join(voc_path, "JPEGImages", img_id + ".jpg")
        test_images.append(test_image_path)
    
    assert len(test_images) == len(test_objects)
    
    #Save to file
    with open(os.path.join(output_path, "TEST_images.json"), "w") as j:
        json.dump(test_images, j)
    with open(os.path.join(output_path, "TEST_objects.json"), "w") as j:
        json.dump(test_objects, j)
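
A minimal usage sketch for the function above; both paths are hypothetical and should point at a real VOC checkout and an existing output folder:

# Hypothetical paths.
create_json_data(voc_path="./VOCdevkit/VOC2007", output_path="./data")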
Code example #2
import numpy as np

# parse_annotation, run_kmeans, avg_IOU and print_anchors are assumed to be
# helpers from the surrounding project, as in the other examples on this page.
def generate_anchors(ann_dir, img_dir, labels, net_input_size, num_anchors):

    train_imgs, train_labels = parse_annotation(ann_dir, img_dir, labels)

    grid_w = net_input_size / 32
    grid_h = net_input_size / 32

    # run k-means to find the anchors
    annotation_dims = []
    for image in train_imgs:
        cell_w = image['width'] / grid_w
        cell_h = image['height'] / grid_h

        for obj in image['object']:
            relative_w = (float(obj['xmax']) - float(obj['xmin'])) / cell_w
            relative_h = (float(obj['ymax']) - float(obj['ymin'])) / cell_h
            annotation_dims.append((relative_w, relative_h))

    annotation_dims = np.array(annotation_dims)
    centroids = run_kmeans(annotation_dims, num_anchors)

    # write anchors to file
    print('\naverage IOU for', num_anchors, 'anchors:',
          '%0.2f' % avg_IOU(annotation_dims, centroids))
    print_anchors(centroids)
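
For intuition, relative_w above measures box width in grid-cell units. With a hypothetical 416-pixel network input (a 13x13 grid) and a 500-pixel-wide image:

net_input_size = 416           # hypothetical input resolution
grid_w = net_input_size / 32   # 13 cells per side
cell_w = 500 / grid_w          # ~38.5 px per cell
relative_w = 100 / cell_w      # a 100 px wide box spans ~2.6 cells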
Code example #3
import itertools

import utils  # assumed project module providing parse_annotation

def process(params):
    input_txt_paths, output_txt_path = params
    assert len(input_txt_paths) > 0
    
    image_size_list = []
    areas_list = []
    
    for input_txt_path in input_txt_paths:
        with open(input_txt_path, "r") as f:
            lines = f.readlines()
        image_size, areas = utils.parse_annotation(lines)
        image_size_list.append(image_size)
        areas_list.append(areas)
    
    image_size = image_size_list[0]
    for s in image_size_list:
        assert s == image_size  # all input files must agree on the image size
    
    areas = list(itertools.chain.from_iterable(areas_list))
    num_areas = len(areas)
    
    areas = [f"{x0} {y0} {x1} {y1} 1" for x0, y0, x1, y1 in areas]
    areas = "\n".join(areas)
    
    annotation = f"{image_size[0]} {image_size[1]}\n{num_areas}\n{areas}"
    
    with open(output_txt_path, "w") as f:
        f.write(annotation)
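
The file written above uses a simple layout: "width height" on the first line, the box count on the second, then one "x0 y0 x1 y1 flag" line per box. A minimal reader for that layout might look like this (a sketch, not necessarily identical to utils.parse_annotation):

def read_annotation(lines):
    # Line 0: "width height"; line 1: number of boxes; then one box per line.
    image_size = tuple(int(v) for v in lines[0].split())
    num_areas = int(lines[1])
    areas = [tuple(int(v) for v in line.split()[:4])
             for line in lines[2:2 + num_areas]]
    return image_size, areas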
Code example #4
import cv2

import utils  # assumed project module (parse_annotation, load_image_with_rotation)

def process(params):
    input_txt_path, input_png_path, output_png_path, rotation = params
    with open(input_txt_path, "r") as f:
        lines = f.readlines()
    image_size, areas = utils.parse_annotation(lines)

    image = utils.load_image_with_rotation(input_png_path, rotation)
    image_h, image_w, _ = image.shape
    assert (image_w == image_size[0] and image_h == image_size[1])

    cv2.imwrite(str(output_png_path), image)
Code example #5
import math

import utils  # assumed project module providing parse_annotation

# input_dir, output_dir and scale_factor are assumed to be module-level
# settings: pathlib.Path input/output folders and a float shrink factor.
def process(params):
    txt_filename, annotations = params

    input_txt_path = input_dir / txt_filename
    output_txt_path = output_dir / txt_filename

    if not input_txt_path.exists():
        print(input_txt_path.name, "does not exist, skipped.")
        return None

    with open(input_txt_path, "r") as f:
        lines = f.readlines()

    image_size, areas = utils.parse_annotation(lines)
    image_w, image_h = image_size

    if len(areas) <= 0:
        return None

    if len(areas) <= max([i for i, _ in annotations]):
        print(input_txt_path.name, "annotation is not valid, skipped.")
        return None

    areas = [(areas[i], s) for i, s in annotations]
    areas_scaled = []

    for (x0, y0, x1, y1), scale in areas:
        #print(x0, y0, x1, y1)
        orig_w, orig_h = (x1 - x0, y1 - y0)
        assert (orig_h > 0 and orig_w > 0)

        remove_ratio = 1.0 - (1.0 - scale_factor)**scale
        trim_w = math.ceil(orig_w * remove_ratio * 0.5)
        trim_h = math.ceil(orig_h * remove_ratio * 0.5)
        x0 = min(x0 + trim_w, image_w - 1)
        y0 = min(y0 + trim_h, image_h - 1)
        x1 = max(x1 - trim_w, 0)
        y1 = max(y1 - trim_h, 0)
        #print(x0, y0, x1, y1)
        assert (x0 < x1 and y0 < y1)

        areas_scaled.append((x0, y0, x1, y1))

    areas = [f"{x0} {y0} {x1} {y1} 1" for x0, y0, x1, y1 in areas_scaled]

    annotations = "\n".join(areas)
    annotations = f"{image_size[0]} {image_size[1]}\n{len(areas)}\n{annotations}"

    with open(output_txt_path, "w") as f:
        f.write(annotations)
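
The remove_ratio above keeps (1 - scale_factor)**scale of each box dimension, trimming the remainder equally from both sides. For instance, with a hypothetical scale_factor of 0.1:

scale_factor = 0.1  # hypothetical module-level value
for scale in (1, 2, 3):
    remove_ratio = 1.0 - (1.0 - scale_factor) ** scale
    print(scale, round(remove_ratio, 3))  # -> 0.1, 0.19, 0.271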
Code example #6
import utils  # assumed project module providing parse_annotation

# iou_merge_threshold is assumed to be a module-level float setting.
def process(params):
    input_path, output_path = params

    with open(input_path, "r") as f:
        lines = f.readlines()
    image_size, areas = utils.parse_annotation(lines)

    input_areas = [(x0, y0, x1, y1, (x1 - x0) * (y1 - y0))
                   for x0, y0, x1, y1 in areas]
    output_areas = []

    for x0i, y0i, x1i, y1i, ai in input_areas:
        need_to_append = True
        for idx, (x0o, y0o, x1o, y1o, ao, no) in enumerate(output_areas):
            x0u, y0u, x1u, y1u = (max(x0i, x0o), max(y0i, y0o), min(x1i, x1o),
                                  min(y1i, y1o))
            if (x1u - x0u <= 0 or y1u - y0u <= 0):
                continue
            au = (x1u - x0u) * (y1u - y0u)
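            # Note: 2 * overlap / (sum of areas) is a Dice-style score,
            # not a true intersection-over-union, despite the variable name.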
            iou = 2 * au / (ai + ao)
            assert (iou <= 1)
            if (iou >= iou_merge_threshold):
                no_new = no + 1
                x0 = max(0, int((x0o * no + x0i) / no_new))
                y0 = max(0, int((y0o * no + y0i) / no_new))
                x1 = min(image_size[0] - 1, int((x1o * no + x1i) / no_new))
                y1 = min(image_size[1] - 1, int((y1o * no + y1i) / no_new))
                assert (x1 - x0 > 0)
                assert (y1 - y0 > 0)
                a = (x1 - x0) * (y1 - y0)
                output_areas[idx] = (x0, y0, x1, y1, a, no_new)
                need_to_append = False
                break
        if need_to_append:
            output_areas.append((x0i, y0i, x1i, y1i, ai, 1))

    areas = [f"{x0} {y0} {x1} {y1} 1" for x0, y0, x1, y1, _, _ in output_areas]
    num_areas = len(areas)
    areas = "\n".join(areas)

    annotation = f"{image_size[0]} {image_size[1]}\n{num_areas}\n{areas}"
    #print(len(input_areas), num_areas)

    with open(output_path, "w") as f:
        f.write(annotation)
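
A quick numeric trace of the merge rule, assuming a hypothetical iou_merge_threshold of 0.5:

ai, ao = 100, 100           # two 10x10 boxes offset by 2 px in x and y
au = 8 * 8                  # their 8x8 intersection
score = 2 * au / (ai + ao)  # 0.64 >= 0.5, so the boxes are merged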
Code example #7
import cv2
import matplotlib.pyplot as plt

import utils  # assumed project module

# output_dir and output_image_size are assumed to be module-level settings;
# DEBUG_SHOW toggles the optional matplotlib previews below.
DEBUG_SHOW = False

def process(params):
    input_txt_path, input_png_path, rotation = params
    output_png_filename_prefix = f"{input_png_path.stem}_{rotation}"
    
    with open(input_txt_path, "r") as f:
        lines = f.readlines()
    image_size, areas = utils.parse_annotation(lines)
    
    image = utils.load_image_with_rotation(input_png_path, rotation)
    image_h, image_w, _ = image.shape
    assert(image_w == image_size[0] and image_h == image_size[1])
    
    if DEBUG_SHOW:
        plt.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
        plt.show()
        
    area_size_list = []

    for i, area in enumerate(areas):
        x0, y0, x1, y1 = area

        crop_image = image[y0:y1, x0:x1, :]
        crop_h, crop_w, _ = crop_image.shape

        if DEBUG_SHOW:
            plt.imshow(cv2.cvtColor(crop_image, cv2.COLOR_BGR2RGB))
            plt.show()

        crop_image = cv2.resize(crop_image,
                                (output_image_size, output_image_size),
                                interpolation=cv2.INTER_CUBIC)
        file_path = output_dir / f"{output_png_filename_prefix}_{i}.png"
        cv2.imwrite(str(file_path), crop_image)
        
        area_size_list.append((crop_h, crop_w))
    
    return area_size_list
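
A hypothetical call, cropping every annotated region of one frame rotated by 90 degrees (the file names are made up):

from pathlib import Path
sizes = process((Path("frame_0001.txt"), Path("frame_0001.png"), 90))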
Code example #8
import argparse
import json
import os
import re

import numpy as np
from tqdm import tqdm

from utils import parse_annotation

parser = argparse.ArgumentParser()
parser.add_argument('--data_dir',
                    default=os.path.join('/Users/UnicornKing/20180101_120040'))
args = parser.parse_args()

if __name__ == '__main__':
    annotations = [(file, parse_annotation(os.path.join(args.data_dir, file)))
                   for file in sorted(os.listdir(args.data_dir))
                   if os.path.splitext(file)[-1] == '.json']
    prev_file, prev_annot = annotations[0]
    for file, annot in tqdm(annotations[1:]):
        prev_labels = set([shape['label'] for shape in prev_annot['shapes']])
        labels = set([shape['label'] for shape in annot['shapes']])
        for label in prev_labels.intersection(labels):
            if re.search(r'_\d+', label) is None:
                continue
            prev_points = [
                shape['points'] for shape in prev_annot['shapes']
                if shape['label'] == label
            ][0]
            points = [
                shape['points'] for shape in annot['shapes']
                if shape['label'] == label
            ][0]
Code example #9
    # This excerpt starts mid-function; r and g are presumably generated the
    # same way as b, giving one random color per action class.
    r = np.random.randint(0, 255, (len(action_to_idx), 1))
    g = np.random.randint(0, 255, (len(action_to_idx), 1))
    b = np.random.randint(0, 255, (len(action_to_idx), 1))
    colors = np.column_stack([r, g, b]).tolist()

    annot_cache = {}
    video_writer = cv2.VideoWriter('out.avi', 0, args.fps, (1200, 675))
    for i, frame in tqdm(
            enumerate(
                sorted(glob.glob(os.path.join(args.frames_dir,
                                              '*.jpg')))[:25000][::2])):
        frame_name = os.path.splitext(frame)[0]
        annot_name = frame_name + '.json'
        annot_path = os.path.join(args.frames_dir, annot_name)
        image_path = os.path.join(args.frames_dir, frame)
        image = cv2.imread(image_path)
        if os.path.exists(annot_path):
            annot = parse_annotation(annot_path)
            for shape in annot['shapes']:
                label = shape['label']
                if re.sub(r'_\d+', '', label) not in action_to_idx:
                    continue

                annot_cache.setdefault(label, [])

                annot_cache[label].append({
                    'image': image,
                    'points': shape['points']
                })

                if len(annot_cache[label]) == args.num_segments:

                    if label not in meta['action'].values:
Code example #10
import argparse
import os
import re

import cv2
import numpy as np
from tqdm import tqdm

from definitions import action_to_idx, idx_to_action
from utils import parse_annotation, make_dir_if_needed

parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', default='/Users/UnicornKing/20180101_120040')
parser.add_argument('--output_image_dir', default='data/actions')
parser.add_argument('--output_label_path', default='data/actions.txt')
parser.add_argument('--split_length', type=int, default=60)
args = parser.parse_args()

if __name__ == '__main__':
    annotations = [(file, parse_annotation(os.path.join(args.data_dir, file))) for file in
                   sorted(os.listdir(args.data_dir))
                   if os.path.splitext(file)[-1] == '.json']
    label_counts = {}
    splits_counts = {}
    records = []
    dirs = set()
    for file, annot in tqdm(annotations):
        file_name = os.path.splitext(file)[0]
        for i, shape in enumerate(annot['shapes']):
            x1, y1 = shape['points'][0]
            x2, y2 = shape['points'][1]
            label = shape['label']
            if re.sub(r'_\d+', '', label) not in action_to_idx:
                continue
Code example #11
File: training.py  Project: qjadud1994/Alpha-Car
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.optimizers import SGD, Adagrad
import numpy as np
import tensorflow as tf

from parameter import *
from utils import parse_annotation, data_gen
from Model2 import model

all_img = parse_annotation(ann_dir)


def custom_loss(y_true, y_pred):
    ### Adjust prediction
    # adjust x and y
    pred_box_xy = tf.sigmoid(y_pred[:, :, :, :, :2])

    # adjust w and h
    pred_box_wh = tf.exp(y_pred[:, :, :, :, 2:4]) * np.reshape(
        ANCHORS, [1, 1, 1, BOX, 2])
    pred_box_wh = tf.sqrt(
        pred_box_wh /
        np.reshape([float(GRID_W), float(GRID_H)], [1, 1, 1, 1, 2]))

    # adjust confidence
    pred_box_conf = tf.expand_dims(tf.sigmoid(y_pred[:, :, :, :, 4]), -1)

    # adjust probability
    pred_box_prob = tf.nn.softmax(y_pred[:, :, :, :, 5:])

    y_pred = tf.concat(