def generate_set(input_path,
                 output_path,
                 size=200,
                 layover=0.1,
                 input_size=1000,
                 thread_count=8):
    """
    Generates a training set by loading all examples into memory, and resizing them.

    :param input_path:
    :param output_path:
    :param size:
    :param layover:
    :param input_size:
    :return:
    """

    # Assuming that the files are located in the folders 'labels' and 'examples'
    label_paths = utils.get_file_paths("{}/labels".format(input_path))
    example_paths = utils.get_file_paths("{}/examples".format(input_path))

    # Defines the output path based on the size
    output_path = "{0}/{1}x{1}".format(output_path, size)

    export_path_example = "{}/examples/".format(output_path)
    export_path_label = "{}/labels/".format(output_path)

    # Make the path if it does not exist
    utils.make_path(export_path_example)
    utils.make_path(export_path_label)

    path_length = len(label_paths)

    q = Queue()
    for i in range(path_length):
        q.put(i)

    # Start thread_count worker threads that pull indices from the queue
    for i in range(thread_count):
        t = threading.Thread(target=work,
                             args=(q, example_paths, label_paths, path_length,
                                   export_path_example, export_path_label,
                                   size, layover, input_size))

        # Daemon threads exit automatically when the main thread finishes
        t.daemon = True
        t.start()

    q.join()

    # Move to a fresh line after the in-place progress output
    print("")
Example #2
def run():
    # read input
    gt_root = config.IN_ANNOTATIONS + "/gt"
    user_root = config.IN_ANNOTATIONS + "/users"
    # materialize up front so the paths can be reused below
    gt_file_paths = list(utils.get_file_paths(gt_root))
    user_tool_paths = list(utils.get_immediate_subdirs(user_root))
    if len(gt_file_paths) < 1 or len(user_tool_paths) < 1:
        return {"success": False, "message": "missing inputs"}

    tool_userpaths = {}  # all user folders per tool
    for path in user_tool_paths:
        tool = utils.get_last_subdir(path)
        tool_userpaths[tool] = utils.get_immediate_subdirs(path)

    if len(tool_userpaths) < 1:
        return {"success": False, "message": "Could not fetch user data"}

    # calculate user IoU scores
    users = []
    for tool_label, user_folders in tool_userpaths.items():
        for folder in user_folders:
            user_id = utils.get_last_subdir(folder)
            user_files = utils.get_file_paths(folder)
            users.append(get_user_score(gt_file_paths, user_files, user_id))
            # print_user(users[len(users) - 1])  # print current user

    if len(users) < 1:
        return {"success": False, "message": "Failed calculating IOUs."}

    # write IoU score output files
    # sort users according to id
    users.sort(key=lambda x: int(x.id))
    # open out file
    utils.make_dir(config.OUT_IOU_DIR)
    outfile = config.OUT_IOU_DIR + "/" + config.OUT_IOU_FILE + "." + config.OUT_CSV_EXT
    fp = utils.open_file_writing(outfile, True)
    # write csv header
    fp.write("user,tool")
    for video in config.Video:
        fp.write("," + video.value)
    fp.write("\n")

    # write csv lines & finish
    for user in users:
        fp.write(str(user.id) + "," + user.tool.value)
        for video in config.Video:
            fp.write("," + str(user.calc_avg(video)))
        fp.write("\n")
    fp.close()
    return {"success": True, "message": "Wrote " + outfile}
Example #3
def main():
    if args.mode == 'train':
        content_path, mask_path = get_file_paths(args.image_path)    
        content_path.sort()
        mask_path.sort()
        for path in content_path:
            split(get_image(path), path, mode='sat')
        for path in mask_path:
            split(get_image(path), path, mode='mask')
    elif args.mode == 'validation':
        content_path, _ = get_file_paths(args.image_path)    
        content_path.sort()
        for path in content_path:
            split(get_image(path), path, mode='sat')
Example #4
    def batch_gen(self, path, batch_size, crop_size):
        content_path, mask_path = get_file_paths(path)

        while True:
            # sample batch_size distinct indices over the whole dataset (0 .. len-1)
            index = random.sample(range(len(content_path)), batch_size)
            try:
                offset_h = random.randint(0, (2448 - crop_size[0]))
                offset_w = random.randint(0, (2448 - crop_size[1]))
                offset = (offset_h, offset_w)

                contents = [
                    vgg_sub_mean(
                        random_crop(get_image(content_path[i]), offset,
                                    crop_size)) for i in index
                ]
                masks = [
                    mask_preprocess(
                        random_crop(get_image(mask_path[i]), offset,
                                    crop_size)) for i in index
                ]

                contents = np.asarray(contents, dtype=np.float32)
                masks = np.asarray(masks, dtype=np.uint8)

            except Exception as err:
                print("\nError: {}".format(err))
                continue

            yield contents, masks
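
batch_gen is an infinite generator, so it is consumed with next() or handed to a training loop. A sketch, assuming an instance named loader and a local dataset path:

gen = loader.batch_gen('dataset/train/', batch_size=4, crop_size=(512, 512))
contents, masks = next(gen)  # contents: float32 crops, masks: uint8 crops
print(contents.shape, masks.shape)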
Example #5
def get_rel_to_root_files(path):
    wd = os.getcwd()
    all_files = utils.get_file_paths(path)
    file_names = []
    for f in all_files:
        file_names.append(f.replace(wd,""))
    return file_names

Example #6
def train_classifier(bbox_detector_path, training_data_path, output_path):
    detector = cv2.CascadeClassifier(bbox_detector_path)
    classifier = cv2.face.LBPHFaceRecognizer_create()

    labels = os.listdir(training_data_path)
    label_to_idx_mapper = {label: ix for ix, label in enumerate(labels)}
    idx_to_label_mapper = {
        ix: label
        for label, ix in label_to_idx_mapper.items()
    }

    x, y = [], []
    for label in labels:
        sub_dir_path = os.path.join(training_data_path, label)
        file_paths = get_file_paths(sub_dir_path)
        for i, path in enumerate(file_paths):
            print(f'{label}: {i}/{len(file_paths)}')
            image = read_image(path)
            face, _ = get_face_from_image(image, detector)
            if face is not None:
                x.append(face)
                y.append(label_to_idx_mapper[label])

    classifier.train(x, np.array(y))

    create_folder_if_not_exist(output_path)
    classifier.save(os.path.join(output_path, 'classifier'))
    save_pickle_file(os.path.join(output_path, 'mapper'), idx_to_label_mapper)
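
A call sketch; the cascade filename and directory layout are assumptions (training_data_path must hold one subfolder of face images per label):

train_classifier(bbox_detector_path='haarcascade_frontalface_default.xml',
                 training_data_path='data/faces',
                 output_path='models')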
Example #7
def load_dataset():
    dataset_dir = "./Dataset/full_data/"
    dataset_paths = utils.get_file_paths(dataset_dir)

    for path in dataset_paths[:1]:
        data = utils.load_json(path)
        print(data[0])
        print(len(data))
Example #8
def main():
    if args.mode == 'train':
        content_path, mask_path = get_file_paths(args.image_path)
        content_path.sort()
        mask_path.sort()
        mask_size = list(map(os.path.getsize, mask_path))
        _, content_path = zip(
            *sorted(zip(mask_size, content_path), reverse=True))

        for path in content_path[20:80]:
            split(get_image(path), path, mode='sat')

    elif args.mode == 'validation':
        content_path, _ = get_file_paths(args.image_path)
        content_path.sort()
        for idx, path in enumerate(content_path):
            print("{}/{}".format(idx, len(content_path)))
            split(get_image(path), path, mode='sat')
Example #9
def get_cert(app_dir):
    logger.info("[*] Getting hardcoded certificates...")
    files = get_file_paths(app_dir)
    certs = ''
    for file in files:
        extension = file.split('.')[-1]
        # match the whole extension, not just a substring of it
        if re.fullmatch(r"cer|pem|cert|crt|pub|key|pfx|p12", extension):
            certs += html.escape(file) + "<br/>"
    if len(certs) > 1:
        certs = "<tr><td>Certificate/Key Files Hardcoded inside the App.</td><td>" + \
            certs + "</td></tr>"
    return certs
Example #10
def main():
    _, mask_path = get_file_paths(args.image_path)
    mask_path.sort()
    assert len(mask_path) % 16 == 0

    for i in range(len(mask_path) // 16):
        group = mask_path[16 * i:16 * (i + 1)]
        whole_img = merge(group)

        fileID = group[0].split('/')[-1].split('-')[0]
        output_name = '{}_mask.png'.format(fileID)
        print(output_name)

        whole_img.save(os.path.join(args.output_path, output_name))
Example #11
    def eval(self):
        # print('loading model from {}...'.format(self.model.model_path))        
        # self.model.load(self.model.model_path)
        # print('Model is loaded!!!')
        self.model.build_model()
        content_path, _ = get_file_paths(self.test_path)
        content_path.sort()

        for path in content_path:
            fileID = path.split('/')[-1].split('_')[0]
            output_name = '{}_mask.png'.format(fileID)
            output_name = os.path.join(self.output_path, output_name)
            print(output_name)
            img = np.expand_dims(vgg_sub_mean(get_image(path)), axis=0)
            reconst_mask = self.model.decode(img)
            reconst_mask = mask_postprocess(reconst_mask[0])
            reconst_mask = image_resize(reconst_mask, size=(612, 612))
            skimage.io.imsave(output_name, reconst_mask)
Example #12
def show_evals():
    # FILES
    files = utils.nat_sort_list(utils.get_file_paths(config.OUT_EVALS_DIR))
    tables = {}
    if len(files) > 0:
        for f in files:
            set_tables(f, tables)

    # PLOTS
    plot_files = utils.nat_sort_list(get_rel_to_root_files(config.OUT_PLOT_DIR))
    plot_dict = {}
    if len(plot_files) > 0:
        for f in plot_files:
            set_plots(f, plot_dict)

    # utils.printFlaskMsg(str(files))
    # utils.printFlaskMsg(str(tables))
    # utils.printFlaskMsg(str(plot_dict))

    return render_template('evals.html', files=tables, plots=plot_dict)
Example #13
def batch_gen(dir_path, batch_size):
    content_path, mask_path = get_file_paths(dir_path)
    content_path.sort()
    mask_path.sort()

    while True:
        # sample batch_size distinct indices over the whole dataset (0 .. len-1)
        index = random.sample(range(len(content_path)), batch_size)
        try:
            contents = [
                vgg_sub_mean(get_image(content_path[i])) for i in index
            ]
            masks = [mask_preprocess(get_image(mask_path[i])) for i in index]

            contents = np.asarray(contents, dtype=np.float32)
            masks = np.asarray(masks, dtype=np.float32)

        except Exception as err:
            print("\nError: {}".format(err))
            continue

        yield contents, masks

Example #14
    def __prepare_samples(self, datasets_info):
        """ Builds the feature and label vectors from the specified datasets

		Arguments:
		----------
			datasets_info:
				type: list
				info: list of dictionaries containing:
					- label (string)
					- folder (string)

		Returns:
		----------
			feats:
				type: numpy.array
				info: contains all the image arrays

			labels:
				type: numpy.array
				info: contains all the integer-encoded labels
		"""

        feats, labels = [], []

        for i, dataset in enumerate(datasets_info):

            images_paths = get_file_paths(folder_name=dataset['folder'],
                                          file_type='dataset')

            feats += [cv2.imread(path, 0) for path in images_paths]
            labels += [i] * len(images_paths)

            # Updates the corresponding ints <-> labels dict
            self.properties['labels'][i] = dataset['label']

        return feats, numpy.array(labels)
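
Per the docstring, datasets_info is a list of label/folder dictionaries; a sketch with hypothetical folders:

datasets_info = [
    {'label': 'cats', 'folder': 'datasets/cats'},
    {'label': 'dogs', 'folder': 'datasets/dogs'},
]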
Example #15
for d in rotation_dirs:
    print(d)

# Start processing images in rotation directories
print("Starting image processing ... ")
for r_dir in rotation_dirs:
    # Try to find all supported sub directories named with rotation degree using rotation_dir_map
    for sub_dir_name, rotation_deg in rotation_dir_map.items():
        # Create sub directory path
        sub_dir_path = os.path.join(r_dir, sub_dir_name)
        if not os.path.exists(sub_dir_path):
            # Skip when the sub directory does not exist
            continue
        else:
            # Collect all supported images within the sub directory
            image_paths = get_file_paths(sub_dir_path,
                                         supported_images,
                                         recursive=False)
            if len(image_paths) == 0:  # No image found in sub directory
                print('No image to process at path : ', sub_dir_path)
            else:
                # Rotate all images in sub directory at degree after which sub directory is named (rotation_deg).
                print("Rotating all images at path at %d deg:" % rotation_deg,
                      sub_dir_path)
                for path in image_paths:
                    rotate_image(path, rotation_deg, path)
                    print("Rotated %d deg" % rotation_deg,
                          os.path.basename(path))

print("Completed! All images are processed successfully.")
Example #16
# This Script is used to reduce the size of enhanced dataset images by a factor of 3.
import os
from PIL import Image
from utils import get_file_paths

# Scale by which to reduce each dimension of image
REDUCE_SCALE = 3

# Dataset Directory Name
dataset_dir_name = 'MangoLeavesDatabase'

# Retrieve all image paths in the dataset directory
image_files = get_file_paths(os.path.join(os.getcwd(), dataset_dir_name), extension=['.jpg'], recursive=True)
print("Resizing images ...")
for img_file in image_files:
    # Read Image
    img = Image.open(img_file)
    # Get original width and height
    w, h = img.size
    # Resize the image by applying REDUCE_SCALE in both dimensions
    # LANCZOS replaces the deprecated ANTIALIAS constant in current Pillow releases
    resized_img = img.resize((w // REDUCE_SCALE, h // REDUCE_SCALE), Image.LANCZOS)
    # Save resized image by overwriting original image.
    resized_img.save(img_file, 'JPEG')
print("Resizing Completed!")

Example #17
from utils import get_file_paths, get_image
import numpy as np

color2index = {
    (0, 255, 255): 0,
    (255, 255, 0): 1,
    (255, 0, 255): 2,
    (0, 255, 0): 3,
    (0, 0, 255): 4,
    (255, 255, 255): 5,
    (0, 0, 0): 6
}

hist = [0] * 7
_, mask_path = get_file_paths('dataset/train/')
for ii, path in enumerate(mask_path):
    img = get_image(path)
    # use a signed dtype so unassigned pixels can be flagged with -1
    # (with uint8 the -1 would wrap to 255 and break the histogram below)
    label = np.full(img.shape[:2], -1, dtype=np.int16)
    for rgb, idx in color2index.items():
        label[(img == rgb).all(2)] = idx

    unique, counts = np.unique(label, return_counts=True)
    for i, class_ in enumerate(unique):
        if class_ != -1:
            hist[class_] += counts[i]
        else:
            print("\n-1 is in {}!".format(path))
    print('\r({}/{}) hist:{}'.format(ii + 1, len(mask_path), hist), end='')
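
Normalizing hist turns the raw pixel counts into per-class frequencies, the usual next step when deriving class weights for a loss (sketch):

freqs = np.asarray(hist, dtype=np.float64)
freqs /= freqs.sum()
print('\nclass frequencies:', freqs)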
Example #18
    def eval(self, output_mode):
        # print('loading model from {}...'.format(self.model.model_path))
        # self.model.load(self.model.model_path)
        # print('Model is loaded!!!')
        def merge(group, size=(612, 612), num_per_side=13):
            np_mask = np.zeros((2448, 2448, 7))
            # img = Image.new('RGB', (2448, 2448))
            for idx, chunk in enumerate(group):
                offset_x = idx // num_per_side * size[0] // 4
                offset_y = idx % num_per_side * size[1] // 4
                # accumulate overlapping chunks into the full-size mask
                np_mask[offset_x:offset_x + size[0],
                        offset_y:offset_y + size[1], :] += chunk
            img = mask_postprocess(np_mask)
            img = Image.fromarray(img)
            return img

        self.model.build_model()
        content_path, _ = get_file_paths(self.test_path)
        content_path.sort()

        group = []

        for cnt, path in enumerate(content_path):
            # assert len(content_path) % 169 == 0

            img = np.expand_dims(vgg_sub_mean(get_image(path)), axis=0)
            reconst_mask = self.model.decode(img)

            if output_mode == 'img':
                fileID = path.split('/')[-1].split('_')[0]
                output_name = '{}_mask.png'.format(fileID)
                output_name = os.path.join(self.output_path, output_name)
                print("({}/{}) {}".format(cnt, len(content_path), output_name))

                reconst_mask = mask_postprocess(reconst_mask[0])
                reconst_mask = image_resize(reconst_mask, size=(612, 612))
                skimage.io.imsave(output_name, reconst_mask)
            elif output_mode == 'npz':
                if cnt % 169 == 0 and cnt != 0:
                    print('Saving {}'.format(output_name))
                    img = merge(group)
                    img.save(output_name)
                    group = []

                fileID = path.split('/')[-1].split('-')[0]
                output_name = '{}_mask.png'.format(fileID)
                output_name = os.path.join(self.output_path, output_name)
                print("({}/{}) {}".format(cnt + 1, len(content_path),
                                          output_name))

                scale = 612. / 512.
                reconst_mask = scipy.ndimage.zoom(
                    reconst_mask[0], zoom=(scale, scale, 1), mode='reflect')

                group.append(reconst_mask)

        # Saving the last figure
        if output_mode == 'npz':
            print('Saving {}'.format(output_name))
            img = merge(group)
            img.save(output_name)
Example #19
# Prepare dataset directories
print("Prepare dataset directories ...")
DATASET_DIR = os.path.join(os.getcwd(), 'MangoLeavesDatabase')
VARIETY_DIRS = [
    'alphonso',
    'amrapali',
    'chausa',
    'dusheri',
    'langra',
]
OUTPUT_DIRECTORY = os.path.join(DATASET_DIR, 'contour_output')

ts = time.time()
print("Processing Images ...")
for variety in VARIETY_DIRS:
    image_paths = get_file_paths(os.path.join(DATASET_DIR, variety), extension=['.jpg'], recursive=True)
    for img_path in image_paths:
        # Read an Image from dataset
        original_img = cv2.imread(img_path)
        # Image File Name 
        img_file_name = os.path.basename(img_path)
        # Convert color channels from BGR to RGB
        img2rgb = cv2.cvtColor(original_img, cv2.COLOR_BGR2RGB)
        # Apply K-Means to reduce color space in image
        img_pixels = np.float32(img2rgb.reshape((-1, 3)))
        criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
        K = 5  # number of clusters
        ret, labels, centers = cv2.kmeans(img_pixels, K, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)
        centers = np.uint8(centers)
        cluster_img = centers[labels.flatten()]
        # Change all non-green pixels to white in clustered image
Example #20
    for i, p in enumerate(cluster_img):
        max_intensity = max(p)
        if max_intensity == p[2]:
            cluster_img[i] = [255, 255, 255]
    cluster_img = cluster_img.reshape(img2rgb.shape)
    # Apply Morphological opening
    # kernel = np.ones((5,5), np.uint8)
    # opening = cv2.morphologyEx(cluster_img, cv2.MORPH_CLOSE, kernel)
    # Apply Otsu thresholding to the grayscale clustered image
    cluster_img2gray = cv2.cvtColor(cluster_img, cv2.COLOR_RGB2GRAY)
    retval, th_img = cv2.threshold(cluster_img2gray, 0, 255,
                                   cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)

    return cv2.cvtColor(img, cv2.COLOR_RGB2BGR)


src_path = input("Enter source dataset path : ")
image_file_paths = get_file_paths(src_path, extension=['.jpg'], recursive=True)
total_images = len(image_file_paths)
print("Total Images : " + str(total_images))
print("Removing shadows and isolating leaf in images :-")
i = 0
for img_path in image_file_paths:
    filename, ext = os.path.basename(img_path).split(".")
    dirname = os.path.dirname(img_path)
    save_path = os.path.join(dirname, filename + "_p." + ext)
    img = remove_shadow_and_isolate(img_path, 10)
    cv2.imwrite(save_path, img)
    i += 1
    print(str(i) + "/" + str(total_images) + " processed.")
print("Completed!")
Example #21
        retval, rvec, tvec = aruco.estimatePoseCharucoBoard(
            charucoCorners, charucoIds, board, cameraMatrix, distCoeffs)
        if retval:
            aruco.drawAxis(outImage, cameraMatrix, distCoeffs, rvec, tvec, 0.1)

    cv2.imshow("detected", outImage)
    cv2.waitKey(0)
    cv2.destroyAllWindows()


if __name__ == '__main__':
    args = utils.get_options()

    input_image_dirpath = osp.join(osp.dirname(__file__), args.in_dir)
    # accept any file extension
    image_paths, image_names = utils.get_file_paths(input_image_dirpath, "*")

    # read camera parameters
    camera_param_filepath = osp.join(osp.dirname(__file__),
                                     args.camera_param_path)
    cameraMatrix, distCoeffs, rvecs, tvecs, stdDevIn, stdDevEx = \
        utils.read_pickle(camera_param_filepath)

    # read parameters from arguments
    dictionary = utils.get_aruco_dict(args.aruco_dict)
    squareL = args.square_length
    markerL = args.marker_length
    tb = args.v_margin
    lr = args.h_margin
    pixels_per_mm = args.pixels_per_mm
    # read parameters from configuration pickle file
Example #22
dataset_path = 'MangoLeavesDatabase'
directories = [
    os.path.join(dataset_path, 'alphonso'),
    os.path.join(dataset_path, 'langra'),
    os.path.join(dataset_path, 'chausa'),
    os.path.join(dataset_path, 'amrapali'),
    os.path.join(dataset_path, 'dusheri'),
]
# Output path
output_path = os.path.join(dataset_path, 'output')
print("Processing Images ...")
# Start processing images
for directory in directories:
    dir_name = os.path.basename(directory)
    # Get all jpg image files within the directory
    image_files = get_file_paths(directory, extension=['.jpg'], recursive=True)
    for image_file in image_files:
        img = Image.open(image_file)
        contrast = ImageEnhance.Contrast(img)
        contrasted_image = contrast.enhance(CONTRAST)
        brightness = ImageEnhance.Brightness(contrasted_image)
        bright_contrasted_img = brightness.enhance(BRIGHTNESS)
        sharpness = ImageEnhance.Sharpness(bright_contrasted_img)
        sharp_bright_contrasted_img = sharpness.enhance(SHARPNESS)
        color = ImageEnhance.Color(sharp_bright_contrasted_img)
        colored_sharp_bright_contrasted_img = color.enhance(COLOR)
        img_file_name = os.path.basename(image_file)
        _, ext = img_file_name.split('.')
        filename = dir_name
        # Output Path
        out_file_path = os.path.join(output_path, dir_name)
Example #23
@description: This script encodes all the MIDI music in the collected dataset into the
designed data representation. The encoded representations will be saved in .npy format
under the same directory as the MIDI file.
"""

import utils
import numpy as np
import os

# load all the data paths in the midi dataset.
path = '..\\midis\\'
try:
    data_paths = [os.path.join(path, o)
                  for o in os.listdir(path)
                  if os.path.isdir(os.path.join(path, o))]
except OSError:
    print('Error: invalid data path!')
    data_paths = []  # nothing to process if the dataset folder is missing

# convert all the midis into the data representation and save as .npy files.
# the encoded data will be monophonic and wrapped within one octave.
for data_path in data_paths:
    midi_files = utils.get_file_paths(data_path)

    for midi_file in midi_files:
        data_cur = utils.load_data(midi_file)
        utils.to_monophonic(data_cur)
        data_cur = utils.to_octave(data_cur)

        datafile = os.path.splitext(midi_file)[0] + '.npy'
        np.save(datafile, data_cur)
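
A quick round-trip check of the last file written by the loop above (sketch; assumes at least one MIDI file was encoded):

encoded = np.load(datafile)
print(datafile, encoded.shape)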

Example #24
# Leaves Dataset Folder Name
dataset = 'PreprocessedDatabase'
# Get Current Working directory
working_dir = os.getcwd()
# Generate paths for varieties
paths = {
    'alphonso': os.path.join(working_dir, dataset, 'alphonso/'), # For now only using leaf front images
    'amrapali': os.path.join(working_dir, dataset, 'amrapali/'),
    'chausa': os.path.join(working_dir, dataset, 'chausa/'),
    'dusheri': os.path.join(working_dir, dataset, 'dusheri/'),
    'langra': os.path.join(working_dir, dataset, 'langra/'),
}
# Map each variety name to the list of its image paths.
image_dict = dict()
for label, path in paths.items():
    image_dict[label] = utils.get_file_paths(path, ['.jpg'])
# CSV file output path
csv_file_output_path = os.path.join(working_dir, dataset, 'labeled_dataset.csv')
print("Start processing images ...")

# Build a pandas DataFrame of extracted features, one row per image: N images
# with M features each give an N x M frame.
# Columns of the data
cols = ['aspectratio', 'area', 'perimeter', 'formfactor', 'meanR', 'meanG', 'meanB', 'veinarea1', 'veinarea2', 'elongation', 'label']
# Create an empty dataframe with above columns
data = pd.DataFrame(columns=cols)
# Extract image features of each image of each variety
for label, path_list in image_dict.items():
    i = 0
    total = len(path_list)
    print("Variety:", label, "\tTotal Images:", total)
    for image_path in path_list:
Example #25
    cap.release()
    cv2.destroyAllWindows()


if __name__ == '__main__':
    args = utils.get_options()

    videos_dirpath = args.in_dir
    videos_dirpath = osp.join(osp.dirname(__file__), videos_dirpath)
    if not osp.exists(videos_dirpath):
        print("Directory for video files not found...")
        exit()

    # delete files under save dir and make save dir
    resimg_dirpath = osp.join(osp.dirname(__file__), args.out_dir)
    if osp.exists(resimg_dirpath):
        # accept any file extension
        resimg_paths, resimg_names = utils.get_file_paths(resimg_dirpath, '*')
        for mpath in resimg_paths:
            os.remove(mpath)
    os.makedirs(resimg_dirpath, exist_ok=True)

    video_paths, video_names = utils.get_file_paths(videos_dirpath, '*')
    for i, (v_path, v_name) in enumerate(zip(video_paths, video_names)):
        if osp.splitext(v_name)[1] not in ['.mp4', '.avi']:
            print("Check file extension: " + v_path)
            continue
        detect_marker_video(utils.get_aruco_dict(args.aruco_dict),
                            v_path,
                            savename=i,
                            savedirpath=resimg_dirpath)
Example #26
    dictionary = utils.get_aruco_dict(args.aruco_dict)
    squareL = args.square_length
    markerL = args.marker_length
    tb = args.v_margin
    lr = args.h_margin
    pixels_per_mm = args.pixels_per_mm
    # read parameters from configuration pickle file
    if args.input_board_cfg_pkl:
        board_cfg_pkl_path = osp.join(osp.dirname(__file__),
                                      args.board_cfg_pkl_path)
        board_cfg = utils.read_pickle(board_cfg_pkl_path)
        dictionary = utils.get_aruco_dict(board_cfg['dict_label'])
        squareL = board_cfg['square_length']
        markerL = board_cfg['marker_length']
        tb = board_cfg['margin_tb']
        lr = board_cfg['margin_lr']

    img_paths, _ = utils.get_file_paths(calib_image_dirpath, '*')
    calibrate_with_ChArUco_board(result_filepath_no_ext,
                                 get_calib_images(img_paths, resimgs=True),
                                 args.calib_result_format,
                                 dictionary,
                                 squareL,
                                 markerL,
                                 tb,
                                 lr,
                                 pixels_per_mm,
                                 isUndistort=args.is_undistort,
                                 isPrintResult=args.is_print_calib_result,
                                 undistort_res_dirpath=undistort_res_dirpath)
Example #27
    if not osp.exists(imgs_dirpath):
        print("Directory for image files not found...")
        exit()

    cam_param_path = osp.join(
        osp.dirname(__file__), args.camera_param_path)
    with open(cam_param_path, 'rb') as f:
        camera_params = pickle.load(f)
    cameramat, distcoeff, rvecs, tvecs, stdIn, stdEx = camera_params

    # delete files under save dir and make save dir
    resimg_dirpath = osp.join(
        osp.dirname(__file__), args.out_dir)
    if osp.exists(resimg_dirpath):
        # accept any file extension
        resimg_paths, resimg_names = utils.get_file_paths(
            resimg_dirpath, '*')
        for mpath in resimg_paths:
            os.remove(mpath)
    os.makedirs(resimg_dirpath, exist_ok=True)

    marker_length = 0.02  # [m]
    img_paths, img_names = utils.get_file_paths(imgs_dirpath, '*')
    for i, (img_path, img_name) in enumerate(zip(img_paths, img_names)):
        if osp.splitext(img_name)[1] not in ['.png', '.jpg', '.bmp']:
            print("Check file extension: " + img_path)
            continue
        estimate_marker_pose_image(
            utils.get_aruco_dict(args.aruco_dict),
            marker_length,
            img_path,
            cameramat,
            distcoeff,
Example #28
    if isSave:
        if savedirpath is None:
            print("Error: Please specify save marker path.")
            return -1
        marker_path = osp.join(savedirpath,
                               str(marker_i) + '.' + str(num_pixels) + '.png')
        cv2.imwrite(marker_path, marker)
    if isShow:
        utils.imshow(img=marker, width=800)


if __name__ == '__main__':
    args = utils.get_options()

    # delete files under save dir and make save dir
    marker_savedirpath = osp.join(osp.dirname(__file__), args.out_dir)
    if osp.exists(marker_savedirpath):
        # accept any file extension
        marker_paths, marker_names = utils.get_file_paths(
            marker_savedirpath, '*')
        for mpath in marker_paths:
            os.remove(mpath)
    os.makedirs(marker_savedirpath, exist_ok=True)

    # generate into save dir
    for i in range(args.num_markers):
        create_aruco_markers(utils.get_aruco_dict(args.aruco_dict),
                             i,
                             args.num_pixels,
                             isSave=True,
                             savedirpath=marker_savedirpath)
Example #29
import os
from utils import get_file_paths
from PIL import JpegImagePlugin, Image

# Get the path to current working directory
working_dir = os.getcwd()

# Get all file paths in current working directory
file_paths = get_file_paths(working_dir, '.jpg')

# Get the dimensions of each image file.
widths = list()
heights = list()
for i in range(0, len(file_paths)):
    img = Image.open(file_paths[i])
    w, h = img.size
    widths.append(w)
    heights.append(h)

# Calculate the minimum of each dimension
min_width = min(widths)
print("Minimum Width: " + str(min_width))
min_height = min(heights)
print("Minimum Height: " + str(min_height))

# Crop each image and save it
output_dir = 'cropped'
for i in range(0, len(file_paths)):
    img = Image.open(file_paths[i])
    # Get original width and height of image
    w, h = img.size