Example #1
def get_data_files(main_directory):
    """
    Using glob to get the paths of all the file names
    to be uploaded
    :param main_directory: the main directory to begin searching
    :return: list of log file paths,
             list of song file paths
    """
    plant_files_list = globlin(main_directory + '/plant/*.*', recursive=True)
    animal_files_list = globlin(main_directory + '/animal/*.*', recursive=True)
    human_files_list = globlin(main_directory + '/human/*.*', recursive=True)

    return plant_files_list, animal_files_list, human_files_list
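Note: recursive=True only changes matching when a pattern contains '**'; with single-level patterns such as '/plant/*.*' it has no effect. A minimal sketch of the difference (the 'data' directory is illustrative):

from glob import glob as globlin

flat = globlin('data/plant/*.*')                      # files directly under data/plant
deep = globlin('data/plant/**/*.*', recursive=True)   # files at any depth below data/plant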
Example #2
def getFileList(category_path):
    # user_feedback and move_files are project helpers defined elsewhere
    user_feedback('Unpacking ' + category_path + ' folders...')
    image_paths = globlin('D:/capstone data/' + category_path + '/**/*.*',
                          recursive=True)
    target_path = 'D:/capstone data/' + category_path + '/' + category_path
    move_files(image_paths, target_path)
    user_feedback('Completed')
Example #3
    def get_feature_file_list(self, path):
        feature_list = globlin(path + f'/{self.category}/*.*')
        if len(feature_list) >= 1:
            logging.info(
                f'Data Quality Check passed. Found {len(feature_list)} features for {self.category}s'
            )
        else:
            raise ValueError(
                f"Data Quality Check failed. No features found for category {self.category}."
            )
Example #4
    def get_csv_file_list(self, path):
        csv_list = globlin(path)
        if len(csv_list) >= 1:
            logging.info(
                f'{len(csv_list)} CSV files for {self.category}s found')
            return True, len(csv_list), csv_list
        else:
            raise ValueError(
                f"Data Quality Check failed. No CSV files found for category {self.category}."
            )
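For context, a hedged usage sketch of the check above; DataQualityOperator and the path are hypothetical stand-ins for whatever class these methods belong to:

checker = DataQualityOperator(category='plant')  # hypothetical host class
try:
    ok, count, csv_files = checker.get_csv_file_list('D:/capstone data/plant/*.csv')
except ValueError as err:
    logging.error(err)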
Example #5
    def __init__(self,
                 path_features,
                 path_images,
                 path_output,
                 category,
                 *args, **kwargs):

        super(FeatureExtractorOperator, self).__init__(*args, **kwargs)
        self.feature_names = globlin(path_features)
        self.category = category
        self.category_list = self.get_data_files(path_images)
        self.output_path = path_output
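For context, such a custom operator would normally be instantiated inside an Airflow DAG; a minimal sketch, assuming a standard BaseOperator subclass (the task_id and paths are illustrative):

extract_features = FeatureExtractorOperator(
    task_id='extract_features',
    path_features='D:/capstone data/imgFeatures/*/*.*',
    path_images='D:/capstone data',
    path_output='D:/capstone data/output',
    category='plant',
    dag=dag,  # an existing airflow.DAG instance
)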
Example #6
def get_data_files(main_directory):
    """
    Use glob to get the paths of all the file names to be uploaded.
    :param main_directory: the main directory to begin searching
    :return: list of log file paths,
             list of song file paths
    """
    print('************************************')
    print('Log data list')
    print('************************************')
    log_files_list = globlin(main_directory + '/*/*.json', recursive=True)
    song_files_list = globlin(main_directory + '/*/*/*/*/*.json',
                              recursive=True)
    print(log_files_list)
    print('************************************')
    print('Song data list')
    print('************************************')
    print(song_files_list)
    return log_files_list, song_files_list
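The fixed-depth pattern '/*/*/*/*/*.json' only matches song files nested exactly four directories deep. If the depth can vary, a recursive pattern is the usual alternative (note it would also match the log files, so filter afterwards):

song_files_list = globlin(main_directory + '/**/*.json', recursive=True)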
Example #7
    def get_data_files(self, main_directory):
        """
        Using glob to get the paths of all the file names
        to be uploaded
        :param main_directory: the main directory to begin searching
        :return: list of log file paths,
                 list of song file paths
        """
        category_list = globlin(main_directory + f'/{self.category}/*.*', recursive=True)
        # animal_files_list = globlin(main_directory + '/animal/*.*', recursive=True)
        # human_files_list = globlin(main_directory + '/human/*.*', recursive=True)

        return category_list
Example #8
    def execute(self, context):
        KEY, SECRET = self.get_key_secret()
        file_list = globlin(self.path + f'/{self.category}/*.*',
                            recursive=True)
        for file in file_list:
            try:
                key_path = 'raw_data' + '/' + file.split(
                    '/')[-2] + '/' + file.split('/')[-1]
                multi_part_upload_with_s3(file, key_path, self.bucket_name)
                logging.info(
                    f'File upload -- {file} -- Completed successfully.')
            except Exception as err:
                raise ValueError(f'Upload for file -- {file} -- failed.') from err
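Splitting on '/' assumes forward-slash paths; on Windows, glob can return backslash-separated paths. A platform-neutral sketch of the same key construction using pathlib:

from pathlib import Path

p = Path(file)
key_path = f'raw_data/{p.parent.name}/{p.name}'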
Example #9
    def calibrate_camera(self):
        calibration_img_fnames = globlin('./camera_cal/*.*')
        imgpoints = []
        objpoints = []
        for path in calibration_img_fnames:
            img = cv2.imread(path)
            imgpoints, objpoints = self.calc_obj_img_points(
                img, objpoints, imgpoints)

        img = cv2.imread('./camera_cal/calibration1.jpg')
        undistorted_img, mtx, dist = self.camera_calibration(
            img, objpoints, imgpoints)

        with open("wide_dist_pickle.p", "wb") as pickle_out:
            pickle.dump({'mtx': mtx, 'dist': dist}, pickle_out)

        return undistorted_img
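The pickled calibration can then be reloaded and applied elsewhere; a minimal sketch, assuming img is a loaded BGR image:

with open('wide_dist_pickle.p', 'rb') as pickle_in:
    calibration = pickle.load(pickle_in)
mtx, dist = calibration['mtx'], calibration['dist']
undistorted = cv2.undistort(img, mtx, dist, None, mtx)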
Example #10
    def get_img_features(self, main_path):
        feature_paths = globlin(main_path + f'/{self.category}/*.*')
        return feature_paths
Example #11
import cv2
from cv2 import dnn_superres
from glob import glob as globlin  ## 7bb <3
import progressbar

# Create an SR object
sr = dnn_superres.DnnSuperResImpl_create()

# Read the desired model (EDSR weights trained for 3x upscaling)
path = "EDSR_x3.pb"
sr.readModel(path)

# Set the model and scale; the scale must match the loaded weights (x3 here)
sr.setModel("edsr", 3)

paths = globlin('./video/*.*')
with progressbar.ProgressBar(max_value=len(paths)) as bar:
    for index, path in enumerate(paths):
        # Read image
        image = cv2.imread(path)
        # Upscale the image
        result = sr.upsample(image)
        # Save the image
        cv2.imwrite(path.replace('video', 'video2'), result)
        bar.update(index)
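cv2.imwrite does not create missing directories and silently returns False when the target path cannot be written, so the './video2' output directory must exist before the loop; a one-line guard placed above it:

import os

os.makedirs('./video2', exist_ok=True)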
Example #12

def log_upload_progress(index, path):
    # Persist the index to resume from, overwriting any previous checkpoint
    with open(path, 'w') as f:
        f.write("Index to continue from: " + str(index))


def read_index_to_continue_from(path):
    # Return 0 on the first run, before any checkpoint file exists
    try:
        with open(path, 'r') as file:
            data = file.read().replace('\n', '')
            return int(data.split(' ')[-1])
    except FileNotFoundError:
        return 0


if __name__ == '__main__':
    # plant_files_list, animal_files_list, human_files_list = get_data_files('../../capstone data/compressed')
    # full_list_directories = plant_files_list + animal_files_list + human_files_list
    zip_file_list = globlin('../../capstone data/compressed/*.*',
                            recursive=True)
    index_to_continue = read_index_to_continue_from('log.txt')
    for index, file in enumerate(zip_file_list):
        if index < index_to_continue:
            print('skipping -- ' + str(index) + '/' +
                  str(index_to_continue - 1),
                  end="\r")
            continue
        print('\n')
        key_path = 'raw_data' + '/' + file.split('/')[-2] + '/' + file.split(
            '/')[-1]
        multi_part_upload_with_s3(file, key_path)
        log_upload_progress(index + 1, 'log.txt')  # record the next index so resume skips this file
    # Loops through all images in a local folder
    index = 1
    for filename in set_of_images:
        id = filename.split('/')[-1].split('.')[0]
        outfile_name = f'../../capstone data/imgFeatures/{filename.split("/")[-2]}/{id}.npz'
        if check_if_file_name_exists(outfile_name):
            index += 1
            user_feedback_progress(f"Features for image {index}/{len(set_of_images)} already exist -- {id}", 'sub')
            continue
        user_feedback_progress(f"Extracting Features for image {index}/{len(set_of_images)} -- {id}", 'sub')
        # Loads and pre-process the image
        try:
            img = load_image(filename)
            # Calculate the image feature vector of the img
            features = module(img)
            # Remove single-dimensional entries from the 'features' array
            feature_set = np.squeeze(features)
            # Save the image feature vector to disk for later use
            # (np.savetxt writes plain text despite the .npz file name)
            np.savetxt(outfile_name, feature_set, delimiter=',')
        except Exception:
            print(f'\nFile may not exist, please verify {id}')
        index += 1
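Note that np.savetxt writes plain text even though the '.npz' extension normally signals NumPy's binary archive format. If the binary format is what was intended, the save/load pair would look like this (a sketch, not the author's confirmed intent):

np.savez_compressed(outfile_name, features=feature_set)   # binary .npz archive
feature_set = np.load(outfile_name)['features']           # read it back later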

if __name__ == '__main__':
    feature_names = globlin('../../capstone data/imgFeatures/*/*.*')
    plant_files_list, animal_files_list, human_files_list = get_data_files('../../capstone data')
    get_image_feature_vectors(plant_files_list)
    # get_image_feature_vectors(animal_files_list)
    # get_image_feature_vectors(human_files_list)
def get_img_features(main_path, category):
    feature_paths = globlin(main_path + f'/{category}/*.*')
    return feature_paths
Example #15
def get_feature_file_list(path):
    feature_list = globlin(path)
    if len(feature_list) >= 1:
        return True, len(feature_list)
    else:
        # mirror the success branch's shape so callers can always unpack two values
        return False, 0
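With both branches now returning a two-tuple, callers can unpack the result directly; a short usage sketch (the path is illustrative):

ok, n_features = get_feature_file_list('D:/capstone data/imgFeatures/plant/*.*')
if ok:
    print(f'{n_features} feature files found')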