Example #1
def save_model_metadata(model, current_date_time):
    create_directory('models/{}/models_{}/metadata'.format(
        object_type, current_date_time),
                     format=False)
    metadata = dict()

    metadata['project'] = dict()
    metadata['project']['type'] = os.getenv('PROJECT_TYPE').lower()
    metadata['project']['object_type'] = object_type
    metadata['project']['date_time'] = current_date_time

    metadata['models'] = dict()

    model_files = get_subfiles('models/{}/models_{}'.format(
        object_type, current_date_time))

    for model_name in model_files:
        epoch = int(model_name.split('.')[1].split('_')[1].split('-')[0])
        metadata['models'][str(epoch)] = model_name

    # save model config in json format
    model_config_path = save_model_config(model, current_date_time)

    metadata['config'] = dict()
    metadata['config']['model'] = model_config_path

    metadata['config']['data_preparation'] = dict()
    metadata['config']['data_preparation']['source_data'] = os.getenv(
        'SATELLITE_IMAGE_PATH')
    metadata['config']['data_preparation']['tilesize'] = int(
        os.getenv('TILESIZE'))
    metadata['config']['data_preparation']['step'] = int(os.getenv('STEP'))
    metadata['config']['data_preparation']['width'] = int(os.getenv('WIDTH'))
    metadata['config']['data_preparation']['height'] = int(os.getenv('HEIGHT'))
    metadata['config']['data_preparation']['percent_image_with_no_annotations'] = \
        float(os.getenv('PERCENT_IMAGE_WITH_NO_ANNOTATIONS'))
    metadata['config']['data_preparation']['min_annotations_per_image'] = int(
        os.getenv('MIN_ANNOTATIONS_PER_IMAGE'))

    metadata['config']['data_preprocessing'] = dict()
    metadata['config']['data_preprocessing']['percent_valid'] = float(
        os.getenv('PERCENTVALID'))
    metadata['config']['data_preprocessing']['percent_test'] = float(
        os.getenv('PERCENTTEST'))

    metadata['config']['train'] = dict()
    metadata['config']['train']['epochs'] = int(os.getenv('EPOCHS'))
    metadata['config']['train']['train_datasetsize'] = int(
        os.getenv('TRAIN_DATASETSIZE'))
    metadata['config']['train']['valid_datasetsize'] = int(
        os.getenv('VALID_DATASETSIZE'))
    metadata['config']['train']['batchsize'] = int(os.getenv('BATCHSIZE'))

    with open(
            'models/{}/models_{}/metadata/metadata_{}.json'.format(
                object_type, current_date_time, current_date_time), 'w') as f:
        json.dump(metadata, f)
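The epoch parser above assumes checkpoint filenames that embed the epoch number between the first '.' and the first '-'. A minimal sketch of that assumption (the filename below is hypothetical):

# Hypothetical Keras-style checkpoint name matching the parser in save_model_metadata
model_name = 'model.epoch_12-0.45.hdf5'
epoch = int(model_name.split('.')[1].split('_')[1].split('-')[0])
assert epoch == 12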
Example #2
def clone_kw_repo(root, question, attrib_value, tag):
    """ This function clones the keyword repository if the user wants
    individual drivers of that repository. Otherwise, it just sets the
    attribute "all_drivers" of that repository tag as 'yes'

    :Arguments:

    1. root (xml.etree.ElementTree.Element) = parent node of the current tag
    from data.xml
    2. question (xml.etree.ElementTree.Element) = question tag nested under
    the current tag from data.xml
    3. attrib_value (str) = url of the repository in question
    4. tag (xml.etree.ElementTree.Element) = current tag to which the
    repository tags would be appended.

    :Returns:

    1. attrib_value (str) = valid url

    """
    aff_pattern = re.compile("^(|y|yes)$", re.IGNORECASE)
    neg_pattern = re.compile("^(n|no)$", re.IGNORECASE)
    if not check_url_is_a_valid_repo(attrib_value):
        attrib_value = raw_input("Please enter a valid url: ")
        attrib_value = clone_kw_repo(root, question, attrib_value, tag)
    else:
        if root.tag == "drivers":
            name = get_repository_name(attrib_value)
            answer = raw_input("Do you want to clone all the drivers? "
                               "(yes[Enter]/no): ")
            answer = transform_response(answer)
            tag.set("all_drivers", answer)
            if neg_pattern.match(answer):
                current_dir = os.path.dirname(os.path.realpath(__file__))
                path = os.path.join(current_dir, "temp")
                create_dir(path)
                os.chdir(path)
                subprocess.call(["git", "clone", attrib_value])
                os.chdir(current_dir)
                temp, _ = get_subfiles(
                    os.path.join(path, name, 'ProductDrivers'))
                drivers = get_driver_list(temp)

                for i in range(1, len(drivers), 2):
                    print drivers[i - 1] + ". " + drivers[i]

                driver_numbers = get_corresponding_numbers()
                add_drivers_to_tags(tag, drivers, driver_numbers)
                shutil.rmtree(path, onerror=delete_read_only)
            elif not aff_pattern.match(answer):
                print "The command was not recognized. Please answer yes or no."
                attrib_value = clone_kw_repo(root, question, attrib_value, tag)
    return attrib_value
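Note that the affirmative pattern treats a bare Enter as a yes. A quick illustration:

import re

aff_pattern = re.compile("^(|y|yes)$", re.IGNORECASE)
assert aff_pattern.match("")      # pressing Enter counts as an affirmative
assert aff_pattern.match("Yes")   # case-insensitive
assert not aff_pattern.match("no")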
Example #3
def test_generator(test_path,
                   target_size=(256, 256),
                   flag_multi_class=False,
                   as_gray=False):
    test_images_filename = get_subfiles(test_path)
    for i in range(len(test_images_filename)):
        img = io.imread(os.path.join(test_path, test_images_filename[i]))
        img = img[:, :, :3]
        if as_gray:
            img = rgb2gray(img)
        img = trans.resize(img, target_size)
        # img = np.reshape(img, img.shape+(1,)) if (not flag_multi_class) else img
        img = np.reshape(img, (1, ) + img.shape)
        yield img
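A minimal usage sketch, assuming the unet and helpers from the other examples (paths are hypothetical):

# Each yield is a single (1, 256, 256, 3) batch, so steps equals the file count.
model = unet(pretrained_weights='model.hdf5')   # hypothetical weights file
testgen = test_generator('data/test')           # hypothetical image directory
steps = len(get_subfiles('data/test'))
results = model.predict_generator(testgen, steps, verbose=1)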
Example #4
def test():
    create_directories()

    # prepare test data
    if os.getenv('PREPARE_TEST_DATA_FLAG').upper() == 'TRUE':
        prepare_data()

    # get predictions
    if os.getenv('GET_PREDICTIONS_FLAG').upper() == 'TRUE':
        get_predictions()

    # get geo-coordinates
    if os.getenv('GEOREFERENCE_FLAG').upper() == 'TRUE':
        files = get_subfiles(prediction_images_path)
        all_geo_coordinates = Pool().map(get_individual_results, files)
        all_geo_coordinates = [
            point for object_geo_coordinates in all_geo_coordinates
            for point in object_geo_coordinates
        ]
        MapIntegration().combine_all_tower_geocoordinates(
            os.path.join(save_results_root_path, save_csv_files_path))
        print('\nSaved combine_all_tower_geocoordinates.csv successfully ...')

        # save all object locations as shapefile
        print('\nPreparing Object Shapefile ...')
        save_object_shapefile_path = os.path.join(save_results_root_path,
                                                  save_shapefiles_path)
        map_integration_object = MapIntegration()
        map_integration_object.save_object_locations_shapefile(
            all_geo_coordinates, save_object_shapefile_path)
        print('\nObject Shapefile saved successfully ...')

    # Get ndvi analysis for each detected object
    if os.getenv('ANALYZE_NDVI').upper() == 'TRUE':
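        # note: save_object_shapefile_path is set in the GEOREFERENCE_FLAG branch
        # above, so that step must have run before the NDVI analysis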
        ndvi_analysis_object = NdviAnalysis(satellite_image_ms_path,
                                            save_object_shapefile_path,
                                            int(os.getenv('NDVI_RADIUS')),
                                            int(os.getenv('SIZE_X')),
                                            int(os.getenv('SIZE_Y')))
        save_ndvi_shapefile_path = os.path.join(save_results_root_path,
                                                save_shapefiles_path)
        ndvi_analysis_object.encroachment_analysis(save_ndvi_shapefile_path)
Example #5
def get_predictions():
    create_directory(os.getenv('PREDICTION_IMAGES_PATH'))

    # get all test files
    test_filenames = get_subfiles(os.getenv('TEST_IMAGES_PATH'))
    # test_filenames = test_filenames[:1000]

    # get test data generator
    testgen = test_generator(os.getenv('TEST_IMAGES_PATH'))
    test_batch_size = len(test_filenames)

    # start testing
    model = get_model()
    print('\nStarting testing ...')
    print('Using model - {}'.format(weight_file_path))
    results = model.predict_generator(testgen, test_batch_size, verbose=1)
    print('DONE !')

    # save predictions
    save_predictions(results, test_filenames)
Example #6
def prepare_data():
    test_files = get_subfiles(test_image_path)
    # prepare test data
    for test_file in test_files:
        src_file = gdal.Open(os.path.join(test_image_path, test_file),
                             gdal.GA_ReadOnly)
        width = src_file.RasterXSize
        height = src_file.RasterYSize

        counter = 1
        for i in range(0, width, tilesize):
            for j in range(0, height, tilesize):
                if (i + tilesize) < width and (j + tilesize) < height:
                    sub_region = src_file.ReadAsArray(i, j, tilesize, tilesize)
                elif (i + tilesize) < width and (j + tilesize) >= height:
                    sub_region = src_file.ReadAsArray(i, j, tilesize,
                                                      height - j - 1)
                elif (i + tilesize) >= width and (j + tilesize) < height:
                    sub_region = src_file.ReadAsArray(i, j, width - i - 1,
                                                      tilesize)
                else:
                    sub_region = src_file.ReadAsArray(i, j, width - i - 1,
                                                      height - j - 1)

                filename = test_file.split('.')[0] + '_' + str(i) + '_' + str(
                    j)

                # crop the current tile out of the source GeoTIFF with gdal_translate
                print('\ncounter: {}'.format(counter))
                print('Filename: {}'.format(filename))
                gdaltranString = "gdal_translate -of GTIFF -srcwin " + str(i) + ", " + str(j) + ", " + str(tilesize) \
                                 + ", " + str(tilesize) + " " + os.path.join(test_image_path, test_file) + " " + \
                                 cropped_tiff_images_path + "/" + filename + ".tif"
                os.system(gdaltranString)

        # convert cropped tiff images to png and save to master directory
        print('\nConverting tiff to png ...')
        convert_all_to_png(cropped_tiff_images_path, cropped_png_images_path)
        print('DONE !')
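For a concrete sense of the windowing above, this illustrative sketch (dimensions are hypothetical) enumerates the srcwin offsets together with the clipped sizes of the edge tiles:

width, height, tilesize = 1000, 800, 256   # hypothetical raster dimensions
for i in range(0, width, tilesize):
    for j in range(0, height, tilesize):
        w = tilesize if i + tilesize < width else width - i - 1
        h = tilesize if j + tilesize < height else height - j - 1
        print(i, j, w, h)   # edge tiles are clipped to the raster bounds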
Example #7
    def combine_all_tower_geocoordinates(self, path):
        with open(os.path.join(os.getenv('SAVE_RESULTS_ROOT_PATH'),
                               'towers_locations_combined.csv'),
                  'w', newline='') as f:

            files = get_subfiles(path)
            csv_writer = csv.writer(f, delimiter=',')
            csv_writer.writerow(['index', 'X_img', 'Y_img', 'latitude', 'longitude'])
            c = 0
            for filename in files:
                filepath = os.path.join(path, filename)
                print('file: {}'.format(filename))
                dx = int(filename.split('.')[0].split('_')[1])
                dy = int(filename.split('.')[0].split('_')[2])
                with open(filepath, 'r') as fd:
                    csv_reader = csv.reader(fd, delimiter=',')
                    for row in csv_reader:
                        if row[0].lower().strip() == 'index':
                            continue
                        csv_writer.writerow([row[0], str(int(row[1]) + dx),
                                             str(int(row[2]) + dy), row[3], row[4]])
                c += 1

            print('\nN files: {}'.format(c))
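The dx/dy parsing above implies per-tile CSV names of the form <image>_<x-offset>_<y-offset>.csv. A hypothetical example:

# Hypothetical tile CSV name; the offsets locate the tile in the full image
filename = 'towers_1024_2048.csv'
dx = int(filename.split('.')[0].split('_')[1])   # 1024
dy = int(filename.split('.')[0].split('_')[2])   # 2048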
Example #8
def sort_by_string(current_path, string, base_dir):
    # Sort all files in the corresponding directory

    subdirs = utils.get_subdirs(current_path)
    subfiles = utils.get_subfiles(current_path)
    subfiles = [subfile.lower() for subfile in subfiles]
    subdirs = [subdir.lower() for subdir in subdirs]

    # If the target directory doesn't exist under the root, create it
    if any(string in subfile for subfile in subfiles) and \
            not any(string in subdir for subdir in base_dir):
        os.makedirs(dir_path + "\\" + string, exist_ok=True)
        subdirs.append(string)
        base_dir.append(string)

    # Move each file to the corresponding directory depending on a key in its name
    while any(string in subfile for subfile in subfiles):
        match = [subfile for subfile in subfiles if string in subfile][0]
        os.replace(current_path + "\\" + match,
                   dir_path + "\\" + string + "\\" + match)
        subfiles.remove(match)

    # Recurse into subdirectories; the recursion stops once a directory
    # has no subdirectories left
    for subdir in subdirs:
        sort_by_string(current_path + "\\" + subdir, string, base_dir)
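Example #13 below shows this call in context; a standalone invocation might look like this (the key 'a000' is borrowed from the region list commented out in that example):

# Sort every file whose name contains 'a000' into dir_path\a000
sort_by_string(dir_path, 'a000', utils.get_subdirs(dir_path))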
Example #9
def prepare_master_data(label_ids=None):
    # create directories
    print(os.path.join(dataset_path, 'master', 'images'))
    create_directory(os.path.join(dataset_path, 'master', 'images'))
    create_directory(os.path.join(dataset_path, 'master', 'masks'))

    if label_ids is None:
        label_ids = []

    label2images = read_label2images(source_metadata_path)
    image_ids = get_image_ids(label_ids, label2images)

    all_image_files = get_subfiles(os.path.join(source_data_path, 'images'))

    for file in all_image_files:
        base_filename = file.split('.')[0]
        if base_filename in image_ids:
            copyfile(
                os.path.join(source_data_path, 'images',
                             base_filename + '.jpg'),
                os.path.join(dataset_path, 'master', 'images',
                             base_filename + '.jpg'))
            label_image = Image.open(
                os.path.join(source_data_path, 'labels',
                             base_filename + '.png'))
            label_array = np.array(label_image)
            mask = np.zeros(label_array.shape)
            # pixels carrying the i-th requested label id get value i + 1
            for i, label_id in enumerate(label_ids):
                mask[label_array == label_id] = i + 1
            save_mask(mask, base_filename)

    print('\nMaster data prepared successfully ...\n')
Example #10
def get_predictions():
    create_directory(os.getenv('PREDICTION_IMAGES_PATH'))

    test_filenames = get_subfiles(os.getenv('PNG_IMAGES_PATH'))

    weight_file_path = os.getenv('MODELPATH')

    model = unet(pretrained_weights=weight_file_path)

    testgen = test_generator(os.getenv('PNG_IMAGES_PATH'))

    test_batch_size = len(test_filenames)

    print('\nStarting testing ...\n')
    print('Using model - {}\n'.format(weight_file_path))
    results = model.predict_generator(testgen, test_batch_size, verbose=1)
    print('DONE !')

    # save predictions - images and masks
    print('\nSaving test results - masks')
    save_result(os.getenv('PREDICTION_IMAGES_PATH'),
                results,
                test_filenames,
                flag_multi_class=False,
                num_class=2)
    print('DONE !')

    if os.getenv('SAVE_COMBINED') == 'TRUE':
        create_directory(os.getenv('COMBINED_IMAGES_PATH'))
        imagesdir = os.getenv('PNG_IMAGES_PATH')
        masksdir = os.getenv('PREDICTION_IMAGES_PATH')
        suffix = '_predict'

        print('\nSaving test results - images and masks combined')
        save_images_and_masks(imagesdir, masksdir, suffix, save=True)
        print('DONE !')
Example #11
def save_images_and_masks(imagesdir,
                          masksdir,
                          suffix='_mask',
                          show=False,
                          save=False):
    all_images = get_subfiles(imagesdir)
    counter = 1
    for imagefilename in all_images:
        filename = imagefilename.split('.')[0]
        maskfilename = str(filename) + suffix + '.png'

        img = cv2.imread(imagesdir + '/' + imagefilename)
        mask = cv2.imread(masksdir + '/' + maskfilename)

        if os.getenv('COLORMAP_FLAG').upper() == 'TRUE':
            mask_modified = apply_color_map(mask)
        else:
            mask_modified = get_binary_image(mask)

        combined_img = np.concatenate((img, mask_modified), axis=1)

        if show:
            cv2.imshow("annotations - Filename: {}".format(filename),
                       combined_img)
            cv2.waitKey()
            cv2.destroyAllWindows()

        if save:
            print('\ncounter: {}'.format(counter))
            print('Filepath: {}'.format(
                os.getenv('COMBINED_IMAGES_PATH') +
                '/{}'.format(str(filename) + '.png')))
            cv2.imwrite(
                os.getenv('COMBINED_IMAGES_PATH') +
                '/{}'.format(str(filename) + '.png'), combined_img)
            counter += 1
Example #12
def load_nwb_from_data(dir_path):

    # Get all files and directories present in the path
    files = utils.get_subfiles(dir_path)
    dirs = utils.get_subdirs(dir_path)
    files = files + dirs

    # Open the YAML file with keywords, extensions and keywords to exclude, if it
    # exists, then dump all of its data in a dict
    default_files = [subfile for subfile in files if "default" in subfile]
    if default_files and os.path.isfile(dir_path + "\\" + default_files[0]):
        with open(dir_path + "\\" + default_files[0], 'r') as stream:
            data = yaml.safe_load(stream)
        # Remove the file from the list of files and directories so it isn't found twice
        files.remove(default_files[0])
    else:
        data = None

    home_data = dict()
    # Look for another YAML file containing the keywords, extensions and keywords to exclude
    for file in files:
        if "yaml" not in file:
            continue
        # p is a placeholder until we know every yaml file name
        if "subject" not in file and "ophys" not in file and "data" not in file and "p" not in file:
            with open(dir_path + "\\" + file, 'r') as stream:
                home_data = yaml.safe_load(stream)

    # If two files are provided, the one given by the user takes priority
    if data is not None:
        difference = set(data.keys()) - set(home_data.keys())
        for i in list(difference):
            home_data[i] = data[i]
    # First we create the nwb file because it will be needed for everything
    converttonwb = home_data.pop("ConvertToNWB")

    filtered_list = []
    for i in converttonwb:
        # If no extension is provided, we are looking for a directory, so we filter
        # the list of files and directories to keep only directories
        if not converttonwb[i].get("extension"):
            filtered_list = [file for file in files if "." not in file]
        # Filter the file list depending on the extension provided in the YAML file
        else:
            for extension in converttonwb[i].get("extension"):
                filtered_list.extend([file for file in files if extension in file])
            # print("Filter result : " + str(filtered_list) + " by extension : " + str(converttonwb[i].get("extension")))
        # Conditional loop to remove all files or directories not containing the keywords
        # or containing excluded keywords
        counter = 0
        while counter < len(filtered_list):
            delete = False
            for keyword in converttonwb[i].get("keyword"):
                if keyword not in filtered_list[counter]:
                    # print("Keyword not found in : " + str(filtered_list))
                    del filtered_list[counter]
                    # print("New list : " + str(filtered_list))
                    delete = True
            if not delete:
                for keyword_to_exclude in converttonwb[i].get("keyword_to_exclude"):
                    if keyword_to_exclude in filtered_list[counter]:
                        # print("Excluded keyword found in : " + str(filtered_list))
                        del filtered_list[counter]
                        # print("New list : " + str(filtered_list))
                        delete = True
            if not delete:
                counter += 1
        print("Files to pass for " + i + ": " + str(filtered_list))
        # If files were found respecting every element, add the whole path to pass them as arguments
        yaml_path = os.path.join(dir_path, filtered_list[0])

    nwb_file = test_cicada_test_paul.create_nwb_file(yaml_path)

    order_list = []
    if home_data.get("order"):
        order_list = home_data.pop("order")

    while order_list:
        next_class = order_list.pop(0)
        # Get classname then instantiate it
        classname = getattr(test_cicada_test_paul, next_class)
        converter = classname(nwb_file)
        # Initialize a dict to contain the arguments to call convert
        arg_dict = {}
        print("Class name : " + str(next_class))
        # Loop through all arguments of the convert of the corresponding class
        for j in home_data[next_class]:
            filtered_list = []
            # If a value is found, the argument is not a file but a string/int/etc.
            if home_data[next_class][j].get("value") and not home_data[next_class][j].get("extension") and \
                    (not home_data[next_class][j].get("keyword") or not home_data[next_class][j].get("keyword_to_exclude")):
                print(home_data[next_class][j].get("value")[0])
                arg_dict[j] = home_data[next_class][j].get("value")[0]
            else:
                # If no extension is provided, we are looking for a directory, so we
                # filter the list of files and directories to keep only directories
                if not home_data[next_class][j].get("extension"):
                    filtered_list = [file for file in files if "." not in file]
                # Filter the file list depending on the extension provided in the YAML file
                else:
                    for extension in home_data[next_class][j].get("extension"):
                        filtered_list.extend([file for file in files if extension in file])
                    # print("Filter result : " + str(filtered_list) + " by extension : " +
                    # str(home_data[i][j].get("extension")))

                # Conditional loop to remove all files or directories not containing the keywords
                # or containing excluded keywords
                counter = 0
                while counter < len(filtered_list):
                    delete = False
                    for keyword in home_data[next_class][j].get("keyword"):
                        if keyword not in filtered_list[counter]:
                            # print("Keyword not found in : " + str(filtered_list))
                            del filtered_list[counter]
                            # print("New list : " + str(filtered_list))
                            delete = True
                    if not delete:
                        for keyword_to_exclude in home_data[next_class][j].get("keyword_to_exclude"):
                            if keyword_to_exclude in filtered_list[counter]:
                                # print("Excluded keyword found in : " + str(filtered_list))
                                del filtered_list[counter]
                                # print("New list : " + str(filtered_list))
                                delete = True
                    if not delete:
                        counter += 1
                print("Files to pass for " + j + ": " + str(filtered_list))
                # If files were found respecting every element, add the whole path to pass them as arguments
                if filtered_list:
                    arg_dict[j] = os.path.join(dir_path, filtered_list[0])
                    if "mat" in home_data[next_class][j].get("extension") and home_data[next_class][j].get("value"):
                        arg_dict[j] = [arg_dict[j]] + list(home_data[next_class][j].get("value"))

                # If no file found, put the argument at None
                else:
                    arg_dict[j] = None
        # print("Arguments to pass : "******"Class name : " + str(i))
        # Loop through all arguments of the convert of the corresponding class
        for j in home_data[i]:
            filtered_list = []
            # If a value is found, the argument is not a file but a string/int/etc.
            if home_data[i][j].get("value") and not home_data[i][j].get("extension") and \
                    (not home_data[i][j].get("keyword") or not home_data[i][j].get("keyword_to_exclude")):
                print(home_data[i][j].get("value")[0])
                arg_dict[j] = home_data[i][j].get("value")[0]
            else:
                # If no extension is provided, we are looking for a directory, so we
                # filter the list of files and directories to keep only directories
                if not home_data[i][j].get("extension"):
                    filtered_list = [file for file in files if "." not in file]
                # Filter the file list depending on the extension provided in the YAML file
                else:
                    for extension in home_data[i][j].get("extension"):
                        filtered_list.extend([file for file in files if extension in file])
                    # print("Filter result : " + str(filtered_list) + " by extension : " +
                          # str(home_data[i][j].get("extension")))
                # Conditional loop to remove all files or directories not containing the keywords
                # or containing excluded keywords
                counter = 0
                while counter < len(filtered_list):
                    delete = False
                    for keyword in home_data[i][j].get("keyword"):
                        if keyword not in filtered_list[counter]:
                            # print("Keyword not found in : " + str(filtered_list))
                            del filtered_list[counter]
                            # print("New list : " + str(filtered_list))
                            delete = True
                    if not delete:
                        for keyword_to_exclude in home_data[i][j].get("keyword_to_exclude"):
                            if keyword_to_exclude in filtered_list[counter]:
                                # print("Excluded keyword found in : " + str(filtered_list))
                                del filtered_list[counter]
                                # print("New list : " + str(filtered_list))
                                delete = True
                    if not delete:
                        counter += 1
                print("Files to pass for " + j + ": " + str(filtered_list))
                # If files were found respecting every element, add the whole path to pass them as arguments
                if filtered_list:
                    arg_dict[j] = os.path.join(dir_path, filtered_list[0])
                    if "mat" in home_data[i][j].get("extension") and home_data[i][j].get("value"):
                        arg_dict[j] = [arg_dict[j]] + list(home_data[i][j].get("value"))

                # If no file found, put the argument at None
                else:
                    arg_dict[j] = None

        #print("Arguments to pass : "******".nwb"
    with test_cicada_test_paul.NWBHDF5IO(os.path.join(dir_path, nwb_name), 'w') as io:
        io.write(nwb_file)

    print("NWB file created at : " + str(os.path.join(dir_path, nwb_name)))
Example #13
    create = input(
        "Folder does not exist, do you want to create it ? (yes/no)")
    if create == "yes":
        os.makedirs(sys.argv[1], exist_ok=True)
    else:
        exit()

print("Folder to build", sys.argv[1])
global dir_path
dir_path = sys.argv[1]

folder = os.path.basename(os.path.normpath(dir_path))
head, tail = os.path.split(os.path.normpath(dir_path))

id_dir = utils.get_subdirs(dir_path)
subfiles = utils.get_subfiles(dir_path)

for subfile in subfiles:
    if "data" in subfile:
        with open(dir_path + "\\" + subfile, 'r') as stream:
            data_dict = yaml.safe_load(stream)
            session_id = data_dict.get("session_id") or data_dict.get("identifier")
        sort_by_string(dir_path, session_id, id_dir)

#
# region_list = ["a000", "a001", "a002"]
# region_dir = get_subdirs(dir_path)
Example #14
def train_valid_test_split(master_data_path,
                           train_path,
                           valid_path,
                           test_path,
                           percent_valid=0.2,
                           percent_test=0.2):
    # distribute files from master to train, valid and test
    # (percent_valid and percent_test are percentages, e.g. 20 for 20 %)
    all_data_filenames = get_subfiles(
        os.path.join(master_data_path, 'images'))
    valid_filenames = random.sample(
        all_data_filenames,
        int((percent_valid / 100.0) * len(all_data_filenames)))
    test_filenames = random.sample(
        valid_filenames, int((percent_test / 100.0) * len(valid_filenames)))
    train_filenames = [
        x for x in all_data_filenames if x not in valid_filenames
    ]
    valid_filenames = [x for x in valid_filenames if x not in test_filenames]

    # create directories
    create_directory(train_path)
    create_directory(os.path.join(train_path, 'images'))
    create_directory(os.path.join(train_path, 'masks'))
    create_directory(test_path)
    create_directory(os.path.join(test_path, 'images'))
    create_directory(os.path.join(test_path, 'masks'))
    create_directory(valid_path)
    create_directory(os.path.join(valid_path, 'images'))
    create_directory(os.path.join(valid_path, 'masks'))

    # copy train files
    for file in train_filenames:
        copyfile(os.path.join(os.path.join(master_data_path, 'images'), file),
                 os.path.join(os.path.join(train_path, 'images'), file))
        mask_filename = file.split('.')[0] + '_mask.png'
        copyfile(
            os.path.join(os.path.join(master_data_path, 'masks'),
                         mask_filename),
            os.path.join(os.path.join(train_path, 'masks'), mask_filename))
    print('\nTrain files copied successfully ...')

    # copy validation files
    for file in valid_filenames:
        copyfile(os.path.join(os.path.join(master_data_path, 'images'), file),
                 os.path.join(os.path.join(valid_path, 'images'), file))
        mask_filename = file.split('.')[0] + '_mask.png'
        copyfile(
            os.path.join(os.path.join(master_data_path, 'masks'),
                         mask_filename),
            os.path.join(os.path.join(valid_path, 'masks'), mask_filename))
    print('\nValidation files copied successfully ...')

    # copy test files
    for file in test_filenames:
        copyfile(os.path.join(os.path.join(master_data_path, 'images'), file),
                 os.path.join(os.path.join(test_path, 'images'), file))
        mask_filename = file.split('.')[0] + '_mask.png'
        copyfile(
            os.path.join(os.path.join(master_data_path, 'masks'),
                         mask_filename),
            os.path.join(os.path.join(test_path, 'masks'), mask_filename))
    print('\nTest files copied successfully ...')
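Because the test set is sampled out of the validation sample, the split arithmetic works out as in this illustrative sketch (counts are hypothetical):

n_all = 1000                                # hypothetical master image count
n_valid_pool = int((20 / 100.0) * n_all)    # 200 drawn for validation
n_test = int((20 / 100.0) * n_valid_pool)   # 40 of those become test
n_valid = n_valid_pool - n_test             # 160 stay in validation
n_train = n_all - n_valid_pool              # 800 remain for training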
Example #15
def backup_packages(backup_path):
    """
	Creates `packages` directory and places install list text files there.
	"""
    print_section_header("PACKAGES", Fore.BLUE)
    mkdir_warn_overwrite(backup_path)

    std_package_managers = ["brew", "brew cask", "gem"]

    for mgr in std_package_managers:
        # deal with package managers that have spaces in them.
        print_pkg_mgr_backup(mgr)
        command = "{} list".format(mgr)
        dest = "{}/{}_list.txt".format(backup_path, mgr.replace(" ", "-"))
        run_cmd_write_stdout(command, dest)

    # cargo
    print_pkg_mgr_backup("cargo")
    command = "ls {}".format(home_prefix(".cargo/bin/"))
    dest = "{}/cargo_list.txt".format(backup_path)
    run_cmd_write_stdout(command, dest)

    # pip
    print_pkg_mgr_backup("pip")
    command = "pip list --format=freeze".format(backup_path)
    dest = "{}/pip_list.txt".format(backup_path)
    run_cmd_write_stdout(command, dest)

    # npm
    print_pkg_mgr_backup("npm")
    command = "npm ls --global --parseable=true --depth=0"
    temp_file_path = "{}/npm_temp_list.txt".format(backup_path)
    run_cmd_write_stdout(command, temp_file_path)
    npm_dest_file = "{0}/npm_list.txt".format(backup_path)
    # Parse npm output
    with open(temp_file_path, mode="r+") as temp_file:
        # Skip first line of file
        temp_file.readline()
        with open(npm_dest_file, mode="w+") as dest:
            for line in temp_file:
                dest.write(line.split("/")[-1])

    os.remove(temp_file_path)

    # atom package manager
    print_pkg_mgr_backup("Atom")
    command = "apm list --installed --bare"
    dest = "{}/apm_list.txt".format(backup_path)
    run_cmd_write_stdout(command, dest)

    # sublime text 2 packages
    sublime_2_path = home_prefix(
        "Library/Application Support/Sublime Text 2/Packages/")
    if os.path.isdir(sublime_2_path):
        print_pkg_mgr_backup("Sublime Text 2")
        command = ["ls", sublime_2_path]
        dest = "{}/sublime2_list.txt".format(backup_path)
        run_cmd_write_stdout(command, dest)

    # sublime text 3 packages
    sublime_3_path = home_prefix(
        "Library/Application Support/Sublime Text 3/Installed Packages/")
    if os.path.isdir(sublime_3_path):
        print_pkg_mgr_backup("Sublime Text 3")
        command = ["ls", sublime_3_path]
        dest = "{}/sublime3_list.txt".format(backup_path)
        run_cmd_write_stdout(command, dest)
    else:
        print(sublime_3_path, "IS NOT DIR")

    # macports
    print_pkg_mgr_backup("macports")
    command = "port installed requested"
    dest = "{}/macports_list.txt".format(backup_path)
    run_cmd_write_stdout(command, dest)

    # system installs
    print_pkg_mgr_backup("macOS Applications")
    command = "ls /Applications/"
    dest = "{}/system_apps_list.txt".format(backup_path)
    run_cmd_write_stdout(command, dest)

    # Clean up empty package list files
    print(Fore.BLUE + "Cleaning up empty package lists..." + Style.RESET_ALL)
    for file in get_subfiles(backup_path):
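        # assumes get_subfiles returns full paths here; otherwise join with backup_path first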
        if os.path.getsize(file) == 0:
            os.remove(file)