Example #1
 def load_pretrained_model(self, model):
     model_name = get_model_name(model)
     pretrained_filename = self.get_last_checkpoints_filename(model_name)
     if pretrained_filename is not None and os.path.isfile(
             pretrained_filename):
         info(
             f"Found pretrained model at {pretrained_filename}, loading...")
         model = type(model).load_from_checkpoint(pretrained_filename)
     return model
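get_last_checkpoints_filename is a project helper that isn't shown. A minimal sketch of what it plausibly does, relying on glob and os, and assuming checkpoints are written as {model_name}*.ckpt under a self.checkpoints_dir directory (both the pattern and the attribute are assumptions):

 def get_last_checkpoints_filename(self, model_name):
     # Hypothetical sketch: return the newest checkpoint matching this
     # model name, or None when nothing has been saved yet.
     pattern = f"{self.checkpoints_dir}/{model_name}*.ckpt"
     candidates = glob.glob(pattern)
     if not candidates:
         return None
     return max(candidates, key=os.path.getmtime)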
Example #2
 def __init__(self, images_dir):
     super(ImageClassificationDataset, self).__init__()
     self.images_dir = images_dir
     self.images = self.get_images_list()
     info(f"images count = {len(self.images)}")
     # Precomputed per-channel mean/std used for input normalization.
     self.means = [0.13499917, 0.13499917, 0.13499917]
     self.stdevs = [0.29748289, 0.29748289, 0.29748289]
     # self.image_channels is assumed to be set elsewhere in the class.
     if self.image_channels == 1:
         self.means = [0.13500238, 0.13500238, 0.13500238]
         self.stdevs = [0.29748997, 0.29748997, 0.29748997]
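These statistics are typically consumed by a normalization transform. A usage sketch with torchvision (the transform pipeline is an assumption, not shown in the example):

from torchvision import transforms

dataset = ImageClassificationDataset(images_dir)
transform = transforms.Compose([
    transforms.ToTensor(),  # HWC [0, 255] -> CHW float in [0, 1]
    transforms.Normalize(mean=dataset.means, std=dataset.stdevs),
])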
Example #3
 def move_yolo_txt_files(self, yolo_txt_files, train_or_valid):
     target_dir = f"{self.work_dir}/{train_or_valid}/labels"
     confirm_dirs(target_dir)
     for txt_file in yolo_txt_files:
         txt_name = os.path.basename(txt_file)
         target_file = f"{target_dir}/{txt_name}"
         if os.path.isfile(target_file):
             continue
         info(f"{txt_file} -> {target_file}")
         shutil.move(txt_file, target_file)
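confirm_dirs is a small project helper that isn't shown; it presumably just creates the target directory tree on demand, roughly:

import os

def confirm_dirs(path):
    # Create the directory (and any missing parents); no-op if it exists.
    os.makedirs(path, exist_ok=True)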
Example #4
 def move_image_files(self, yolo_txt_files, train_or_valid):
     target_dir = f"{self.work_dir}/{train_or_valid}/images"
     confirm_dirs(target_dir)
     for txt_file in yolo_txt_files:
         txt_name = os.path.splitext(os.path.basename(txt_file))[0]  # strip only the .txt extension
         source_file = f"{self.work_dir}/images/{txt_name}.jpg"
         target_file = f"{target_dir}/{txt_name}.jpg"
         if os.path.isfile(target_file):
             continue
         info(f"{source_file} -> {target_file}")
         shutil.move(source_file, target_file)
Example #5
 def load_pretrained_model(self, save_name):
     # Check whether a pretrained model exists. If yes, load it and skip training.
     pretrained_filename = self.get_pretrained_filename(save_name)
     if os.path.isfile(pretrained_filename):
         info(f"Found pretrained model at {pretrained_filename}, loading...")
         model = LitModelTrainer.load_from_checkpoint(pretrained_filename)
     else:
         # pl.seed_everything(42)  # To be reproducible
         info(f"model name = {save_name}")
         model = self
     return model
Example #6
def update_anchors_txt_file(darknet_anchors_txt_path, cfg_file_path):
    # anchors_txt_path = f"{work_dir}/../darknet/anchors.txt"
    with open(darknet_anchors_txt_path, 'r') as f:
        anchors_result = f.readline()
        info(f"{anchors_result}")

    regex = re.compile(r'anchors = (.+)')
    match = regex.search(anchors_result)
    if not match:
        raise Exception(
            f"no 'anchors = ...' line found in {darknet_anchors_txt_path}")
    anchors_txt = match.group(1)
    # Rewrite the anchors on lines 219 and 268 of the cfg file; note the
    # closing '/' that sed substitution expressions require.
    sed(cfg_file_path, f"219s/anchors = .+/anchors = {anchors_txt}/")
    sed(cfg_file_path, f"268s/anchors = .+/anchors = {anchors_txt}/")
Example #7
 def split_train_valid_data(self):
     self.restore_train_data()
     yolo_txt_files = glob.glob(f"{self.yolo_txt_dir}/*.txt")
     info(f"yolo_txt_files = {len(yolo_txt_files)}")
     train_yolo_txt_files, valid_yolo_txt_files = shuffle_split(
         yolo_txt_files)
     info(
         f"train_size={len(train_yolo_txt_files)} valid_size={len(valid_yolo_txt_files)}"
     )
     self.clean_yolo_txt_files()
     self.move_image_files(train_yolo_txt_files, "train")
     self.move_yolo_txt_files(train_yolo_txt_files, "train")
     self.move_image_files(valid_yolo_txt_files, "valid")
     self.move_yolo_txt_files(valid_yolo_txt_files, "valid")
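shuffle_split isn't shown in these examples. One plausible minimal implementation, assuming an 80/20 train/valid split (the ratio is an assumption):

import random

def shuffle_split(files, train_ratio=0.8):
    # Shuffle a copy of the list, then split it into (train, valid).
    files = list(files)
    random.shuffle(files)
    split = int(len(files) * train_ratio)
    return files[:split], files[split:]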
Example #8
    def get_train_validation_loaders(self):
        train_dataset = self
        dataset_size = len(train_dataset)
        indices = list(range(dataset_size))
        split = int(np.floor(self.validation_split * dataset_size))
        if self.shuffle_dataset:
            np.random.seed(self.random_seed)
            np.random.shuffle(indices)
        train_indices, val_indices = indices[split:], indices[:split]

        info(f"train size={len(train_indices)} val size={len(val_indices)}")

        train_loader = self.__get_data_loader(train_indices)
        validation_loader = self.__get_data_loader(val_indices)
        return train_loader, validation_loader
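__get_data_loader is a private helper of the same class. A plausible sketch using PyTorch's SubsetRandomSampler, the usual way to serve a fixed index subset (self.batch_size is an assumed attribute):

from torch.utils.data import DataLoader, SubsetRandomSampler

 def __get_data_loader(self, indices):
     # Serve only the given indices from this dataset, in random order.
     sampler = SubsetRandomSampler(indices)
     return DataLoader(self, batch_size=self.batch_size, sampler=sampler)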
Example #9
def main():
    args = parse_args()
    images_dir = args.images
    dataset_dir = args.dataset_dir

    classes_dict = ClassesDict()
    train_images_dir = f"{dataset_dir}/train2021"
    val_images_dir = f"{dataset_dir}/val2021"

    info(f"images_dir = {images_dir}")
    images = glob.glob(f"{images_dir}/*.jpg")
    train_images, val_images = shuffle_split(images)

    info(f"train_images = {len(train_images)}")
    info(f"val_images = {len(val_images)}")

    restore_images_folder(train_images_dir, images_dir)
    restore_images_folder(val_images_dir, images_dir)
    move_image_files_to_dataset_folder(train_images, train_images_dir)
    move_image_files_to_dataset_folder(val_images, val_images_dir)

    generate_annotations_files(classes_dict, dataset_dir, train_images_dir,
                               "train2021")
    generate_annotations_files(classes_dict, dataset_dir, val_images_dir,
                               "val2021")
Example #10
 def compute_mean_std(self):
     """
     Compute the per-channel mean and standard deviation of the dataset.
     :return: (means, stdevs)
     """
     info("compute mean std...")
     dataset = self.__get_all_images()
     num_imgs = len(self.images)
     # self.means and self.stdevs are assumed to start out as zero
     # accumulators; each image contributes its per-channel statistics.
     for data in dataset:
         data = np.asarray(data) / 255.0
         mean = np.mean(data, axis=(0, 1))
         std = np.std(data, axis=(0, 1))
         self.means += mean
         self.stdevs += std
         # img = data[0]
         # for i in range(3):
         #     # per-channel mean and standard deviation
         #     self.means[i] += img[i, :, :].mean()
         #     self.stdevs[i] += img[i, :, :].std()
     # Average the per-image statistics over the whole dataset.
     self.means = np.asarray(self.means) / num_imgs
     self.stdevs = np.asarray(self.stdevs) / num_imgs
     return self.means, self.stdevs
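A usage sketch, assuming compute_mean_std lives on the dataset class from Example #2 (the path is hypothetical):

dataset = ImageClassificationDataset("d:/demo/images")
means, stdevs = dataset.compute_mean_std()
info(f"means = {means} stdevs = {stdevs}")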
Example #11
def main():
    ap = argparse.ArgumentParser()
    # ap.add_argument('--toggle', '--no-toggle', dest='toggle', action=NegateAction, nargs=0)
    ap.add_argument('--convert', action='store_true')
    ap.add_argument('--update_anchors', action='store_true')
    args = ap.parse_args()

    work_dir = "d:/demo/training_yolo"
    cfg_dir = f"{work_dir}/cfg"
    cfg_file_path = f"{cfg_dir}/yolov4-tiny-obj.cfg"
    config = {
        "work_dir": work_dir,
        "cfg_dir": cfg_dir,
        "cfg_file_path": cfg_file_path,
    }

    if args.update_anchors:
        run_update_anchors(args, config)
        return

    if args.convert:
        run_convert_annotation_xml_to_yolo_data(args, config)
        return

    info("usage: --convert | --update_anchors")
def run_convert_annotation_xml_to_yolo_data(args, config):
    info("convert annotation xml to yolo data")
    p = ConvertAnnotationXmlToYoloHelper()
    p.convert()

def run_update_anchors(args, config):
    info("update anchors")
    update_anchors_txt_file(f"{config['work_dir']}/../darknet/anchors.txt",
                            config['cfg_file_path'])
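A command-line sketch for this entry point (the script name is hypothetical):

python training_yolo.py --convert
python training_yolo.py --update_anchors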