예제 #1
0
def simulation_pipeline(params, batch_size, dataset_name, seed):
    """
    Main loop of the image simulator. Simulates images and saves them
    to the input data folder together with the configuration and
    the corresponding detections.

    Input:
    params       -- global parameters like paths, filenames etc.
    batch_size   -- number of images to simulate
    dataset_name -- name of the dataset to be produced
    seed         -- seed for the RNG (optional; None selects DEFAULT_SEED)

    Returns:
    dataset_path -- path of the folder the dataset was written to
    """
    # BUGFIX: `seed or DEFAULT_SEED` silently discarded a legitimate
    # seed of 0 (falsy); compare against None explicitly instead.
    init_rng(DEFAULT_SEED if seed is None else seed)

    dataset_path = setup_data_dir(params, dataset_name)
    sim_cfg = sim_config()
    detections = {}

    # Serialize config to dataset folder so the run is reproducible
    write_config(
        {
            'global': params,
            'simulator': sim_cfg,
            'others': {
                'seed': seed
            }
        }, os.path.join(dataset_path, DEFAULT_CONFIG_FILENAME))

    # Simulator main loop: one image + detection record per iteration
    for i in range(batch_size):
        # Simulate image
        img, detection = simulate_road_img(sim_cfg['simulator'])

        # Store results
        filename = '{}{}{}'.format(params['img_file_prefix'], i,
                                   params['img_file_suffix'])
        write_img(img, os.path.join(dataset_path, filename))
        add_detection(detections, filename, detection=detection)

    # All per-image detections are written as a single file at the end
    write_detections(
        detections, os.path.join(dataset_path, params['detections_file_name']))

    return dataset_path
예제 #2
0
    def _init_dataset(self, dataset_name, seed, params):
        '''
        Initialize the dataset and store the final image list in self.img_list.

        If the dataset folder is found, the requested size is checked against
        the number of images in the folder: if fewer images are available the
        size is clamped to what exists; if more are available, a reproducible
        random subset of `self.size` images is picked (seeded with `seed`).

        If the dataset folder is not found, the simulator is run and a folder
        containing simulation images and detection files is created with the
        given dataset name.

        Input:
        dataset_name -- name of the dataset folder to load or create
        seed         -- seed for subset selection and/or simulation
        params       -- global simulator parameters (used when simulating)

        Returns self.img_list (list of full image paths).
        '''
        # BUGFIX: the original filter used unanchored regexes such as
        # re.search(".png", name), where '.' matches any character, so
        # names like "snapshot_pngs.txt" were wrongly accepted. Match the
        # real extension at the end of the filename instead.
        img_extensions = ('.png', '.jpeg', '.jpg')

        # Check if the dataset directory exists
        if os.path.isdir(self.dataset_path):
            # Dataset found: collect the full paths of all image files
            img_list = [
                os.path.join(self.dataset_path, item)
                for item in os.listdir(self.dataset_path)
                if item.endswith(img_extensions)
            ]

            if len(img_list) < self.size:
                # Fewer images than requested: clamp size, use them all
                print(
                    'The size you requested is more than the total available images.'
                )
                self.size = len(img_list)
                self.img_list = img_list
            elif len(img_list) > self.size:
                # More images than requested: seeded shuffle then prefix
                # gives a reproducible random subset of exactly self.size
                print(
                    'The size you requested is less than the total available images. The desired number of images randomly will be picked.'
                )
                random.seed(seed)
                random.shuffle(img_list)
                self.img_list = img_list[:self.size]
            else:
                # Exactly the requested number of images
                self.img_list = img_list

        # If dataset directory not found: run the simulator and obtain the
        # image list from the freshly generated simulation dataset
        else:
            self.dataset_path = simulation_pipeline(params, self.size,
                                                    dataset_name, seed)
            self.img_list = [
                os.path.join(self.dataset_path, item)
                for item in os.listdir(self.dataset_path)
                if item.endswith(img_extensions)
            ]

        # If no detections file is present, create a true-negative (dummy)
        # detections file so downstream code always finds one
        if not os.path.isfile(
                os.path.join(self.dataset_path, self.detections_file_name)):
            label_true_negatives(self.dataset_path, self.detections_file_name)

        # CODE FOR CONFIG FILE TO RECORD DATASETS USED
        # Saves the dataset information for writing to config file.
        # NOTE(review): the misspelled keys 'Traing_Dataset_' and
        # 'augmenetation_applied' are kept as-is for config compatibility.
        if self.mode == Mode.TRAIN:
            params = read_config(self.cfg_path)
            params['Network']['total_dataset_number'] += 1
            dataset_key = 'Traing_Dataset_' + str(
                params['Network']['total_dataset_number'])
            # If augmentation is applied, record the augmentation class names
            if self.augmentation:
                augmenetation_applied = [
                    i.__class__.__name__
                    for i in self.augmentation.augmentation_list
                ]
            else:
                augmenetation_applied = None
            dataset_info = {
                'name': dataset_name,
                'path': self.dataset_path,
                'size': self.size,
                'augmenetation_applied': augmenetation_applied,
                'seed': seed
            }
            params[dataset_key] = dataset_info
            write_config(params, params['cfg_path'], sort_keys=True)
        return self.img_list
    def _init_dataset(self, dataset_name, seed, params):
        '''
        Initialize Dataset: get the list of images from the dataset folder.

        If the dataset is found, the size is checked against the number of
        images in the folder; when the requested size exceeds the available
        images, a random selection is made so that the number of images
        equals the user-specified size.

        If the dataset is not found, the simulator is run and a folder with
        simulation images and detection files is created under the given
        dataset name.

        The final image list is stored into self.img_list.
        '''

        # Template steps (implementation intentionally left to be filled in):
        # 1. Check whether the dataset directory exists.
        # 2. If it exists, gather the image file names into a list, then
        #    compare the list length with the user-provided size:
        #      - fewer images than size: tell the user how many images are
        #        available, shrink self.size to that count, and use the full
        #        list unchanged as self.img_list;
        #      - more images than size: tell the user how many images are
        #        available and randomly select exactly `size` of them into
        #        self.img_list (seed the random function with `seed`);
        #      - exactly `size` images: use the list unchanged.
        # 3. If the directory does not exist, run the simulator and build the
        #    image list from the generated dataset.
        # 4. If no detections file is present, create a true-negative one via
        #    label_true_negatives from pipelines.py.

        # DO NOT CHANGE: CODE FOR CONFIG FILE TO RECORD DATASETS USED.
        # Record this dataset's metadata into the training config file.
        if self.mode == Mode.TRAIN:
            params = read_config(self.cfg_path)
            network_cfg = params['Network']
            network_cfg['total_dataset_number'] += 1
            dataset_key = 'Traing_Dataset_' + str(
                network_cfg['total_dataset_number'])
            # Record augmentation class names when augmentation is active
            augmenetation_applied = None
            if self.augmentation:
                augmenetation_applied = []
                for aug in self.augmentation.augmentation_list:
                    augmenetation_applied.append(aug.__class__.__name__)
            params[dataset_key] = {
                'name': dataset_name,
                'path': self.dataset_path,
                'size': self.size,
                'augmenetation_applied': augmenetation_applied,
                'seed': seed
            }
            write_config(params, params['cfg_path'], sort_keys=True)
    def _init_dataset(self, dataset_name, seed, params):
        '''
        Initialize Dataset: get the list of images from the dataset.

        If the dataset is found, the size is checked against the number of
        images in the folder: if fewer images are available the size is
        clamped to what exists; if more are available, a reproducible random
        subset of `self.size` distinct images is picked (seeded with `seed`).

        If the dataset is not found, the simulator is run and a folder
        containing simulation images and detection files is created with the
        given dataset name.

        Final image list is stored into self.img_list.
        '''
        # Check if the dataset directory exists
        if os.path.isdir(self.dataset_path):
            # Collect the file names of all images and store them in a list
            img_list = [
                name for name in os.listdir(self.dataset_path)
                if name.endswith(('.jpg', '.jpeg', '.png'))
            ]
            image_count_in_folder = len(img_list)

            if image_count_in_folder < self.size:
                # Fewer images than requested: inform the user, clamp
                # self.size and use every available image
                print(
                    '{0}\nImages availabe in folder: \t{1}\nGiven Size: \t\t\t{2}\n'
                    'Images used in dataset:  \t{1}\n\n\n'.format(
                        dataset_name, image_count_in_folder, self.size))
                self.size = image_count_in_folder
                self.img_list = img_list

            elif image_count_in_folder > self.size:
                # More images than requested: pick a seeded random subset.
                np.random.seed(seed)
                # BUGFIX: np.random.choice samples WITH replacement by
                # default, so the original could pick the same image several
                # times and yield fewer than `size` distinct images.
                # replace=False guarantees `size` distinct indices.
                idx = np.random.choice(image_count_in_folder, self.size,
                                       replace=False)
                self.img_list = [img_list[i] for i in idx]

                print(
                    '{0}\nImages availabe in folder: \t{1}\nGiven Size: \t\t\t{2} \n'
                    'Images used in dataset:  \t{2}\n\n\n'.format(
                        dataset_name, image_count_in_folder, self.size))

            else:
                # Exactly the requested number of images: use them unchanged
                self.img_list = img_list

        else:
            # Dataset not found: run the simulator (it saves its results in
            # the dataset path), then collect the generated image names
            simulation_pipeline(params, self.size, dataset_name, seed)
            self.img_list = [
                name for name in os.listdir(self.dataset_path)
                if name.endswith(('.jpg', '.jpeg', '.png'))
            ]

        # If no detections file is present, create a true-negative (dummy)
        # detections file using label_true_negatives from pipelines.py
        if not os.path.exists(
                os.path.join(self.dataset_path, self.detections_file_name)):

            label_true_negatives(self.dataset_path, self.detections_file_name)

        # Save the dataset information for writing to config file.
        # NOTE(review): the misspelled keys 'Traing_Dataset_' and
        # 'augmenetation_applied' are kept as-is for config compatibility.
        if self.mode == Mode.TRAIN:
            params = read_config(self.cfg_path)
            params['Network']['total_dataset_number'] += 1
            dataset_key = 'Traing_Dataset_' + str(
                params['Network']['total_dataset_number'])
            # If augmentation is applied, record the augmentation class names
            if self.augmentation:
                augmenetation_applied = [
                    i.__class__.__name__
                    for i in self.augmentation.augmentation_list
                ]
            else:
                augmenetation_applied = None
            dataset_info = {
                'name': dataset_name,
                'path': self.dataset_path,
                'size': self.size,
                'augmenetation_applied': augmenetation_applied,
                'seed': seed
            }
            params[dataset_key] = dataset_info
            write_config(params, params['cfg_path'], sort_keys=True)
예제 #5
0
    def _init_dataset(self, dataset_name, seed, params):
        '''
        Initialize Dataset: get the list of images from the dataset folder.

        If the dataset is found, the size is checked against the number of
        images in the folder: if fewer images are available the size is
        clamped to what exists; if more are available, a seeded random
        subset of `self.size` images is picked.

        If the dataset is not found, the simulator is run and a folder
        containing simulation images and detection files is created with the
        given dataset name.

        Final image list is stored into self.img_list.
        '''
        dir_path = os.path.join(DEFAULT_DATA_PATH, dataset_name)
        random.seed(seed)
        # BUGFIX: the original used a chain of independent `if`s with the
        # final `else` attached to `im_num == self.size`. After the
        # random-subset branch ran, that `else` re-ran the simulator and
        # overwrote self.img_list with the raw directory listing; and when
        # the directory did not exist, no branch ran and self.img_list was
        # never set. The branches are now mutually exclusive and the
        # simulator branch is the `else` of the directory check.
        # Check if the dataset directory exists
        if os.path.isdir(dir_path):
            # Dataset found: collect the file names of all images
            im_list = [
                name for name in os.listdir(dir_path)
                if name.endswith(('.jpg', '.jpeg', '.png'))
            ]
            im_num = len(im_list)
            if im_num < self.size:
                # Fewer images than requested: inform the user, clamp
                # self.size and use every available image
                print("Lesser number of images found! Available: ", im_num)
                self.size = im_num
                self.img_list = im_list
            elif im_num > self.size:
                # More images than requested: pick a seeded random subset
                print("More number of images available! Available: ", im_num)
                self.img_list = random.sample(im_list, self.size)
            else:
                # Exactly the requested number of images
                self.img_list = im_list
        else:
            # Dataset directory not found: run the simulator and obtain the
            # image list from the generated simulation dataset
            simulation_pipeline(params, self.size, dataset_name, seed)
            im_list = os.listdir(dir_path)
            im_list = np.ravel(im_list)
            self.img_list = im_list

        # If no detections file is present, create a true-negative detection
        # file using label_true_negatives from pipelines.py
        detections_path = os.path.join(dir_path, self.detections_file_name)
        if not os.path.isfile(detections_path):
            label_true_negatives(dir_path, self.detections_file_name)

        #DO NOT CHANGE: CODE FOR CONFIG FILE TO RECORD DATASETS USED
        #Save the dataset information for writing to config file
        if self.mode == Mode.TRAIN:
            params = read_config(self.cfg_path)
            params['Network']['total_dataset_number'] += 1
            dataset_key = 'Traing_Dataset_' + str(
                params['Network']['total_dataset_number'])
            #If augmentation is applied, record the augmentation class names
            if self.augmentation:
                augmenetation_applied = [
                    i.__class__.__name__
                    for i in self.augmentation.augmentation_list
                ]
            else:
                augmenetation_applied = None
            dataset_info = {
                'name': dataset_name,
                'path': self.dataset_path,
                'size': self.size,
                'augmenetation_applied': augmenetation_applied,
                'seed': seed
            }
            params[dataset_key] = dataset_info
            write_config(params, params['cfg_path'], sort_keys=True)