Ejemplo n.º 1
0
    def compute_neuron_coverage(self, single=True, combine=False):
        """
        Measure neuron coverage once per registered image transformation and
        plot the results as a bar chart.

        :param single: when True, read single images from .../evaluation/data/images;
            otherwise read image sets from .../evaluation/data/.
        :param combine: when True, append the names of the extra generators to
            the transformation label so combined transformations are reported
            under one key.
        :return: None; coverage per transformation is logged and charted.
        """
        if single:
            images_dir = definitions.ROOT_DIR + "/evaluation/data/images"
        else:
            images_dir = definitions.ROOT_DIR + "/evaluation/data/"

        coverage_by_label = {}
        processors = ImageProcessor.get_transformations()
        for entry in processors:
            tracker = NeuronCoverage(self.model)
            transformation_name, func = entry[0], entry[1]
            label = transformation_name
            for gen in self.get_generators(combine, transformation_name, func,
                                           processors, images_dir):
                # In combine mode, extend the label with each additional
                # generator's name (the base transformation is not repeated).
                if combine and gen[0] != transformation_name:
                    label = label + " " + gen[0]
                tracker.fill_coverage_tracker(gen[1])

            covered_neurons, total_neurons, coverage = tracker.calculate_coverage()
            logger.info(f"Transformation: {label} --- "
                        f"Covered Neurons: {covered_neurons}, "
                        f"Total Neurons: {total_neurons}, "
                        f"Coverage: {coverage}")
            coverage_by_label[label] = coverage

        self.draw_bar_chart(coverage_by_label)
    def collect_data(self, number_of_images: int, mode="manual_mode"):
        """
        Drive the BeamNG simulator and record training pairs
        (front-camera image, steering angle).

        :param number_of_images: how many image/steering samples to record.
        :param mode: simulator launch mode, e.g. "manual_mode" or "ai_mode".
        :return: True when all samples were collected, False when an error
            interrupted collection (partial data may still have been saved).
        """
        self.launch_beam_ng(mode)
        # The old message said "3 seconds" while the code sleeps 5 — keep them
        # in sync.
        logger.info("Start after 5 seconds...")
        time.sleep(5)
        # Log the actual requested count, not the (possibly different)
        # config value.
        logger.info(f"Start collecting {number_of_images} training images")
        exit_normally = True
        try:
            for i in range(number_of_images):
                # image is the training sample and steering is its label
                img = self.bng.poll_sensors(
                    self.vehicle)['front_cam']['colour']
                steering = self.bng.poll_sensors(
                    self.vehicle)['electrics']['steering']
                logger.info(f"Saving data {i + 1} ...")
                self.save_data(img, steering, str(i))
                logger.info("Saved data successfully")
                time.sleep(int(config.get("data_collecting.sleep")))

        except Exception as ex:
            exit_normally = False
            logger.info(f"Error while collecting data {ex}")
        finally:
            # Always shut the simulator down. Returning OUTSIDE the finally
            # block lets non-Exception interrupts (e.g. KeyboardInterrupt,
            # SystemExit) propagate instead of being silently swallowed by a
            # `return` placed inside `finally`.
            self.bng.close()
        return exit_normally
Ejemplo n.º 3
0
def process_args(args):
    """
    Translate parsed console arguments into a (strategy, data_source) pair.

    Exits the process with status 1 when the strategy is unsupported, when
    required input is missing, or when building the data source fails.

    :param args: namespace with chosen_strategy, input_mode, numbers_lists
        and file_path attributes.
    :return: tuple (strategy, data_source).
    """
    if args.chosen_strategy == 1:
        strategy = StrategyOne()
    elif args.chosen_strategy == 2:
        strategy = StrategyTwo()
    else:
        strategy = None

    if strategy is None:
        logger.info("EXIT - Chosen strategy is not supported")
        exit(1)

    data_source = None
    try:
        if args.input_mode == "cmd":
            if not args.numbers_lists:
                logger.info(
                    "EXIT - Please specify input data with the argument --l for the cmd input mode"
                )
                exit(1)
            data_source = CmdInput(args.numbers_lists)
        elif args.input_mode == "file":
            data_source = FileInput(args.file_path)

        if data_source is None:
            logger.error("Exit - Cannot get input data")
            exit(1)

    except Exception as ex:
        # Previously this exited silently, hiding the cause; log it first.
        # Note: exit(1) raises SystemExit (a BaseException), so the exits
        # above are NOT caught by this handler.
        logger.error(f"Exit - Failed to build the data source: {ex}")
        exit(1)

    return strategy, data_source
Ejemplo n.º 4
0
    def train_model(self, training_data_path):
        """
        Train the model with 10-fold cross validation.

        For each fold the best checkpoint (lowest validation loss) is saved,
        reloaded and evaluated; the running list of per-fold validation
        losses is logged after every fold.

        :param training_data_path: path to the CSV with 'image_name' and
            'steering' columns.
        """
        validation_loss = []
        train_data = pd.read_csv(training_data_path)
        train_data[['steering']] = train_data[['steering']].astype(float)
        labels = train_data[['steering']]

        kf = KFold(n_splits=10, random_state=7, shuffle=True)
        generator = ImageDataGenerator(width_shift_range=0.1,
                                       height_shift_range=0.1,
                                       zoom_range=0.3,
                                       rescale=1. / 255)

        fold = 1
        for train_index, val_index in kf.split(np.zeros(labels.size), labels):
            training_data = train_data.iloc[train_index]
            validation_data = train_data.iloc[val_index]

            train_data_generator = generator.flow_from_dataframe(
                training_data,
                directory=self.training_images_dir,
                x_col="image_name",
                y_col="steering",
                class_mode="raw",
                shuffle=True)
            validation_data_generator = generator.flow_from_dataframe(
                validation_data,
                directory=self.training_images_dir,
                x_col="image_name",
                y_col="steering",
                class_mode="raw",
                shuffle=True)

            # val_loss improves when it DEcreases, so the monitor mode must be
            # 'min'; with the previous mode='max' plus save_best_only=True,
            # the checkpoint kept the WORST model of the fold.
            callback1 = tf.keras.callbacks.ModelCheckpoint(
                self.save_model_dir + self.get_model_name(fold),
                monitor='val_loss',
                verbose=1,
                save_best_only=True,
                mode='min')
            callback2 = EarlyStopping(monitor='val_loss',
                                      patience=3,
                                      verbose=0)
            callbacks_list = [callback1, callback2]

            self.epoch_model.fit(train_data_generator,
                                 epochs=int(config.get("training.epochs")),
                                 callbacks=callbacks_list,
                                 validation_data=validation_data_generator)

            # Reload under the exact name the checkpoint was saved with; the
            # previous hard-coded "model_<fold>.h5" could drift from
            # get_model_name(fold) and fail to find the file.
            self.epoch_model.load_weights(self.save_model_dir +
                                          self.get_model_name(fold))

            results = self.epoch_model.evaluate(validation_data_generator)
            results = dict(zip(self.epoch_model.metrics_names, results))
            validation_loss.append(results['loss'])

            tf.keras.backend.clear_session()
            fold += 1
            logger.info("Loss" + str(validation_loss))
 def save_csv_data(self):
     """
     Append the collected (image_name, steering) pairs to the training CSV,
     writing the header only when the file does not exist yet.
     """
     logger.info("Start saving csv file......")
     csv_path = definitions.ROOT_DIR + config.get(
         'data_collecting.csv_path')
     frame = pd.DataFrame(self.collected_data,
                          columns=['image_name', 'steering'])
     file_exists = os.path.isfile(csv_path)
     # First write creates the file with a header; subsequent writes append
     # rows without repeating it.
     frame.to_csv(csv_path,
                  index=False,
                  mode='a' if file_exists else 'w',
                  header=not file_exists)
 def save_data(self, img, steering, i: str = "0"):
     """
     Persist one camera frame as a JPEG and record its (file name, steering)
     pair in self.collected_data.

     :param img: image object polled from the front camera (converted to RGB
         before writing).
     :param steering: steering angle label for this frame.
     :param i: suffix appended to the epoch timestamp so names stay unique
         within the same second.
     :raises Exception: re-raises whatever failed while writing the image.
     """
     file_name = str(int(time.time())) + i + ".jpg"
     try:
         image_path = definitions.ROOT_DIR + config.get(
             'data_collecting.data_path') + file_name
         imageio.imwrite(image_path, np.asarray(img.convert('RGB')), "jpg")
         self.collected_data["image_name"].append(file_name)
         self.collected_data["steering"].append(steering)
     except Exception as ex:
         logger.info(f"Error while saving data -- {ex}")
         # Re-raise the ORIGINAL exception; the previous `raise Exception`
         # replaced it with a bare new Exception, losing type and traceback.
         raise
def clean_data_images():
    """
    Remove images from the images folder that are not referenced in the csv file.
    """
    root.file_config("config.yml")
    data = pd.read_csv(definitions.ROOT_DIR +
                       config.get("data_collecting.csv_path"))
    img_path = definitions.ROOT_DIR + config.get("data_collecting.data_path")
    # Exact-name membership check. The previous str.contains(file) treated
    # the file name as a REGEX (so '.' in "123.jpg" matched any character)
    # and also matched substrings, which could wrongly keep unrelated files.
    # A set also makes each lookup O(1) instead of scanning the column.
    known_images = set(data["image_name"])
    count = 0
    for file in os.listdir(img_path):
        if file not in known_images:
            count += 1
            os.remove(img_path + file)
    logger.info(f"Image data cleaning successfully! Removed {count} images")
def clean_data_csv():
    """
    One might remove recorded images while images are being collected.
    This function synchronizes data in csv files and images in images folder in such cases.

    A row is dropped when its image file is missing on disk or when its
    steering value lies outside the [-5.0, 5] range.
    """
    root.file_config("config.yml")
    csv_path = definitions.ROOT_DIR + config.get("data_collecting.csv_path")
    data = pd.read_csv(csv_path)
    img_path = definitions.ROOT_DIR + config.get("data_collecting.data_path")
    # Build a keep-mask instead of calling data.drop(inplace=True) while
    # iterating with iterrows(): mutating the frame mid-iteration is unsafe.
    file_ok = data["image_name"].map(
        lambda name: os.path.isfile(img_path + name))
    steering_ok = (data["steering"] >= -5.0) & (data["steering"] <= 5)
    keep = file_ok & steering_ok
    count = int((~keep).sum())
    data = data[keep]
    data.to_csv(csv_path,
                index=False,
                mode='w',
                header=True)
    logger.info(f"CSV data cleaning successfully!")
Ejemplo n.º 9
0
def run_evaluator(model_file="model.h5"):
    """
    Load the configuration, run the neuron-coverage evaluator for the given
    model file, and log how long the run took. Errors are logged, not raised.
    """
    root.file_config("config.yml")
    try:
        started_at = time.time()
        Evaluator(model_file).run()
        elapsed = time.time() - started_at
        logger.info(f"--------- Execution time: {elapsed} seconds ---------")
    except Exception as ex:
        logger.info(f"Error while evaluating neuron coverage {ex}")
        logger.info(traceback.format_exc())
Ejemplo n.º 10
0
def run_trainer(model="inception_v3"):
    """
    Load the configuration, train the chosen model on the collected CSV data,
    and log the elapsed time. Errors are logged, not raised.
    """
    root.file_config("config.yml")
    try:
        begin = time.time()
        trainer = EpochModel(model)
        csv_file = definitions.ROOT_DIR + config.get(
            "data_collecting.csv_path")
        trainer.train_model(csv_file)
        logger.info(
            f"--------- Execution time: {time.time() - begin} seconds ---------"
        )
    except Exception as ex:
        logger.info(f"Error while training model - {ex}")
        logger.info(traceback.format_exc())
def run_collector(number_of_images=0, mode="manual_mode"):
    """
    Collect training data from the simulator and persist the csv index.

    :param number_of_images: samples to collect; 0 (or any falsy value)
        falls back to the data_collecting.number_of_images config value.
    :param mode: collect mode, "manual_mode" or "ai_mode".
    """
    root.file_config("config.yml")
    if not number_of_images:
        number_of_images = int(config.get("data_collecting.number_of_images"))

    collector = DataCollector()
    exit_ok = collector.collect_data(number_of_images, mode)
    # Save csv data in both outcomes — partial data is still worth keeping.
    if exit_ok:
        collector.save_csv_data()
        logger.info("Data collected successfully!")
    else:
        logger.info("Saving data...")
        collector.save_csv_data()
        # collect_data already handled its exception internally, so there is
        # no active traceback here; the old traceback.format_exc() call only
        # ever logged "NoneType: None".
        logger.info("Collection ended with an error; partial data saved.")
Ejemplo n.º 12
0
 def run(self):
     """
     Evaluate neuron coverage first for single transformations of single
     images, then for combined transformations of image sets, logging a
     banner before each pass.
     """
     passes = (
         ("------------Neuron coverage when applying a single transformation of a single image------------",
          {"single": True, "combine": False}),
         ("------------Neuron coverage when applying combined transformations of a set of images------------",
          {"single": False, "combine": True}),
     )
     for banner, options in passes:
         logger.info(banner)
         self.compute_neuron_coverage(**options)
Ejemplo n.º 13
0
    parser.add_argument('--cm', dest='collect_mode', choices=['manual_mode', 'ai_mode'], default="manual_mode",
                        required=False, help='Data collect mode: manual_mode or ai_mode')
    parser.add_argument('--model', dest='model', choices=['inception_v3', 'cnn'],
                        default="inception_v3",
                        help='Enter one of the following model name for training: inception_v2, cnn')
    parser.add_argument('--mf', dest='model_file', type=str, default="model_1.h5",
                        help='Model file name for running evaluator')

    return parser.parse_args()


if __name__ == "__main__":
    args = get_console_arguments()

    # Dispatch on the requested task. The tasks are mutually exclusive, so a
    # single if/elif chain is used; the original mixed a stand-alone `if`
    # ("collecting") with an `elif` chain, which only worked because
    # args.task holds exactly one value.
    if args.task == "collecting":
        logger.info("Task: Collecting")
        from src.training import data_collector

        data_collector.run_collector(int(args.number_of_images), args.collect_mode)

    elif args.task == "data_cleaning":
        logger.info("Task: Cleaning training data")
        from src.training import data_cleaner

        data_cleaner.run_data_cleaner()

    elif args.task == "training":
        logger.info("Task: Training - " + args.model)
        from src.training import model_training

        model_training.run_trainer(args.model)
Ejemplo n.º 14
0
    data_source = None
    try:
        if args.input_mode == "cmd":
            if not args.numbers_lists:
                logger.info(
                    f"EXIT - Please specify input data with the argument --l for the cmd input mode"
                )
                exit(1)
            data_source = CmdInput(args.numbers_lists)
        elif args.input_mode == "file":
            data_source = FileInput(args.file_path)

        if data_source is None:
            logger.error("Exit - Cannot get input data")
            exit(1)

    except Exception:
        exit(1)

    return strategy, data_source


if __name__ == "__main__":
    logger.info("START")
    console_args = get_console_arguments()
    logger.info(pretty_args(console_args.__dict__))
    # Turn the console arguments into a (strategy, data_source) pair and
    # hand both to the solver.
    strategy, data_source = process_args(console_args)
    solve(strategy, data_source)
    logger.info("DONE")