Example #1
    def __init__(self):
        """
      self.op_base_dir: Replace with the path leading to your local openpilot repository
      self.release_branch: Replace with the name of your most stable branch
      self.target_branch: Replace with the name of the target branch you want a squashed version of your release branch on
      self.commit_message: The commit message to be pushed to your target_branch. You can either use the included date function or remove it

      When you run this file, it will checkout your release branch immediately, make sure it's up to date with your remote by pulling before running
    """

        self.op_base_dir = 'D:/op/openpilot'
        self.release_branch = '073-clean'
        self.target_branch = 'Release4'
        self.commit_message = 'Release 4, (0.7.3) {} Release'.format(
            self.get_cur_date())

        self.eta_tool = ETATool(self.op_base_dir, up_speed)

        self.msg_count = 0
        self.total_steps = 3

        self.create_release()
Example #2
    def __init__(self):
        """
      self.op_base_dir: Replace with the path leading to your local openpilot repository
      self.release_branch: Replace with the name of your most stable branch
      self.target_branch: Replace with the name of the target branch you want a squashed version of your release branch on
      self.commit_message: The commit message to be pushed to your target_branch. You can either use the included date function or remove it

      When you run this file, it will checkout your release branch immediately, make sure it's up to date with your remote by pulling before running
    """

        profile = 'ShaneSmiskol'  # change to your desired current profile

        self.op_base_dir = self.profiles[profile]['op_base_dir']
        self.release_branch = self.profiles[profile]['release_branch']
        self.target_branch = self.profiles[profile]['target_branch']
        self.commit_message = self.profiles[profile]['commit_message'].format(
            self.get_cur_date())

        self.eta_tool = ETATool(self.op_base_dir, up_speed)

        self.msg_count = 0
        self.total_steps = 3

        self.create_release()
Example #3
    def __init__(self, cfg, force_reset=False):
        self.wandb_config = cfg
        self.eta_tool = ETATool()
        # self.W, self.H = 1164, 874
        self.y_hood_crop = 665  # number of pixels from the top at which to crop the image to remove the hood
        self.cropped_shape = (665, 814, 3)  # (515, 814, 3)
        self.data_labels = ['RED', 'GREEN', 'YELLOW', 'NONE']

        self.transform_old_labels = {
            'RED': 'SLOW',
            'GREEN': 'GREEN',
            'YELLOW': 'SLOW',
            'NONE': 'NONE'
        }
        self.model_labels = ['SLOW', 'GREEN', 'NONE']
        self.use_model_labels = True

        self.proc_folder = 'data/.processed'

        # self.batch_size = 36
        self.batch_size = self.wandb_config.batch_size
        self.test_percentage = 0.15  # percentage of total data to be validated on
        self.dataloader_workers = 256  # used by Keras to load input images; there are diminishing returns at high values (>~10)

        self.max_samples_per_class = 14500  # unused after transformed data is created

        self.model = None

        self.force_reset = force_reset
        self.finished_file = 'data/.finished'
        self.class_weight = {}

        self.datagen_threads = 0
        self.datagen_max_threads = 128  # used to generate randomly transformed data (dependent on your CPU; set lower if it starts to freeze)
        self.num_flow_images = 5  # number of extra images to randomly generate for each input image
        self.lock = Lock()
Example #4
class TrafficLightsModel:
    def __init__(self, force_reset=False):
        self.eta_tool = ETATool()
        # self.W, self.H = 1164, 874
        self.y_hood_crop = 665  # number of pixels from the top at which to crop the image to remove the hood
        self.cropped_shape = (665, 814, 3)  # (515, 814, 3)
        self.data_labels = ['RED', 'GREEN', 'YELLOW', 'NONE']

        self.transform_old_labels = {
            'RED': 'SLOW',
            'GREEN': 'GREEN',
            'YELLOW': 'SLOW',
            'NONE': 'NONE'
        }
        self.model_labels = ['SLOW', 'GREEN', 'NONE']
        self.use_model_labels = True

        self.proc_folder = 'data/.processed'

        # self.reduction = 2
        self.batch_size = 32
        self.test_percentage = 0.2  # percentage of total data to be validated on
        self.num_flow_images = 2  # number of extra images to randomly generate for each input image
        self.dataloader_workers = 16  # used by Keras to load input images; there are diminishing returns at high values (>~10)

        self.max_samples_per_class = 6000  # unused after transformed data is created

        self.model = None

        self.force_reset = force_reset
        self.finished_file = 'data/.finished'
        self.class_weight = {}

        self.datagen_threads = 0
        self.datagen_max_threads = 24  # used to generate randomly transformed data (dependent on your CPU; set lower if it starts to freeze)

    def do_init(self):
        self.check_data()
        if self.needs_reset:
            self.reset_countdown()
            self.reset_data()
            self.create_validation_set()  # create validation set for model
            self.transform_images()

        self.set_class_weight()
        train_gen, valid_gen = self.get_generators()
        return train_gen, valid_gen
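    # Typical driver flow (a minimal usage sketch using only the methods defined in
    # this class; it assumes the unzipped data/ folder and Keras are available):
    #   tl_model = TrafficLightsModel(force_reset=False)
    #   train_gen, valid_gen = tl_model.do_init()
    #   tl_model.train_batches(train_gen, valid_gen, epochs=50)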

    def train_batches(self,
                      train_generator,
                      valid_generator,
                      restart=False,
                      epochs=50):
        if self.model is None or restart:
            self.model = self.get_model_1()

        # opt = keras.optimizers.RMSprop()
        # opt = keras.optimizers.Adadelta()
        # opt = keras.optimizers.Adagrad()
        opt = keras.optimizers.Adam(0.001 * .4)

        self.model.compile(loss='categorical_crossentropy',
                           optimizer=opt,
                           metrics=['accuracy'])

        self.model.fit_generator(train_generator,
                                 epochs=epochs,
                                 validation_data=valid_generator,
                                 workers=self.dataloader_workers,
                                 class_weight=self.class_weight)
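        # Note: fit_generator is the Keras 2.x API; in newer Keras releases the same
        # Sequence generators can be passed directly to model.fit.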

    def get_model_1(self):
        # model = Sequential()
        # model.add(Dense(64, activation='relu', input_shape=(np.product(self.cropped_shape),)))
        # model.add(Dense(32, activation='relu'))
        # model.add(Dense(32, activation='relu'))
        # model.add(Dense(4, activation='softmax'))
        # return model

        kernel_size = (3, 3)  # almost no effect on model size

        print('USING NEW MODEL')
        model = Sequential()
        model.add(
            Conv2D(12,
                   kernel_size,
                   strides=1,
                   activation='relu',
                   input_shape=self.cropped_shape))
        # model.add(MaxPooling2D(pool_size=(2, 2)))
        # model.add(BatchNormalization())

        model.add(Conv2D(24, kernel_size, strides=1, activation='relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))

        model.add(Conv2D(48, kernel_size, strides=1, activation='relu'))
        model.add(MaxPooling2D(pool_size=(3, 3)))

        model.add(Conv2D(64, kernel_size, strides=1, activation='relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))

        model.add(Conv2D(12, kernel_size, strides=1, activation='relu'))
        # model.add(MaxPooling2D(pool_size=(2, 2)))

        model.add(Flatten())  # this converts our 3D feature maps to 1D feature vectors
        model.add(Dense(32, activation='relu'))
        # model.add(Dropout(0.3))
        model.add(Dense(64, activation='relu'))
        model.add(Dense(64, activation='relu'))
        # model.add(Dropout(0.3))
        # kernel_size = (3, 3)  # (3, 3)
        #
        # model = Sequential()
        # model.add(Conv2D(12, kernel_size, activation='relu', input_shape=self.cropped_shape))
        # model.add(MaxPooling2D(pool_size=(3, 3)))
        # # model.add(BatchNormalization())
        #
        # model.add(Conv2D(12, kernel_size, activation='relu'))
        # model.add(MaxPooling2D(pool_size=(3, 3)))
        #
        # model.add(Conv2D(24, kernel_size, activation='relu'))
        # model.add(MaxPooling2D(pool_size=(3, 3)))
        #
        # model.add(Conv2D(36, kernel_size, activation='relu'))
        #
        #
        # model.add(Flatten())  # this converts our 3D feature maps to 1D feature vectors
        # model.add(Dense(32, activation='relu'))
        # # model.add(Dropout(0.3))
        # model.add(Dense(64, activation='relu'))
        # # model.add(Dropout(0.3))
        if not self.use_model_labels:
            model.add(Dense(len(self.data_labels), activation='softmax'))
        else:
            model.add(Dense(len(self.model_labels), activation='softmax'))
        return model
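    # model.summary() can be called on the returned model to print the layer output
    # shapes and parameter counts.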

    def get_model_2(self):
        # model = Sequential()
        # model.add(Dense(64, activation='relu', input_shape=(np.product(self.cropped_shape),)))
        # model.add(Dense(32, activation='relu'))
        # model.add(Dense(32, activation='relu'))
        # model.add(Dense(4, activation='softmax'))
        # return model
        # model.add(Dropout(0.3))

        kernel_size = (3, 3)  # (3, 3)

        model = Sequential()
        model.add(
            Conv2D(12,
                   kernel_size,
                   activation='relu',
                   input_shape=self.cropped_shape))
        model.add(MaxPooling2D(pool_size=(3, 3)))
        # model.add(BatchNormalization())

        model.add(Conv2D(12, kernel_size, activation='relu'))
        model.add(MaxPooling2D(pool_size=(3, 3)))

        model.add(Conv2D(24, kernel_size, activation='relu'))
        model.add(MaxPooling2D(pool_size=(3, 3)))

        model.add(Conv2D(36, kernel_size, activation='relu'))
        print('USING OLD MODEL')

        model.add(Flatten())  # this converts our 3D feature maps to 1D feature vectors
        model.add(Dense(32, activation='relu'))
        # model.add(Dropout(0.3))
        model.add(Dense(64, activation='relu'))
        # model.add(Dropout(0.3))
        if not self.use_model_labels:
            model.add(Dense(len(self.data_labels), activation='softmax'))
        else:
            model.add(Dense(len(self.model_labels), activation='softmax'))
        return model

    def BGR2RGB(self, arr):  # easier to run inference on; the EON uses BGR images
        return cv2.cvtColor(arr, cv2.COLOR_BGR2RGB)

    def get_generators(self):
        train_dir = '{}/.train'.format(self.proc_folder)
        valid_dir = '{}/.validation'.format(self.proc_folder)
        train_generator = CustomDataGenerator(
            train_dir, self.data_labels, self.model_labels,
            self.transform_old_labels, self.use_model_labels,
            self.batch_size)  # keeps data in BGR format and normalizes (see the generator sketch after this example)
        valid_generator = CustomDataGenerator(valid_dir, self.data_labels,
                                              self.model_labels,
                                              self.transform_old_labels,
                                              self.use_model_labels,
                                              self.batch_size * 2)
        return train_generator, valid_generator

    def create_validation_set(self):
        print(
            'Your system may slow until the process completes. Try reducing the max threads if it locks up.'
        )
        print(
            'Do NOT delete anything in the `.processed` folder while it\'s working.\n'
        )
        print('Creating validation set!', flush=True)
        for image_class in self.data_labels:  # load all image names per class
            print('Working on class: {}'.format(image_class))
            class_dir = 'data/{}'.format(image_class)
            images = [{
                'img_path': '{}/{}'.format(class_dir, img),
                'img_name': img
            } for img in os.listdir(class_dir)]

            random.shuffle(images)
            # only keep up to max_samples_per_class samples
            while len(images) > self.max_samples_per_class:
                del images[random.randint(0, len(images) - 1)]

            train, valid = train_test_split(
                images, test_size=self.test_percentage)  # split by class

            for img in train:
                shutil.copyfile(
                    img['img_path'],
                    '{}/.train_temp/{}/{}'.format(self.proc_folder,
                                                  image_class,
                                                  img['img_name']))
            for img in valid:
                shutil.copyfile(
                    img['img_path'],
                    '{}/.validation_temp/{}/{}'.format(self.proc_folder,
                                                       image_class,
                                                       img['img_name']))
        print()

    def transform_and_crop_image(self, image_class, photo_path, datagen,
                                 is_train):
        self.datagen_threads += 1
        original_img = cv2.imread(photo_path)  # loads uint8 BGR array
        flowed_imgs = []
        if is_train:  # don't transform validation images
            imgs = np.array(
                [original_img for _ in range(self.num_flow_images)])
            # randomly transform images
            try:
                batch = datagen.flow(imgs, batch_size=self.num_flow_images)[0]
            except Exception as e:
                print(imgs)
                print(photo_path)
                raise Exception(
                    'Error in transform_and_crop_image: {}'.format(e))
            # convert from float32 (0 to 255) back to uint8 (0 to 255)
            flowed_imgs = [img.astype(np.uint8) for img in batch]

        # append the original, untransformed image so we can crop and copy it as well
        flowed_imgs.append(original_img)
        cropped_imgs = [self.crop_image(img) for img in flowed_imgs]
        for idx, img in enumerate(cropped_imgs):
            photo_name = photo_path.split('/')[-1][:-4]  # get file name from path, excluding the extension
            # print('{}/.train/{}/{}'.format(self.proc_folder, image_class, photo_name))
            if is_train:
                cv2.imwrite(
                    '{}/.train/{}/{}.{}.png'.format(self.proc_folder,
                                                    image_class, photo_name,
                                                    idx), img)
            else:
                cv2.imwrite(
                    '{}/.validation/{}/{}.{}.png'.format(
                        self.proc_folder, image_class, photo_name, idx), img)
        self.datagen_threads -= 1

    def process_class(self, image_class, photo_paths, datagen,
                      is_train):  # manages processing threads
        t = time.time()
        self.eta_tool.init(t, len(photo_paths))
        train_msg = 'train' if is_train else 'valid'
        for idx, photo_path in enumerate(photo_paths):
            self.eta_tool.log(idx, time.time())
            if time.time() - t > 15:
                # print('{}: Working on photo {} of {}.'.format(image_class, idx + 1, len(photos)))
                print('{} ({}): Time to completion: {}'.format(
                    image_class, train_msg, self.eta_tool.get_eta))
                t = time.time()

            threading.Thread(target=self.transform_and_crop_image,
                             args=(image_class, photo_path, datagen,
                                   is_train)).start()
            time.sleep(1 / 7.)  # spin up threads slightly slower
            while self.datagen_threads > self.datagen_max_threads:
                time.sleep(1)

        while self.datagen_threads != 0:  # wait for all threads to complete before continuing
            time.sleep(1)

        print('{} ({}): Finished!'.format(image_class, train_msg))

    def reset_countdown(self):
        if os.path.exists(self.proc_folder):  # don't show the message if there's no data to delete
            print('WARNING: RESETTING PROCESSED DATA!', flush=True)
            print(
                'This means all randomly transformed images will be erased and regenerated. '
                'Which may take some time depending on the amount of data you have.',
                flush=True)
            time.sleep(2)
            for i in range(10):
                sec = 10 - i
                multi = 's' if sec > 1 else ''  # gotta be grammatically correct
                print('Resetting data in {} second{}!'.format(sec, multi))
                time.sleep(1)
            print('RESETTING DATA NOW', flush=True)

    def crop_image(self, img_array):
        h_crop = 175  # horizontal crop; 150 is good, need to test higher values
        t_crop = 0  # top crop; 100 is good, test higher values
        # crops h_crop pixels from each side, everything below y_hood_crop (the hood),
        # and t_crop pixels from the top
        return img_array[t_crop:self.y_hood_crop, h_crop:-h_crop]
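    # With the full 1164x874 frame noted above, the defaults keep rows 0:665 and
    # 1164 - 2*175 = 814 columns, matching the (665, 814, 3) cropped_shape from __init__.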

    def transform_images(self):
        datagen = ImageDataGenerator(
            rotation_range=2.5,
            width_shift_range=0,
            height_shift_range=0,
            shear_range=0,
            zoom_range=0.1,
            horizontal_flip=False,  # todo: testing false
            fill_mode='nearest')

        print(
            'Randomly transforming and cropping input images, please wait...')
        for image_class in self.data_labels:
            photos_train = os.listdir('{}/.train_temp/{}'.format(
                self.proc_folder, image_class))
            photos_valid = os.listdir('{}/.validation_temp/{}'.format(
                self.proc_folder, image_class))

            photos_train = [
                '{}/.train_temp/{}/{}'.format(self.proc_folder, image_class,
                                              img) for img in photos_train
            ]  # adds path
            photos_valid = [
                '{}/.validation_temp/{}/{}'.format(self.proc_folder,
                                                   image_class, img)
                for img in photos_valid
            ]

            self.process_class(image_class, photos_train, datagen, True)
            self.process_class(
                image_class, photos_valid, datagen,
                False)  # no transformations, only crop for valid
        shutil.rmtree('{}/.train_temp'.format(self.proc_folder))
        shutil.rmtree('{}/.validation_temp'.format(self.proc_folder))

        # create the finished file so we know in the future not to process the data again
        open(self.finished_file, 'a').close()
        print('All finished, moving on to training!')

    def reset_data(self):
        if os.path.exists(self.proc_folder):
            shutil.rmtree(self.proc_folder, ignore_errors=True)

        if os.path.exists(self.finished_file):
            os.remove(self.finished_file)
        io_sleep()

        os.mkdir(self.proc_folder)
        for image_class in self.data_labels:
            # os.mkdir('{}/{}'.format(self.proc_folder, image_class))
            os.makedirs('{}/.train/{}'.format(self.proc_folder, image_class))
            os.makedirs('{}/.train_temp/{}'.format(self.proc_folder,
                                                   image_class))
            os.makedirs('{}/.validation/{}'.format(self.proc_folder,
                                                   image_class))
            os.makedirs('{}/.validation_temp/{}'.format(
                self.proc_folder, image_class))

    def set_class_weight(self):
        if not self.use_model_labels:
            labels = self.data_labels
            label_img_count = {}
            for label in self.data_labels:
                label_img_count[label] = len(
                    os.listdir('{}/.train/{}'.format(self.proc_folder, label)))
        else:
            labels = self.model_labels
            label_img_count = {lbl: 0 for lbl in self.model_labels}
            for label in self.data_labels:
                model_label = self.transform_old_labels[label]
                label_img_count[model_label] += len(
                    os.listdir('{}/.train/{}'.format(self.proc_folder, label)))

        for label in label_img_count:
            # class weight: a class with 50 samples when the max is 100 gets assigned 2.0
            self.class_weight[labels.index(label)] = 1 / (
                label_img_count[label] / max(label_img_count.values()))

        tmp_prnt = {
            labels[cls]: self.class_weight[cls]
            for cls in self.class_weight
        }
        print('Class weights: {}'.format(tmp_prnt))
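        # Worked example: with .train counts of e.g. {'SLOW': 50, 'GREEN': 100, 'NONE': 100},
        # the max is 100, so SLOW is weighted 1 / (50 / 100) = 2.0 and the others 1.0.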

    def one_hot(self, idx):
        if not self.use_model_labels:
            one = [0] * len(self.data_labels)
        else:
            one = [0] * len(self.model_labels)
        one[idx] = 1
        return one
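    # e.g. with the three model labels, one_hot(1) -> [0, 1, 0]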

    def check_data(self):
        if not os.path.exists('data'):
            print('DATA DIRECTORY DOESN\'T EXIST!')
            os.mkdir('data')
            raise Exception(
                'Please unzip the data.zip archive into data directory')
        data_files = os.listdir('data')
        if not all([i in data_files for i in self.data_labels]):
            raise Exception(
                'Please unzip the data.zip archive into data directory')

    @property
    def needs_reset(self):
        return not os.path.exists(self.finished_file) or not os.path.exists(
            self.proc_folder) or self.force_reset
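The CustomDataGenerator used by get_generators above is not included in these examples. The following is a minimal sketch of what such a generator might look like, assuming a keras.utils.Sequence subclass that reads the cropped .png files from the class-per-folder layout created above, keeps them in BGR order, normalizes pixels to 0-1, and one-hot encodes the labels; the exact loading and label handling are assumptions inferred from the surrounding code, not the original implementation.

import os
import random

import cv2
import keras
import numpy as np


class CustomDataGenerator(keras.utils.Sequence):
    def __init__(self, directory, data_labels, model_labels,
                 transform_old_labels, use_model_labels, batch_size):
        self.labels = model_labels if use_model_labels else data_labels
        self.batch_size = batch_size
        self.samples = []  # (image path, label index) pairs
        for label in data_labels:
            target = transform_old_labels[label] if use_model_labels else label
            class_dir = '{}/{}'.format(directory, label)
            for img in os.listdir(class_dir):
                self.samples.append(('{}/{}'.format(class_dir, img),
                                     self.labels.index(target)))
        random.shuffle(self.samples)

    def __len__(self):
        return int(np.ceil(len(self.samples) / self.batch_size))

    def __getitem__(self, idx):
        batch = self.samples[idx * self.batch_size:(idx + 1) * self.batch_size]
        # keep BGR order (no cv2.cvtColor) and normalize to 0-1
        x = np.array([cv2.imread(path) for path, _ in batch],
                     dtype=np.float32) / 255.
        y = np.array([keras.utils.to_categorical(lbl, len(self.labels))
                      for _, lbl in batch])
        return x, y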
Example #5
class opReleaser:
    def __init__(self):
        """
      self.op_base_dir: Replace with the path leading to your local openpilot repository
      self.release_branch: Replace with the name of your most stable branch
      self.target_branch: Replace with the name of the target branch you want a squashed version of your release branch on
      self.commit_message: The commit message to be pushed to your target_branch. You can either use the included date function or remove it

      When you run this file, it will checkout your release branch immediately, make sure it's up to date with your remote by pulling before running
    """

        self.op_base_dir = 'D:/op/openpilot'
        self.release_branch = '073-clean'
        self.target_branch = 'Release4'
        self.commit_message = 'Release 4, (0.7.3) {} Release'.format(
            self.get_cur_date())

        self.eta_tool = ETATool(self.op_base_dir, up_speed)

        self.msg_count = 0
        self.total_steps = 3

        self.create_release()

    def create_release(self):
        # Checkout release branch
        r = self.run('git checkout {}'.format(self.release_branch))
        if not any(i in r for i in ['Switched to branch', 'Already on']):
            raise Exception('Error checking out release branch!')

        # Delete old target branch if it exists
        self.attempt_delete()

        # Checkout a new orphan branch
        r = self.run('git checkout --orphan {}'.format(self.target_branch))
        if 'Switched to a new branch' not in r:
            raise Exception('Error switching to target branch!')

        # Add all files in release branch to new target branch
        self.message('Adding and committing all files to {} branch...'.format(
            self.target_branch))
        self.run('git add --all')

        # Commit all files
        r = self.run(['git', 'commit', '-am', self.commit_message],
                     no_convert=True)

        if 'create mode' not in r:
            raise Exception('Error adding files to current branch!')

        self.eta_tool.start_eta()

        # Replace current release on remote
        self.message('Now force pushing to remote (origin/{})...'.format(
            self.target_branch))
        self.run('git push -f --set-upstream origin {}'.format(
            self.target_branch))
        self.eta_tool.stop()

        # Check the release branch back out
        self.run('git checkout {}'.format(self.release_branch))

        # Finished
        print(
            '\nFinished! Squashed {} branch to 1 commit and force pushed to {} branch!'
            .format(self.release_branch, self.target_branch))
        print('Commit message: {}'.format(self.commit_message))
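        # Equivalent manual git sequence, using the default branch names from __init__:
        #   git checkout 073-clean
        #   git branch -D Release4            # if it already exists
        #   git checkout --orphan Release4
        #   git add --all
        #   git commit -am "<commit_message>"
        #   git push -f --set-upstream origin Release4
        #   git checkout 073-clean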

    def run(self, cmd, no_convert=False):
        if not no_convert:
            cmd = cmd.split(' ')
        return subprocess.check_output(cmd,
                                       cwd=self.op_base_dir,
                                       stderr=subprocess.STDOUT,
                                       encoding='utf8')
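    # Note: plain string commands are split on spaces above, so commands whose
    # arguments themselves contain spaces (like the commit message) are passed
    # as a list with no_convert=True.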

    def attempt_delete(self):
        try:
            r = self.run('git branch -D {}'.format(self.target_branch))
            self.message(r.replace('\n', ''))
        except subprocess.CalledProcessError:
            self.message('{} branch already deleted.'.format(
                self.target_branch))

    def get_cur_date(self):
        today = datetime.datetime.today()
        # %b (abbreviated month name) is the portable equivalent of %h; the replace
        # strips a leading zero from the day, e.g. 'Mar 5, 2020'
        return today.strftime('%b %d, %Y').replace(' 0', ' ')

    def message(self, msg):
        print('[{}/{}]: {}'.format(self.msg_count + 1, self.total_steps, msg),
              flush=True)
        self.msg_count += 1
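# Usage (a minimal sketch; assumes datetime, subprocess, ETATool and up_speed are
# defined at module level, as implied by the code above):
#   if __name__ == '__main__':
#       opReleaser()  # create_release() runs immediately at the end of __init__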