Example 1
    def infer(self, images: List[OrthographicImage],
              method: SelectionMethod) -> Action:
        if self.monte_carlo:  # Adapt Monte Carlo progress parameter s
            episodes_in_database = Loader.get_episode_count(Config.grasp_database)
            # Ramp s linearly from 0 to 1 between 3500 and 4500 episodes
            s_not_bounded = (episodes_in_database - 3500) / (4500 - 3500)
            self.inference.current_s = max(min(s_not_bounded, 1.0), 0.0)

        current_model_st_mtime = Loader.get_model_path(
            self.model).stat().st_mtime
        if self.watch_for_model_modification and current_model_st_mtime > self.model_last_modified + 0.5:  # [s]
            logger.warning(f'Reload model {self.model}.')
            try:
                self.inference.model = Loader.get_model(
                    self.model, output_layer=self.output_layer)
                self.model_last_modified = Loader.get_model_path(
                    self.model).stat().st_mtime
            except OSError:
                logger.info('Could not load model, probably file locked.')

        if len(images) == 3:
            images[2].mat = images[2].mat[:, :, ::-1]  # BGR to RGB

        action = self.inference.infer(images, method)
        self.indexer.to_action(action)

        estimated_reward_lower_than_threshold = action.estimated_reward < Config.bin_empty_at_max_probability
        bin_empty = estimated_reward_lower_than_threshold and Epoch.selection_method_should_be_high(
            method)

        if bin_empty:
            return Action('bin_empty', safe=1)

        self.converter.calculate_pose(action, images)
        return action
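
The Monte Carlo progress parameter s above is a clamped linear ramp over the episode count. A self-contained sketch of the same scheme, with the 3500/4500 bounds taken from the snippet (the function name is illustrative):

def monte_carlo_progress(episodes: int, start: int = 3500, end: int = 4500) -> float:
    """Ramp s linearly from 0 to 1 as the episode count grows from start to end."""
    s = (episodes - start) / (end - start)
    return max(min(s, 1.0), 0.0)

assert monte_carlo_progress(3500) == 0.0
assert monte_carlo_progress(4000) == 0.5
assert monte_carlo_progress(9000) == 1.0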
Example 2
    def check_for_model_reload(self):
        current_model_st_mtime = Loader.get_model_path(
            self.model).stat().st_mtime
        if self.watch_for_model_modification and current_model_st_mtime > self.model_last_modified + 0.5:  # [s]
            logger.warning(f'Reload model {self.model}.')
            try:
                self.inference.model = Loader.get_model(
                    self.model, output_layer=self.output_layer)
                self.model_last_modified = Loader.get_model_path(
                    self.model).stat().st_mtime
            except OSError:
                logger.info('Could not load model, probably file locked.')
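
The reload logic polls the model file's mtime and requires the new timestamp to exceed the last seen one by 0.5 s, which debounces reloads of a file still being written. A minimal standalone version of that check (names here are illustrative):

from pathlib import Path

def model_file_changed(path: Path, last_modified: float, debounce: float = 0.5) -> bool:
    """True once the file was modified more than `debounce` seconds after the last seen mtime."""
    return path.stat().st_mtime > last_modified + debounce

# Usage: record path.stat().st_mtime after each successful load and pass it as last_modified.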
Example 3
    def __init__(self, **params):
        self.model = Config.grasp_model
        self.watch_for_model_modification = True
        self.model_last_modified = Loader.get_model_path(
            self.model).stat().st_mtime

        self.monte_carlo = 40 if 'mc' in self.model[1] else None  # Monte Carlo parameter, enabled if the model name contains 'mc'
        self.with_types = 'types' in self.model[1]

        self.output_layer = ['prob', 'type'] if self.with_types else 'prob'
        self.inference = InferencePlanarPose(
            model=Loader.get_model(self.model, output_layer=self.output_layer),
            box=Config.box,
            lower_random_pose=Config.lower_random_pose,
            upper_random_pose=Config.upper_random_pose,
            monte_carlo=self.monte_carlo,
            with_types=self.with_types,
        )
        self.inference.keep_indixes = None
        self.indexer = GraspIndexer(gripper_classes=Config.gripper_classes)
        self.converter = Converter(grasp_z_offset=Config.grasp_z_offset,
                                   box=Config.box)

        # # self.indexer = GraspFinalDIndexer(gripper_classes=Config.gripper_classes, final_d_classes=[0.0, 0.035])
        # self.indexer = LateralIndexer(
        #     angles=[(0, 0), (0.3, 0)],
        #     gripper_classes=[0.05, 0.07, 0.084],
        # )
        # self.converter = Converter(grasp_z_offset=Config.grasp_z_offset, box=Config.box)

        self.reinfer_next_time = True  # Always true in contrast to AgentPredict
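
The constructor derives its configuration from substrings of the model name (`self.model[1]`). A standalone sketch of that flag parsing, assuming the model is a `(collection, name)` tuple as the indexing suggests:

model = ('grasping', 'model-mc-types-1')  # hypothetical (collection, name) tuple
monte_carlo = 40 if 'mc' in model[1] else None  # enabled when the name contains 'mc'
with_types = 'types' in model[1]
output_layer = ['prob', 'type'] if with_types else 'prob'
print(monte_carlo, with_types, output_layer)  # -> 40 True ['prob', 'type']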
Example 4
class Placing:
    def __init__(self, collections, mongo_host='localhost', data_path=None, image_format='png'):
        self.loader = Loader(mongo_host, data_path=data_path, image_format=image_format)
        self.model_path = self.loader.get_model_path('placing-3-32-part-type-2')  # [.h5]

        train_batch_size = 64
        validation_batch_size = 512

        self.image_shape = {
            'ed': (None, None, 1),
            'rd': (None, None, 1),
            'rc': (None, None, 3),
        }

        self.z_size = 48

        self.percent_validation_set = 0.2

        number_primitives = 4 if 'screw' in self.model_path.stem else 3

        load_model = False
        use_beta_checkpoint_path = True
        checkpoint_path = self.model_path if not use_beta_checkpoint_path else self.model_path.with_suffix('.beta' + self.model_path.suffix)
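        # Checkpoint to a separate '.beta' file during training; the best weights are
        # loaded back and saved over the final model path after fit() completes (see below)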

        episodes = self.loader.yield_episodes(
            collections,
            query={'$or': [
                # {'actions': {'$size': 1}, 'actions.0.type': 'grasp'},
                {'actions': {'$size': 2}, 'actions.0.type': 'grasp', 'actions.1.type': 'place'},
            ]},
            projection={'_id': 0, 'id': 1, 'actions.pose': 1, 'actions.reward': 1, 'actions.images': 1}
        )
        train_episodes, validation_episodes = self.split_set(episodes)

        train_set = PlacingDataset(train_episodes, seed=42)
        train_data = train_set.get_data(shuffle='all')
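        # Shuffle with a buffer of 6x the episode count (each episode presumably yields multiple samples)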
        train_data = train_data.shuffle(len(train_episodes) * 6)
        train_data = train_data.batch(train_batch_size)
        train_data = train_data.prefetch(tf.data.experimental.AUTOTUNE)

        validation_data = PlacingDataset(validation_episodes, seed=43).get_data()
        validation_data = validation_data.cache()
        validation_data = validation_data.batch(validation_batch_size)
        validation_data = validation_data.prefetch(tf.data.experimental.AUTOTUNE)

        self.grasp_model = self.define_grasp_model(number_primitives=number_primitives)
        self.place_model = self.define_place_model()
        self.merge_model = self.define_merge_model()

        image_grasp_before = [
            tk.Input(shape=self.image_shape['ed'], name='image_grasp_before')
        ]
        image_place_before = [
            tk.Input(shape=self.image_shape['ed'], name='image_place_before')
        ]
        image_place_goal = [
            tk.Input(shape=self.image_shape['ed'], name='image_place_goal')
        ]

        reward_m, *z_m = self.grasp_model(image_grasp_before)
        reward_p, z_p = self.place_model(image_place_before + image_place_goal)
        reward = self.merge_model([z_m[0], z_p])

        losses = Losses()

        self.combined = tk.Model(inputs=(image_grasp_before + image_place_before + image_place_goal), outputs=[reward_m, reward_p, reward])
        self.combined.summary()
        self.combined.compile(
            optimizer=tk.optimizers.Adam(learning_rate=1e-4),
            loss=losses.binary_crossentropy,
            loss_weights=[1.0, 1.0, 4.0],
            metrics=[
                losses.binary_crossentropy,
                SplitMeanSquaredError(),
                SplitBinaryAccuracy(),
                SplitPrecision(),
                SplitRecall(),
            ],
        )

        callbacks = [
            tk.callbacks.ModelCheckpoint(
                str(checkpoint_path),
                monitor='val_loss',
                verbose=1,
                save_best_only=True
            ),
            tk.callbacks.EarlyStopping(monitor='val_loss', patience=60),
            tk.callbacks.ReduceLROnPlateau(factor=0.2, verbose=1, patience=20, min_lr=5e-7),
            tf.keras.callbacks.TensorBoard(log_dir=str(self.model_path.parent / 'logs' / f'placing-{time()}')),
        ]

        if load_model:
            self.combined.load_weights(str(self.model_path))
            evaluation = self.combined.evaluate(validation_data, batch_size=validation_batch_size, verbose=2)
            callbacks[0].best = evaluation[self.combined.metrics_names.index('loss')]

        self.combined.fit(
            train_data,
            validation_data=validation_data,
            epochs=1000,
            callbacks=callbacks,
            verbose=2,
        )

        self.combined.load_weights(str(checkpoint_path))
        if use_beta_checkpoint_path:
            self.combined.save(str(self.model_path), save_format='h5')

    def define_grasp_model(self, number_primitives: int):
        inputs = [
            tk.Input(shape=self.image_shape['ed'], name='image')
        ]

        conv_block = conv_block_gen(l2_reg=0.001, dropout_rate=0.35)
        conv_block_r = conv_block_gen(l2_reg=0.001, dropout_rate=0.5)

        x = conv_block(inputs[0], 32)
        x = conv_block(x, 32, strides=(2, 2))
        x = conv_block(x, 32)

        x_r = conv_block_r(x, 48)
        x_r = conv_block_r(x_r, 48)

        x_r = conv_block_r(x_r, 64)
        x_r = conv_block_r(x_r, 64)

        x_r = conv_block_r(x_r, 64)
        x_r = conv_block_r(x_r, 48, kernel_size=(2, 2))

        x = conv_block(x, 64)
        x = conv_block(x, 64)

        x = conv_block(x, 96)
        x = conv_block(x, 96)

        x = conv_block(x, 128)
        x = conv_block(x, 128, kernel_size=(2, 2))

        reward = tkl.Conv2D(number_primitives, kernel_size=(1, 1), activation='sigmoid', name='reward_grasp')(x_r)
        reward_training = tkl.Reshape((number_primitives,))(reward)

        # Single latent head; presumably kept as a loop so further z outputs can be added easily
        z_trainings = []
        for i in range(1):
            z = tkl.Conv2D(self.z_size, kernel_size=(1, 1), activity_regularizer=tk.regularizers.l2(0.0005), name=f'z_m{i}')(x)
            z_training = tkl.Reshape((self.z_size,))(z)
            z_trainings.append(z_training)

        outputs = [reward_training] + z_trainings
        return tk.Model(inputs=inputs, outputs=outputs, name='grasp')

    def define_place_model(self):
        inputs = [
            tk.Input(shape=self.image_shape['ed'], name='image_before'),
            tk.Input(shape=self.image_shape['ed'], name='image_goal'),
        ]

        conv_block = conv_block_gen(l2_reg=0.001, dropout_rate=0.35)
        conv_block_r = conv_block_gen(l2_reg=0.001, dropout_rate=0.5)

        x = tkl.Concatenate()(inputs)

        x = conv_block(x, 32)
        x = conv_block(x, 32)

        x = conv_block(x, 32)
        x = conv_block(x, 32)
        x = conv_block(x, 32)
        x = conv_block(x, 32)

        x_r = conv_block_r(x, 32)
        x_r = conv_block_r(x_r, 32)

        x_r = conv_block_r(x_r, 48)
        x_r = conv_block_r(x_r, 48)
        x_r = conv_block_r(x_r, 48)
        x_r = conv_block_r(x_r, 48)

        x_r = conv_block_r(x_r, 48)
        x_r = conv_block_r(x_r, 48)

        x_r = conv_block_r(x_r, 64)
        x_r = conv_block_r(x_r, 48, kernel_size=(2, 2))

        x = conv_block(x, 48)
        x = conv_block(x, 48)

        x = conv_block(x, 64)
        x = conv_block(x, 64)
        x = conv_block(x, 64)
        x = conv_block(x, 64)

        x = conv_block(x, 96)
        x = conv_block(x, 96)

        x = conv_block(x, 128)
        x = conv_block(x, 128, kernel_size=(2, 2))

        reward = tkl.Conv2D(1, kernel_size=(1, 1), activation='sigmoid', name='reward_place')(x_r)
        reward_training = tkl.Reshape((1,))(reward)

        z = tkl.Conv2D(self.z_size, kernel_size=(1, 1), activity_regularizer=tk.regularizers.l2(0.0005), name='z_p')(x)
        z_training = tkl.Reshape((self.z_size,))(z)

        outputs = [reward_training, z_training]
        return tk.Model(inputs=inputs, outputs=outputs, name='place')

    def define_merge_model(self):
        input_shape = (self.z_size,)  # Trailing comma: shape must be a tuple, not a plain int

        z_m = tk.Input(shape=input_shape, name='z_m')
        z_p = tk.Input(shape=input_shape, name='z_p')

        dense_block = dense_block_gen(l2_reg=0.01, dropout_rate=0.2)
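        # Compare the grasp and place embeddings by their difference; the dense head scores the match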
        x = z_m - z_p

        x = dense_block(x, 128)
        x = dense_block(x, 128)
        x = dense_block(x, 64)

        reward = tkl.Dense(1, activation='sigmoid', name='reward_merge')(x)
        return tk.Model(inputs=[z_m, z_p], outputs=[reward], name='merge')

    @staticmethod
    def binary_decision(string: str, p: float) -> bool:
        # Deterministic decision: map the string's SHA-256 hash to [0, 1) and compare against p
        return float(int(hashlib.sha256(string.encode('utf-8')).hexdigest(), 16) % 2**16) / 2**16 < p

    def assign_set(self, data):
        collection, episode = data
        random_assign = self.binary_decision(episode['id'], self.percent_validation_set)
        episode['is_validation'] = random_assign  # or (collection in [])
        episode['collection'] = collection
        return episode

    def split_set(self, data, verbose=1):
        episodes = list(map(self.assign_set, data))[-13000:]  # Limit to the last 13000 episodes yielded

        train_episodes = list(filter(lambda x: not x['is_validation'], episodes))
        validation_episodes = list(filter(lambda x: x['is_validation'], episodes))

        if verbose > 0:
            logger.info(f'Train on {len(train_episodes)} episodes.')
            logger.info(f'Validate on {len(validation_episodes)} episodes.')

        return train_episodes, validation_episodes
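
`binary_decision` buckets the SHA-256 hash of an episode id into [0, 1), so the train/validation assignment is deterministic and stable across runs. A quick standalone check of the bucketing:

import hashlib

def binary_decision(string: str, p: float) -> bool:
    return (int(hashlib.sha256(string.encode('utf-8')).hexdigest(), 16) % 2**16) / 2**16 < p

ids = [f'episode-{i}' for i in range(10000)]
validation = [i for i in ids if binary_decision(i, 0.2)]
print(len(validation) / len(ids))  # roughly 0.2, and identical on every run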