Example #1
def __init__(self, name, task):
    self.name = name
    self.task = task
    self.total_iter = 0
    self.num_rotations = 24
    self.descriptor_dim = 16
    self.pixel_size = 0.003125
    self.input_shape = (320, 160, 6)
    self.camera_config = cameras.RealSenseD415.CONFIG
    self.models_dir = os.path.join('checkpoints', self.name)
    self.bounds = np.array([[0.25, 0.75], [-0.5, 0.5], [0, 0.28]])

    self.pick_model = Attention(
        input_shape=self.input_shape,
        num_rotations=1,
        preprocess=self.preprocess,
        lite=True)
    self.place_model = Attention(
        input_shape=self.input_shape,
        num_rotations=1,
        preprocess=self.preprocess,
        lite=True)
    self.match_model = Matching(
        input_shape=self.input_shape,
        descriptor_dim=self.descriptor_dim,
        num_rotations=self.num_rotations,
        preprocess=self.preprocess,
        lite=True)
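The workspace bounds and pixel size above determine the heightmap resolution. A minimal sketch (plain NumPy; variable names here are illustrative) of how `input_shape` follows from `bounds` and `pixel_size`:

    import numpy as np

    bounds = np.array([[0.25, 0.75], [-0.5, 0.5], [0, 0.28]])  # x/y/z limits in meters
    pixel_size = 0.003125                                       # meters per pixel

    # Heightmap rows span the y-range, columns span the x-range.
    rows = int(np.round((bounds[1, 1] - bounds[1, 0]) / pixel_size))  # 1.0 m -> 320
    cols = int(np.round((bounds[0, 1] - bounds[0, 0]) / pixel_size))  # 0.5 m -> 160

    # 6 channels: an RGB color heightmap stacked with replicated depth channels.
    input_shape = (rows, cols, 6)
    assert input_shape == (320, 160, 6)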
Example #2
    def __init__(self, name, task, n_rotations=36):
        super().__init__(name, task, n_rotations)

        self.attention = Attention(in_shape=self.in_shape,
                                   n_rotations=1,
                                   preprocess=utils.preprocess)
        self.transport = Attention(in_shape=self.in_shape,
                                   n_rotations=self.n_rotations,
                                   preprocess=utils.preprocess)
Example #3
    def __init__(self, name, task):
        super().__init__(name, task)

        self.attention_model = Attention(input_shape=self.input_shape,
                                         num_rotations=1,
                                         preprocess=self.preprocess)
        self.transport_model = Attention(input_shape=self.input_shape,
                                         num_rotations=self.num_rotations,
                                         preprocess=self.preprocess)
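In the two constructors above, the transport stream is itself an Attention module run over `num_rotations` rotations, so the predicted place pose comes from an argmax over a rotation-stacked score map. A generic sketch of that readout (illustrative shapes and names, not the library's internals):

    import numpy as np

    # Hypothetical dense output: one score map per discrete rotation.
    n_rotations, height, width = 36, 320, 160
    scores = np.random.rand(n_rotations, height, width)  # placeholder values

    # Jointly pick the best (rotation, pixel), then convert the rotation
    # index back to an angle in radians.
    k, v, u = np.unravel_index(np.argmax(scores), scores.shape)
    p1_pix, p1_theta = (v, u), k * 2 * np.pi / n_rotations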
Example #4
    def __init__(self, name, task):
        super().__init__(name, task)

        self.attention_model = Attention(input_shape=self.input_shape,
                                         num_rotations=1,
                                         preprocess=self.preprocess)
        self.transport_model = Transport(input_shape=self.input_shape,
                                         num_rotations=self.num_rotations,
                                         crop_size=self.crop_size,
                                         preprocess=self.preprocess,
                                         per_pixel_loss=False,
                                         six_dof=False)

        self.rpz_model = Transport(input_shape=self.input_shape,
                                   num_rotations=self.num_rotations,
                                   crop_size=self.crop_size,
                                   preprocess=self.preprocess,
                                   per_pixel_loss=False,
                                   six_dof=True)

        self.transport_model.set_bounds_pixel_size(self.bounds,
                                                   self.pixel_size)
        self.rpz_model.set_bounds_pixel_size(self.bounds, self.pixel_size)

        self.six_dof = True

        self.p0_pixel_error = tf.keras.metrics.Mean(name='p0_pixel_error')
        self.p1_pixel_error = tf.keras.metrics.Mean(name='p1_pixel_error')
        self.p0_theta_error = tf.keras.metrics.Mean(name='p0_theta_error')
        self.p1_theta_error = tf.keras.metrics.Mean(name='p1_theta_error')
        self.metrics = [
            self.p0_pixel_error, self.p1_pixel_error, self.p0_theta_error,
            self.p1_theta_error
        ]
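The four `tf.keras.metrics.Mean` objects above accumulate pick/place pixel and rotation errors across an evaluation run. A minimal sketch of the update/read/reset cycle for one of them (placeholder error values):

    import tensorflow as tf

    p0_pixel_error = tf.keras.metrics.Mean(name='p0_pixel_error')

    # Feed one scalar error per evaluated example; Mean keeps a running
    # average until it is reset.
    for err in (3.0, 1.0, 2.0):              # placeholder pixel distances
        p0_pixel_error.update_state(err)

    print(p0_pixel_error.result().numpy())   # 2.0
    p0_pixel_error.reset_states()            # start fresh for the next epoch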
Example #5
    def __init__(self, name, task, n_rotations=36):
        super().__init__(name, task, n_rotations)

        self.attention = Attention(in_shape=self.in_shape,
                                   n_rotations=1,
                                   preprocess=utils.preprocess)
        self.transport = TransportPerPixelLoss(in_shape=self.in_shape,
                                               n_rotations=self.n_rotations,
                                               crop_size=self.crop_size,
                                               preprocess=utils.preprocess)
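`TransportPerPixelLoss` above (and the `per_pixel_loss=True` variant in Example #7) replaces a single cross-entropy over the flattened output map with dense per-pixel supervision. The snippet below is only a generic illustration of that distinction, not the library's internal implementation:

    import numpy as np
    import tensorflow as tf

    # Illustrative shapes: one rotation-stacked Q-map, flattened, plus a
    # one-hot label marking the demonstrated place pixel/rotation.
    n_rotations, height, width = 36, 64, 64
    logits = tf.random.normal((1, n_rotations * height * width))
    label = np.zeros(n_rotations * height * width, dtype=np.float32)
    label[1234] = 1.0                                  # hypothetical target index
    label = tf.constant(label[None, :])

    # Single loss over the whole map.
    single = tf.nn.softmax_cross_entropy_with_logits(labels=label, logits=logits)

    # Per-pixel variant: every output location is treated as its own
    # binary classification and the losses are averaged.
    per_pixel = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(labels=label, logits=logits))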
Example #6
    def __init__(self, name, task, num_rotations=36):
        super().__init__(name, task, num_rotations, use_goal_image=True)

        self.attention_model = Attention(input_shape=self.input_shape,
                                         num_rotations=1,
                                         preprocess=self.preprocess)
        self.transport_model = TransportGoal(input_shape=self.input_shape,
                                             num_rotations=self.num_rotations,
                                             crop_size=self.crop_size,
                                             preprocess=self.preprocess)
Example #7
    def __init__(self, name, task):
        super().__init__(name, task)

        self.attention_model = Attention(input_shape=self.input_shape,
                                         num_rotations=1,
                                         preprocess=self.preprocess)
        self.transport_model = Transport(input_shape=self.input_shape,
                                         num_rotations=self.num_rotations,
                                         crop_size=self.crop_size,
                                         preprocess=self.preprocess,
                                         per_pixel_loss=True)
Example #8
    def __init__(self, name, task, num_rotations=24):
        # (Oct 26) set attn_no_targ=False, and that should be all we need along w/shape ...
        super().__init__(name, task, num_rotations, use_goal_image=True, attn_no_targ=False)

        # (Oct 26) Stack the goal image for the Attention module -- model cannot pick properly otherwise.
        a_shape = (self.input_shape[0], self.input_shape[1], int(self.input_shape[2] * 2))

        self.attention_model = Attention(input_shape=a_shape,
                                         num_rotations=1,
                                         preprocess=self.preprocess)
        self.transport_model = TransportGoal(input_shape=self.input_shape,
                                             num_rotations=self.num_rotations,
                                             crop_size=self.crop_size,
                                             preprocess=self.preprocess)
Example #9
    def __init__(self, name, task, num_rotations=24):
        super().__init__(name, task, num_rotations, use_goal_image=True, attn_no_targ=False)

        # We stack the goal image for both modules.
        in_shape = (self.input_shape[0], self.input_shape[1], int(self.input_shape[2] * 2))

        self.attention_model = Attention(input_shape=in_shape,
                                         num_rotations=1,
                                         preprocess=self.preprocess)
        self.transport_model = Transport(input_shape=in_shape,
                                         num_rotations=self.num_rotations,
                                         crop_size=self.crop_size,
                                         preprocess=self.preprocess,
                                         per_pixel_loss=False,  # NOTE: see docs above.
                                         use_goal_image=True)
Example #10
    def __init__(self, name, task, n_rotations=36):
        super().__init__(name, task, n_rotations)

        # Stack the goal image for the vanilla Transport module.
        t_shape = (self.in_shape[0], self.in_shape[1],
                   int(self.in_shape[2] * 2))

        self.attention = Attention(in_shape=self.in_shape,
                                   n_rotations=1,
                                   preprocess=utils.preprocess)
        self.transport = Transport(in_shape=t_shape,
                                   n_rotations=self.n_rotations,
                                   crop_size=self.crop_size,
                                   preprocess=utils.preprocess,
                                   per_pixel_loss=False,
                                   use_goal_image=True)
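The goal-conditioned constructors above double the channel dimension by stacking the goal heightmap behind the observation heightmap. A minimal NumPy sketch of that stacking (names chosen for illustration):

    import numpy as np

    in_shape = (320, 160, 6)
    obs_img  = np.zeros(in_shape, dtype=np.float32)   # current observation heightmap
    goal_img = np.zeros(in_shape, dtype=np.float32)   # goal observation heightmap

    # Concatenate along the channel axis: (320, 160, 6) x 2 -> (320, 160, 12).
    stacked = np.concatenate([obs_img, goal_img], axis=2)

    t_shape = (in_shape[0], in_shape[1], int(in_shape[2] * 2))
    assert stacked.shape == t_shape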