        # Requires `import tensorflow as tf`; `nn` is this project's helper
        # module providing reduce_sum_batch and calc_metrics.
        def step_fn(batch):
            with tf.GradientTape() as tape:
                # Encode the AE input, run the conditional VCNN on the known
                # occupancy plus the AE features, and decode the AE features
                # for the auxiliary reconstruction loss.
                ae_features = self.encoder(self.prep_ae_input(batch))
                cvcnn = self.cvcnn(
                    self.prep_cvcnn_inputs(batch['conditioned_occ'],
                                           ae_features))
                ae_output = self.decoder(ae_features)

                p_occ = tf.nn.sigmoid(cvcnn['p_occ_logits'])
                output = {'predicted_occ': p_occ, 'predicted_free': 1 - p_occ}
                metrics = nn.calc_metrics(output, batch)

                cross_ent = tf.nn.sigmoid_cross_entropy_with_logits(
                    logits=cvcnn['p_occ_logits'], labels=batch['gt_occ'])
                vcnn_loss = nn.reduce_sum_batch(cross_ent)

                # Auxiliary autoencoder reconstruction loss, weighted at 0.1.
                ae_loss = nn.reduce_sum_batch(
                    tf.nn.sigmoid_cross_entropy_with_logits(
                        logits=ae_output, labels=batch['gt_occ']))
                metrics['loss/aux_loss'] = ae_loss

                loss = vcnn_loss + 0.1 * ae_loss

            # Compute and apply gradients outside the tape context so the
            # gradient computation itself is not recorded on the tape.
            variables = self.trainable_variables
            gradients = tape.gradient(loss, variables)
            # Clipping to +/-1e6 mainly guards against overflow/inf rather
            # than acting as a conventional gradient clip.
            clipped_gradients = [
                tf.clip_by_value(g, -1e6, 1e6) for g in gradients
            ]
            self.opt.apply_gradients(zip(clipped_gradients, variables))
            return loss, metrics
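# `nn.reduce_sum_batch` is used by every step function here but its
# implementation is not shown. A minimal stand-in consistent with how it is
# called -- sum each example's per-voxel losses, then average over the batch
# (the batch-averaging is an assumption; it may equally be a plain sum):
import tensorflow as tf

def reduce_sum_batch(x):
    """Hypothetical sketch of nn.reduce_sum_batch: reduce every non-batch
    axis by summation, then take the mean over the batch axis."""
    per_example = tf.reduce_sum(x, axis=list(range(1, len(x.shape))))
    return tf.reduce_mean(per_example)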
        def step_fn(batch):
            with tf.GradientTape() as tape:
                output = self(batch)

                metrics = nn.calc_metrics(output, batch)

                if self.params['loss'] == 'cross_entropy':
                    cross_ent = tf.keras.losses.binary_crossentropy(
                        batch['gt_occ'], output['predicted_occ'])
                    loss = nn.reduce_sum_batch(cross_ent)
                else:
                    # Guard against `loss` being unset (NameError) below.
                    raise ValueError(
                        'Unsupported loss: {}'.format(self.params['loss']))

                if self.params['stacknet_version'] == 'v4':
                    # Note: a full reduce_sum here, unlike the per-batch
                    # reduction used for the main loss above.
                    ae_loss = tf.reduce_sum(
                        tf.keras.losses.binary_crossentropy(
                            batch['gt_occ'], output['aux_occ']))

                    metrics['loss/aux_loss'] = ae_loss
                    metrics['loss/vcnn_loss'] = loss
                    loss = loss + 0.1 * ae_loss

            # Gradient computation and the optimizer step sit outside the
            # tape context, as in the first example.
            variables = self.model.trainable_variables
            gradients = tape.gradient(loss, variables)
            clipped_gradients = [
                tf.clip_by_value(g, -1e6, 1e6) for g in gradients
            ]
            self.opt.apply_gradients(zip(clipped_gradients, variables))
            return loss, metrics
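# These step functions are defined inside a trainer method; a typical way to
# drive one is to wrap it in tf.function and iterate a tf.data pipeline that
# yields the batch dicts used above. All names here (train_step, dataset,
# num_epochs) are hypothetical:
import tensorflow as tf

@tf.function
def train_step(batch):
    # step_fn closes over the model and optimizer via `self`.
    return step_fn(batch)

for epoch in range(num_epochs):      # num_epochs: hypothetical
    for batch in dataset:            # dataset: hypothetical tf.data.Dataset
        loss, metrics = train_step(batch)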
Example #3
        def step_fn(batch):
            with tf.GradientTape() as tape:
                output_logits = self(batch, training=True)

                x = tf.nn.sigmoid(output_logits['predicted_occ'])

                output = {'predicted_occ': x, 'predicted_free': 1 - x}

                metrics = nn.calc_metrics(output, batch)

                if self.params['loss'] == 'mse':
                    loss = self.mse_loss(metrics)

                elif self.params['loss'] == 'cross_entropy':
                    cross_ent = tf.nn.sigmoid_cross_entropy_with_logits(
                        logits=output_logits['predicted_occ'],
                        labels=batch['gt_occ'])
                    loss = nn.reduce_sum_batch(cross_ent)

                else:
                    raise ValueError(
                        'Unsupported loss: {}'.format(self.params['loss']))

            variables = self.trainable_variables
            gradients = tape.gradient(loss, variables)

            # No clipping here, unlike the other step functions.
            self.opt.apply_gradients(zip(gradients, variables))
            # Log gradient/variable statistics alongside the loss metrics.
            metrics.update(self.get_insights(variables, gradients))
            return loss, metrics
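# The other step functions clip each gradient value to +/-1e6, which only
# guards against overflow; this one applies gradients unclipped. If actual
# clipping were wanted, the usual alternative is a global-norm clip -- a
# sketch, not what this codebase does (clip_norm=5.0 is an arbitrary choice):
import tensorflow as tf

gradients = tape.gradient(loss, variables)
clipped, _ = tf.clip_by_global_norm(gradients, clip_norm=5.0)
self.opt.apply_gradients(zip(clipped, variables))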
        def step_fn_multiloss(batch):
            with tf.GradientTape() as tape:
                output_logits = self.model(self.prep_input(batch))
                sample = tf.nn.sigmoid(output_logits)
                output = {
                    'predicted_occ': sample,
                    'predicted_free': 1 - sample
                }
                metrics = nn.calc_metrics(output, batch)

                cross_ent = tf.nn.sigmoid_cross_entropy_with_logits(
                    logits=output_logits, labels=batch['gt_occ'])
                loss = nn.reduce_sum_batch(cross_ent)

                metrics['loss/0_step'] = loss

                # Re-feed the thresholded prediction and log per-step losses.
                # These are metrics only: they are never added to `loss`, and
                # the hard threshold below is non-differentiable, so no
                # gradient could flow through the loop anyway.
                for i in range(5):
                    b = {
                        'conditioned_occ': tf.cast(output_logits > 0,
                                                   tf.float32)
                    }
                    output_logits = self.model(b)
                    cross_ent = tf.nn.sigmoid_cross_entropy_with_logits(
                        logits=output_logits, labels=batch['gt_occ'])
                    step_loss = nn.reduce_sum_batch(cross_ent)
                    metrics['loss/{}_step'.format(i + 1)] = step_loss

            variables = self.model.trainable_variables
            gradients = tape.gradient(loss, variables)
            clipped_gradients = [
                tf.clip_by_value(g, -1e6, 1e6) for g in gradients
            ]
            self.opt.apply_gradients(zip(clipped_gradients, variables))
            return loss, metrics
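# Why the multistep losses could not train the model even if they were added
# to `loss`: tf.cast(logits > 0, tf.float32) is a hard threshold with no
# registered gradient. A minimal check:
import tensorflow as tf

x = tf.Variable([0.5, -1.0])
with tf.GradientTape() as tape:
    y = tf.reduce_sum(tf.cast(x > 0, tf.float32))
print(tape.gradient(y, x))  # None -- the comparison op has no gradient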
        def step_fn_multiloss(batch):
            with tf.GradientTape() as tape:
                ae_features = self.encoder(self.prep_ae_input(batch))
                ae_output = self.decoder(ae_features)
                ae_loss = nn.reduce_sum_batch(
                    tf.nn.sigmoid_cross_entropy_with_logits(
                        logits=ae_output, labels=batch['gt_occ']))

                cvcnn_inp = self.prep_cvcnn_inputs(batch['conditioned_occ'],
                                                   ae_features)
                loss = ae_loss

                # Six refinement passes. Each pass's loss reaches the encoder
                # through ae_features, but gradients do not cross passes: the
                # re-fed occupancy is hard-thresholded below.
                for i in range(6):
                    cvcnn = self.cvcnn(cvcnn_inp)

                    if i == 0:
                        # Report occupancy metrics on the first pass only.
                        p_occ = tf.nn.sigmoid(cvcnn['p_occ_logits'])
                        output = {
                            'predicted_occ': p_occ,
                            'predicted_free': 1 - p_occ
                        }
                        metrics = nn.calc_metrics(output, batch)

                    cross_ent = tf.nn.sigmoid_cross_entropy_with_logits(
                        logits=cvcnn['p_occ_logits'], labels=batch['gt_occ'])
                    step_loss = nn.reduce_sum_batch(cross_ent)
                    loss = loss + step_loss
                    metrics['loss/{}_step'.format(i)] = step_loss

                    cvcnn_inp = self.prep_cvcnn_inputs(
                        tf.cast(cvcnn['p_occ_logits'] > 0, tf.float32),
                        ae_features)

                metrics['loss/aux_loss'] = ae_loss

            variables = self.trainable_variables
            gradients = tape.gradient(loss, variables)
            clipped_gradients = [
                tf.clip_by_value(g, -1e6, 1e6) for g in gradients
            ]
            self.opt.apply_gradients(zip(clipped_gradients, variables))
            return loss, metrics
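# The same refinement loop can be run at inference time without a tape. A
# hypothetical sketch reusing the method and key names from the training
# code above (`refine` itself is not part of the original code):
import tensorflow as tf

def refine(self, batch, steps=6):
    ae_features = self.encoder(self.prep_ae_input(batch))
    inp = self.prep_cvcnn_inputs(batch['conditioned_occ'], ae_features)
    for _ in range(steps):
        cvcnn = self.cvcnn(inp)
        inp = self.prep_cvcnn_inputs(
            tf.cast(cvcnn['p_occ_logits'] > 0, tf.float32), ae_features)
    return tf.nn.sigmoid(cvcnn['p_occ_logits'])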
        def step_fn(batch):
            with tf.GradientTape() as tape:
                output_logits = self.model(self.prep_input(batch))
                sample = tf.nn.sigmoid(output_logits)
                output = {
                    'predicted_occ': sample,
                    'predicted_free': 1 - sample
                }
                metrics = nn.calc_metrics(output, batch)

                cross_ent = tf.nn.sigmoid_cross_entropy_with_logits(
                    logits=output_logits, labels=batch['gt_occ'])
                loss = nn.reduce_sum_batch(cross_ent)
            variables = self.model.trainable_variables
            gradients = tape.gradient(loss, variables)
            clipped_gradients = [
                tf.clip_by_value(g, -1e6, 1e6) for g in gradients
            ]
            self.opt.apply_gradients(zip(clipped_gradients, variables))
            return loss, metrics
    def mse_loss(self, metrics):
        l_occ = nn.reduce_sum_batch(metrics['mse/occ'])
        l_free = nn.reduce_sum_batch(metrics['mse/free'])
        return l_occ + l_free
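# mse_loss assumes nn.calc_metrics exposes per-voxel squared errors under
# 'mse/occ' and 'mse/free'. A hypothetical stand-in consistent with that
# usage (the real calc_metrics is not shown anywhere in this section):
import tensorflow as tf

def calc_mse_metrics(output, batch):
    gt = batch['gt_occ']
    return {
        # Squared error on voxels that are occupied in the ground truth...
        'mse/occ': gt * tf.square(output['predicted_occ'] - gt),
        # ...and on voxels that are free.
        'mse/free': (1 - gt) * tf.square(output['predicted_free'] - (1 - gt)),
    }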