def step_fn(batch):
            with tf.GradientTape(persistent=True) as tape:
                # Forward pass: encode the known voxels, sample a latent, decode occupancy logits
                known = stack_known(batch)
                mean, logvar = self.encode(known)
                z = self.reparameterize(mean, logvar)
                sample_logit = self.decode(z)
                sample = tf.nn.sigmoid(sample_logit)
                output = {
                    'predicted_occ': sample,
                    'predicted_free': 1 - sample
                }
                metrics = nn.calc_metrics(output, batch)

                # VAE loss
                vae_loss = compute_vae_loss(z,
                                            mean,
                                            logvar,
                                            sample_logit,
                                            labels=batch['gt_occ'])
                metrics['loss/vae'] = vae_loss

                # GAN loss
                # Hard-threshold the decoder logits into a binary occupancy grid
                # (note: the cast is non-differentiable)
                fake_occ = tf.cast(sample_logit > 0, tf.float32)
                real_pair_est = self.discriminate(known, batch['gt_occ'])
                fake_pair_est = self.discriminate(known, fake_occ)
                gan_loss_g = 10000 * (1 + tf.reduce_mean(-fake_pair_est))
                gan_loss_d_no_gp = 1 + tf.reduce_mean(fake_pair_est -
                                                      real_pair_est)

                # gradient penalty
                gp = self.gradient_penalty(known, batch['gt_occ'], fake_occ)
                gan_loss_d = gan_loss_d_no_gp + gp

                metrics['loss/gan_g'] = gan_loss_g
                metrics['loss/gan_d'] = gan_loss_d
                metrics['loss/gan_gp'] = gp
                metrics['loss/gan_d_no_gp'] = gan_loss_d_no_gp

                # Combined losses for the generator/VAE and the discriminator
                generator_loss = vae_loss + gan_loss_g
                dis_loss = gan_loss_d

            # Compute and apply gradients outside the tape context so the
            # persistent tape does not also record the gradient ops
            vae_variables = self.encoder.trainable_variables + self.generator.trainable_variables
            vae_gradients = tape.gradient(generator_loss, vae_variables)
            clipped_vae_gradients = [
                tf.clip_by_value(g, -1e6, 1e6) for g in vae_gradients
            ]
            self.opt.apply_gradients(
                list(zip(clipped_vae_gradients, vae_variables)))

            dis_variables = self.discriminator.trainable_variables
            dis_gradients = tape.gradient(dis_loss, dis_variables)
            clipped_dis_gradients = [
                tf.clip_by_value(g, -1e6, 1e6) for g in dis_gradients
            ]
            self.gan_opt.apply_gradients(
                list(zip(clipped_dis_gradients, dis_variables)))

            # Release the persistent tape explicitly
            del tape
            return generator_loss, metrics
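The gradient_penalty helper called above is not shown in this snippet. A minimal sketch of a standard WGAN-GP penalty with the same call signature is given below; the interpolation over 5-D occupancy tensors and the penalty coefficient are assumptions, not the repository's actual implementation.

    def gradient_penalty(self, known, real_occ, fake_occ, coeff=10.0):
        # Sketch of a WGAN-GP penalty: push the critic's gradient norm toward 1
        # on random interpolations between real and generated occupancy grids.
        # Assumes [batch, x, y, z, channel] tensors.
        batch_size = tf.shape(real_occ)[0]
        alpha = tf.random.uniform([batch_size, 1, 1, 1, 1], 0.0, 1.0)
        interpolated = alpha * real_occ + (1.0 - alpha) * fake_occ
        with tf.GradientTape() as gp_tape:
            gp_tape.watch(interpolated)
            critic_score = self.discriminate(known, interpolated)
        grads = gp_tape.gradient(critic_score, interpolated)
        norm = tf.sqrt(
            tf.reduce_sum(tf.square(grads), axis=[1, 2, 3, 4]) + 1e-12)
        return coeff * tf.reduce_mean(tf.square(norm - 1.0))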
        def step_fn(batch):
            with tf.GradientTape() as tape:
                # Auto-encoder branch: its features feed the conditional VCNN and an
                # auxiliary decoder used for the reconstruction loss below
                ae_features = self.encoder(self.prep_ae_input(batch))
                cvcnn = self.cvcnn(
                    self.prep_cvcnn_inputs(batch['conditioned_occ'],
                                           ae_features))
                ae_output = self.decoder(ae_features)

                p_occ = tf.nn.sigmoid(cvcnn['p_occ_logits'])
                output = {'predicted_occ': p_occ, 'predicted_free': 1 - p_occ}
                metrics = nn.calc_metrics(output, batch)

                cross_ent = tf.nn.sigmoid_cross_entropy_with_logits(
                    logits=cvcnn['p_occ_logits'], labels=batch['gt_occ'])
                vcnn_loss = nn.reduce_sum_batch(cross_ent)

                ae_loss = nn.reduce_sum_batch(
                    tf.nn.sigmoid_cross_entropy_with_logits(
                        logits=ae_output, labels=batch['gt_occ']))
                metrics['loss/aux_loss'] = ae_loss

                # Main completion loss plus a down-weighted auxiliary reconstruction loss
                loss = vcnn_loss + 0.1 * ae_loss

                variables = self.trainable_variables
                gradients = tape.gradient(loss, variables)
                clipped_gradients = [
                    tf.clip_by_value(g, -1e6, 1e6) for g in gradients
                ]
                self.opt.apply_gradients(
                    list(zip(clipped_gradients, variables)))
                return loss, metrics
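Most of these examples reduce a per-voxel cross entropy with nn.reduce_sum_batch, which is not defined in the snippets. A plausible minimal sketch (an assumption, not the repository's helper) sums over every non-batch axis and then averages across the batch:

import tensorflow as tf

def reduce_sum_batch(x):
    # Sum the per-element loss over all non-batch dimensions,
    # then average over the batch.
    per_example = tf.reduce_sum(x, axis=list(range(1, len(x.shape))))
    return tf.reduce_mean(per_example)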
        def step_fn(batch):
            with tf.GradientTape() as tape:
                output = self(batch)

                metrics = nn.calc_metrics(output, batch)

                if self.params['loss'] == 'cross_entropy':
                    cross_ent = tf.keras.losses.binary_crossentropy(
                        batch['gt_occ'], output['predicted_occ'])
                    loss = nn.reduce_sum_batch(cross_ent)

                # The v4 stacknet adds a down-weighted auxiliary reconstruction loss
                if self.params['stacknet_version'] == 'v4':
                    ae_loss = tf.reduce_sum(
                        tf.keras.losses.binary_crossentropy(
                            batch['gt_occ'], output['aux_occ']))

                    metrics['loss/aux_loss'] = ae_loss
                    metrics['loss/vcnn_loss'] = loss
                    loss = loss + 0.1 * ae_loss

                variables = self.model.trainable_variables
                gradients = tape.gradient(loss, variables)

                clipped_gradients = [
                    tf.clip_by_value(g, -1e6, 1e6) for g in gradients
                ]

                self.opt.apply_gradients(
                    list(zip(clipped_gradients, variables)))
                return loss, metrics
        def step_fn(batch):
            with tf.GradientTape() as tape:
                output_logits = self(batch, training=True)

                # Convert logits to occupancy probabilities
                x = tf.nn.sigmoid(output_logits['predicted_occ'])

                output = {'predicted_occ': x, 'predicted_free': 1 - x}

                metrics = nn.calc_metrics(output, batch)

                if self.params['loss'] == 'mse':
                    loss = self.mse_loss(metrics)

                elif self.params['loss'] == 'cross_entropy':
                    cross_ent = tf.nn.sigmoid_cross_entropy_with_logits(
                        logits=output_logits['predicted_occ'],
                        labels=batch['gt_occ'])
                    loss = nn.reduce_sum_batch(cross_ent)

                variables = self.trainable_variables
                gradients = tape.gradient(loss, variables)

                self.opt.apply_gradients(list(zip(gradients, variables)))
                metrics.update(self.get_insights(variables, gradients))
                return loss, metrics
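Each step_fn closure takes one batch and returns (loss, metrics). How these closures are invoked is not shown in the snippets; one common pattern is to wrap them in a tf.function and iterate over a tf.data dataset, as in the hypothetical driver below (dataset, num_epochs, and the logging step are placeholders, not names from the repository).

@tf.function
def train_step(batch):
    # Compile the eager step function into a graph for speed
    return step_fn(batch)

for epoch in range(num_epochs):
    for batch in dataset:
        loss, metrics = train_step(batch)
        # e.g. write `metrics` to TensorBoard here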
        def step_fn_multiloss(batch):
            with tf.GradientTape() as tape:
                output_logits = self.model(self.prep_input(batch))
                sample = tf.nn.sigmoid(output_logits)
                output = {
                    'predicted_occ': sample,
                    'predicted_free': 1 - sample
                }
                metrics = nn.calc_metrics(output, batch)

                cross_ent = tf.nn.sigmoid_cross_entropy_with_logits(
                    logits=output_logits, labels=batch['gt_occ'])
                loss = nn.reduce_sum_batch(cross_ent)

                metrics['loss/0_step'] = loss

                # Multistep refinement: feed the thresholded prediction back in as the
                # conditioning input and re-run the model. Only the 0-step loss above is
                # backpropagated; the per-step losses are logged for monitoring.
                for i in range(5):
                    b = {
                        'conditioned_occ': tf.cast(output_logits > 0,
                                                   tf.float32)
                    }
                    output_logits = self.model(b)
                    cross_ent = tf.nn.sigmoid_cross_entropy_with_logits(
                        logits=output_logits, labels=batch['gt_occ'])
                    step_loss = nn.reduce_sum_batch(cross_ent)
                    metrics['loss/{}_step'.format(i + 1)] = step_loss

                variables = self.model.trainable_variables
                gradients = tape.gradient(loss, variables)

                clipped_gradients = [
                    tf.clip_by_value(g, -1e6, 1e6) for g in gradients
                ]

                self.opt.apply_gradients(
                    list(zip(clipped_gradients, variables)))
                return loss, metrics
        def step_fn(batch):
            with tf.GradientTape() as tape:
                output = self(batch, training=True)

                metrics = nn.calc_metrics(output, batch)

                if self.params['loss'] == 'cross_entropy':
                    loss = tf.reduce_sum(
                        tf.keras.losses.binary_crossentropy(
                            batch['gt_occ'], output['predicted_occ']))
                elif self.params['loss'] == 'mse':
                    loss = self.mse_loss(metrics)
                variables = self.trainable_variables
                gradients = tape.gradient(loss, variables)

                self.opt.apply_gradients(list(zip(gradients, variables)))
                return loss, metrics
        def step_fn_multiloss(batch):
            with tf.GradientTape() as tape:
                ae_features = self.encoder(self.prep_ae_input(batch))
                ae_output = self.decoder(ae_features)
                ae_loss = nn.reduce_sum_batch(
                    tf.nn.sigmoid_cross_entropy_with_logits(
                        logits=ae_output, labels=batch['gt_occ']))

                cvcnn_inp = self.prep_cvcnn_inputs(batch['conditioned_occ'],
                                                   ae_features)
                # Start from the auxiliary reconstruction loss, then add every refinement step
                loss = ae_loss

                # Each pass conditions on the previous pass's thresholded prediction
                for i in range(6):
                    cvcnn = self.cvcnn(cvcnn_inp)

                    # Report metrics from the first pass only
                    if i == 0:
                        p_occ = tf.nn.sigmoid(cvcnn['p_occ_logits'])
                        output = {
                            'predicted_occ': p_occ,
                            'predicted_free': 1 - p_occ
                        }
                        metrics = nn.calc_metrics(output, batch)

                    cross_ent = tf.nn.sigmoid_cross_entropy_with_logits(
                        logits=cvcnn['p_occ_logits'], labels=batch['gt_occ'])
                    step_loss = nn.reduce_sum_batch(cross_ent)
                    loss = loss + step_loss
                    metrics['loss/{}_step'.format(i)] = step_loss

                    cvcnn_inp = self.prep_cvcnn_inputs(
                        tf.cast(cvcnn['p_occ_logits'] > 0, tf.float32),
                        ae_features)

                metrics['loss/aux_loss'] = ae_loss

                variables = self.trainable_variables
                gradients = tape.gradient(loss, variables)
                clipped_gradients = [
                    tf.clip_by_value(g, -1e6, 1e6) for g in gradients
                ]
                self.opt.apply_gradients(
                    list(zip(clipped_gradients, variables)))
                return loss, metrics
    def train_step(self, batch):
        with tf.GradientTape() as tape:
            known = stack_known(batch)
            mean, logvar = self.encode(known)
            true_angle = batch['angle']

            z = self.reparameterize(mean, logvar)

            # Decode with the ground-truth angle substituted into the latent
            z_corrected = self.replace_true_angle(z, true_angle)

            sample_logit = self.decode(z_corrected)

            # Split the angle component off from the rest of the latent vector
            z_f, sampled_angle = self.split_angle(z)
            mean_f, mean_angle = self.split_angle(mean)
            logvar_f, logvar_angle = self.split_angle(logvar)

            vae_loss = compute_vae_loss(z_f,
                                        mean_f,
                                        logvar_f,
                                        sample_logit,
                                        labels=batch['gt_occ'])
            angle_loss = compute_angle_loss(true_angle, mean_angle,
                                            logvar_angle)
            # angle_loss = compute_abs_angle_loss(true_angle, mean_angle)
            loss = vae_loss + angle_loss

        vae_variables = self.encoder.trainable_variables + self.generator.trainable_variables
        gradients = tape.gradient(loss, vae_variables)

        self.optimizer.apply_gradients(list(zip(gradients, vae_variables)))

        sample = tf.nn.sigmoid(sample_logit)
        output = {'predicted_occ': sample, 'predicted_free': 1 - sample}
        metrics = nn.calc_metrics(output, batch)

        metrics['loss/angle'] = angle_loss
        metrics['loss/vae'] = vae_loss
        metrics['values/variance'] = tf.reduce_mean(tf.exp(logvar_angle))

        m = {k: tf.reduce_mean(metrics[k]) for k in metrics}
        m['loss'] = loss
        return output, m
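Several of these training steps call self.reparameterize(mean, logvar). The implementation is not shown in the snippets; it presumably follows the standard VAE reparameterization trick, sketched here as an assumption:

    def reparameterize(self, mean, logvar):
        # z = mean + sigma * eps with eps ~ N(0, I), keeping sampling differentiable
        eps = tf.random.normal(shape=tf.shape(mean))
        return mean + tf.exp(0.5 * logvar) * eps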
        def step_fn(batch):
            with tf.GradientTape() as tape:
                output_logits = self.model(self.prep_input(batch))
                sample = tf.nn.sigmoid(output_logits)
                output = {
                    'predicted_occ': sample,
                    'predicted_free': 1 - sample
                }
                metrics = nn.calc_metrics(output, batch)

                cross_ent = tf.nn.sigmoid_cross_entropy_with_logits(
                    logits=output_logits, labels=batch['gt_occ'])
                loss = nn.reduce_sum_batch(cross_ent)
                variables = self.model.trainable_variables
                gradients = tape.gradient(loss, variables)
                clipped_gradients = [
                    tf.clip_by_value(g, -1e6, 1e6) for g in gradients
                ]
                self.opt.apply_gradients(
                    list(zip(clipped_gradients, variables)))
                return loss, metrics
        def step_fn_multiloss(batch):
            with tf.GradientTape() as tape:
                output = self(batch)
                metrics = nn.calc_metrics(output, batch)
                loss = tf.reduce_sum(
                    tf.keras.losses.binary_crossentropy(
                        batch['gt_occ'], output['predicted_occ']))
                loss = loss / self.batch_size
                metrics['loss/0_step'] = loss
                # Single refinement pass: condition on the model's own prediction and only
                # add its loss once the 0-step prediction is accurate enough
                b = {'conditioned_occ': output['predicted_occ']}
                output = self(b)
                step_loss = tf.reduce_sum(
                    tf.keras.losses.binary_crossentropy(
                        batch['gt_occ'], output['predicted_occ']))
                step_loss = step_loss / self.batch_size
                metrics['loss/1_step'] = step_loss

                loss = tf.cond(
                    metrics['pred|gt/p(predicted_occ|gt_occ)'] > 0.95,
                    lambda: tf.add(step_loss, loss), lambda: loss)

                variables = self.model.trainable_variables
                gradients = tape.gradient(loss, variables)

                clipped_gradients = [
                    tf.clip_by_value(g, -1e6, 1e6) for g in gradients
                ]

                self.opt.apply_gradients(
                    list(zip(clipped_gradients, variables)))
                return loss, metrics
        def step_fn(batch):
            with tf.GradientTape() as tape:
                known = stack_known(batch)
                mean, logvar = self.encode(known)
                z = self.reparameterize(mean, logvar)
                sample_logit = self.decode(z)
                vae_loss = compute_vae_loss(z,
                                            mean,
                                            logvar,
                                            sample_logit,
                                            labels=batch['gt_occ'])

                sample = tf.nn.sigmoid(sample_logit)
                output = {
                    'predicted_occ': sample,
                    'predicted_free': 1 - sample
                }
                metrics = nn.calc_metrics(output, batch)

                vae_variables = self.encoder.trainable_variables + self.generator.trainable_variables
                gradients = tape.gradient(vae_loss, vae_variables)

                self.opt.apply_gradients(list(zip(gradients, vae_variables)))
                return vae_loss, metrics
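compute_vae_loss is used by several of the VAE examples but never defined in them. A minimal sketch in the style of the standard TensorFlow convolutional-VAE example is shown below; the single-sample Monte Carlo KL estimate, the 5-D label shape, and the batch averaging are assumptions, and the repository's helper may differ.

import numpy as np
import tensorflow as tf

def log_normal_pdf(sample, mean, logvar):
    # Log-density of a diagonal Gaussian, summed over the latent dimensions
    log2pi = tf.math.log(2.0 * np.pi)
    return tf.reduce_sum(
        -0.5 * ((sample - mean) ** 2 * tf.exp(-logvar) + logvar + log2pi),
        axis=1)

def compute_vae_loss(z, mean, logvar, sample_logit, labels):
    # Reconstruction term: per-voxel cross entropy, summed per example
    cross_ent = tf.nn.sigmoid_cross_entropy_with_logits(
        logits=sample_logit, labels=labels)
    logpx_z = -tf.reduce_sum(cross_ent, axis=[1, 2, 3, 4])
    # Single-sample Monte Carlo estimate of the KL term
    logpz = log_normal_pdf(z, 0.0, 0.0)
    logqz_x = log_normal_pdf(z, mean, logvar)
    # Negative ELBO, averaged over the batch
    return -tf.reduce_mean(logpx_z + logpz - logqz_x)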