Example #1
def train(args, model, train_loader, optimizer, epoch, training_step, writer):
    losses = utils.AverageMeter("Loss", ":.6f")
    progress = utils.ProgressMeter(len(train_loader), [losses],
                                   prefix="Epoch: [{}]".format(epoch))
    model.train()
    for batch_idx, batch in enumerate(train_loader):
        batch = [tensor.cuda() for tensor in batch]
        (
            obs_traj,
            pred_traj_gt,
            obs_traj_rel,
            pred_traj_gt_rel,
            non_linear_ped,
            loss_mask,
            seq_start_end,
        ) = batch
        optimizer.zero_grad()
        loss = torch.zeros(1).to(pred_traj_gt)
        l2_loss_rel = []
        loss_mask = loss_mask[:, args.obs_len:]

        if training_step == 1 or training_step == 2:
            model_input = obs_traj_rel
            pred_traj_fake_rel = model(model_input, obs_traj, seq_start_end, 1,
                                       training_step)
            l2_loss_rel.append(
                l2_loss(pred_traj_fake_rel, model_input, loss_mask,
                        mode="raw"))
        else:
            model_input = torch.cat((obs_traj_rel, pred_traj_gt_rel), dim=0)
            for _ in range(args.best_k):
                pred_traj_fake_rel = model(model_input, obs_traj,
                                           seq_start_end, 0)
                l2_loss_rel.append(
                    l2_loss(
                        pred_traj_fake_rel,
                        model_input[-args.pred_len:],
                        loss_mask,
                        mode="raw",
                    ))

        l2_loss_sum_rel = torch.zeros(1).to(pred_traj_gt)
        l2_loss_rel = torch.stack(l2_loss_rel, dim=1)
        for start, end in seq_start_end.data:
            _l2_loss_rel = torch.narrow(l2_loss_rel, 0, start, end - start)
            _l2_loss_rel = torch.sum(_l2_loss_rel, dim=0)  # [20]
            _l2_loss_rel = torch.min(_l2_loss_rel) / (
                (pred_traj_fake_rel.shape[0]) * (end - start))
            l2_loss_sum_rel += _l2_loss_rel

        loss += l2_loss_sum_rel
        losses.update(loss.item(), obs_traj.shape[1])
        loss.backward()
        optimizer.step()
        if batch_idx % args.print_every == 0:
            progress.display(batch_idx)
    writer.add_scalar("train_loss", losses.avg, epoch)
Example #2
def cal_l2_losses(pred_traj_gt, pred_traj_gt_rel, pred_traj_fake,
                  pred_traj_fake_rel, loss_mask):
    g_l2_loss_abs = l2_loss(pred_traj_fake,
                            pred_traj_gt,
                            loss_mask,
                            mode='sum')
    g_l2_loss_rel = l2_loss(pred_traj_fake_rel,
                            pred_traj_gt_rel,
                            loss_mask,
                            mode='sum')
    return g_l2_loss_abs, g_l2_loss_rel
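Note: the PyTorch examples on this page all call an l2_loss helper that is not shown. The sketch below is a plausible, Social-GAN-style reconstruction inferred from the call sites (optional loss_mask, mode of 'raw', 'sum', or 'average'); it is an illustration, not the exact code any one example imports.

import torch


def l2_loss(pred_traj, pred_traj_gt, loss_mask=None, mode='average'):
    """Squared L2 loss over trajectories.

    pred_traj, pred_traj_gt: (seq_len, batch, 2) tensors.
    loss_mask: optional (batch, seq_len) tensor of 0/1 weights.
    """
    # Per-element squared error, reordered to (batch, seq_len, 2).
    loss = (pred_traj_gt.permute(1, 0, 2) - pred_traj.permute(1, 0, 2)) ** 2
    if loss_mask is not None:
        loss = loss_mask.unsqueeze(dim=2) * loss
    if mode == 'sum':
        return torch.sum(loss)
    if mode == 'raw':
        return loss.sum(dim=2).sum(dim=1)  # one value per pedestrian
    # 'average': normalize by the number of mask entries (or elements).
    denom = torch.numel(loss_mask) if loss_mask is not None else loss.numel()
    return torch.sum(loss) / denom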
Example #3
    def build_model(self):
        print("building model")

        # Placeholder for noisy data
        self.noise = tf.placeholder(tf.float32, shape=self.input_shape, name='noise')
        # Placeholders for real training samples
        self.input_real = tf.placeholder(tf.float32, shape=self.input_shape, name='input_real')
        # Placeholders for fake generated samples
        self.input_fake = tf.placeholder(tf.float32, shape=self.input_shape, name='input_fake')
        # Placeholder for test samples
        self.input_test = tf.placeholder(tf.float32, shape=self.input_shape, name='input_test')

        self.generation = self.generator(inputs=self.noise, reuse=False, scope_name='generator')
        self.discrimination = self.discriminator(inputs=self.generation, reuse=False, scope_name='discriminator')

        # Generator wants to fool discriminator
        self.generator_loss = l2_loss(y=tf.ones_like(self.discrimination), y_hat=self.discrimination)

        # Single generator here, so there is no cycle loss to merge

        # Discriminator loss
        self.discrimination_input_real = self.discriminator(inputs=self.input_real,
                                                            reuse=True, scope_name='discriminator')
        self.discrimination_input_fake = self.discriminator(inputs=self.input_fake,
                                                            reuse=True, scope_name='discriminator')

        self.discriminator_loss_input_real = l2_loss(y=tf.ones_like(self.discrimination_input_real),
                                                     y_hat=self.discrimination_input_real)
        self.discriminator_loss_input_fake = l2_loss(y=tf.zeros_like(self.discrimination_input_fake),
                                                     y_hat=self.discrimination_input_fake)

        self.discriminator_loss = (self.discriminator_loss_input_real + self.discriminator_loss_input_fake) / 2

        # Single discriminator, so there is nothing further to merge

        # Categorize variables because we have to optimize the two sets of the variables separately
        trainable_variables = tf.trainable_variables()
        self.discriminator_vars = [var for var in trainable_variables if 'discriminator' in var.name]
        self.generator_vars = [var for var in trainable_variables if 'generator' in var.name]

        self.generation_test = self.generator(inputs=self.input_test, reuse=True, scope_name='generator')
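The l1_loss and l2_loss helpers called with y=/y_hat= keyword arguments in these TensorFlow examples are also external. A minimal sketch consistent with the call sites (an assumption, not the repositories' exact code):

import tensorflow as tf  # TF1-style graph code, as in the examples


def l1_loss(y, y_hat):
    # Mean absolute error between target y and prediction y_hat.
    return tf.reduce_mean(tf.abs(y - y_hat))


def l2_loss(y, y_hat):
    # Mean squared error between target y and prediction y_hat.
    return tf.reduce_mean(tf.square(y - y_hat))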
Example #4
def generator_step(batch, generator, discriminator, g_loss_fn, optimizer_g):
    """This step is similar to Social GAN Code"""
    if USE_GPU:
        batch = [tensor.cuda() for tensor in batch]
    else:
        batch = [tensor for tensor in batch]
    (obs_traj, pred_traj_gt, obs_traj_rel, pred_traj_gt_rel, loss_mask,
     seq_start_end, obs_ped_speed, pred_ped_speed) = batch

    losses = {}
    loss = torch.zeros(1).to(pred_traj_gt)
    g_l2_loss_rel = []

    loss_mask = loss_mask[:, OBS_LEN:]

    for _ in range(BEST_K):
        generator_out = generator(obs_traj, obs_traj_rel, seq_start_end,
                                  obs_ped_speed, pred_ped_speed, pred_traj_gt,
                                  TRAIN_METRIC, SPEED_TO_ADD)

        pred_traj_fake_rel = generator_out
        pred_traj_fake = relative_to_abs(pred_traj_fake_rel, obs_traj[-1])

        if L2_LOSS_WEIGHT > 0:
            g_l2_loss_rel.append(L2_LOSS_WEIGHT * l2_loss(
                pred_traj_fake_rel, pred_traj_gt_rel, loss_mask, mode='raw'))

    g_l2_loss_sum_rel = torch.zeros(1).to(pred_traj_gt)
    if L2_LOSS_WEIGHT > 0:
        g_l2_loss_rel = torch.stack(g_l2_loss_rel, dim=1)
        for start, end in seq_start_end.data:
            _g_l2_loss_rel = g_l2_loss_rel[start:end]
            _g_l2_loss_rel = torch.sum(_g_l2_loss_rel, dim=0)
            _g_l2_loss_rel = torch.min(_g_l2_loss_rel) / torch.sum(
                loss_mask[start:end])
            g_l2_loss_sum_rel += _g_l2_loss_rel
        losses['G_l2_loss_rel'] = g_l2_loss_sum_rel.item()
        loss += g_l2_loss_sum_rel
    traj_fake = torch.cat([obs_traj, pred_traj_fake], dim=0)
    traj_fake_rel = torch.cat([obs_traj_rel, pred_traj_fake_rel], dim=0)
    ped_speed = torch.cat([obs_ped_speed, pred_ped_speed], dim=0)

    scores_fake = discriminator(traj_fake, traj_fake_rel, ped_speed,
                                seq_start_end)
    discriminator_loss = g_loss_fn(scores_fake)

    loss += discriminator_loss
    losses['G_discriminator_loss'] = discriminator_loss.item()
    losses['G_total_loss'] = loss.item()

    optimizer_g.zero_grad()
    loss.backward()
    optimizer_g.step()

    return losses
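Several generator_step examples convert the generator's relative displacements back to absolute coordinates with relative_to_abs. The helper is not shown; a common Social-GAN-style implementation, assumed here, is a cumulative sum of displacements added to the last observed position:

import torch


def relative_to_abs(rel_traj, start_pos):
    """rel_traj: (seq_len, batch, 2) displacements; start_pos: (batch, 2)."""
    rel_traj = rel_traj.permute(1, 0, 2)          # (batch, seq_len, 2)
    displacement = torch.cumsum(rel_traj, dim=1)  # running sum of the steps
    start_pos = torch.unsqueeze(start_pos, dim=1)
    abs_traj = displacement + start_pos
    return abs_traj.permute(1, 0, 2)              # back to (seq_len, batch, 2)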
Example #5
def generator_step(batch, generator, discriminator, g_loss_fn, optimizer_g):

    batch = [tensor.cuda() for tensor in batch]
    # (obs_traj, pred_traj_gt, obs_traj_rel, pred_traj_gt_rel, vgg_list) = batch
    # (obs_traj, pred_traj_gt, obs_traj_rel, pred_traj_gt_rel) = batch
    (obs_traj, pred_traj_gt, obs_traj_rel, pred_traj_gt_rel, n_l, l_m, V_obs,
     A_obs, V_pre, A_pre, vgg_list) = batch
    losses = {}
    loss = torch.zeros(1).to(pred_traj_gt)

    g_l2_loss_rel = []
    for _ in range(BEST_K):
        # generator_out = generator(obs_traj, obs_traj_rel, vgg_list)
        generator_out = generator(obs_traj, obs_traj_rel, V_obs, A_obs,
                                  vgg_list)  # generates relative displacements
        pred_traj_fake_rel = generator_out
        pred_traj_fake = relative_to_abs(pred_traj_fake_rel,
                                         obs_traj[0, :, :, -1])  # 12*3*2, T V C

        g_l2_loss_rel.append(
            l2_loss(  # generated vs. ground-truth displacements, n*1
                pred_traj_fake_rel,  # T V C
                pred_traj_gt_rel,  # N V C T
                mode='raw'))
    # K trajectories generated, with a loss for each: shapes K, V
    npeds = obs_traj.size(1)  # V
    g_l2_loss_sum_rel = torch.zeros(1).to(pred_traj_gt)  # 1
    g_l2_loss_rel = torch.stack(g_l2_loss_rel, dim=1)  # stack -> (V, K)
    _g_l2_loss_rel = torch.sum(g_l2_loss_rel, dim=0)  # sum over peds -> (K,)

    _g_l2_loss_rel = torch.min(_g_l2_loss_rel) / (npeds * PRED_LEN)  # min over K, then average
    g_l2_loss_sum_rel += _g_l2_loss_rel
    losses['G_l2_loss_rel'] = g_l2_loss_sum_rel.item()
    loss += g_l2_loss_sum_rel  # loss of the generated trajectories
    pred_traj_fake = pred_traj_fake.permute(1, 2, 0)  # T V C -> V C T
    pred_traj_fake_rel = pred_traj_fake_rel.permute(1, 2, 0)
    traj_fake = torch.cat([obs_traj[0], pred_traj_fake], dim=2)  # V C T, T=20
    traj_fake_rel = torch.cat([obs_traj_rel[0], pred_traj_fake_rel], dim=2)

    scores_fake = discriminator(traj_fake, traj_fake_rel)  # scores for the generated trajectories
    discriminator_loss = g_loss_fn(scores_fake)
    loss += discriminator_loss  # add the adversarial term
    losses['G_discriminator_loss'] = discriminator_loss.item()
    losses['G_total_loss'] = loss.item()

    optimizer_g.zero_grad()
    loss.backward()
    optimizer_g.step()

    return losses
Example #6
    def _setup_loss_graph(self, s_output_tbi, s_target_tbi, s_step_size):
        """
        Connect a loss function to the graph
        See data.py for explanation of the slicing part
        """
        s_sliced_output_tbi = s_output_tbi[-s_step_size :]
        s_sliced_target_tbi = s_target_tbi[-s_step_size :]

        if self._options['loss_type'] == 'l2':
            return l2_loss(s_sliced_output_tbi, s_sliced_target_tbi)
        if self._options['loss_type'] == 'l1':
            return l1_loss(s_sliced_output_tbi, s_sliced_target_tbi)
        if self._options['loss_type'] == 'huber':
            delta = self._options['huber_delta']
            return huber_loss(s_sliced_output_tbi, s_sliced_target_tbi, delta)
        
        assert False, 'Invalid loss_type option'
        return tt.alloc(np.float32(0.))
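_setup_loss_graph dispatches to l2_loss, l1_loss, and huber_loss helpers that are outside this excerpt. A minimal Theano sketch consistent with the (time, batch, input) tensors used above, offered as an illustration rather than the original code:

import theano.tensor as tt


def l2_loss(s_output_tbi, s_target_tbi):
    # Mean squared error over all elements.
    return tt.mean(tt.sqr(s_output_tbi - s_target_tbi))


def l1_loss(s_output_tbi, s_target_tbi):
    # Mean absolute error over all elements.
    return tt.mean(tt.abs_(s_output_tbi - s_target_tbi))


def huber_loss(s_output_tbi, s_target_tbi, delta):
    # Quadratic near zero, linear beyond delta.
    abs_err = tt.abs_(s_output_tbi - s_target_tbi)
    quadratic = tt.minimum(abs_err, delta)
    linear = abs_err - quadratic
    return tt.mean(0.5 * tt.sqr(quadratic) + delta * linear)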
Example #7
def generator_step(batch, generator, discriminator, g_loss_fn, optimizer_g):

    batch = [tensor.cuda() for tensor in batch]
    (obs_traj, pred_traj_gt, obs_traj_rel, pred_traj_gt_rel, vgg_list) = batch
    losses = {}
    loss = torch.zeros(1).to(pred_traj_gt)

    g_l2_loss_rel = []
    for _ in range(BEST_K):
        generator_out = generator(obs_traj, obs_traj_rel, vgg_list)

        pred_traj_fake_rel = generator_out
        pred_traj_fake = relative_to_abs(pred_traj_fake_rel,
                                         obs_traj[-1, :, 0, :])

        g_l2_loss_rel.append(
            l2_loss(pred_traj_fake_rel, pred_traj_gt_rel, mode='raw'))

    npeds = obs_traj.size(1)
    g_l2_loss_sum_rel = torch.zeros(1).to(pred_traj_gt)
    g_l2_loss_rel = torch.stack(g_l2_loss_rel, dim=1)
    _g_l2_loss_rel = torch.sum(g_l2_loss_rel, dim=0)
    _g_l2_loss_rel = torch.min(_g_l2_loss_rel) / (npeds * PRED_LEN)
    g_l2_loss_sum_rel += _g_l2_loss_rel
    losses['G_l2_loss_rel'] = g_l2_loss_sum_rel.item()
    loss += g_l2_loss_sum_rel

    traj_fake = torch.cat([obs_traj[:, :, 0, :], pred_traj_fake], dim=0)
    traj_fake_rel = torch.cat([obs_traj_rel[:, :, 0, :], pred_traj_fake_rel],
                              dim=0)

    scores_fake = discriminator(traj_fake, traj_fake_rel)
    discriminator_loss = g_loss_fn(scores_fake)

    loss += discriminator_loss
    losses['G_discriminator_loss'] = discriminator_loss.item()
    losses['G_total_loss'] = loss.item()

    optimizer_g.zero_grad()
    loss.backward()
    optimizer_g.step()

    return losses
Example #8
def mask_l2_loss(a, b, mask):
    return l2_loss(a[mask], b[mask])
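The mask here is typically a boolean (or integer index) tensor selecting which rows to score. A hypothetical usage sketch, with all names invented and assuming an l2_loss(a, b) that reduces to a scalar:

import torch

pred = torch.randn(100, 3)          # hypothetical predictions
target = torch.randn(100, 3)        # hypothetical targets
train_mask = torch.rand(100) > 0.5  # boolean mask over the rows

loss = mask_l2_loss(pred, target, train_mask)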
Example #9
    def build_model(self):
        if True:
            self.input_sequence_r = tf.placeholder(tf.float32,
                                                   shape=[
                                                       self.batch_size,
                                                       self.sequence_maximum,
                                                       self.input_dimension
                                                   ],
                                                   name="inputsequence_r")

            self.input_sequence_r_2 = tf.placeholder(tf.float32,
                                                     shape=[
                                                         self.batch_size,
                                                         self.sequence_maximum,
                                                         self.input_dimension
                                                     ],
                                                     name="inputsequence_r_2")

            self.decoder_input = tf.placeholder(tf.float32,
                                                shape=[
                                                    self.batch_size,
                                                    self.sequence_maximum,
                                                    self.input_dimension
                                                ],
                                                name="decoder_input")

            self.input_sequence_original = tf.placeholder(
                tf.float32,
                shape=[
                    self.batch_size, self.sequence_maximum,
                    self.input_dimension
                ],
                name="input_sequence_original")

            self.input_sequence_original_shift = tf.placeholder(
                tf.float32,
                shape=[
                    self.batch_size, self.sequence_maximum + 1,
                    self.input_dimension
                ],
                name="input_sequence_original_shift")

            self.input_mask_r = tf.placeholder(
                tf.float32,
                shape=[self.batch_size, self.sequence_maximum + 1],
                name="inputmask_r")

            self.sequence_length_r = tf.placeholder(tf.int32,
                                                    shape=(self.batch_size),
                                                    name="sequence_length_r")
            representation_sequence = self.encoder(self.input_sequence_r,
                                                   self.sequence_length_r)

            self.y = tf.placeholder(tf.float32,
                                    shape=(self.batch_size, self.num_catogory),
                                    name="y")

            self.representation_len_super = int(
                self.representation_dimention *
                self.representation_ratio_super)
            decoder_input_r = tf.expand_dims(
                representation_sequence[:, self.representation_dimention -
                                        self.representation_len_super:],
                axis=1)
            representation_sequence_temp = tf.expand_dims(
                representation_sequence[:, self.representation_dimention -
                                        self.representation_len_super:],
                axis=1)
            decoder_input_r = tf.concat([decoder_input_r, self.decoder_input],
                                        1)

            outputs_decoder_x_r = self.decoder(decoder_input_r,
                                               self.sequence_length_r)
            y_pred_ori, fcn_ori = self.discriminator(
                self.input_sequence_original, self.sequence_length_r)
            y_pred_ae, fcn_x_ae = self.discriminator(outputs_decoder_x_r,
                                                     self.sequence_length_r,
                                                     reuse=True)

            self.loss_decoder_WGAN = tf.reduce_mean(-y_pred_ae)
            self.loss_discriminator_WGAN = tf.reduce_mean(-y_pred_ori +
                                                          y_pred_ae)

            supervised_input = representation_sequence[
                :, 0:self.representation_len_super]
            supervised_input_len = self.representation_len_super
            self.representation = supervised_input
            self.representation_len_super = supervised_input_len

            if self.fcn_num == 0:
                y_pred = utils.fcn_layer_scope(
                    supervised_input,
                    w_shape=[supervised_input_len, self.num_catogory],
                    b_shape=[self.num_catogory],
                    scope="softmax_supervised",
                    activation=tf.nn.softmax)
            elif self.fcn_num == 1:
                fcn_layer1 = utils.fcn_layer_scope(
                    supervised_input,
                    w_shape=[supervised_input_len, self.fcn_hiddenunit_num],
                    b_shape=[self.fcn_hiddenunit_num],
                    scope="softmax_supervised1",
                    activation=tf.nn.tanh)  # alternative: utils.leaky_relu
                fcn_layer1_dropout = tf.nn.dropout(
                    fcn_layer1, keep_prob=self.dropout_outkeepratio_fcn)

                y_pred = utils.fcn_layer_scope(
                    fcn_layer1_dropout,
                    w_shape=[self.fcn_hiddenunit_num, self.num_catogory],
                    b_shape=[self.num_catogory],
                    scope="softmax_supervised2",
                    activation=tf.nn.softmax)
            else:
                fcn_layer1 = utils.fcn_layer_scope(
                    supervised_input,
                    w_shape=[supervised_input_len, self.fcn_hiddenunit_num],
                    b_shape=[self.fcn_hiddenunit_num],
                    scope="softmax_supervised1",
                    activation=tf.nn.tanh)  # alternative: utils.leaky_relu
                fcn_layer1_dropout = tf.nn.dropout(fcn_layer1, keep_prob=0.7)

                fcn_layer2 = utils.fcn_layer_scope(
                    fcn_layer1_dropout,
                    w_shape=[self.fcn_hiddenunit_num, self.fcn_hiddenunit_num],
                    b_shape=[self.fcn_hiddenunit_num],
                    scope="softmax_supervised2",
                    activation=tf.nn.tanh)

                fcn_layer2_dropout = tf.nn.dropout(fcn_layer2, keep_prob=1.0)
                y_pred = utils.fcn_layer_scope(
                    fcn_layer2_dropout,
                    w_shape=[self.fcn_hiddenunit_num, self.num_catogory],
                    b_shape=[self.num_catogory],
                    scope="softmax_supervised3",
                    activation=tf.nn.softmax)

            self.cross_entropy = -tf.reduce_sum(
                self.y * tf.log(y_pred + 1e-10)) / self.batch_size

            self.train_op_super = tf.train.AdamOptimizer(
                self.learning_rate).minimize(self.cross_entropy)
            correct_prediction = tf.equal(tf.argmax(y_pred, 1),
                                          tf.argmax(self.y, 1))

            self.accuracy = tf.reduce_mean(tf.cast(correct_prediction,
                                                   'float'))

            self.loss_encoder = utils.l2_loss(
                outputs_decoder_x_r, self.input_sequence_original_shift,
                self.input_mask_r) / self.batch_size
            self.loss_semi = self.weight_super * self.cross_entropy
            self.loss_decoder = self.loss_encoder * (
                1.0 - self.gan_ratio) + self.loss_decoder_WGAN * self.gan_ratio

            train_variables = tf.trainable_variables()
            self.encoder_variables = [
                v for v in train_variables if "encodernet" in v.name
            ]
            self.decoder_variables = [
                v for v in train_variables if "decodernet" in v.name
            ]
            self.discriminator_variables = [
                v for v in train_variables if "discriminatornet" in v.name
            ]

            self.softmax_variables = [
                v for v in train_variables if "softmax_supervised" in v.name
            ]

            self.encoder_train_op = self.optimizer(self.loss_encoder,
                                                   self.encoder_variables)
            self.decoder_train_op = self.optimizer(self.loss_decoder,
                                                   self.decoder_variables)
            self.discriminator_train_op = self.optimizer(
                self.loss_discriminator_WGAN, self.discriminator_variables)
            self.train_op_semi = tf.train.AdamOptimizer(
                self.learning_rate).minimize(self.loss_semi)
            self.train_op_softmax = self.optimizer(self.loss_semi,
                                                   self.softmax_variables)
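utils.l2_loss above takes a third mask argument, unlike the two-argument versions elsewhere on this page. A plausible reconstruction from the call site (decoder outputs, shifted targets, and a per-timestep mask), not the actual utils module:

import tensorflow as tf  # TF1-style, matching the example


def l2_loss(y_hat, y, mask):
    # y_hat, y: (batch, time, dim); mask: (batch, time) of 0/1.
    squared_error = tf.reduce_sum(tf.square(y_hat - y), axis=2)
    # Masked sum; the caller divides by the batch size.
    return tf.reduce_sum(squared_error * mask)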
Example #10
            random.shuffle(tlist)
        tlist, vlist = tlist[:n_train], tlist[n_train:]

    min_loss = 1e18

    trlog = {}
    trlog['train_loss'] = []
    trlog['val_loss'] = []
    trlog['min_loss'] = 0
    train_time = conv_time

    for epoch in range(1, args.max_epoch + 1):
        glp.train()
        step_time = time.time()
        output_vectors = glp(word_vectors, tlist)
        loss = l2_loss(output_vectors, fc_vectors[tlist])
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        step_time = time.time() - step_time
        train_time += step_time

        glp.eval()
        output_vectors = glp(word_vectors, tlist)
        train_loss = l2_loss(output_vectors, fc_vectors[tlist]).item()
        if v_val > 0:
            output_vectors = glp(word_vectors, vlist)
            val_loss = l2_loss(output_vectors, fc_vectors[vlist]).item()
            loss = val_loss
        else:
            val_loss = 0
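In this example l2_loss is applied to embedding vectors and must return a differentiable scalar (it is both backpropagated and read out with .item()). A plausible stand-in, assumed rather than taken from the repository:

import torch


def l2_loss(pred_vectors, gt_vectors):
    # Plain mean squared error between predicted and target embeddings.
    return ((pred_vectors - gt_vectors) ** 2).mean()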
Example #11
def generator_step(batch, generator, discriminator, g_loss_fn, optimizer_g):
    if USE_GPU:
        batch = [tensor.cuda() for tensor in batch]
    if MULTI_CONDITIONAL_MODEL:
        (obs_traj, pred_traj_gt, obs_traj_rel, pred_traj_gt_rel, loss_mask,
         seq_start_end, obs_ped_speed, pred_ped_speed, obs_label, pred_label,
         obs_obj_rel_speed) = batch
    else:
        (obs_traj, pred_traj_gt, obs_traj_rel, pred_traj_gt_rel, loss_mask,
         seq_start_end, obs_ped_speed, pred_ped_speed,
         obs_obj_rel_speed) = batch

    losses = {}
    loss = torch.zeros(1).to(pred_traj_gt)
    g_l2_loss_rel = []

    loss_mask = loss_mask[:, OBS_LEN:]

    for _ in range(BEST_K):
        if MULTI_CONDITIONAL_MODEL:
            generator_out, final_enc_h = generator(
                obs_traj, obs_traj_rel, seq_start_end, obs_ped_speed,
                pred_ped_speed, pred_traj_gt, TRAIN_METRIC, None,
                obs_obj_rel_speed, obs_label=obs_label, pred_label=pred_label)
        else:
            generator_out, final_enc_h = generator(
                obs_traj, obs_traj_rel, seq_start_end, obs_ped_speed,
                pred_ped_speed, pred_traj_gt, TRAIN_METRIC, None,
                obs_obj_rel_speed, obs_label=None, pred_label=None)

        pred_traj_fake_rel = generator_out
        pred_traj_fake = relative_to_abs(pred_traj_fake_rel, obs_traj[-1])

        if L2_LOSS_WEIGHT > 0:
            g_l2_loss_rel.append(L2_LOSS_WEIGHT * l2_loss(
                pred_traj_fake_rel,
                pred_traj_gt_rel,
                loss_mask,
                mode='raw'))

    g_l2_loss_sum_rel = torch.zeros(1).to(pred_traj_gt)
    if L2_LOSS_WEIGHT > 0:
        g_l2_loss_rel = torch.stack(g_l2_loss_rel, dim=1)
        for start, end in seq_start_end.data:
            _g_l2_loss_rel = g_l2_loss_rel[start:end]
            _g_l2_loss_rel = torch.sum(_g_l2_loss_rel, dim=0)
            _g_l2_loss_rel = torch.min(_g_l2_loss_rel) / torch.sum(loss_mask[start:end])
            g_l2_loss_sum_rel += _g_l2_loss_rel
        losses['G_l2_loss_rel'] = g_l2_loss_sum_rel.item()
        loss += g_l2_loss_sum_rel
    traj_fake = torch.cat([obs_traj, pred_traj_fake], dim=0)
    traj_fake_rel = torch.cat([obs_traj_rel, pred_traj_fake_rel], dim=0)
    ped_speed = torch.cat([obs_ped_speed, pred_ped_speed], dim=0)
    if MULTI_CONDITIONAL_MODEL:
        label_info = torch.cat([obs_label, pred_label], dim=0)
        scores_fake = discriminator(traj_fake, traj_fake_rel, ped_speed, label=label_info)
    else:
        scores_fake = discriminator(traj_fake, traj_fake_rel, ped_speed, label=None)
    discriminator_loss = g_loss_fn(scores_fake)

    loss += discriminator_loss
    losses['G_discriminator_loss'] = discriminator_loss.item()
    losses['G_total_loss'] = loss.item()

    optimizer_g.zero_grad()
    loss.backward()
    optimizer_g.step()

    return losses
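In every generator_step above, g_loss_fn(scores_fake) supplies the adversarial term. g_loss_fn is passed in from outside; a common Social-GAN-style choice, assumed here, is binary cross-entropy against softened "real" labels:

import random

import torch
import torch.nn.functional as F


def gan_g_loss(scores_fake):
    # The generator is rewarded when the discriminator scores its samples
    # as real; soft labels in [0.7, 1.2] smooth the target.
    y_fake = torch.ones_like(scores_fake) * random.uniform(0.7, 1.2)
    return F.binary_cross_entropy_with_logits(scores_fake, y_fake)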
Example #12
def check_accuracy(loader, generator, discriminator, d_loss_fn, limit=False):

    d_losses = []
    metrics = {}
    g_l2_losses_abs, g_l2_losses_rel = [], []  # two separate lists; ([],) * 2 would alias one list
    disp_error = []  # ADE
    f_disp_error = []  # FDE
    total_traj = 0

    mask_sum = 0
    generator.eval()
    with torch.no_grad():
        for batch in loader:
            batch = [tensor.cuda() for tensor in batch]
            # (obs_traj, pred_traj_gt, obs_traj_rel, pred_traj_gt_rel, vgg_list) = batch
            # (obs_traj, pred_traj_gt, obs_traj_rel, pred_traj_gt_rel) = batch
            (obs_traj, pred_traj_gt, obs_traj_rel, pred_traj_gt_rel, n_l, l_m,
             V_obs, A_obs, V_pre, A_pre, vgg_list) = batch
            # pred_traj_fake_rel = generator(obs_traj, obs_traj_rel, vgg_list)
            pred_traj_fake_rel = generator(obs_traj, obs_traj_rel, V_obs,
                                           A_obs, vgg_list)  # T V C
            pred_traj_fake = relative_to_abs(pred_traj_fake_rel,
                                             obs_traj[0, :, :, -1])  # T V C -> V C

            g_l2_loss_abs = l2_loss(pred_traj_fake, pred_traj_gt, mode='sum')
            g_l2_loss_rel = l2_loss(pred_traj_fake_rel,
                                    pred_traj_gt_rel,
                                    mode='sum')

            ade = displacement_error(pred_traj_fake, pred_traj_gt)  # TVC NVCT
            fde = final_displacement_error(pred_traj_fake[-1],
                                           pred_traj_gt[0, :, :, -1])  # VC  VC

            traj_real = torch.cat([obs_traj[:, :, 0, :], pred_traj_gt], dim=0)
            traj_real_rel = torch.cat(
                [obs_traj_rel[:, :, 0, :], pred_traj_gt_rel], dim=0)
            traj_fake = torch.cat([obs_traj[:, :, 0, :], pred_traj_fake],
                                  dim=0)
            traj_fake_rel = torch.cat(
                [obs_traj_rel[:, :, 0, :], pred_traj_fake_rel], dim=0)

            scores_fake = discriminator(traj_fake, traj_fake_rel)
            scores_real = discriminator(traj_real, traj_real_rel)

            d_loss = d_loss_fn(scores_real, scores_fake)
            d_losses.append(d_loss.item())

            g_l2_losses_abs.append(g_l2_loss_abs.item())
            g_l2_losses_rel.append(g_l2_loss_rel.item())
            disp_error.append(ade.item())
            f_disp_error.append(fde.item())

            mask_sum += (pred_traj_gt.size(1) * PRED_LEN)
            total_traj += pred_traj_gt.size(1)
            if limit and total_traj >= NUM_SAMPLES_CHECK:
                break

    metrics['d_loss'] = sum(d_losses) / len(d_losses)
    metrics['g_l2_loss_abs'] = sum(g_l2_losses_abs) / mask_sum
    metrics['g_l2_loss_rel'] = sum(g_l2_losses_rel) / mask_sum

    metrics['ade'] = sum(disp_error) / (total_traj * PRED_LEN)
    metrics['fde'] = sum(f_disp_error) / total_traj
    generator.train()
    return metrics
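check_accuracy relies on displacement_error (ADE) and final_displacement_error (FDE), which are not shown. A Social-GAN-style sketch of the two metrics, assuming a (seq_len, batch, 2) layout; the exact reductions in this repository may differ:

import torch


def displacement_error(pred_traj, pred_traj_gt, mode='sum'):
    # Euclidean distance per timestep, accumulated over the horizon.
    diff = pred_traj_gt.permute(1, 0, 2) - pred_traj.permute(1, 0, 2)
    loss = torch.sqrt((diff ** 2).sum(dim=2)).sum(dim=1)  # per pedestrian
    return torch.sum(loss) if mode == 'sum' else loss


def final_displacement_error(pred_pos, pred_pos_gt, mode='sum'):
    # Euclidean distance at the final predicted position only.
    loss = torch.sqrt(((pred_pos_gt - pred_pos) ** 2).sum(dim=1))
    return torch.sum(loss) if mode == 'sum' else loss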
Example #13
def train_step(inputs):
    with tf.GradientTape() as gen_tape, tf.GradientTape() as dis_tape:
        outputs = model(inputs)
        (generation_A, generation_B, cycle_A, cycle_B, identity_A, identity_B,
         discrimination_A_real, discrimination_A_fake,
         discrimination_B_real, discrimination_B_fake,
         discrimination_A_dot_real, discrimination_A_dot_fake,
         discrimination_B_dot_real, discrimination_B_dot_fake) = outputs

        # Cycle loss.
        cycle_loss = l1_loss(inputs[0], cycle_A) + l1_loss(inputs[1], cycle_B)

        # Identity loss.
        identity_loss = l1_loss(inputs[0], identity_A) + l1_loss(
            inputs[1], identity_B)

        # Generator loss.
        generator_loss_A2B = l2_loss(tf.ones_like(discrimination_B_fake),
                                     discrimination_B_fake)
        generator_loss_B2A = l2_loss(tf.ones_like(discrimination_A_fake),
                                     discrimination_A_fake)

        two_step_generator_loss_A = l2_loss(
            tf.ones_like(discrimination_A_dot_fake), discrimination_A_dot_fake)
        two_step_generator_loss_B = l2_loss(
            tf.ones_like(discrimination_B_dot_fake), discrimination_B_dot_fake)

        generator_loss = generator_loss_A2B + generator_loss_B2A + two_step_generator_loss_A + \
                         two_step_generator_loss_B + hp.lambda_cycle * cycle_loss + hp.lambda_identity * identity_loss

        discriminator_loss_A_real = l2_loss(
            tf.ones_like(discrimination_A_real), discrimination_A_real)
        discriminator_loss_A_fake = l2_loss(
            tf.zeros_like(discrimination_A_fake), discrimination_A_fake)
        discriminator_loss_A = (discriminator_loss_A_real +
                                discriminator_loss_A_fake) / 2

        discriminator_loss_B_real = l2_loss(
            tf.ones_like(discrimination_B_real), discrimination_B_real)
        discriminator_loss_B_fake = l2_loss(
            tf.zeros_like(discrimination_B_fake), discrimination_B_fake)
        discriminator_loss_B = (discriminator_loss_B_real +
                                discriminator_loss_B_fake) / 2

        discriminator_loss_A_dot_real = l2_loss(
            tf.ones_like(discrimination_A_dot_real), discrimination_A_dot_real)
        discriminator_loss_A_dot_fake = l2_loss(
            tf.zeros_like(discrimination_A_dot_fake),
            discrimination_A_dot_fake)
        discriminator_loss_A_dot = (discriminator_loss_A_dot_real +
                                    discriminator_loss_A_dot_fake) / 2

        discriminator_loss_B_dot_real = l2_loss(
            tf.ones_like(discrimination_B_dot_real), discrimination_B_dot_real)
        discriminator_loss_B_dot_fake = l2_loss(
            tf.zeros_like(discrimination_B_dot_fake),
            discrimination_B_dot_fake)
        discriminator_loss_B_dot = (discriminator_loss_B_dot_real +
                                    discriminator_loss_B_dot_fake) / 2

        discriminator_loss = discriminator_loss_A + discriminator_loss_B + discriminator_loss_A_dot + \
                             discriminator_loss_B_dot

    generator_vars = model.generatorA2B.trainable_variables + model.generatorB2A.trainable_variables
    discriminator_vars = model.discriminator_A.trainable_variables + model.discriminator_B.trainable_variables + \
                         model.discriminator_A_dot.trainable_variables + model.discriminator_B_dot.trainable_variables

    grad_gen = gen_tape.gradient(generator_loss, sources=generator_vars)
    grad_dis = dis_tape.gradient(discriminator_loss,
                                 sources=discriminator_vars)
    generator_optimizer.apply_gradients(zip(grad_gen, generator_vars))
    discriminator_optimizer.apply_gradients(zip(grad_dis, discriminator_vars))

    gen_loss(generator_loss)
    disc_loss(discriminator_loss)
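A hypothetical driver for train_step, assuming a tf.data.Dataset yielding (real_A, real_B) pairs and that gen_loss/disc_loss are tf.keras.metrics.Mean trackers, as their usage above suggests; every name below except train_step is an assumption:

for epoch in range(num_epochs):        # num_epochs: hypothetical setting
    for real_A, real_B in dataset:     # dataset: hypothetical tf.data.Dataset
        train_step((real_A, real_B))
    print('epoch %d: gen=%.4f disc=%.4f'
          % (epoch, gen_loss.result(), disc_loss.result()))
    gen_loss.reset_states()            # clear the running means each epoch
    disc_loss.reset_states()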
Example #14
    indicator_index = utils.get_indicator_index(reactor, INDICATOR)

    tau, delta_t = utils.calculate_idt(reactorNetwork, INDICATOR,
                                       indicator_index)
    original_taus.append(tau)

# modify the mechanism, compute new predictions, compare with the ground
# truth, and keep the best
counter = 1

# placeholders
best_reactions = None
best_factors = None
best_taus = None

# initialize loss with original loss
original_loss = utils.l2_loss(idts, original_taus)
loss = original_loss
print('===============================================================')
print('Original loss: %.5f' % loss)

# random loop
t0 = time.time()
while True:
    # generate a mech
    t_iteration_0 = time.time()
    reactions_var = ct.Reaction.listFromFile(INPUT_MECH)

    # use a Gaussian distribution after 4 loops
    if counter <= 4:
        means = None
        print('Uniform search, ', end='')
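utils.l2_loss(idts, original_taus) above compares two lists of ignition delay times. A plausible stand-in (hypothetical; the real utils module is not shown):

import numpy as np


def l2_loss(idts, taus):
    # Mean squared error between measured and simulated ignition delays.
    idts = np.asarray(idts, dtype=float)
    taus = np.asarray(taus, dtype=float)
    return float(np.mean((idts - taus) ** 2))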
Example #15
def mask_l2_loss(a, b, mask):
    return l2_loss(a[:127 + 40][mask], b[mask])
Example #16
    def build_model(self):

        # Placeholders for real training samples
        self.input_A_real = tf.placeholder(tf.float32,
                                           shape=self.input_shape,
                                           name='input_A_real')
        self.input_B_real = tf.placeholder(tf.float32,
                                           shape=self.input_shape,
                                           name='input_B_real')
        # Placeholders for fake generated samples
        self.input_A_fake = tf.placeholder(tf.float32,
                                           shape=self.input_shape,
                                           name='input_A_fake')
        self.input_B_fake = tf.placeholder(tf.float32,
                                           shape=self.input_shape,
                                           name='input_B_fake')
        # Placeholder for test samples
        self.input_A_test = tf.placeholder(tf.float32,
                                           shape=self.input_shape,
                                           name='input_A_test')
        self.input_B_test = tf.placeholder(tf.float32,
                                           shape=self.input_shape,
                                           name='input_B_test')

        self.generation_B = self.generator(inputs=self.input_A_real,
                                           reuse=False,
                                           scope_name='generator_A2B')
        self.cycle_A = self.generator(inputs=self.generation_B,
                                      reuse=False,
                                      scope_name='generator_B2A')

        self.generation_A = self.generator(inputs=self.input_B_real,
                                           reuse=True,
                                           scope_name='generator_B2A')
        self.cycle_B = self.generator(inputs=self.generation_A,
                                      reuse=True,
                                      scope_name='generator_A2B')

        self.generation_A_identity = self.generator(inputs=self.input_A_real,
                                                    reuse=True,
                                                    scope_name='generator_B2A')
        self.generation_B_identity = self.generator(inputs=self.input_B_real,
                                                    reuse=True,
                                                    scope_name='generator_A2B')

        self.discrimination_A_fake = self.discriminator(
            inputs=self.generation_A,
            reuse=False,
            scope_name='discriminator_A')
        self.discrimination_B_fake = self.discriminator(
            inputs=self.generation_B,
            reuse=False,
            scope_name='discriminator_B')

        # Cycle loss
        self.cycle_loss = l1_loss(y=self.input_A_real,
                                  y_hat=self.cycle_A) + l1_loss(
                                      y=self.input_B_real, y_hat=self.cycle_B)

        # Identity loss
        self.identity_loss = l1_loss(
            y=self.input_A_real, y_hat=self.generation_A_identity) + l1_loss(
                y=self.input_B_real, y_hat=self.generation_B_identity)

        # Placeholders for lambda_cycle and lambda_identity
        self.lambda_cycle = tf.placeholder(tf.float32,
                                           None,
                                           name='lambda_cycle')
        self.lambda_identity = tf.placeholder(tf.float32,
                                              None,
                                              name='lambda_identity')

        # Generator loss
        # Generator wants to fool discriminator
        self.generator_loss_A2B = l2_loss(
            y=tf.ones_like(self.discrimination_B_fake),
            y_hat=self.discrimination_B_fake)
        self.generator_loss_B2A = l2_loss(
            y=tf.ones_like(self.discrimination_A_fake),
            y_hat=self.discrimination_A_fake)

        # Merge the two generators and the cycle loss
        self.generator_loss = (
            self.generator_loss_A2B + self.generator_loss_B2A +
            self.lambda_cycle * self.cycle_loss +
            self.lambda_identity * self.identity_loss)

        # Discriminator loss
        self.discrimination_input_A_real = self.discriminator(
            inputs=self.input_A_real, reuse=True, scope_name='discriminator_A')
        self.discrimination_input_B_real = self.discriminator(
            inputs=self.input_B_real, reuse=True, scope_name='discriminator_B')
        self.discrimination_input_A_fake = self.discriminator(
            inputs=self.input_A_fake, reuse=True, scope_name='discriminator_A')
        self.discrimination_input_B_fake = self.discriminator(
            inputs=self.input_B_fake, reuse=True, scope_name='discriminator_B')

        # Discriminator wants to classify real and fake correctly
        self.discriminator_loss_input_A_real = l2_loss(
            y=tf.ones_like(self.discrimination_input_A_real),
            y_hat=self.discrimination_input_A_real)
        self.discriminator_loss_input_A_fake = l2_loss(
            y=tf.zeros_like(self.discrimination_input_A_fake),
            y_hat=self.discrimination_input_A_fake)
        self.discriminator_loss_A = (self.discriminator_loss_input_A_real +
                                     self.discriminator_loss_input_A_fake) / 2

        self.discriminator_loss_input_B_real = l2_loss(
            y=tf.ones_like(self.discrimination_input_B_real),
            y_hat=self.discrimination_input_B_real)
        self.discriminator_loss_input_B_fake = l2_loss(
            y=tf.zeros_like(self.discrimination_input_B_fake),
            y_hat=self.discrimination_input_B_fake)
        self.discriminator_loss_B = (self.discriminator_loss_input_B_real +
                                     self.discriminator_loss_input_B_fake) / 2

        # Merge the two discriminators into one
        self.discriminator_loss = self.discriminator_loss_A + self.discriminator_loss_B

        # Categorize variables because we have to optimize the two sets of the variables separately
        trainable_variables = tf.trainable_variables()
        self.discriminator_vars = [
            var for var in trainable_variables if 'discriminator' in var.name
        ]
        self.generator_vars = [
            var for var in trainable_variables if 'generator' in var.name
        ]

        # Reserved for test
        self.generation_B_test = self.generator(inputs=self.input_A_test,
                                                reuse=True,
                                                scope_name='generator_A2B')
        self.generation_A_test = self.generator(inputs=self.input_B_test,
                                                reuse=True,
                                                scope_name='generator_B2A')