def calc_metrics_polynomial(anchor,
                            target_transforms,
                            coeffs,
                            sample_set=None):
    """Compute IoU and displacement-error metrics at the 0.5 s and 1.0 s
    prediction horizons for a polynomial-coefficient prediction.

    Args:
        anchor: last observed bounding box (normalized).
        target_transforms: ground-truth transforms, indexed [:, timestep];
            columns 4 and 9 correspond to the 0.5 s and 1.0 s horizons.
        coeffs: degree-4 polynomial coefficients (no constant term) that
            generate a transform when evaluated at a timestep.
        sample_set: forwarded to `unnormalize_bb`.

    Returns:
        (ious, des): two length-2 arrays, one entry per horizon.
    """
    horizons = (0.5, 1.0)
    ious = np.empty(len(horizons))
    des = np.empty(len(horizons))
    for i, ts in enumerate(horizons):
        # Evaluate c1*t + c2*t^2 + c3*t^3 + c4*t^4 at this horizon.
        powers = np.array([ts, ts**2, ts**3, ts**4])
        generated_transform = np.dot(coeffs, powers)

        # Ground-truth transform column for this horizon (4 then 9).
        t_bb = data_extract_1obj.transform(anchor,
                                           target_transforms[:, (i * 5) + 4])
        g_bb = data_extract_1obj.transform(anchor, generated_transform)
        t_bb = data_extract_1obj.unnormalize_bb(t_bb, sample_set=sample_set)
        g_bb = data_extract_1obj.unnormalize_bb(g_bb, sample_set=sample_set)

        target = Rect.make_cXcYWH(t_bb[0], t_bb[1], t_bb[2], t_bb[3])
        generated = Rect.make_cXcYWH(g_bb[0], g_bb[1], g_bb[2], g_bb[3])

        ious[i] = Rect.get_IoU(target, generated)
        des[i] = Rect.get_DE(target, generated)

    return ious, des
def calc_metrics_all(anchor,
                     target_transforms,
                     generated_transforms,
                     sample_set=None,
                     offset_t=False,
                     normalized=True):
    """Compute IoU and displacement-error metrics for all 10 timesteps.

    Args:
        anchor: last observed bounding box.
        target_transforms, generated_transforms: arrays indexed
            [:, timestep, 0] giving ground-truth / predicted transforms.
        sample_set: forwarded to `unnormalize_bb` when `normalized`.
        offset_t: apply transforms with `transform_offset` instead of
            `transform`.
        normalized: whether boxes must be unnormalized before comparison.

    Returns:
        (ious, des): two length-10 arrays, one entry per timestep.
    """
    # Pick the box-reconstruction function once instead of per iteration.
    apply_transform = (data_extract_1obj.transform_offset
                       if offset_t else data_extract_1obj.transform)

    ious = np.empty(10)
    des = np.empty(10)
    for step in range(10):
        t_bb = apply_transform(anchor, target_transforms[:, step, 0])
        g_bb = apply_transform(anchor, generated_transforms[:, step, 0])

        if normalized:
            t_bb = data_extract_1obj.unnormalize_bb(t_bb,
                                                    sample_set=sample_set)
            g_bb = data_extract_1obj.unnormalize_bb(g_bb,
                                                    sample_set=sample_set)

        target = Rect.make_cXcYWH(t_bb[0], t_bb[1], t_bb[2], t_bb[3])
        generated = Rect.make_cXcYWH(g_bb[0], g_bb[1], g_bb[2], g_bb[3])

        ious[step] = Rect.get_IoU(target, generated)
        des[step] = Rect.get_DE(target, generated)

    return ious, des
def calc_metrics(anchor,
                 target_transform,
                 generated_transform,
                 sample_set=None):
    """Compute (IoU, displacement error) for one predicted transform
    against its ground-truth counterpart, both applied to `anchor`."""
    boxes = []
    for tf in (target_transform, generated_transform):
        bb = data_extract_1obj.transform(anchor, tf)
        boxes.append(
            data_extract_1obj.unnormalize_bb(bb, sample_set=sample_set))
    t_bb, g_bb = boxes

    target = Rect.make_cXcYWH(t_bb[0], t_bb[1], t_bb[2], t_bb[3])
    generated = Rect.make_cXcYWH(g_bb[0], g_bb[1], g_bb[2], g_bb[3])

    return Rect.get_IoU(target, generated), Rect.get_DE(target, generated)
def get_IoU(anchor,
            target_transform,
            generated_transform,
            sample_set=None,
            dataset='kitti_tracking'):
    """Compute the IoU of the target and generated boxes obtained by
    applying the given transforms to `anchor`.

    Returns 0 when the two boxes do not intersect.
    """
    # For raw tracklets the per-sample-set statistics are not used
    # when unnormalizing (presumably none exist — TODO confirm).
    if dataset == 'kitti_raw_tracklets':
        sample_set = None

    target_box = data_extract_1obj.transform(anchor, target_transform)
    generated_box = data_extract_1obj.transform(anchor, generated_transform)
    target_box = data_extract_1obj.unnormalize_bb(target_box,
                                                  sample_set=sample_set)
    generated_box = data_extract_1obj.unnormalize_bb(generated_box,
                                                     sample_set=sample_set)

    target = Rect.make_cXcYWH(target_box[0], target_box[1],
                              target_box[2], target_box[3])
    generated = Rect.make_cXcYWH(generated_box[0], generated_box[1],
                                 generated_box[2], generated_box[3])
    intersect = Rect.get_intersection(target, generated)

    if not intersect:
        return 0

    union = target.area + generated.area - intersect.area
    iou = intersect.area / union
    assert (iou > 0), "Non-positive IoU!"
    return iou
def calc_metrics_train(anchor,
                       target_transforms,
                       generated_transforms,
                       sample_set=None):
    """Compute IoU and displacement-error metrics at the 0.5 s and 1.0 s
    horizons (timestep indices 4 and 9) from 10-step transform arrays
    indexed [:, timestep, 0]."""
    horizon_steps = (4, 9)
    ious = np.empty(len(horizon_steps))
    des = np.empty(len(horizon_steps))
    for out_idx, step in enumerate(horizon_steps):
        t_bb = data_extract_1obj.transform(anchor,
                                           target_transforms[:, step, 0])
        g_bb = data_extract_1obj.transform(anchor,
                                           generated_transforms[:, step, 0])
        t_bb = data_extract_1obj.unnormalize_bb(t_bb, sample_set=sample_set)
        g_bb = data_extract_1obj.unnormalize_bb(g_bb, sample_set=sample_set)

        target = Rect.make_cXcYWH(t_bb[0], t_bb[1], t_bb[2], t_bb[3])
        generated = Rect.make_cXcYWH(g_bb[0], g_bb[1], g_bb[2], g_bb[3])

        ious[out_idx] = Rect.get_IoU(target, generated)
        des[out_idx] = Rect.get_DE(target, generated)

    return ious, des
def draw_heatmap(anchor, transforms):
    """Accumulate an occupancy heatmap of the boxes produced by applying
    each transform in `transforms` to `anchor`.

    Args:
        anchor: normalized anchor bounding box.
        transforms: iterable of transform vectors (e.g. [1000 x 4]).

    Returns:
        A (375, 1242) uint16 array where each pixel counts how many
        generated boxes cover it.
    """
    print("ANCHOR", anchor.shape)
    heatmap_overlay = np.zeros((375, 1242), dtype=np.uint16)
    for tf in transforms:
        box = data_extract_1obj.transform(anchor, tf)
        box = data_extract_1obj.unnormalize_bb(box)
        topleft_box = data_extract_1obj.center_to_topleft_bb(box).astype(int)

        # Debug print for boxes that fall outside the image.
        # BUG FIX: the original checked topleft_box[0] < 0 twice and never
        # checked topleft_box[1] < 0.
        if (topleft_box[0] > 1242 or topleft_box[0] < 0
                or topleft_box[1] > 375 or topleft_box[1] < 0):
            print("BOX:", box)

        # Rows come from the y coordinate (index 1), columns from x
        # (index 0); clip to the image bounds before slicing.
        top = np.clip(topleft_box[1], 0, 375)
        left = np.clip(topleft_box[0], 0, 1242)
        bottom = np.clip(topleft_box[1] + topleft_box[3], 0, 375)
        right = np.clip(topleft_box[0] + topleft_box[2], 0, 1242)
        heatmap_overlay[top:bottom, left:right] += 1

    return heatmap_overlay
def draw_p_and_gt(set_info,
                  prior_bbs,
                  t_p,
                  t_gt,
                  output_dir,
                  unnormalized=True,
                  display=False,
                  heatmap=None,
                  sigma=None,
                  draw_past=False):
    '''
    Draws past, proposed, and ground truth future frames.

    Arguments:
        set_info: [<sequence dir>, <anchor frame filename>, <final frame filename>, <object id>, <object class>]
        prior_bbs: array of past bounding boxes; the last one is the anchor
            that t_p / t_gt are applied to.
        t_p: predicted (proposal) transform.
        t_gt: ground-truth transform.
        output_dir: directory for the saved figure (used when display=False).
        unnormalized: when True, prior_bbs are scaled up by the image
            dimensions (1242x375) before drawing.
        display: show interactively instead of saving to PDF.
        heatmap: optional (375, 1242) array overlaid with a viridis colormap.
        sigma: unused here except by the commented-out title line.
        draw_past: also draw the past boxes with increasing alpha.
    '''
    # Copy so the caller's array is not mutated by the in-place scaling below.
    bbs = np.copy(prior_bbs)
    img_file = os.path.join(set_info[0], 'image_02', 'data',
                            set_info[2] + '.png')
    seq_name = os.path.basename(set_info[0])
    print(img_file, seq_name)

    # cv2
    # img = cv2.imread(img_file)

    # matplotlib
    img = mpimg.imread(img_file)
    fig, ax = plt.subplots(1)
    ax.imshow(img)

    # Reconstruct predicted and ground-truth boxes from the last prior box.
    print(prior_bbs[-1], t_p)
    pred = data_extract_1obj.transform(prior_bbs[-1], t_p)
    gt = data_extract_1obj.transform(prior_bbs[-1], t_gt)
    pred = data_extract_1obj.unnormalize_bb(pred)
    gt = data_extract_1obj.unnormalize_bb(gt)

    # Unnormalize bbs (scale to 1242x375 pixel coordinates).
    if unnormalized:
        bbs[:, 0] = bbs[:, 0] * 1242
        bbs[:, 1] = bbs[:, 1] * 375
        bbs[:, 2] = bbs[:, 2] * 1242
        bbs[:, 3] = bbs[:, 3] * 375

    # Convert to [L, T, W, H]
    for i in range(len(bbs)):
        bbs[i] = data_extract_1obj.center_to_topleft_bb(bbs[i])
    pred = data_extract_1obj.center_to_topleft_bb(pred)
    gt = data_extract_1obj.center_to_topleft_bb(gt)

    # Convert to int
    bbs = bbs.astype(int)
    pred = pred.astype(int)
    gt = gt.astype(int)

    # Fade older boxes: start transparent, increase alpha by .08 per box.
    # NOTE(review): `a` goes non-positive when len(bbs) > 12, which would
    # make patches.Rectangle raise on an invalid alpha — confirm expected
    # sequence length.
    a = 1.0 - (.08 * len(bbs))  # .2  # .4
    color = (4, 165, 239)  # unused in the matplotlib path; kept for the cv2 path
    for i, bb in enumerate(bbs):
        # cv2
        # overlay = img.copy()
        # cv2.rectangle(overlay, (bb[0], bb[1]), (bb[0] + bb[2], bb[1] + bb[3]), color, 2)
        # cv2.addWeighted(overlay, a, img, 1 - a, 0, img)

        # matplotlib
        if draw_past:
            rect = patches.Rectangle((bb[0], bb[1]),
                                     bb[2],
                                     bb[3],
                                     linewidth=2,
                                     edgecolor=(239 / 255, 165 / 255, 4 / 255),
                                     alpha=a,
                                     fill=False)
            ax.add_patch(rect)

        a += .08  # .2

    # cv2
    # img = cv2.rectangle(img, (pred[0], pred[1]), (pred[0] + pred[2], pred[1] + pred[3]), (255, 97, 0), 2)
    # img = cv2.rectangle(img, (gt[0], gt[1]), (gt[0] + gt[2], gt[1] + gt[3]), (0, 255, 0), 2)

    # matplotlib: ground truth in solid white, prediction in dashed red.
    true_rect = patches.Rectangle((gt[0], gt[1]),
                                  gt[2],
                                  gt[3],
                                  linewidth=2,
                                  edgecolor='w',
                                  fill=False)
    ax.add_patch(true_rect)
    gen_rect = patches.Rectangle((pred[0], pred[1]),
                                 pred[2],
                                 pred[3],
                                 linestyle='--',
                                 linewidth=2,
                                 edgecolor='r',
                                 fill=False)  #(0/255, 72/255, 255/255)
    ax.add_patch(gen_rect)

    if heatmap is not None:
        # print("HEATMAP", heatmap.shape)
        # print("IMG", img.shape)

        # cv2
        # heatmap = cv2.convertScaleAbs(heatmap)
        # heatmap = cv2.cvtColor(heatmap, cv2.COLOR_GRAY2RGB)
        # cv2.applyColorMap(heatmap, cv2.COLORMAP_JET)
        # cv2.addWeighted(heatmap, 0.6, img, 0.4, 0, img)

        # matplotlib
        viridis = cm.get_cmap('viridis', 256)
        # gist_heat = cm.get_cmap('gist_heat', 256)

        # heatmap = np.where(heatmap != 0, np.log2(heatmap), 0)
        # ax.imshow(heatmap, cmap=viridis, alpha=0.75)
        # plt.show()

        # sqrt compresses large counts so low-density regions stay visible.
        heatmap = np.sqrt(heatmap)
        ax.imshow(heatmap, cmap=viridis, alpha=0.75)

    plt.axis('off')
    # ax.set_title("sigma_x: {:0.3f}, sigma_y: {:0.3f}, sigma_w: {:0.3f}, sigma_h: {:0.3f}".format(sigma[0], sigma[1], sigma[2], sigma[3]), fontsize=8)
    plt.tight_layout()
    # plt.show()

    if display:
        # NOTE(review): this shows the raw mpimg array via cv2, so the
        # matplotlib patches drawn above will not appear (and channel order
        # differs from cv2's BGR) — confirm this branch is still used.
        cv2.imshow('ImageWindow', img)
        cv2.waitKey()
    else:
        # cv2
        # cv2.imwrite(os.path.join(output_dir, seq_name+'_'+set_info[2]+'_'+set_info[3]+'.png'), img)

        # matplotlib
        output_file = os.path.join(
            output_dir,
            seq_name + '_' + set_info[2] + '_' + set_info[3] + '_noheat')
        # plt.savefig(output_file + '.png', format='png')
        # plt.savefig(output_file + '.eps', format='eps', dpi=1000)
        plt.savefig(output_file + '.pdf', format='pdf', dpi=1000)
    return
# Esempio n. 8
# 0
def training_steps_GAN(x_train, x_val, y_train, y_val, train_info, val_info, model_components):
    """Run the GAN training loop and collect per-step / per-epoch metrics.

    Args:
        x_train, y_train: training inputs (10 past boxes each, flattened to
            40 values) and targets.
        x_val, y_val: validation inputs and targets.
        train_info, val_info: per-sample metadata (unused here).
        model_components: list of
            [model_name, starting_step, data_cols, label_cols, label_dim,
             generator_model, discriminator_model, combined_model,
             epochs, batch_size, k_d, k_g, show, output_dir].

    Returns:
        [G_losses, D_losses, val_losses, train_ious, val_ious,
         train_des, val_des, avg_gen_pred, avg_real_pred]

    NOTE: discriminator training is currently disabled, so D_losses stays
    all-zero and avg_gen_pred / avg_real_pred stay empty.
    """
    [model_name, starting_step, data_cols,
     label_cols, label_dim,
     generator_model, discriminator_model, combined_model,
     epochs, batch_size, k_d, k_g,
     show, output_dir] = model_components
    steps_per_epoch = len(x_train) // batch_size
    nb_steps = steps_per_epoch * epochs
    # BUG FIX: the reshape previously used len(y_train) (the *training* set
    # size) to reshape the *validation* inputs, which only works by accident
    # when the two sets happen to be the same size.
    val_input = x_val.reshape((len(x_val), 40))
    val_target = y_val

    # Loss / metric history buffers returned to the caller.
    G_losses = np.empty((nb_steps, 3))          # [g_loss, g_loss_adv, smoothL1]
    D_losses = np.zeros((nb_steps, 3))          # [D_loss, D_loss_real, D_loss_fake]
    val_losses = np.empty((epochs, 3))
    train_ious = np.empty(nb_steps)
    val_ious = np.empty(epochs)
    train_des = np.zeros(nb_steps)
    val_des = np.zeros(epochs)

    # Average discriminator prediction on generated/real samples per epoch
    # (left empty while discriminator training is disabled).
    avg_gen_pred, avg_real_pred = [], []

    weights_dir = os.path.join(output_dir, 'weights')
    if not os.path.exists(weights_dir):
        os.makedirs(weights_dir)
    lossFile = open(os.path.join(output_dir, 'losses.txt'), 'w')

    # Log model structure to json.
    with open(os.path.join(output_dir, 'D_model.json'), "w") as f:
        f.write(discriminator_model.to_json(indent=4))
    with open(os.path.join(output_dir, 'G_model.json'), "w") as f:
        f.write(generator_model.to_json(indent=4))

    for i in range(1, nb_steps + 1):
        K.set_learning_phase(1)

        # Discriminator training (k_d loop) is intentionally disabled; only
        # the generator is updated through the combined model below.

        # TRAIN GENERATOR on real inputs and outputs.
        for _ in range(k_g):
            batch_ids = data_extract_1obj.get_batch_ids(len(x_train), batch_size)
            gen_input = x_train[batch_ids].reshape((batch_size, 40))
            gen_target = y_train[batch_ids]

            # Label as "real" (~1) so the generator is rewarded for fooling
            # the discriminator; noisy labels keep the loss from collapsing.
            G_loss = combined_model.train_on_batch(
                gen_input,
                {'discriminator': np.random.uniform(low=0.999, high=1.0, size=batch_size),
                 'generator': gen_target})
            coeffs = G_loss[4]

            # Calculate IoU and DE metrics for this batch.
            batch_ious = np.empty(batch_size)
            batch_des = np.empty(batch_size)
            for j in range(batch_size):
                batch_ious[j], batch_des[j] = calc_metrics_polynomial(
                    gen_input[j][-4:], gen_target[j][:, 9], coeffs[j])

            avg_iou = np.mean(batch_ious)
            avg_de = np.mean(batch_des)

        G_losses[i - 1] = G_loss[:3]
        train_ious[i - 1] = avg_iou
        train_des[i - 1] = avg_de

        # Evaluate on validation / save weights / log loss once per epoch.
        if not i % steps_per_epoch:
            K.set_learning_phase(0)
            epoch = i // steps_per_epoch

            num_val_samples = len(val_input)
            val_loss = combined_model.test_on_batch(
                val_input,
                {'discriminator': np.random.uniform(low=0.999, high=1.0, size=num_val_samples),
                 'generator': val_target})
            y_preds = val_loss[4]

            val_batch_ious = np.empty(num_val_samples)
            val_batch_des = np.empty(num_val_samples)
            for j in range(num_val_samples):
                # NOTE(review): this indexes `coeffs` from the last *training*
                # batch rather than the validation predictions (y_preds), and
                # will IndexError when num_val_samples > batch_size — likely
                # should use the validation coefficients; preserved as-is
                # pending confirmation.
                val_batch_ious[j], val_batch_des[j] = calc_metrics_polynomial(
                    val_input[j][-4:], val_target[j][:, 9], coeffs[j])

            # Print the first validation sample as a quick sanity check.
            t_bb = data_extract_1obj.transform(val_input[0][-4:], val_target[0][:, 9])
            t_bb = data_extract_1obj.unnormalize_bb(t_bb, sample_set=None)
            g_bb = data_extract_1obj.transform(val_input[0][-4:], y_preds[0][:, 9])
            g_bb = data_extract_1obj.unnormalize_bb(g_bb, sample_set=None)
            print("proposal: ", g_bb)
            print("target: ", t_bb)

            val_avg_iou = np.mean(val_batch_ious)
            val_avg_de = np.mean(val_batch_des)

            val_losses[epoch - 1] = val_loss[:3]
            val_ious[epoch - 1] = val_avg_iou
            val_des[epoch - 1] = val_avg_de

            # Log loss info to console / file.
            print('Epoch: {} of {}'.format(epoch, nb_steps // steps_per_epoch))
            print('D_losses: {}'.format(D_losses[i - 1]))
            print('G_losses: {}'.format(G_losses[i - 1]))
            print('val_losses: {}'.format(val_losses[epoch - 1]))
            print('ious: {}, {}'.format(train_ious[i - 1], val_ious[epoch - 1]))
            print('des: {}, {}'.format(train_des[i - 1], val_des[epoch - 1]))

            lossFile.write('Epoch: {} of {}.\n'.format(epoch, nb_steps // steps_per_epoch))
            lossFile.write('D_losses: {}\n'.format(D_losses[i - 1]))
            lossFile.write('G_losses: {}\n'.format(G_losses[i - 1]))
            lossFile.write('val_losses: {}\n'.format(val_losses[epoch - 1]))
            lossFile.write('ious: {}, {}\n'.format(train_ious[i - 1], val_ious[epoch - 1]))
            lossFile.write('des: {}, {}\n'.format(train_des[i - 1], val_des[epoch - 1]))

            # Checkpoint: save model weights.
            # BUG FIX: the path was previously built by string concatenation
            # with a hard-coded Windows separator ('weights\\'), which did
            # not match the directory created above via os.path.join.
            model_checkpoint_base_name = os.path.join(
                weights_dir, '{}_weights_epoch-{}.h5')
            generator_model.save_weights(model_checkpoint_base_name.format('gen', epoch))
            discriminator_model.save_weights(model_checkpoint_base_name.format('discrim', epoch))

    lossFile.close()  # BUG FIX: the loss log file was never closed.
    return [G_losses, D_losses, val_losses, train_ious, val_ious,
            train_des, val_des, avg_gen_pred, avg_real_pred]