Example #1
def eval_cues_adp(model_type, sess_id, batch_size, size, set_name,
                  should_saveimg, is_verbose):
    """Evaluate weak segmentation cues for ADP (helper function)

    Parameters
    ----------
    model_type : str
        The name of the model to use for generating cues (i.e. 'M1', 'M2', 'M3', 'M4', 'M5', 'M6', 'M7', 'X1.7',
        'M7bg', 'VGG16', or 'VGG16bg')
    sess_id : str
        The identifying string for the current session
    batch_size : int
        The batch size (>0)
    size : int
        The length of the resized input image
    set_name : str
        The name of the evaluation set (i.e. 'tuning' or 'segtest')
    should_saveimg : bool
        Whether to save debug images
    is_verbose : bool
        Whether to activate message verbosity
    """

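    # NOTE: is_wpt, first_inds, thresh, MODEL_ROOT, DATA_ROOT, OUT_ROOT, and
    # EVAL_CUES_ROOT are assumed to be defined at module level in the original
    # script; they are not parameters of this helper.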
    if is_wpt:
        pt_str = ''
    else:
        pt_str = '_npt'
    ac = ADPCues('ADP_' + model_type + pt_str,
                 batch_size,
                 size,
                 model_dir=MODEL_ROOT,
                 data_dir=DATA_ROOT,
                 first_inds=first_inds)
    seed_size = 41
    OVERLAY_R = 0.75

    # Load network and thresholds
    if should_saveimg:
        out_dirs = {}
        for htt_class in ['morph', 'func']:
            out_dirs[htt_class] = os.path.join(OUT_ROOT, sess_id, htt_class)
            makedir_if_nexist([out_dirs[htt_class]])
    ac.build_model()

    # Ensure the local data directory exists
    if not os.path.exists('data'):
        os.makedirs('data')

    # Load Grad-CAM weights
    if is_verbose:
        print('\tGetting Grad-CAM weights for given network')
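    # alpha holds the Grad-CAM channel weights: the spatially pooled gradients
    # of each class score with respect to the final convolutional layer.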
    alpha = ac.get_grad_cam_weights(np.zeros((1, size, size, 3)))

    # Read in image names
    img_names = ac.get_img_names(set_name)

    # Process images in batches
    n_batches = (len(img_names) + ac.batch_size - 1) // ac.batch_size  # ceiling division avoids an empty trailing batch
    for iter_batch in range(n_batches):
        start_time = time.time()
        if is_verbose:
            print('\tBatch #%d of %d' % (iter_batch + 1, n_batches))
        start_idx = iter_batch * ac.batch_size
        end_idx = min(start_idx + ac.batch_size - 1, len(img_names) - 1)
        cur_batch_sz = end_idx - start_idx + 1
        img_batch_norm, img_batch = ac.read_batch(img_names[start_idx:end_idx + 1])
        # Determine passing classes
        predicted_scores = ac.model.predict(img_batch_norm)
        is_pass_threshold = np.greater_equal(predicted_scores, ac.thresholds)

        # Generate Grad-CAM
        H = ac.grad_cam(alpha, img_batch_norm, is_pass_threshold)
        H = np.transpose(H, (0, 3, 1, 2))
        H = resize_stack(H, (seed_size, seed_size))

        # Split Grad-CAM into {morph, func}
        H_split = {}
        H_split['morph'], H_split['func'] = ac.split_by_httclass(H)
        is_pass = {}
        is_pass['morph'], is_pass['func'] = ac.split_by_httclass(
            is_pass_threshold)

        # Modify Grad-CAM for each HTT type separately
        seeds = {}
        for htt_class in ['morph', 'func']:
            seeds[htt_class] = np.zeros(
                (cur_batch_sz, len(ac.classes['valid_' + htt_class]),
                 seed_size, seed_size))
            seeds[htt_class][:, ac.classinds[htt_class + '2valid']] = \
                H[:, ac.classinds['all2' + htt_class]]
            class_inds = [
                ac.classinds_arr[htt_class + '2valid'][is_pass[htt_class][i]]
                for i in range(cur_batch_sz)
            ]

            # Modify heatmaps
            if htt_class == 'morph':
                seeds[htt_class] = ac.modify_by_htt(
                    seeds[htt_class], img_batch,
                    ac.classes['valid_' + htt_class])
            elif htt_class == 'func':
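                # Prepend class index 1 (assumed to be the functional
                # background class) to every image's list of passing classes.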
                class_inds = [np.append(1, x) for x in class_inds]
                adipose_inds = [
                    i for i, x in enumerate(ac.classes['morph'])
                    if x in ['A.W', 'A.B', 'A.M']
                ]
                gradcam_adipose = seeds['morph'][:, adipose_inds]
                seeds[htt_class] = ac.modify_by_htt(
                    seeds[htt_class],
                    img_batch,
                    ac.classes['valid_' + htt_class],
                    gradcam_adipose=gradcam_adipose)
            # Update cues
            ac.update_cues(seeds[htt_class], class_inds, htt_class,
                           list(range(start_idx, end_idx + 1)), thresh)

            # Load GT segmentation images
            gt_batch = ac.read_gt_batch(htt_class,
                                        img_names[start_idx:end_idx + 1])
            # Process images one at a time
            for j in range(cur_batch_sz):
                # Separate GT segmentation images into R, G, B channels
                gt_r = gt_batch[j, :, :, 0]
                gt_g = gt_batch[j, :, :, 1]
                gt_b = gt_batch[j, :, :, 2]
                # Load predicted segmentations
                cues_i = ac.cues[htt_class]['%s_cues' % (start_idx + j)]
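                # cues_i is a (class, row, col) triple of index arrays;
                # scatter it into a dense one-hot cue volume.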
                cues = np.zeros(
                    (seed_size, seed_size, len(ac.colours[htt_class])))
                cues[cues_i[1], cues_i[2], cues_i[0]] = 1.0
                pred_segmask = np.zeros((size, size, 3))
                # Evaluate predicted segmentations
                for k, gt_colour in enumerate(ac.colours[htt_class]):
                    gt_mask = (gt_r == gt_colour[0]) & (
                        gt_g == gt_colour[1]) & (gt_b == gt_colour[2])
                    pred_mask = cv2.resize(
                        cues[:, :, k], (size, size),
                        interpolation=cv2.INTER_NEAREST) == 1.0
                    pred_segmask += np.expand_dims(pred_mask, axis=2) * \
                                    np.expand_dims(np.expand_dims(gt_colour, axis=0), axis=0)
                    ac.intersects[htt_class][k] += np.sum(gt_mask & pred_mask)
                    ac.unions[htt_class][k] += np.sum(gt_mask | pred_mask)
                    ac.predicted_totals[htt_class][k] += np.sum(pred_mask)
                    ac.gt_totals[htt_class][k] += np.sum(gt_mask)
                # Save debugging images to file
                if should_saveimg:
                    imgio.imsave(
                        os.path.join(
                            out_dirs[htt_class],
                            os.path.splitext(img_names[start_idx + j])[0] +
                            '.png'), np.uint8(pred_segmask))
                    imgio.imsave(
                        os.path.join(
                            out_dirs[htt_class],
                            os.path.splitext(img_names[start_idx + j])[0] +
                            '_overlay.png'),
                        np.uint8((1 - OVERLAY_R) * img_batch[j] +
                                 OVERLAY_R * pred_segmask))
        elapsed_time = time.time() - start_time
        if is_verbose:
            print('\t\tElapsed time: %s seconds (%s seconds/image)' %
                  (elapsed_time, elapsed_time / cur_batch_sz))
    # Calculate IoU, mIoU metrics
    iou = {}
    precision = {}
    recall = {}
    miou = {}
    mean_prec = {}
    mean_recall = {}
    for htt_class in ['morph', 'func']:
        iou[htt_class] = ac.intersects[htt_class] / (ac.unions[htt_class] +
                                                     1e-5)
        # Precision is TP over predicted positives; recall is TP over GT
        # positives.
        precision[htt_class] = ac.intersects[htt_class] / (
            ac.predicted_totals[htt_class] + 1e-5)
        recall[htt_class] = ac.intersects[htt_class] / (
            ac.gt_totals[htt_class] + 1e-5)
        miou[htt_class] = np.mean(iou[htt_class])
        mean_prec[htt_class] = np.mean(precision[htt_class])
        mean_recall[htt_class] = np.mean(recall[htt_class])

        if is_verbose:
            print('\tmIoU (%s): %s' % (htt_class, miou[htt_class]))

        eval_dir = os.path.join(EVAL_CUES_ROOT, sess_id)
        makedir_if_nexist([eval_dir])
        # Save to .xlsx metrics file
        df = pd.DataFrame(
            {
                'Class': ac.classes['valid_' + htt_class] + ['Mean'],
                'IoU': list(iou[htt_class]) + [miou[htt_class]],
                'Precision':
                list(precision[htt_class]) + [mean_prec[htt_class]],
                'Recall': list(recall[htt_class]) + [mean_recall[htt_class]]
            },
            columns=['Class', 'IoU', 'Precision', 'Recall'])
        xlsx_path = os.path.join(
            eval_dir, 'metrics_ADP-' + htt_class + '_' + set_name + '_' +
            model_type + '.xlsx')
        df.to_excel(xlsx_path)
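
The per-class bookkeeping at the end of this example reduces to set operations on boolean masks. A minimal self-contained sketch of the same accumulation (the class palette and array sizes are illustrative, not ADP's):

import numpy as np

# Illustrative palette: one RGB colour per class (not ADP's real palette).
colours = [(255, 255, 255), (128, 0, 128), (0, 128, 0)]

def accumulate_metrics(gt_rgb, pred_idx, colours):
    """Per-class intersection, union, and pixel totals for one image."""
    n = len(colours)
    intersects, unions = np.zeros(n), np.zeros(n)
    pred_totals, gt_totals = np.zeros(n), np.zeros(n)
    for k, c in enumerate(colours):
        gt_mask = np.all(gt_rgb == np.array(c), axis=-1)  # colour -> boolean mask
        pred_mask = pred_idx == k
        intersects[k] = np.sum(gt_mask & pred_mask)
        unions[k] = np.sum(gt_mask | pred_mask)
        pred_totals[k] = np.sum(pred_mask)
        gt_totals[k] = np.sum(gt_mask)
    return intersects, unions, pred_totals, gt_totals

gt = np.zeros((4, 4, 3), np.uint8)
gt[:2] = (255, 255, 255)       # top half is class 0 in the ground truth
pred = np.zeros((4, 4), int)   # every pixel predicted as class 0
i, u, pt, gtt = accumulate_metrics(gt, pred, colours)
eps = 1e-5
print('IoU:      ', np.round(i / (u + eps), 3))
print('Precision:', np.round(i / (pt + eps), 3))   # TP / predicted positives
print('Recall:   ', np.round(i / (gtt + eps), 3))  # TP / ground-truth positives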
Example #2
def segment_adp(sess_id, model_type, batch_size, size, set_name,
                should_saveimg, is_verbose):
    """Predict segmentation on requested dataset using the provided seeding model with HistoSegNet, for ADP

    Parameters
    ----------
    sess_id : str
        The identifying string for the current session
    model_type : str
        The name of the model to use for generating cues (i.e. 'M1', 'M2', 'M3', 'M4', 'M5', 'M6', 'M7', 'X1.7',
        'M7bg', 'VGG16', or 'VGG16bg')
    batch_size : int
        The batch size (>0)
    size : int
        The length of the resized input image
    set_name : str
        The name of the evaluation set (i.e. 'tuning' or 'segtest')
    should_saveimg : bool
        Whether to save debug images
    is_verbose : bool
        Whether to activate message verbosity
    """

    ac = ADPCues(sess_id, batch_size, size, model_dir=MODEL_CNN_ROOT)
    OVERLAY_R = 0.75

    # Load network and thresholds
    ac.build_model()

    # Load Grad-CAM weights
    if is_verbose:
        print('\tGetting Grad-CAM weights for given network')
    alpha = ac.get_grad_cam_weights(np.zeros((1, size, size, 3)))

    # Read in image names
    img_names = ac.get_img_names(set_name)

    # Process images in batches
    confusion_matrix = {}
    gt_count = {}
    out_dirs = {}
    for htt_class in ['morph', 'func']:
        confusion_matrix[htt_class] = np.zeros(
            (len(ac.classes['valid_' + htt_class]),
             len(ac.classes['valid_' + htt_class])))
        gt_count[htt_class] = np.zeros((len(ac.classes['valid_' + htt_class])))
        out_dirs[htt_class] = os.path.join(
            'out', 'ADP-' + htt_class + '_' + set_name + '_' + model_type)
        if not os.path.exists(out_dirs[htt_class]):
            os.makedirs(out_dirs[htt_class])
    n_batches = (len(img_names) + batch_size - 1) // batch_size  # ceiling division avoids an empty trailing batch
    for iter_batch in range(n_batches):
        batch_start_time = time.time()
        if is_verbose:
            print('\tBatch #%d of %d' % (iter_batch + 1, n_batches))
        start_idx = iter_batch * batch_size
        end_idx = min(start_idx + batch_size - 1, len(img_names) - 1)
        cur_batch_sz = end_idx - start_idx + 1

        # Image reading
        start_time = time.time()
        img_batch_norm, img_batch = ac.read_batch(img_names[start_idx:end_idx + 1])
        if is_verbose:
            print(
                '\t\tImage read time: %0.5f seconds (%0.5f seconds / image)' %
                (time.time() - start_time,
                 (time.time() - start_time) / cur_batch_sz))

        # Generate patch confidence scores
        start_time = time.time()
        predicted_scores = ac.model.predict(img_batch_norm)
        is_pass_threshold = np.greater_equal(predicted_scores, ac.thresholds)
        if is_verbose:
            print(
                '\t\tGenerating patch confidence scores time: %0.5f seconds (%0.5f seconds / image)'
                % (time.time() - start_time,
                   (time.time() - start_time) / cur_batch_sz))

        # Generate Grad-CAM
        start_time = time.time()
        H = grad_cam(ac.model,
                     alpha,
                     img_batch_norm,
                     is_pass_threshold,
                     ac.final_layer,
                     predicted_scores,
                     orig_sz=[size, size],
                     should_upsample=True)
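        # H: one heatmap per passing class, NHWC, upsampled to the input size.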
        H = np.transpose(H, (0, 3, 1, 2))
        # Split Grad-CAM into {morph, func}
        H_split = {}
        H_split['morph'], H_split['func'] = split_by_httclass(
            H, ac.classes['all'], ac.classes['morph'], ac.classes['func'])
        is_pass = {}
        is_pass['morph'], is_pass['func'] = split_by_httclass(
            is_pass_threshold, ac.classes['all'], ac.classes['morph'],
            ac.classes['func'])
        if is_verbose:
            print(
                '\t\tGenerating Grad-CAM time: %0.5f seconds (%0.5f seconds / image)'
                % (time.time() - start_time,
                   (time.time() - start_time) / cur_batch_sz))

        # Modify Grad-CAM for each HTT type separately
        Y_gradcam = {}
        Y_csgc = {}
        Y_crf = {}
        for htt_class in ['morph', 'func']:
            Y_gradcam[htt_class] = np.zeros(
                (cur_batch_sz, len(ac.classes['valid_' + htt_class]), size,
                 size))
            Y_gradcam[htt_class][:, ac.classinds[htt_class + '2valid']] = \
                H[:, ac.classinds['all2' + htt_class]]

            # Inter-HTT Adjustments
            start_time = time.time()
            if htt_class == 'morph':
                Y_gradcam[htt_class] = modify_by_htt(
                    Y_gradcam[htt_class], img_batch,
                    ac.classes['valid_' + htt_class])
            elif htt_class == 'func':
                adipose_inds = [
                    i for i, x in enumerate(ac.classes['morph'])
                    if x in ['A.W', 'A.B', 'A.M']
                ]
                gradcam_adipose = Y_gradcam['morph'][:, adipose_inds]
                Y_gradcam[htt_class] = modify_by_htt(
                    Y_gradcam[htt_class],
                    img_batch,
                    ac.classes['valid_' + htt_class],
                    gradcam_adipose=gradcam_adipose)
            Y_csgc[htt_class] = get_cs_gradcam(
                Y_gradcam[htt_class], ac.classes['valid_' + htt_class],
                htt_class)
            if is_verbose:
                print(
                    '\t\t\tInter-HTT adjustments time [%s]: %0.5f seconds (%0.5f seconds / image)'
                    % (htt_class, time.time() - start_time,
                       (time.time() - start_time) / cur_batch_sz))

            # FC-CRF
            start_time = time.time()
            dcrf_config = np.load(
                os.path.join(MODEL_WSSS_ROOT,
                             htt_class + '_optimal_pcc.npy'))[0]
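            # dcrf_config: precomputed CRF parameters tuned per HTT class.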
            Y_crf[htt_class] = dcrf_process(Y_csgc[htt_class], img_batch,
                                            dcrf_config)
            if is_verbose:
                print(
                    '\t\t\tCRF time [%s]: %0.5f seconds (%0.5f seconds / image)'
                    % (htt_class, time.time() - start_time,
                       (time.time() - start_time) / cur_batch_sz))

            # Update evaluation performance
            _, gt_batch = read_batch(
                os.path.join(ac.gt_root, 'ADP-' + htt_class),
                img_names[start_idx:end_idx + 1], cur_batch_sz, [1088, 1088],
                'ADP')
            for iter_img in range(cur_batch_sz):
                # Load GT segmentation
                gt_r = gt_batch[iter_img][:, :, 0]
                gt_g = gt_batch[iter_img][:, :, 1]
                gt_b = gt_batch[iter_img][:, :, 2]
                # Load predicted segmentation
                pred_idx = cv2.resize(Y_crf[htt_class][iter_img],
                                      dsize=(1088, 1088),
                                      interpolation=cv2.INTER_NEAREST)
                pred_segmask = np.zeros((1088, 1088, 3))
                # Evaluate predicted segmentation
                for k, gt_colour in enumerate(ac.colours[htt_class]):
                    gt_mask = (gt_r == gt_colour[0]) & (
                        gt_g == gt_colour[1]) & (gt_b == gt_colour[2])
                    pred_mask = pred_idx == k
                    confusion_matrix[htt_class][k, :] += np.bincount(
                        pred_idx[gt_mask],
                        minlength=len(ac.classes['valid_' + htt_class]))
                    ac.intersects[htt_class][k] += np.sum(gt_mask & pred_mask)
                    ac.unions[htt_class][k] += np.sum(gt_mask | pred_mask)
                    pred_segmask += np.expand_dims(pred_mask, axis=2) * \
                                    np.expand_dims(np.expand_dims(ac.colours[htt_class][k], axis=0), axis=0)
                    gt_count[htt_class][k] += np.sum(gt_mask)
                # Save outputted segmentation to file
                if should_saveimg:
                    orig_filepath = os.path.join(
                        ac.img_dir, img_names[start_idx + iter_img])
                    orig_img = cv2.cvtColor(cv2.imread(orig_filepath),
                                            cv2.COLOR_BGR2RGB)
                    pred_segmask_small = cv2.resize(
                        pred_segmask, (orig_img.shape[1], orig_img.shape[0]),
                        interpolation=cv2.INTER_NEAREST)  # dsize is (width, height)
                    imgio.imsave(
                        os.path.join(
                            out_dirs[htt_class],
                            img_names[start_idx + iter_img].replace(
                                '.png', '') + '.png'),
                        pred_segmask_small / 255.0)  # scale uint8 range to [0, 1]
                    imgio.imsave(
                        os.path.join(
                            out_dirs[htt_class],
                            img_names[start_idx + iter_img].replace(
                                '.png', '') + '_overlay.png'),
                        (1 - OVERLAY_R) * orig_img / 255.0 +
                        OVERLAY_R * pred_segmask_small / 255.0)
        elapsed_time = time.time() - batch_start_time
        if is_verbose:
            print('\tElapsed time: %0.5f seconds (%0.5f seconds / image)' %
                  (elapsed_time, elapsed_time / cur_batch_sz))

    for htt_class in ['morph', 'func']:
        # Evaluate mIoU and write to .xlsx file
        mIoU = np.mean(ac.intersects[htt_class] /
                       (ac.unions[htt_class] + 1e-7))
        df = pd.DataFrame(
            {
                'Class':
                ac.classes['valid_' + htt_class] + ['Mean'],
                'IoU':
                list(ac.intersects[htt_class] /
                     (ac.unions[htt_class] + 1e-7)) + [mIoU]
            },
            columns=['Class', 'IoU'])
        eval_dir = os.path.join(
            'eval', 'ADP-' + htt_class + '_' + set_name + '_' + model_type)
        if not os.path.exists(eval_dir):
            os.makedirs(eval_dir)
        xlsx_path = os.path.join(
            eval_dir, 'metrics_ADP-' + htt_class + '_' + model_type + '.xlsx')
        df.to_excel(xlsx_path)

        # Generate confusion matrix for all classes and write to .png file
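        # np.matlib is not exposed by 'import numpy' alone; the original
        # script is assumed to 'import numpy.matlib' at module level.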
        count_mat = np.transpose(
            np.matlib.repmat(gt_count[htt_class],
                             len(ac.classes['valid_' + htt_class]), 1))
        title = "Confusion matrix\n"
        xlabel = 'Prediction'  # "Labels"
        ylabel = 'Ground-Truth'  # "Labels"
        xticklabels = ac.classes['valid_' + htt_class]
        yticklabels = ac.classes['valid_' + htt_class]
        heatmap(confusion_matrix[htt_class] / (count_mat + 1e-7),
                title,
                xlabel,
                ylabel,
                xticklabels,
                yticklabels,
                rot_angle=45)
        plt.savefig(os.path.join(
            eval_dir,
            'confusion_ADP-' + htt_class + '_' + model_type + '.png'),
                    dpi=96,
                    format='png',
                    bbox_inches='tight')
        plt.close()

        # Generate confusion matrix for only foreground classes and write to .png file
        title = "Confusion matrix\n"
        xlabel = 'Prediction'  # "Labels"
        ylabel = 'Ground-Truth'  # "Labels"
        if htt_class == 'morph':
            xticklabels = ac.classes['valid_' + htt_class][1:]
            yticklabels = ac.classes['valid_' + htt_class][1:]
            heatmap(confusion_matrix[htt_class][1:, 1:] /
                    (count_mat[1:, 1:] + 1e-7),
                    title,
                    xlabel,
                    ylabel,
                    xticklabels,
                    yticklabels,
                    rot_angle=45)
        elif htt_class == 'func':
            xticklabels = ac.classes['valid_' + htt_class][2:]
            yticklabels = ac.classes['valid_' + htt_class][2:]
            heatmap(confusion_matrix[htt_class][2:, 2:] /
                    (count_mat[2:, 2:] + 1e-7),
                    title,
                    xlabel,
                    ylabel,
                    xticklabels,
                    yticklabels,
                    rot_angle=45)
        plt.savefig(os.path.join(
            eval_dir,
            'confusion_fore_ADP-' + htt_class + '_' + model_type + '.png'),
                    dpi=96,
                    format='png',
                    bbox_inches='tight')
        plt.close()
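
The grad_cam call above follows the standard Grad-CAM construction: each final-layer feature map is weighted by the spatially pooled gradient of the class score, summed over channels, and rectified. A minimal NumPy sketch of that combination step (shapes and names are illustrative, not the repo's grad_cam signature):

import numpy as np

# A: final conv-layer activations, NHWC; alpha: per-class channel weights
# (the spatially pooled score gradients that get_grad_cam_weights returns).
# Both are random stand-ins here.
rng = np.random.default_rng(0)
A = rng.random((2, 14, 14, 512))      # (batch, h, w, channels)
alpha = rng.random((512, 10))         # (channels, n_classes)

# Weighted channel sum followed by ReLU: one heatmap per class, NHWC.
H = np.einsum('bhwc,ck->bhwk', A, alpha)
H = np.maximum(H, 0.0)
print(H.shape)  # (2, 14, 14, 10)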
Example #3
def gen_cues_adp(model_type, thresh, batch_size, size, cues_dir, set_name,
                 is_verbose):
    """Generate weak segmentation cues for ADP (helper function)

    Parameters
    ----------
    model_type : str
        The name of the model to use for generating cues (i.e. 'M1', 'M2', 'M3', 'M4', 'M5', 'M6', 'M7', 'X1.7',
        'M7bg', 'VGG16', or 'VGG16bg')
    thresh : float
        The confidence value for thresholding activation maps, in [0, 1]
    batch_size : int
        The batch size (>0)
    size : int
        The length of the resized input image
    cues_dir : str
        The directory to save the cues to
    set_name : str
        The name of the evaluation set (i.e. 'tuning' or 'segtest')
    is_verbose : bool
        Whether to activate message verbosity
    """

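    # NOTE: as in Example #1, is_wpt, first_inds, MODEL_ROOT, and DATA_ROOT
    # are assumed to be module-level names in the original script.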
    if is_wpt:
        pt_str = ''
    else:
        pt_str = '_npt'
    ac = ADPCues('ADP_' + model_type + pt_str,
                 batch_size,
                 size,
                 model_dir=MODEL_ROOT,
                 data_dir=DATA_ROOT,
                 first_inds=first_inds)
    seed_size = 41

    # Load network and thresholds
    cues_dirs = {}
    for htt_class in ['morph', 'func']:
        cues_dirs[htt_class] = os.path.join(cues_dir, htt_class)
        makedir_if_nexist([cues_dirs[htt_class]])
    ac.build_model()

    # Load Grad-CAM weights
    if is_verbose:
        print('\tGetting Grad-CAM weights for given network')
    alpha = ac.get_grad_cam_weights(np.zeros((1, size, size, 3)))

    # Read in image names
    img_names = ac.get_img_names(set_name)

    # Process images in batches
    n_batches = (len(img_names) + batch_size - 1) // batch_size  # ceiling division avoids an empty trailing batch
    for iter_batch in range(n_batches):
        start_time = time.time()
        if is_verbose:
            print('\tBatch #%d of %d' % (iter_batch + 1, n_batches))
        start_idx = iter_batch * batch_size
        end_idx = min(start_idx + batch_size - 1, len(img_names) - 1)
        cur_batch_sz = end_idx - start_idx + 1
        img_batch_norm, img_batch = ac.read_batch(img_names[start_idx:end_idx + 1])

        # Determine passing classes
        predicted_scores = ac.model.predict(img_batch_norm)
        is_pass_threshold = np.greater_equal(predicted_scores, ac.thresholds)

        # Generate Grad-CAM
        H = ac.grad_cam(alpha, img_batch_norm, is_pass_threshold)
        H = np.transpose(H, (0, 3, 1, 2))
        H = resize_stack(H, (seed_size, seed_size))

        # Split Grad-CAM into {morph, func}
        H_split = {}
        H_split['morph'], H_split['func'] = ac.split_by_httclass(H)
        is_pass = {}
        is_pass['morph'], is_pass['func'] = ac.split_by_httclass(
            is_pass_threshold)

        # Modify Grad-CAM for each HTT type separately
        seeds = {}
        for htt_class in ['morph', 'func']:
            seeds[htt_class] = np.zeros(
                (cur_batch_sz, len(ac.classes['valid_' + htt_class]),
                 seed_size, seed_size))
            seeds[htt_class][:, ac.classinds[htt_class + '2valid']] = \
                H[:, ac.classinds['all2' + htt_class]]
            class_inds = [
                ac.classinds_arr[htt_class + '2valid'][is_pass[htt_class][i]]
                for i in range(cur_batch_sz)
            ]

            # Modify heatmaps
            if htt_class == 'morph':
                seeds[htt_class] = ac.modify_by_htt(
                    seeds[htt_class], img_batch,
                    ac.classes['valid_' + htt_class])
            elif htt_class == 'func':
                class_inds = [np.append(1, x) for x in class_inds]
                adipose_inds = [
                    i for i, x in enumerate(ac.classes['morph'])
                    if x in ['A.W', 'A.B', 'A.M']
                ]
                gradcam_adipose = seeds['morph'][:, adipose_inds]
                seeds[htt_class] = ac.modify_by_htt(
                    seeds[htt_class],
                    img_batch,
                    ac.classes['valid_' + htt_class],
                    gradcam_adipose=gradcam_adipose)

            # Update localization cues
            ac.update_cues(seeds[htt_class], class_inds, htt_class,
                           list(range(start_idx, end_idx + 1)), thresh)
        elapsed_time = time.time() - start_time
        if is_verbose:
            print('\t\tElapsed time: %s seconds (%s seconds/image)' %
                  (elapsed_time, elapsed_time / cur_batch_sz))
    # Save localization cues
    if is_verbose:
        print('\tSaving localization cues')
    pickle.dump(
        ac.cues['morph'],
        open(os.path.join(cues_dirs['morph'], 'localization_cues.pickle'),
             'wb'))
    pickle.dump(
        ac.cues['func'],
        open(os.path.join(cues_dirs['func'], 'localization_cues.pickle'),
             'wb'))
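
Downstream training code would reload these pickles and densify the sparse cues. A minimal sketch, assuming the same (class, row, col) index convention used by the evaluation loop in Example #1 (the path and class count are illustrative):

import os
import pickle

import numpy as np

cues_path = os.path.join('cues', 'morph', 'localization_cues.pickle')
with open(cues_path, 'rb') as f:
    cues = pickle.load(f)

seed_size, n_classes = 41, 29     # 29 is a stand-in for the valid-class count
dense = np.zeros((seed_size, seed_size, n_classes))
cls, rows, cols = cues['0_cues']  # (class, row, col) index arrays for image 0
dense[rows, cols, cls] = 1.0
print('Seed pixels in image 0:', int(dense.sum()))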