Example #1
import os

import numpy as np
import torch
from progress.bar import Bar

# Assumes the CenterNet-style project helpers (opts, COCO, CtdetDetector,
# PrefetchDataset, pre_process, AverageMeter, Logger) are importable from
# the surrounding codebase.
def prefetch_test(opt):
    os.environ['CUDA_VISIBLE_DEVICES'] = str(opt.gpus)
    opt = opts().update_dataset_info_and_set_heads(opt, COCO)
    print(opt)
    Logger(opt)

    split = 'val' if not opt.trainval else 'test'
    dataset = COCO(opt, split)
    detector = CtdetDetector(opt)

    data_loader = torch.utils.data.DataLoader(PrefetchDataset(opt, dataset, pre_process),
                                              batch_size=1,
                                              shuffle=False,
                                              num_workers=1,
                                              pin_memory=True)

    results = {}
    num_iters = len(dataset)
    bar = Bar(f'{opt.exp_id}', max=num_iters)
    time_stats = ['tot', 'net', 'dec', 'post', 'merge']
    avg_time_stats = {t: AverageMeter() for t in time_stats}

    for i, (img_id, images) in enumerate(data_loader):
        ret = detector.run(images)
        results[img_id.numpy().astype(np.int32)[0]] = ret['results']
        bar.suffix = f'{i}/{num_iters}|Elapsed: {bar.elapsed_td} |ETA: {bar.eta_td} '

        for t in avg_time_stats:
            avg_time_stats[t].update(ret[t])
            bar.suffix = bar.suffix + '|{} {tm.val:.3f} ({tm.avg:.3f}) '.format(t, tm=avg_time_stats[t])
        bar.next()
    bar.finish()

    dataset.run_eval(results, opt.save_dir)
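
The AverageMeter used for the timing stats above (and in several examples below) is not defined in the snippet; a minimal sketch of the helper commonly used in PyTorch example code (an assumption, not necessarily this project's exact class):

class AverageMeter:
    """Tracks the most recent value and the running average of a metric."""

    def __init__(self):
        self.val = 0.0   # most recent value
        self.sum = 0.0   # weighted sum of all updates
        self.count = 0   # total weight seen so far
        self.avg = 0.0   # running average (sum / count)

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count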
Example #2
    def fit(self, train_loader, valid_loader, start_epoch=0, max_epochs=200):

        best_acc = 0.
        bar = IncrementalBar(max=max_epochs - start_epoch)
        for e in range(start_epoch, max_epochs):
            bar.message = '{:>5.2f}%%'.format(bar.percent)  # '%%' survives the bar's own %-interpolation of message
            bar.suffix = '{}/{} [{}<{}\t{:.2f}s/it]'.format(
                bar.index, bar.max, bar.elapsed_td, bar.eta_td, bar.avg)  # bar.avg is seconds per item, not items per second
            bar.next()
            if e in self.milestones[:3]:
                schedule_lr(self.optimizer)  # decay the learning rate at each milestone
            self.train(train_loader, self.model, self.criterion,
                       self.optimizer, e)

            accuracy, best_threshold, roc_curve_tensor = self.evaluate(
                self.conf, *valid_loader['agedb_30'])
            self.board_val('agedb_30', accuracy, best_threshold,
                           roc_curve_tensor, e)
            if accuracy > best_acc:
                best_acc = accuracy
                save_checkpoint(self.model, self.optimizer, self.conf,
                                best_acc, e)
        bar.finish()
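
schedule_lr is not shown in this example; a minimal sketch, assuming a step decay that divides every parameter group's learning rate by 10 at each milestone:

def schedule_lr(optimizer, factor=10.0):
    # divide the learning rate of every parameter group by `factor`
    for group in optimizer.param_groups:
        group['lr'] /= factor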
Example #3
    def load(self, fp):
        self.size = 0
        file_size = os.fstat(fp.fileno()).st_size
        nblocks = 1 + (file_size - 1) // self.blocksize  # ceil(file_size / blocksize)
        bar = IncrementalBar('Computing', max=nblocks)
        bar.suffix = '%(percent).1f%% - %(eta)ds'
        for block in bar.iter(file_read_iterator(fp, self.blocksize)):
            self.append(self._hash_block(block))
            self.size += len(block)
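
file_read_iterator is assumed by this example; a minimal sketch that yields fixed-size blocks until EOF:

def file_read_iterator(fp, blocksize):
    # yield successive blocks of at most `blocksize` bytes until EOF
    while True:
        block = fp.read(blocksize)
        if not block:
            break
        yield block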
Example #4
def get_progress_bar(**kwargs):
    bar = IncrementalBar(**kwargs)
    bar.suffix = "%(index)d / %(max)d (%(elapsed_td)s)"

    return bar
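
Hypothetical usage of the factory above (the keyword arguments are forwarded unchanged to IncrementalBar):

bar = get_progress_bar(message='Processing', max=500)
for _ in range(500):
    bar.next()
bar.finish()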
Example #5
    def _run_epoch(self,
                   model,
                   dataloader,
                   optimize=False,
                   save_activations=False,
                   reweight=None,
                   bit_pretrained=False,
                   adv_metrics=False):
        """Runs the model on a given dataloader.
        
        Note:
            The latter item in the returned tuple is what is necessary to run 
            GEORGECluster.train and GEORGECluster.evaluate.
        
        Args:
            model(nn.Module): A PyTorch model.
            dataloader(DataLoader): The dataloader. The dataset within must
                subclass GEORGEDataset.
            optimize(bool, optional): If True, the model is trained on self.criterion.
            save_activations(bool, optional): If True, saves the activations in
                `outputs`. Default is False.
            bit_pretrained(bool, optional): If True, assumes bit_pretrained and does not evaluate
                performance metrics
                
        Returns:
            metrics(Dict[str, Any]) A dictionary object that stores the metrics defined
                in self.config['metric_types'].
            outputs(Dict[str, Any]) A dictionary object that stores artifacts necessary
                for model analysis, including labels, activations, and predictions.
        """
        dataset = dataloader.dataset
        self._check_dataset(dataset)
        type_to_num_classes = {
            label_type: dataset.get_num_classes(label_type)
            for label_type in LABEL_TYPES
            if label_type in dataset.Y_dict.keys()
        }
        outputs = {
            'metrics': None,
            'activations': [],
            'superclass': [],
            'subclass': [],
            'true_subclass': [],
            'alt_subclass': [],
            'targets': [],
            'probs': [],
            'preds': [],
            'losses': [],
            'reweight': [],
        }
        activations_handle = self._init_activations_hook(
            model, outputs['activations'])
        if optimize:
            progress_prefix = 'Training'
            model.train()
        else:
            progress_prefix = 'Evaluation'
            model.eval()

        per_class_meters = self._init_per_class_meters(type_to_num_classes)
        metric_meters = {
            k: AverageMeter()
            for k in ['loss', 'acc', 'loss_rw', 'acc_rw']
        }

        progress = self.config['show_progress']
        if progress:
            bar = ProgressBar(progress_prefix, max=len(dataloader), width=50)

        for batch_idx, (inputs, targets) in enumerate(dataloader):
            batch_size = len(inputs)
            if self.use_cuda:
                inputs, targets = move_to_device([inputs, targets],
                                                 device=self.device)

            type_to_labels = {}
            for label_type in type_to_num_classes.keys():
                type_to_labels[label_type] = targets[label_type]
                outputs[label_type].append(targets[label_type])

            if optimize and not bit_pretrained:
                logits = model(inputs)
                loss_targets = targets['superclass']
                co = self.criterion(logits, loss_targets, targets['subclass'])
                loss, (losses, corrects), _ = co
                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()
            else:
                with torch.no_grad():
                    logits = model(inputs)
                    loss_targets = targets['superclass']
                    if bit_pretrained:
                        if progress:
                            bar.suffix = PROGRESS_BAR_SUFFIX.format(
                                batch=batch_idx + 1,
                                size=len(dataloader),
                                total=format_timedelta(bar.elapsed_td),
                                eta=format_timedelta(bar.eta_td),
                                **{k: 0
                                   for k in prog_metric_names})
                            bar.next()
                        continue
                    co = self.criterion(logits, loss_targets,
                                        targets['subclass'])
                    loss, (losses, corrects), _ = co

            if not save_activations:
                outputs['activations'].pop()  # delete activations

            reweight_vec = (None if reweight is None
                            else reweight[targets['true_subclass']])

            metrics = self._compute_progress_metrics(losses,
                                                     corrects,
                                                     type_to_labels,
                                                     type_to_num_classes,
                                                     per_class_meters,
                                                     reweight=reweight_vec)
            acc, preds = compute_accuracy(logits.data,
                                          loss_targets.data,
                                          return_preds=True)

            outputs['probs'].append(
                F.softmax(logits, dim=1).detach().cpu()[:, 1])
            outputs['preds'].append(preds)
            outputs['losses'].append(losses.detach().cpu())
            outputs['targets'].append(loss_targets.detach().cpu())
            if reweight_vec is not None:
                outputs['reweight'].append(reweight_vec.cpu())

            self._update_metrics(metric_meters, acc, loss, losses, corrects,
                                 batch_size, reweight_vec)

            # start from the base template; AUROC fields may be appended below
            PROGRESS_BAR_STR = PROGRESS_BAR_SUFFIX

            if self.compute_auroc:
                sub_map = dataloader.dataset.get_class_map('subclass')
                assert set(sub_map.keys()) == {0, 1}  # must be a binary problem
                targets_cat = torch.cat(outputs['targets'])
                probs_cat = torch.cat(outputs['probs'])
                auroc = compute_roc_auc(targets_cat, probs_cat)
                metrics['auroc'] = auroc
                has_alt_subclass = 'alt_subclass' in dataloader.dataset.Y_dict
                keys = ['subclass', 'true_subclass']
                if has_alt_subclass:
                    keys.append('alt_subclass')
                for key in keys:
                    sub_map = dataloader.dataset.get_class_map(key)
                    neg_subclasses = sub_map[0]
                    pos_subclasses = sub_map[1]
                    if len(neg_subclasses) == len(pos_subclasses) == 1:
                        # only one subclass in each superclass
                        rob_auroc = auroc
                    else:
                        subclass_labels = torch.cat(outputs[key])
                        paired_aurocs = []
                        for neg_subclass in neg_subclasses:
                            for pos_subclass in pos_subclasses:
                                inds = ((subclass_labels == neg_subclass) |
                                        (subclass_labels == pos_subclass)).cpu()
                                subset_pair_auroc = compute_roc_auc(
                                    targets_cat[inds], probs_cat[inds])
                                paired_aurocs.append(subset_pair_auroc)
                        rob_auroc = min(paired_aurocs)
                    metrics[f'{key}_rob_auroc'] = rob_auroc
                if not has_alt_subclass:
                    metrics['alt_subclass_rob_auroc'] = auroc
                PROGRESS_BAR_STR += ' | AUROC: {auroc:.4f} | R AUROC: {subclass_rob_auroc:.4f} | ' \
                                    'TR AUROC: {true_subclass_rob_auroc:.4f} | AR AUROC: {alt_subclass_rob_auroc:.4f}'

            if progress:
                bar.suffix = PROGRESS_BAR_STR.format(
                    batch=batch_idx + 1,
                    size=len(dataloader),
                    total=format_timedelta(bar.elapsed_td),
                    eta=format_timedelta(bar.eta_td),
                    **{
                        **metrics,
                        **{k: v.avg
                           for k, v in metric_meters.items()}
                    })
                bar.next()
        if progress:
            bar.finish()
        if activations_handle:
            activations_handle.remove()

        for k, v in outputs.items():
            if isinstance(v, list) and len(v) > 0:
                outputs[k] = concatenate_iterable(v)

        if bit_pretrained:
            return outputs['metrics'], outputs

        outputs['metrics'] = metrics
        outputs['metrics'].update(
            {k: float(v.avg)
             for k, v in metric_meters.items()})
        outputs['metrics'].update(self._compute_aggregate_metrics(outputs))
        self._print_output_metrics(outputs)

        if adv_metrics:
            scaa = np.mean([
                ga.avg * 100
                for ga in np.array(per_class_meters['per_true_subclass_accs'])
            ])
            self.logger.info(
                f'All accs: {[ga.avg * 100 for ga in np.array(per_class_meters["per_true_subclass_accs"])]}'
            )
            self.logger.info(f'SCAA: {scaa:.3f}')
            ap = sklearn.metrics.average_precision_score(
                outputs['targets'],
                outputs['probs'],
                sample_weight=outputs['reweight']
                if reweight_vec is not None else None)
            self.logger.info(f'MaP: {ap:.4f}')

        return outputs['metrics'], outputs
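
The progress reporting above prebuilds a suffix template (PROGRESS_BAR_SUFFIX, optionally extended with AUROC fields) and .format()s it once per batch. A stripped-down, self-contained sketch of the same pattern, with a hypothetical template and placeholder metrics:

from progress.bar import Bar

# hypothetical template in the spirit of PROGRESS_BAR_SUFFIX above
TEMPLATE = '({batch}/{size}) | loss: {loss:.4f} | acc: {acc:.4f}'

bar = Bar('Evaluation', max=100, width=50)
for batch_idx in range(100):
    loss, acc = 0.5, 0.9  # placeholder per-batch metrics
    bar.suffix = TEMPLATE.format(batch=batch_idx + 1, size=100, loss=loss, acc=acc)
    bar.next()
bar.finish()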
Example #6
def train_torch(model,
                loader,
                loss_fn,
                optimizer,
                epochs,
                use_cuda,
                classification=True,
                name='Train'):
    model.train()

    top1_accuracy = None
    top5_accuracy = None

    if classification:
        top1_accuracy = utils.AverageMeter()
        top5_accuracy = utils.AverageMeter()

    average_loss = utils.AverageMeter()

    bar = None

    if logger.getEffectiveLevel() == logging.INFO:
        bar = IncrementalBar(name, max=epochs * len(loader))

    for i in range(epochs):
        if bar is not None:
            bar.suffix = 'Epoch {}/{}'.format(i + 1, epochs)

        for images, targets in loader:
            torch_images = torch.from_numpy(images)

            torch_targets = torch.from_numpy(targets)
            if classification:
                # Torch uses long ints for the labels
                torch_targets = torch_targets.long()

            torch_images.requires_grad_()

            if use_cuda:
                torch_images = torch_images.cuda()
                torch_targets = torch_targets.cuda()

            # Compute the outputs
            outputs = model(torch_images)
            loss = loss_fn(outputs, torch_targets)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # Compute the statistics
            outputs = outputs.detach().cpu().numpy()
            loss = loss.detach().cpu().numpy()
            average_loss.update(loss, len(images))

            if classification:
                top1_count = utils.top_k_count(outputs, targets, k=1)
                top5_count = utils.top_k_count(outputs, targets, k=5)

                top1_accuracy.update(1, top1_count)
                top1_accuracy.update(0, len(images) - top1_count)
                top5_accuracy.update(1, top5_count)
                top5_accuracy.update(0, len(images) - top5_count)

            if bar is not None:
                bar.next()

        logger.debug('\n=========')
        logger.debug('Epoch {}'.format(i + 1))
        logger.debug('=========\n')
        logger.debug('Average Loss: {:2.2e}'.format(average_loss.avg))

        if classification:
            logger.debug('Top-1 Accuracy: {:2.2f}%'.format(top1_accuracy.avg *
                                                           100.0))
            logger.debug('Top-5 Accuracy: {:2.2f}%'.format(top5_accuracy.avg *
                                                           100.0))
    if bar is not None:
        bar.finish()

    if classification:
        top1_accuracy = top1_accuracy.avg
        top5_accuracy = top5_accuracy.avg

    return average_loss.avg, top1_accuracy, top5_accuracy
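
utils.top_k_count is assumed above; a minimal numpy sketch that counts how many samples have their true label among the k highest-scoring outputs:

import numpy as np

def top_k_count(outputs, targets, k):
    # indices of the k highest scores per row (order does not matter)
    top_k = np.argpartition(outputs, -k, axis=1)[:, -k:]
    # each row matches its target at most once, so the sum is a sample count
    return int(np.sum(top_k == np.asarray(targets)[:, None]))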
Example #7
    except Fault as fault:  # excerpt: the matching try block precedes this snippet
        if fault.status != 409:
            raise
    else:
        return v

    if isinstance(fault.data, str):
        missing = json.loads(fault.data)
    elif isinstance(fault.data, list):
        missing = fault.data

    if '' in missing:
        del missing[missing.index(''):]

    bar = IncrementalBar('Uploading', max=len(missing))
    bar.suffix = '%(percent).1f%% - %(eta)ds'
    with open(path) as fp:
        for hash in missing:
            offset = hashes.index(unhexlify(hash)) * blocksize
            fp.seek(offset)
            block = fp.read(blocksize)
            client.update_container_data(container, StringIO(block))
            bar.next()
    bar.finish()

    return client.create_object_by_hashmap(container, object, map, **kwargs)


def download(client, container, object, path):

    res = client.retrieve_object_hashmap(container, object)
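
The '%(percent).1f%% - %(eta)ds' suffix used here needs no manual formatting: the progress library interpolates the %(...) fields against the bar's own attributes on every redraw. A self-contained demonstration:

from time import sleep
from progress.bar import IncrementalBar

bar = IncrementalBar('Copying', max=50)
bar.suffix = '%(percent).1f%% - %(eta)ds'  # percent and eta come from the bar itself
for _ in range(50):
    sleep(0.01)
    bar.next()
bar.finish()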
Example #8
def test(test_loader, model, optimizer, epoch, config, classes_name,
         segmentation_enable):

    # switch to evaluate mode
    model.eval()
    n_classes = config['yolo']['classes']

    end = time.time()
    bar = IncrementalBar('Validating', max=len(test_loader), width=32)
    n_gt = [0] * n_classes
    correct = [0] * n_classes
    n_pred = [0] * n_classes
    n_iou = [0] * n_classes
    n_images = 0
    det_boxes = list()
    det_labels = list()
    det_scores = list()
    true_boxes = list()
    true_labels = list()
    true_difficulties = list()
    gt_box = 0
    pred_box = 0

    for batch_idx, (images, targets) in enumerate(test_loader):
        images = images.to(device)  # (batch_size (N), 3, H, W)
        labels = [torch.Tensor(l).to(device) for l in targets]
        bs = len(labels)
        # compute output
        with torch.no_grad():
            if segmentation_enable:
                # (N, num_defaultBoxes, 4), (N, num_defaultBoxes, n_classes)
                detections, _ = model(images)
            else:
                detections = model(images)
            for sample_i in range(bs):

                # Get labels for the sample where width is not zero (dummies)
                target_sample = labels[sample_i]
                gt_box = gt_box + len(target_sample)
                # Convert (cx, cy, w, h) targets to (x1, y1, x2, y2) corners
                tx1 = torch.unsqueeze(target_sample[..., 1] - target_sample[..., 3] / 2, 1)
                tx2 = torch.unsqueeze(target_sample[..., 1] + target_sample[..., 3] / 2, 1)
                ty1 = torch.unsqueeze(target_sample[..., 2] - target_sample[..., 4] / 2, 1)
                ty2 = torch.unsqueeze(target_sample[..., 2] + target_sample[..., 4] / 2, 1)
                box = torch.cat((tx1, ty1, tx2, ty2), 1)
                size = target_sample.size(0)

                true_boxes.append(box)
                true_labels.append(target_sample[..., 0])
                true_difficulties.append(torch.zeros(size,
                                                     requires_grad=False))
                preds = detections[sample_i]
                pred_box = pred_box + len(preds)
                if preds is not None:
                    det_boxes.append(preds[..., :4])
                    det_labels.append((preds[..., 6] + 1).to(device))
                    conf = (preds[..., 4] * preds[..., 5]).to(device)
                    det_scores.append(conf)
                else:
                    empty = torch.empty(0).to(device)
                    det_boxes.append(empty)
                    det_labels.append(empty)
                    det_scores.append(empty)

                n_images = n_images + 1

        # plot progress
        bar.suffix = '({batch}/{size}) | Total: {total:} | ETA: {eta:}| n_img: {n_img:} | gt_box: {gt_box:} | pred_box: {pred_box:}'.format(
            batch=batch_idx + 1,
            size=len(test_loader),
            total=bar.elapsed_td,
            eta=bar.eta_td,
            n_img=n_images,
            gt_box=gt_box,
            pred_box=pred_box)
        bar.next()
    bar.finish()
    print("\nVal conf. is %f\n" % (model.yolo_losses[0].val_conf))
    model.yolo_losses[0].val_conf = adjust_confidence(
        gt_box, pred_box, model.yolo_losses[0].val_conf)
    model.yolo_losses[1].val_conf = adjust_confidence(
        gt_box, pred_box, model.yolo_losses[1].val_conf)

    # Calculate mAP
    APs, mAP, TP, FP = calculate_mAP(det_boxes, det_labels, det_scores,
                                     true_boxes, true_labels,
                                     true_difficulties, classes_name)
    pp.pprint(APs)
    print('\nMean Average Precision (mAP): %.3f' % mAP)
    return mAP
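
The unsqueeze/cat block above converts (cx, cy, w, h) targets to corner form; an equivalent hypothetical helper:

import torch

def cxcywh_to_xyxy(target_sample):
    # columns 1..4 of a target row are (cx, cy, w, h); column 0 is the label
    cx, cy = target_sample[..., 1], target_sample[..., 2]
    w, h = target_sample[..., 3], target_sample[..., 4]
    return torch.stack((cx - w / 2, cy - h / 2, cx + w / 2, cy + h / 2), dim=-1)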
Example #9
def train(train_loader, model, optimizer, epoch, sampler, segmentation_enable):
    model.train()
    bar = IncrementalBar('Training', max=len(sampler), width=12)
    losses = AverageMeter()
    recall = [AverageMeter(), AverageMeter()]
    iou = [AverageMeter(), AverageMeter()]
    obj = [AverageMeter(), AverageMeter()]
    no_obj = [AverageMeter(), AverageMeter()]
    conf_loss = [AverageMeter(), AverageMeter()]
    cls_loss = [AverageMeter(), AverageMeter()]
    cls_score = [AverageMeter(), AverageMeter()]
    count = [AverageMeter(), AverageMeter()]
    seg_obj = AverageMeter()
    seg_no_obj = AverageMeter()
    for batch_idx, (images, targets, total_num,
                    seg_maps) in enumerate(train_loader):
        bs = images.size(0)
        optimizer.zero_grad()
        images = images.to(device)  # (batch_size (N), 3, H, W)
        if segmentation_enable:
            seg_maps = seg_maps.to(device)  # (batch_size (N), H, W, num seg classes)
            outputs, seg_out = model(images, targets, seg_maps)
        else:
            outputs = model(images, targets, seg_maps)
        t_loss = []

        for i, l in enumerate(outputs):
            # per-scale stats: (loss, recall, iou, obj, no_obj, cls_score, count)
            t_loss.append(l[0])
            recall[i].update(l[1])
            iou[i].update(l[2])
            obj[i].update(l[3])
            no_obj[i].update(l[4])
            cls_score[i].update(l[5])
            count[i].update(l[6])
        loss = sum(t_loss)
        if segmentation_enable:
            seg_obj.update(seg_out[1])
            seg_no_obj.update(seg_out[2])
            loss += seg_out[0]
        losses.update(loss.item(), bs)
        loss.backward()
        optimizer.step()
        if segmentation_enable:
            bar.suffix  = \
                '%(percent)3d%% | {total:} | {loss:.4f} | {cnt:2.1f} | {iou:.3f} | {obj:.3f} | {no_obj:.4f} | {cls:.3f} | {rec:.4f} | {seg_obj:.3f} | {seg_no_obj:.6f} |'\
                .format(
                total=bar.elapsed_td,
                loss=losses.avg,
                cnt=(count[0].avg+count[1].avg),
                iou=(iou[0].avg+iou[1].avg)/2.,
                obj=(obj[0].avg+obj[1].avg)/2.,
                no_obj=(no_obj[0].avg+no_obj[1].avg)/2.,
                cls=(cls_score[0].avg+cls_score[1].avg)/2.,
                rec=(recall[0].avg+recall[1].avg)/2.,
                seg_obj=seg_obj.avg,
                seg_no_obj = seg_no_obj.avg
                )
        else:
            bar.suffix  = \
                '%(percent)3d%% | {total:} | {loss:.4f} | {cnt1:2.1f} | {iou1:.3f} | {obj1:.3f} | {no_obj1:.4f} | {cls1:.3f} | {rec1:.3f}  | {cnt2:2.1f}  | {iou2:.3f} | {obj2:.3f} | {no_obj2:.4f}  | {cls2:.3f}  | {rec2:.3f}   |'\
                .format(
                total=bar.elapsed_td,
                loss=losses.avg,
                cnt1=count[0].avg,
                cnt2=count[1].avg,
                iou1=iou[0].avg,
                iou2=iou[1].avg,
                obj1=obj[0].avg,
                no_obj1=no_obj[0].avg,
                cls1=cls_score[0].avg,
                obj2=obj[1].avg,
                no_obj2=no_obj[1].avg,
                cls2=cls_score[1].avg,
                rec1=recall[0].avg,
                rec2=recall[1].avg,
                )
        bar.next(total_num)
    bar.finish()
    return losses.avg, (iou[0].avg + iou[1].avg) / 2
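
Note that bar.next() accepts an increment, which is how the loop above advances by total_num images per batch rather than by one. A minimal demonstration:

from progress.bar import IncrementalBar

bar = IncrementalBar('Training', max=1000, width=12)
for batch_size in [128] * 7 + [104]:  # uneven final batch
    bar.next(batch_size)  # advance by the number of items consumed
bar.finish()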
Example #10
from progress.bar import IncrementalBar


def to_color(s, c):
    return f'\x1b[{c}m{s}\x1b[0m' if use_color else s


def out_line(s='', color=37, indent=0, thread_name=None):
    t = ''
    if thread_name is not None and show_threads:
        t = to_color(f'<{thread_name}> ', thread_color)
    print(t + ' ' * indent + to_color(s, color))


def out(s='', color=37, indent=0, thread_name=None):
    for line in s.split('\n'):
        out_line(line, color, indent, thread_name)


use_color = True
show_threads = True
thread_color = 34

# Module-level progress-bar state; the bar's message and max are expected to
# be configured before the bar is used.
export_task_count = 0
filtered_export_task_count = 0
bar = IncrementalBar()
bar.suffix = "%(index)d / %(max)d (%(elapsed_td)s)"