Example No. 1
def get_composed_augmentations(aug_dict):
    # `key2aug` (defined elsewhere in the module) maps augmentation names to
    # classes; `Compose` chains the instantiated transforms in order.
    if aug_dict is None:
        logger.info("Using No Augmentations")
        return None

    augmentations = []
    for aug_key, aug_param in aug_dict.items():
        augmentations.append(key2aug[aug_key](aug_param))
        logger.info("Using {} aug with params {}".format(aug_key, aug_param))
    return Compose(augmentations)
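To show how this function is typically driven, here is a minimal, self-contained sketch; `Compose`, `Scale`, and the `key2aug` registry below are toy stand-ins for whatever the surrounding module actually defines, and the 'scale' key is hypothetical:

import logging

logger = logging.getLogger(__name__)


class Compose:
    # Chains transforms: each receives the output of the previous one.
    def __init__(self, transforms):
        self.transforms = transforms

    def __call__(self, img):
        for t in self.transforms:
            img = t(img)
        return img


class Scale:
    # Toy augmentation: a real one would resize the image to `self.size`.
    def __init__(self, size):
        self.size = size

    def __call__(self, img):
        return img


key2aug = {'scale': Scale}  # maps config keys to augmentation classes

aug = get_composed_augmentations({'scale': 512})  # -> Compose([Scale(512)])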
Example No. 2
import torch
from ignite.engine import Events, create_supervised_evaluator
from ignite.metrics import Accuracy

from cvnet.utils.logger import logger


def inference(cfg, model, val_loader):
    logger.info("Start infer")
    # Fall back to CPU when CUDA is requested but unavailable.
    device = (torch.device('cuda' if torch.cuda.is_available() else 'cpu')
              if cfg.MODEL.DEVICE == 'cuda' else torch.device('cpu'))

    evaluator = create_supervised_evaluator(model,
                                            metrics={'accuracy': Accuracy()},
                                            device=device)

    # adding handlers using `evaluator.on` decorator API
    @evaluator.on(Events.EPOCH_COMPLETED)
    def print_validation_results(engine):
        metrics = engine.state.metrics  # metrics for the completed run
        avg_acc = metrics['accuracy']
        logger.info("Validation Results - Accuracy: {:.3f}".format(avg_acc))

    evaluator.run(val_loader)
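`evaluator.run` returns the engine's final `State`, so a caller can also read the metric directly rather than relying only on the logging handler; a minimal sketch:

state = evaluator.run(val_loader)
avg_acc = state.metrics['accuracy']  # same value the handler logs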
Example No. 3
def print_validation_results(engine):
    # As a standalone handler there is no `evaluator` in scope, so read the
    # metrics from the `engine` argument that fired the event.
    metrics = engine.state.metrics
    avg_acc = metrics['accuracy']
    logger.info("Validation Results - Accuracy: {:.3f}".format(avg_acc))
Example No. 4
import sys

import torch
from ignite.engine import Events
from ignite.engine import create_supervised_evaluator
from ignite.metrics import Accuracy

sys.path.append('../..')

from cvnet.utils.logger import logger


def inference(cfg, model, val_loader):
    logger.info("Start infer")
    # Fall back to CPU when CUDA is requested but unavailable.
    device = (torch.device('cuda' if torch.cuda.is_available() else 'cpu')
              if cfg.MODEL.DEVICE == 'cuda' else torch.device('cpu'))

    evaluator = create_supervised_evaluator(model,
                                            metrics={'accuracy': Accuracy()},
                                            device=device)

    # adding handlers using `evaluator.on` decorator API
    @evaluator.on(Events.EPOCH_COMPLETED)
    def print_validation_results(engine):
        metrics = engine.state.metrics  # metrics for the completed run
        avg_acc = metrics['accuracy']
        logger.info("Validation Results - Accuracy: {:.3f}".format(avg_acc))

    evaluator.run(val_loader)


if __name__ == '__main__':
    logger.info('infer')
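For a quick smoke test, `inference` can be driven end to end with throwaway objects; the `DummyCfg` class, toy model, and random data below are all hypothetical and stand in for the project's real config, model, and DataLoader:

import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset


class DummyCfg:
    class MODEL:
        DEVICE = 'cpu'


# A 10-way linear classifier over 8-dim inputs, purely for illustration.
model = nn.Linear(8, 10)
dataset = TensorDataset(torch.randn(64, 8), torch.randint(0, 10, (64,)))
val_loader = DataLoader(dataset, batch_size=16)

inference(DummyCfg, model, val_loader)  # logs the validation accuracy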