Example #1
    log = test_metrics.result()

    print(log)
    # summary(model, (1,496, 384))
    time_results = compute_precise_time(model, [496, 384], 96, loss_fn, device)
    print(time_results)
    reset_bn_stats(model)
    return


if __name__ == '__main__':
    args = argparse.ArgumentParser(description='PyTorch Template')
    args.add_argument('-c',
                      '--config',
                      default=None,
                      type=str,
                      help='config file path (default: None)')
    args.add_argument('-r',
                      '--resume',
                      default=None,
                      type=str,
                      help='path to latest checkpoint (default: None)')
    args.add_argument('-d',
                      '--device',
                      default=None,
                      type=str,
                      help='indices of GPUs to enable (default: all)')

    config = ConfigParser.from_args(args, mode='test')
    main(config)
Example #2
    CustomArgs = collections.namedtuple('CustomArgs', 'flags type target')
    options = [
        CustomArgs(['--gamma'], type=float, target='lr_scheduler;args;gamma'),
        CustomArgs(['--save_period'], type=int, target='trainer;save_period'),
        CustomArgs(['--reduce_dimension'],
                   type=int,
                   target='arch;args;reduce_dimension'),
        CustomArgs(['--layer2_dimension'],
                   type=int,
                   target='arch;args;layer2_output_dim'),
        CustomArgs(['--layer3_dimension'],
                   type=int,
                   target='arch;args;layer3_output_dim'),
        CustomArgs(['--layer4_dimension'],
                   type=int,
                   target='arch;args;layer4_output_dim'),
        CustomArgs(['--num_experts'], type=int,
                   target='arch;args;num_experts'),
        CustomArgs(['--distribution_aware_diversity_factor'],
                   type=float,
                   target='loss;args;additional_diversity_factor'),
        CustomArgs(['--pos_weight'], type=float,
                   target='arch;args;pos_weight'),
        CustomArgs(['--collaborative_loss'],
                   type=int,
                   target='loss;args;collaborative_loss'),
        CustomArgs(['--distill_checkpoint'],
                   type=str,
                   target='distill_checkpoint')
    ]
    config = ConfigParser.from_args(args, options)
    main(config)
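
The ';'-separated target strings in the options list above address nested keys of the JSON config (e.g. 'lr_scheduler;args;gamma'), which appears to follow the pytorch-template convention. Below is a minimal sketch of how such options might be registered on the parser and folded back into the config under that assumption; from_args_sketch and _set_by_path are illustrative names, not the actual ConfigParser API, and the parser is assumed to already define -c/--config as in the examples.

import argparse
import collections
import json
from functools import reduce
from operator import getitem

CustomArgs = collections.namedtuple('CustomArgs', 'flags type target')


def _set_by_path(tree, path, value):
    # Walk the nested dict along the ';'-separated key path and set the leaf.
    keys = path.split(';')
    reduce(getitem, keys[:-1], tree)[keys[-1]] = value


def from_args_sketch(parser, options):
    # Register each custom flag, parse the command line, then overwrite the
    # matching config entries with any values that were actually supplied.
    for opt in options:
        parser.add_argument(*opt.flags, default=None, type=opt.type)
    args = parser.parse_args()
    with open(args.config) as f:
        config = json.load(f)
    for opt in options:
        # Derive the argparse dest from the first long flag, as argparse does.
        name = next(f for f in opt.flags if f.startswith('--')).lstrip('-').replace('-', '_')
        value = getattr(args, name)
        if value is not None:
            _set_by_path(config, opt.target, value)
    return config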
Example #3
                        metavar='N', help='mini-batch size (default: 256)')
    parser.add_argument('--lr', '--learning-rate', default=0.001, type=float,
                        metavar='LR', help='initial learning rate')
    parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
                        help='momentum')
    parser.add_argument('--weight-decay', '--wd', default=5e-3, type=float,
                        metavar='W', help='weight decay (default: 5e-3)')
    parser.add_argument('--categorical', default=True, action="store_true")
    parser.add_argument('--continuous', default=False, action="store_true")

    # ========================= Monitor Configs ==========================
    parser.add_argument('--print-freq', '-p', default=20, type=int,
                        metavar='N', help='print frequency (default: 20)')
    parser.add_argument('--eval-freq', '-ef', default=5, type=int,
                        metavar='N', help='evaluation frequency (default: 5)')

    # ========================= Runtime Configs ==========================
    parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
                        help='number of data loading workers (default: 4)')

    CustomArgs = collections.namedtuple('CustomArgs', 'flags type target')
    options = [
        CustomArgs(['--exp_name'], type=str, target='name'),
    ]
    config = ConfigParser.from_args(parser, options)
    print(config)

    args = parser.parse_args()

    main(args, config)
Example #4
                      optimizer,
                      config=config,
                      data_loader=data_loader,
                      valid_data_loader=valid_data_loader,
                      lr_scheduler=lr_scheduler)
    trainer.train()


if __name__ == '__main__':
    args = argparse.ArgumentParser()
    args.add_argument('-c',
                      '--config',
                      default=None,
                      type=str,
                      required=True,
                      help='path to config file (default: None)')
    args.add_argument('-r',
                      '--resume',
                      default=None,
                      type=str,
                      help='path to latest checkpoint (default: None)')
    args.add_argument('-d',
                      '--device',
                      default=None,
                      type=str,
                      help='indices of GPUs to enable (default: all)')
    args.add_argument('-s', '--seed', default=1234, type=int)

    config = ConfigParser.from_args(args)
    main(config)
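
Example #4 adds a '--seed' option but the snippet does not show how the seed is consumed. The following is a minimal, hypothetical seeding helper that such a script might call; set_seed is an illustrative name, and it assumes numpy and torch are in use, as elsewhere in these examples.

import random
import numpy as np
import torch


def set_seed(seed: int = 1234) -> None:
    # Seed the Python, NumPy and PyTorch RNGs for reproducible runs.
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)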
Example #5
from data_loader.data_loaders import *
from train_test import *
from parse_config import ConfigParser


def main(config):
    data = load_data(config)
    train_test(data, config)


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='AnchorKG')

    parser.add_argument('-c',
                        '--config',
                        default="./config.json",
                        type=str,
                        help='config file path (default: ./config.json)')
    parser.add_argument('-r',
                        '--resume',
                        default=None,
                        type=str,
                        help='path to latest checkpoint (default: None)')
    parser.add_argument('-d',
                        '--device',
                        default=None,
                        type=str,
                        help='indices of GPUs to enable (default: all)')

    config = ConfigParser.from_args(parser)
    main(config)
Example #6
                              yticklabels=tgt_seq)
            fig.xaxis.set_label_position('top')
            fig.figure.show()


if __name__ == '__main__':
    args = argparse.ArgumentParser()
    args.add_argument('-c',
                      '--config',
                      default=None,
                      type=str,
                      help='config file path (default: None)')
    args.add_argument('--model-path',
                      type=str,
                      required=True,
                      help='path to the model.pth checkpoint to test')
    args.add_argument('-r',
                      '--resume',
                      default=None,
                      type=str,
                      help='path to latest checkpoint (default: None)')
    args.add_argument('-d',
                      '--device',
                      default=None,
                      type=str,
                      help='indices of GPUs to enable (default: all)')
    # custom cli options to modify configuration from default values given in json file.
    CustomArgs = collections.namedtuple('CustomArgs', 'flags type target help')
    config, args = ConfigParser.from_args(args)
    main(config, args.model_path)
Example #7
                                     config.log_dir))
    logger.info(log)


if __name__ == '__main__':
    args = argparse.ArgumentParser(description='PyTorch Template')
    args.add_argument('-c',
                      '--config',
                      default=None,
                      type=str,
                      help='config file path (default: None)')
    args.add_argument('-r',
                      '--resume',
                      default=None,
                      type=str,
                      help='path to latest checkpoint (default: None)')
    args.add_argument('-d',
                      '--device',
                      default=None,
                      type=str,
                      help='indices of GPUs to enable (default: all)')
    CustomArgs = collections.namedtuple('CustomArgs', 'flags type target help')
    options = [
        CustomArgs(['-x', '--extract'],
                   type=str,
                   target='extract',
                   help='extract parameters of the model (default: False)')
    ]
    config = ConfigParser.from_args(args, options=options, test=True)
    main(config)
Example #8
            batch_size = data.shape[0]
            total_loss += (loss_s.item() - loss_b.item()) * batch_size

    n_samples = len(data_loader.sampler)
    log = {'loss': total_loss / n_samples}
    logger.info(log)


if __name__ == '__main__':
    args = argparse.ArgumentParser(description='Test code for evaluation.')
    # args.add_argument('-c', '--config', default=None, type=str,
    #                   help='config file path (default: None)')
    args.add_argument('-c',
                      '--config',
                      default='./configs/fmnist_glow_config_test.json',
                      type=str,
                      help='config file path (default: None)')
    args.add_argument('-r',
                      '--resume',
                      default=None,
                      type=str,
                      help='path to latest checkpoint (default: None)')
    args.add_argument('-d',
                      '--device',
                      default=None,
                      type=str,
                      help='indices of GPUs to enable (default: all)')

    config_test = ConfigParser.from_args(args)
    main(config_test)
Example #9
def parse_args():
    global config
    args = argparse.ArgumentParser(
        description='MASTER PyTorch Distributed Training')
    args.add_argument('-c',
                      '--config',
                      default=None,
                      type=str,
                      help='config file path (default: None)')
    args.add_argument('-r',
                      '--resume',
                      default=None,
                      type=str,
                      help='path to latest checkpoint (default: None)')
    args.add_argument('-d',
                      '--device',
                      default=None,
                      type=str,
                      help='indices of GPUs to enable (default: all)')
    # custom cli options to modify configuration from default values given in json file.
    CustomArgs = collections.namedtuple('CustomArgs',
                                        'flags default type target help')
    options = [
        # CustomArgs(['--lr', '--learning_rate'], default=0.0001, type=float, target='optimizer;args;lr',
        #            help='learning rate (default: 0.0001)'),
        CustomArgs(
            ['-dist', '--distributed'],
            default='true',
            type=str,
            target='distributed',
            help='run distributed training, true or false (default: true); '
            'turning distributed mode off makes it possible to debug the code on a single GPU/CPU'),
        CustomArgs(
            ['--local_world_size'],
            default=1,
            type=int,
            target='local_world_size',
            help=
            'the number of processes running on each node; this is passed in explicitly '
            'and is typically either 1 or the number of GPUs per node (default: 1)'
        ),
        CustomArgs(
            ['--local_rank'],
            default=0,
            type=int,
            target='local_rank',
            help=
            'this is passed in automatically via torch.distributed.launch.py; '
            'each process is assigned a local rank ID in [0, local_world_size-1] (default: 0)'
        ),
        CustomArgs(
            ['--finetune'],
            default='false',
            type=str,
            target='finetune',
            help=
            'finetune mode loads the resume checkpoint but does not reuse the previous config and optimizer '
            '(default: false); there are three running modes: normal, resume and finetune'
        )
    ]
    config = ConfigParser.from_args(args, options)
    return config
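
The '--distributed' and '--finetune' options above are declared as strings taking 'true'/'false', so the training code presumably converts them to booleans before use. A hypothetical helper for that conversion follows; str2bool is not part of the original snippet.

def str2bool(value: str) -> bool:
    # Map the 'true'/'false' style string flags used above onto real booleans.
    normalized = value.strip().lower()
    if normalized in ('true', '1', 'yes'):
        return True
    if normalized in ('false', '0', 'no'):
        return False
    raise ValueError(f"expected a boolean-like string, got {value!r}")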