Code Example #1
File: utils_test.py Project: liuguoyou/gconv-prune
  def test_load_model(self):
    """ load_model """
    # load a CIFAR model
    model = utils.load_model('preresnet164', 'cifar10', use_cuda=False)
    self.assertIsInstance(model, nn.Module)

    model = utils.load_model('vgg16', 'cifar10', use_cuda=False)
    self.assertIsInstance(model, nn.Module)

    # load an ImageNet model, with and without pretrained weights
    model = utils.load_model('resnet50', 'imagenet', use_cuda=False)
    self.assertIsInstance(model, nn.Module)
    model = utils.load_model(
        'resnet50', 'imagenet', use_cuda=False, pretrained=True)
    self.assertIsInstance(model, nn.Module)
Code Example #2
    def test_load_model(self):
        """ load_model """
        # load a CIFAR model
        model = utils.load_model("preresnet164", "cifar10", use_cuda=False)
        self.assertIsInstance(model, nn.Module)

        model = utils.load_model("vgg16", "cifar10", use_cuda=False)
        self.assertIsInstance(model, nn.Module)

        model = utils.load_model("resnet50", "imagenet", use_cuda=False)
        self.assertIsInstance(model, nn.Module)
        model = utils.load_model("resnet50",
                                 "imagenet",
                                 use_cuda=False,
                                 pretrained=True)
        self.assertIsInstance(model, nn.Module)
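
Both versions of this test exercise the same entry point: utils.load_model takes an architecture name and a dataset name, plus optional use_cuda and pretrained keyword arguments, and returns an nn.Module. A minimal sketch of calling it outside the test harness (the import of utils is an assumption; only the call signature is taken from the tests):

import torch.nn as nn

# Assumed import: `utils` is the gconv-prune utility module used in the tests above.
import utils

# Untrained CIFAR-10 model on the CPU.
cifar_model = utils.load_model('preresnet164', 'cifar10', use_cuda=False)
assert isinstance(cifar_model, nn.Module)

# ImageNet ResNet-50 with pretrained weights.
imagenet_model = utils.load_model('resnet50', 'imagenet', use_cuda=False, pretrained=True)
assert isinstance(imagenet_model, nn.Module)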
Code Example #3
    def load_model(self, **kwargs):
        """ Load model and its coefficients. """
        if not self.args.resume_from_best:
            checkpoint_file_name = "checkpoint.pth.tar"
        else:
            checkpoint_file_name = "model_best.pth.tar"

        return utils.load_model(self.args.arch,
                                self.args.dataset,
                                resume=self.args.resume,
                                pretrained=self.args.pretrained,
                                checkpoint_file_name=checkpoint_file_name,
                                **kwargs)
Code Example #4
File: model_runner.py Project: liuguoyou/gconv-prune
    def load_model(self, **kwargs):
        """ Load model and its coefficients. """
        if not self.args.resume_from_best:
            checkpoint_file_name = 'checkpoint.pth.tar'
        else:
            checkpoint_file_name = 'model_best.pth.tar'

        # logging.info(
        #     '==> Loading model from checkpoint: {}'.format(checkpoint_file_name))

        return utils.load_model(self.args.arch,
                                self.args.dataset,
                                resume=self.args.resume,
                                pretrained=self.args.pretrained,
                                checkpoint_file_name=checkpoint_file_name,
                                **kwargs)
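
The wrapper in the last two examples reads arch, dataset, resume, pretrained, and resume_from_best off self.args, picking model_best.pth.tar when resuming from the best checkpoint and checkpoint.pth.tar otherwise. A minimal sketch of the argument parser such a runner appears to expect (only the attribute names come from the code above; the flags, defaults, and help strings are assumptions):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--arch', default='resnet50')
parser.add_argument('--dataset', default='imagenet')
parser.add_argument('--resume', default='',
                    help='checkpoint path or directory to resume from')
parser.add_argument('--pretrained', action='store_true')
parser.add_argument('--resume-from-best', action='store_true',
                    help='load model_best.pth.tar instead of checkpoint.pth.tar')
args = parser.parse_args()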
Code Example #5
def main():
    use_cuda = not args.cpu
    print("==> Loading model {}".format(args.arch))
    model = utils.load_model(args.arch, "imagenet", use_cuda=use_cuda, pretrained=True)

    print("==> Loading group config {}".format(args.group_cfg))
    with open(args.group_cfg, "r") as f:
        group_cfg = json.load(f)

    print("==> Updating model ...")
    model = update_model(model, group_cfg, use_cuda=use_cuda)

    print(model)

    print(
        "==> Model size: {:.2f} M ops: {:.2f} M".format(
            model_utils.get_model_num_params(model),
            utils.get_model_num_ops(model, "imagenet"),
        )
    )

    torch.save(model, os.path.join(os.path.dirname(args.resume), "peng.pth.tar"))
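
Because torch.save(model, ...) pickles the whole module rather than just a state_dict, the regrouped model written as peng.pth.tar can be reloaded in a single call, provided the classes that define it are importable. A minimal sketch (the directory is a placeholder for os.path.dirname(args.resume)):

import os
import torch

ckpt_dir = 'path/to/checkpoints'  # placeholder for os.path.dirname(args.resume)

# Unpickles the full nn.Module saved above; recent PyTorch releases may require
# weights_only=False to allow unpickling arbitrary objects.
model = torch.load(os.path.join(ckpt_dir, 'peng.pth.tar'), map_location='cpu')
model.eval()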
Code Example #6
def main():
  use_cuda = not args.cpu

  if args.pretrained:
    logging.info('==> Loading pre-trained model ...')
    model = utils.load_model(
        args.arch, args.dataset, pretrained=args.pretrained, use_cuda=use_cuda)
  else:
    logging.info('==> Loading GConv model directly from Pickle ...')
    model = torch.load(args.resume)
    if not use_cuda:
      model.cpu()

    model.eval()
    logging.info('==> Sparsify model ...')
    utils.apply_sparse(model)
    print(model)

    logging.debug('Total params: {:.2f}M FLOPS: {:.2f}M'.format(
        model_utils.get_model_num_params(model),
        utils.get_model_num_ops(model, args.dataset)))

  if args.dataset in utils.IMAGENET_DATASETS:
    x = torch.rand((args.test_batch, 3, 224, 224))
  else:
    x = torch.rand((args.test_batch, 3, 32, 32))

  if not use_cuda:
    x = x.cpu()

  # setup the input and model
  if use_cuda:
    x = x.cuda()
    model.cuda()

  logging.info('==> Dry running ...')
  dry_run_iters = 10 if not use_cuda else 100
  for _ in range(dry_run_iters):
    y = model.forward(x)

  logging.info('==> Print profiling info ...')
  with torch.autograd.profiler.profile(use_cuda=use_cuda) as prof:
    y = model.forward(x)
  print(prof)

  # start timing
  logging.info('==> Start timing ...')
  if use_cuda:
    start = torch.cuda.Event(enable_timing=True)
    end = torch.cuda.Event(enable_timing=True)

    start.record()
    for _ in range(args.iters):
      y = model.forward(x)
    end.record()

    # synchronize
    torch.cuda.synchronize()

    elapsed = start.elapsed_time(end)
  else:
    start = time.time()
    for _ in range(args.iters):
      y = model.forward(x)
    end = time.time()
    elapsed = (end - start) * 1e3

  print('Elapsed time: {:10.2f} sec (total) {:6.2f} ms (per run) {:6.2f} FPS.'.
        format(elapsed * 1e-3, elapsed / args.iters,
               args.iters * args.test_batch / elapsed * 1e3))
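
The GPU branch times with CUDA events instead of time.time() because kernel launches are asynchronous; Event.elapsed_time returns milliseconds, which is why the CPU branch scales its result by 1e3. A stripped-down sketch of the same timing pattern (the model and input are placeholders):

import torch
import torch.nn as nn

model = nn.Conv2d(3, 16, 3, padding=1).cuda().eval()  # placeholder model
x = torch.rand(8, 3, 224, 224, device='cuda')         # placeholder input
iters = 100

with torch.no_grad():
    # Warm up so one-off CUDA initialisation is not measured.
    for _ in range(10):
        model(x)

    start = torch.cuda.Event(enable_timing=True)
    end = torch.cuda.Event(enable_timing=True)
    start.record()
    for _ in range(iters):
        model(x)
    end.record()
    torch.cuda.synchronize()             # wait for all queued kernels to finish

elapsed_ms = start.elapsed_time(end)     # milliseconds between the two events
print('{:.2f} ms per run'.format(elapsed_ms / iters))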
Code Example #7
File: heuristic.py Project: liuguoyou/gconv-prune
def draw_model_stats(arch, grps, data_dir, num_iters=None):
    """ Draw the statistics of several models """
    if not num_iters:
        num_iters = [1]
    fp = os.path.join(
        data_dir, 'model_stats_{}_NI_{}_G_{}.pdf'.format(
            arch, '-'.join([str(ni) for ni in num_iters]),
            '-'.join([str(g) for g in grps])))

    print('Plot to file: {}'.format(fp))

    fig, ax = plt.subplots(figsize=(5, 4))

    print('Running on model {} ...'.format(arch))

    model = utils.load_model(arch, 'imagenet', pretrained=True)
    results = {'num_iters': [], 'num_groups': [], 'ratio': []}

    for ni in num_iters:

        for G in grps:
            print('G = {} NI = {}'.format(G, ni))

            mods = {}

            # Collect statistics for a single model
            for name, mod in model.named_modules():
                if not isinstance(mod, nn.Conv2d):
                    continue

                W = mod.weight
                F, C = W.shape[:2]

                if F % G != 0 or C % G != 0:
                    continue

                # Per-(filter, channel) kernel norms of W; note this reuses the
                # name C, which held the input-channel count above.
                C = W.norm(dim=(2, 3)).cpu().detach().numpy()
                gnd_in, gnd_out, cost = run_mbm(C,
                                                G,
                                                perm='GRPS',
                                                num_iters=ni)
                mods[name] = (cost, C.sum(), cost / C.sum() * 100)

                # print('{:30s}\t {:.2e}\t {:.2e}\t {:.2f}%'.format(
                #     name, mods[name][0], mods[name][1], mods[name][2]))

            # Summarise results
            sum_cost = sum([val[0] for val in mods.values()])
            total_cost = sum([val[1] for val in mods.values()])

            results['num_iters'].append('$N_S={}$'.format(ni))
            results['num_groups'].append('$G={}$'.format(G))
            results['ratio'].append(sum_cost / total_cost * 100)

    df = pd.DataFrame(results)
    sns.barplot(x='num_groups', y='ratio', hue='num_iters', data=df)

    ax.legend()
    plt.tight_layout()
    fig.savefig(fp)

    df.to_csv(fp.replace('.pdf', '.csv'))
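
For each convolution whose filter and channel counts are divisible by G, the loop above reduces the 4-D weight to an F x C matrix of per-kernel norms and hands it to run_mbm; the plotted ratio is the summed run_mbm cost relative to the total norm mass. A minimal sketch of that per-layer statistic in isolation (the layer and G are placeholders; run_mbm is project-specific and not reproduced here):

import torch.nn as nn

G = 4                           # number of groups (placeholder)
conv = nn.Conv2d(64, 128, 3)    # placeholder convolution

W = conv.weight                 # shape (F, C, kH, kW) = (128, 64, 3, 3)
F, C = W.shape[:2]

if F % G == 0 and C % G == 0:
    # One norm per (output filter, input channel) kernel slice: an F x C matrix.
    norms = W.norm(dim=(2, 3)).detach().cpu().numpy()
    print(norms.shape)          # (128, 64)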