# Example #1
import pandas as pd
import numpy as np
import torch
import torch.sparse
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.parallel
import torch.backends.cudnn as cudnn

from gumi import model_utils
from gumi.ops import *
from gumi.model_runner.model_runner import ModelRunner
from gumi.model_runner.parser import create_cli_parser

import os  # fix: os.environ is used below but `os` was never imported

# CLI arguments for the GConv profiling tool.
parser = create_cli_parser(prog="CLI tool for profiling GConv models.")
parser.add_argument("--iters",
                    type=int,
                    default=10000,
                    help="Number of profiling iterations")
parser.add_argument("--use-cuda",
                    action="store_true",
                    default=False,
                    help="Whether to use GPU")
args = parser.parse_args()

# CUDA setup: restrict visible devices to the CLI-selected GPU
# (presumably `--gpu-id` is declared by create_cli_parser — confirm),
# and let cuDNN auto-tune conv algorithms for fixed input sizes.
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_id
cudnn.benchmark = True

# Example #2
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data as data
import torchvision.transforms as transforms
import torchvision.datasets as datasets

from gumi import model_utils
from gumi.ops import *
from gumi.pruning.export import GroupExporter
from gumi.model_runner import utils
from gumi.model_runner.model_pruner import ModelPruner
from gumi.model_runner.parser import create_cli_parser

# CLI parser
parser = create_cli_parser(prog='CLI tool for pruning')
parser.add_argument('--apply-mask',
                    action='store_true',
                    default=False,
                    help='Whether to apply mask when loading model')
parser.add_argument('--skip-prune',
                    action='store_true',
                    default=False,
                    help='Whether to perform the fine-tuning step only.')
parser.add_argument('--skip-fine-tune',
                    action='store_true',
                    default=False,
                    help='Whether to skip file tune.')
parser.add_argument('--skip-validation',
                    action='store_true',
                    default=False,
# Example #3
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.parallel

from gumi import model_utils
from gumi.model_runner import utils
from gumi.model_runner.model_runner import ModelRunner
from gumi.model_runner.model_pruner import ModelPruner
from gumi.model_runner.parser import create_cli_parser
from gumi.ops import MaskConv2d
from gumi.pruning import mask_utils

import logging  # fix: `logging` is used below but was never imported

# Emit everything down to DEBUG on the root logger.
logging.getLogger().setLevel(logging.DEBUG)

# CLI arguments for debugging the first pruning step.
parser = create_cli_parser(prog="Debug the choice of the first pruning step.")
parser.add_argument(
    "--excludes-for-applying-mask",
    nargs="+",
    help="Excluded module names for applying mask",
)
parser.add_argument("--manual-seed",
                    default=None,
                    type=int,
                    help="Manual seed for reproducibility.")
parser.add_argument(
    "--min-factor",
    type=float,
    default=0.0,
    help="Minimum channels that should appear in each group.",
)
# Example #4
import torch.utils.data as data
import torchvision.transforms as transforms
import torchvision.datasets as datasets

# image
from PIL import Image
from skimage import io, transform

from gumi import model_utils
from gumi.ops import *
from gumi.pruning.export import GroupExporter
from gumi.model_runner import utils
from gumi.model_runner.model_runner import ModelRunner
from gumi.model_runner.parser import create_cli_parser

# fix: `os`, `torch` and `cudnn` are all referenced below but none are
# bound by this snippet's imports (`import torch.utils.data as data`
# binds only `data`, not `torch`).
import os
import torch
import torch.backends.cudnn as cudnn

# CLI arguments for single-image inference with a pruned model.
parser = create_cli_parser(prog="CLI tool for pruning")
parser.add_argument("--image",
                    type=str,
                    metavar="PATH",
                    help="Path to an image file.")
args = parser.parse_args()

# CUDA setup: pin the visible GPU (presumably `--gpu-id` comes from
# create_cli_parser — confirm) and enable cuDNN autotuning.
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_id
use_cuda = torch.cuda.is_available()
cudnn.benchmark = True


class ModelInferRunner(ModelRunner):
    """Runner for single-image inference; overrides CLI-argument validation."""

    def validate_args(self, args):
        """No-op override: accept any parsed CLI arguments without checks."""
        pass