Example #1
def main():
    global device
    # Parse CLI arguments into a dict and resolve the target device.
    parser = config.prepare_parser()
    param = vars(parser.parse_args())
    device = torch.device(param['device'])
    # Derive a run name from the configuration and start the run.
    name = config.name_from_config(param)
    print(param, name)
    run(param, name)
Example #2
def main():
    global device, parallel, stage
    parser = config.prepare_parser()
    param = vars(parser.parse_args())
    device = torch.device(param['device'])
    # Expose the parallel flag and training stage as module-level globals.
    parallel = param['parallel']
    stage = param['stage']
    name = config.name_from_config(param)
    print(param, name)
    run(param, name)
Example #3
def main():
    global device, blocks
    parser = config.prepare_parser()
    # Parse once and keep both the Namespace and its dict view.
    args = parser.parse_args()
    param = vars(args)
    device = torch.device(param['device'])
    blocks = param['blocks'][param['arch']]
    name = config.name_from_config(param)
    print(param, name)
    # Total world size = processes per node * number of nodes.
    ngpus_per_node = torch.cuda.device_count()
    args.world_size = ngpus_per_node * args.world_size
    if param['parallel']:
        # mp is torch.multiprocessing; spawn one worker process per GPU.
        mp.spawn(run,
                 nprocs=ngpus_per_node,
                 args=(ngpus_per_node, param, name, args, blocks, device))
    else:
        run(args.gpu, ngpus_per_node, param, name, args, blocks, device)
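# Note: in the parallel branch above, mp.spawn passes the process index as the
# first positional argument, so each worker receives the same argument list as
# the direct call in the else branch. The sketch below shows one compatible
# run() signature; it is an illustrative assumption (args.rank and args.dist_url
# are hypothetical fields), not the project's actual implementation.
import torch
import torch.distributed as dist

def run(gpu, ngpus_per_node, param, name, args, blocks, device):
    if param['parallel']:
        # Global rank = node index * GPUs per node + local GPU index
        # (assumes args.rank holds the node index).
        rank = args.rank * ngpus_per_node + gpu
        dist.init_process_group(backend='nccl',
                                init_method=args.dist_url,
                                world_size=args.world_size,
                                rank=rank)
        torch.cuda.set_device(gpu)
    # ... build the model, wrap it with DistributedDataParallel, then train/evaluate ...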
"""
test model after pruning
"""
from os import path
import pickle
import time
import numpy as np
import torch

import config
import models
from utils import *

parser = config.prepare_parser()
param = vars(parser.parse_args())
device = torch.device(param['device'])
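# The test() function below uses AverageMeter, pulled in by `from utils import *`.
# The project's own implementation is not shown here; the sketch below is the
# conventional version (as in the PyTorch ImageNet example) and is an assumption,
# not the utils module's actual code.
class AverageMeter:
    """Tracks the latest value, running sum, count, and average of a metric."""

    def __init__(self):
        self.reset()

    def reset(self):
        self.val = 0.0
        self.sum = 0.0
        self.count = 0
        self.avg = 0.0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count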


def test(data_loader, model, criterion):
    # Meters for timing, loss, and top-1/top-5 accuracy.
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    # Switch to evaluation mode (disables dropout, freezes BatchNorm statistics).
    model.eval()
    start_time = time.time()
    end = time.time()
    for i, (input, target) in enumerate(data_loader):
        input = input.to(device)
        target = target.to(device)

        # Forward pass and loss for this batch.
        output = model(input)
        loss = criterion(output, target)