def get_loader(self):
    # One cached pickle per phase listed under [eval] phase (e.g. "test").
    paths = [os.path.join(self.cache_dir, phase + '.pkl')
             for phase in self.config.get('eval', 'phase').split()]
    dataset = utils.data.Dataset(utils.data.load_pickles(paths))
    logging.info('num_examples=%d' % len(dataset))
    size = tuple(map(int, self.config.get('image', 'size').split()))
    try:
        workers = self.config.getint('data', 'workers')
    except configparser.NoOptionError:
        # No explicit worker count configured; use one per CPU core.
        workers = multiprocessing.cpu_count()
    collate_fn = utils.data.Collate(
        transform.parse_transform(self.config, self.config.get('transform', 'resize_eval')),
        [size],
        transform_image=transform.get_transform(self.config, self.config.get('transform', 'image_test').split()),
        transform_tensor=transform.get_transform(self.config, self.config.get('transform', 'tensor').split()),
    )
    return torch.utils.data.DataLoader(dataset, batch_size=self.args.batch_size, num_workers=workers, collate_fn=collate_fn)
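# --- Standalone sketch: the worker-count fallback used in get_loader above.
# An empty [data] section stands in for a real config file; getint raises
# NoOptionError when the option is absent, and we degrade to cpu_count().
import configparser
import multiprocessing

config = configparser.ConfigParser()
config.read_dict({'data': {}})  # section exists, 'workers' is unset
try:
    workers = config.getint('data', 'workers')
except configparser.NoOptionError:
    workers = multiprocessing.cpu_count()
print(workers)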
import configparser
import hashlib
import logging
import logging.config
import os

import cv2
import yaml
from caffe2.proto import caffe2_pb2
from caffe2.python import workspace

import transform  # project-local module
import utils  # project-local module


def main():
    args = make_args()  # argument parsing, defined elsewhere in this script
    config = configparser.ConfigParser()
    utils.load_config(config, args.config)
    for cmd in args.modify:
        utils.modify_config(config, cmd)
    with open(os.path.expanduser(os.path.expandvars(args.logging)), 'r') as f:
        logging.config.dictConfig(yaml.safe_load(f))
    model_dir = utils.get_model_dir(config)
    height, width = tuple(map(int, config.get('image', 'size').split()))
    resize = transform.parse_transform(config, config.get('transform', 'resize_test'))
    transform_image = transform.get_transform(config, config.get('transform', 'image_test').split())
    transform_tensor = transform.get_transform(config, config.get('transform', 'tensor').split())
    # load image
    image_bgr = cv2.imread('image.jpg')
    image_resized = resize(image_bgr, height, width)
    image = transform_image(image_resized)
    tensor = transform_tensor(image).unsqueeze(0)
    # Caffe2: deserialize the exported parameter and predict nets, then
    # run the preprocessed tensor through a Predictor.
    init_net = caffe2_pb2.NetDef()
    with open(os.path.join(model_dir, 'init_net.pb'), 'rb') as f:
        init_net.ParseFromString(f.read())
    predict_net = caffe2_pb2.NetDef()
    with open(os.path.join(model_dir, 'predict_net.pb'), 'rb') as f:
        predict_net.ParseFromString(f.read())
    p = workspace.Predictor(init_net, predict_net)
    results = p.run([tensor.numpy()])
    # Fingerprint the first output for cross-framework comparison.
    logging.info(utils.abs_mean(results[0]))
    logging.info(hashlib.md5(results[0].tobytes()).hexdigest())
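# --- Standalone sketch: the two .pb loads above are plain protobuf
# deserialization. A minimal round-trip (runnable wherever caffe2 is
# installed) with SerializeToString/ParseFromString:
from caffe2.proto import caffe2_pb2

net = caffe2_pb2.NetDef()
net.name = 'example'
blob = net.SerializeToString()   # what an exporter writes into *.pb
restored = caffe2_pb2.NetDef()
restored.ParseFromString(blob)   # what the loader above reads back
assert restored.name == 'example'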
def __init__(self, args, config):
    self.args = args
    self.config = config
    self.cache_dir = utils.get_cache_dir(config)
    self.model_dir = utils.get_model_dir(config)
    self.category = utils.get_category(config, self.cache_dir if os.path.exists(self.cache_dir) else None)
    self.draw_bbox = utils.visualize.DrawBBox(config, self.category)
    self.anchors = torch.from_numpy(utils.get_anchors(config)).contiguous()
    self.height, self.width = tuple(map(int, config.get('image', 'size').split()))
    self.path, self.step, self.epoch = utils.train.load_model(self.model_dir)
    state_dict = torch.load(self.path, map_location=lambda storage, loc: storage)
    self.dnn = utils.parse_attr(config.get('model', 'dnn'))(model.ConfigChannels(config, state_dict), self.anchors, len(self.category))
    self.dnn.load_state_dict(state_dict)
    self.inference = model.Inference(config, self.dnn, self.anchors)
    self.inference.eval()
    if torch.cuda.is_available():
        self.inference.cuda()
    logging.info(humanize.naturalsize(sum(var.cpu().numpy().nbytes for var in self.inference.state_dict().values())))
    self.create_cap()
    self.create_cap_size()
    self.create_writer()
    self.keys = set(args.keys)
    self.resize = transform.parse_transform(config, config.get('transform', 'resize_test'))
    self.transform_image = transform.get_transform(config, config.get('transform', 'image_test').split())
    self.transform_tensor = transform.get_transform(config, config.get('transform', 'tensor').split())
def __init__(self, args, config):
    self.args = args
    self.config = config
    self.cache_dir = utils.get_cache_dir(config)
    self.model_dir = utils.get_model_dir(config)
    self.category = utils.get_category(config, self.cache_dir if os.path.exists(self.cache_dir) else None)
    self.draw_bbox = utils.visualize.DrawBBox(self.category, colors=args.colors, thickness=args.thickness)
    self.anchors = torch.from_numpy(utils.get_anchors(config)).contiguous()
    self.height, self.width = tuple(map(int, config.get('image', 'size').split()))
    self.path, self.step, self.epoch = utils.train.load_model(self.model_dir)
    state_dict = torch.load(self.path, map_location=lambda storage, loc: storage)
    self.dnn = utils.parse_attr(config.get('model', 'dnn'))(model.ConfigChannels(config, state_dict), self.anchors, len(self.category))
    self.dnn.load_state_dict(state_dict)
    self.inference = model.Inference(config, self.dnn, self.anchors)
    self.inference.eval()
    if torch.cuda.is_available():
        self.inference.cuda()
    logging.info(humanize.naturalsize(sum(var.cpu().numpy().nbytes for var in self.inference.state_dict().values())))
    self.create_cap()
    self.create_cap_size()
    self.writer = self.create_writer()
    self.keys = set(args.keys)
    self.resize = transform.parse_transform(config, config.get('transform', 'resize_test'))
    self.transform_image = transform.get_transform(config, config.get('transform', 'image_test').split())
    self.transform_tensor = transform.get_transform(config, config.get('transform', 'tensor').split())
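# --- Standalone sketch: the humanize.naturalsize(...) log line in both
# constructors totals the byte size of every tensor in a state dict.
# Toy state dict here; the real code substitutes inference.state_dict().
import humanize
import torch

state_dict = {'weight': torch.zeros(3, 3), 'bias': torch.zeros(3)}
nbytes = sum(var.cpu().numpy().nbytes for var in state_dict.values())
print(humanize.naturalsize(nbytes))  # 48 Bytes for these float32 tensors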
def __init__(self, args, config):
    self.args = args
    self.config = config
    self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    self.cache_dir = utils.get_cache_dir(config)
    self.model_dir = utils.get_model_dir(config)
    _, self.num_parts = utils.get_dataset_mappers(config)
    self.limbs_index = utils.get_limbs_index(config)
    if args.debug is None:
        self.draw_cluster = utils.visualize.DrawCluster(colors=args.colors, thickness=args.thickness)
    else:
        self.draw_feature = utils.visualize.DrawFeature()
        # Parse a debug spec of the form <stage><name><channel>,
        # e.g. "0parts5": stage index, feature name, channel index.
        s = re.search('(-?[0-9]+)([a-z]+)(-?[0-9]+)', args.debug)
        stage = int(s.group(1))
        name = s.group(2)
        channel = int(s.group(3))
        self.get_feature = lambda outputs: outputs[stage][name][0][channel]
    self.height, self.width = tuple(map(int, config.get('image', 'size').split()))
    if args.caffe:
        init_net = caffe2_pb2.NetDef()
        with open(os.path.join(self.model_dir, 'init_net.pb'), 'rb') as f:
            init_net.ParseFromString(f.read())
        predict_net = caffe2_pb2.NetDef()
        with open(os.path.join(self.model_dir, 'predict_net.pb'), 'rb') as f:
            predict_net.ParseFromString(f.read())
        p = workspace.Predictor(init_net, predict_net)
        # The predictor returns a flat list of alternating parts/limbs
        # arrays; pair them up stage by stage.
        self.inference = lambda tensor: [
            {'parts': torch.from_numpy(parts), 'limbs': torch.from_numpy(limbs)}
            for parts, limbs in zip(*[iter(p.run([tensor.detach().cpu().numpy()]))] * 2)
        ]
    else:
        self.step, self.epoch, self.dnn, self.stages = self.load()
        self.inference = model.Inference(config, self.dnn, self.stages)
        self.inference.eval()
        if torch.cuda.is_available():
            self.inference.cuda()
        # Only the PyTorch branch has a state dict to size up; the Caffe2
        # branch's inference is a plain lambda.
        logging.info(humanize.naturalsize(sum(var.cpu().numpy().nbytes for var in self.inference.state_dict().values())))
    self.cap = self.create_cap()
    self.keys = set(args.keys)
    self.resize = transform.parse_transform(config, config.get('transform', 'resize_test'))
    self.transform_image = transform.get_transform(config, config.get('transform', 'image_test').split())
    self.transform_tensor = transform.get_transform(config, config.get('transform', 'tensor').split())
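# --- Standalone sketch: the Caffe2 branch pairs the predictor's flat
# output list with zip(*[iter(...)] * 2). Placeholder strings stand in
# for the real parts/limbs arrays:
outputs = ['parts0', 'limbs0', 'parts1', 'limbs1']
it = iter(outputs)
pairs = list(zip(it, it))  # same trick: both zip slots share one iterator
assert pairs == [('parts0', 'limbs0'), ('parts1', 'limbs1')]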
import configparser
import hashlib
import logging
import logging.config
import os

import cv2
import torch
import yaml

import model  # project-local module
import transform  # project-local module
import utils  # project-local module
import utils.train


def main():
    args = make_args()  # argument parsing, defined elsewhere in this script
    config = configparser.ConfigParser()
    utils.load_config(config, args.config)
    for cmd in args.modify:
        utils.modify_config(config, cmd)
    with open(os.path.expanduser(os.path.expandvars(args.logging)), 'r') as f:
        logging.config.dictConfig(yaml.safe_load(f))
    cache_dir = utils.get_cache_dir(config)
    model_dir = utils.get_model_dir(config)
    category = utils.get_category(config, cache_dir if os.path.exists(cache_dir) else None)
    anchors = utils.get_anchors(config)
    anchors = torch.from_numpy(anchors).contiguous()
    path, step, epoch = utils.train.load_model(model_dir)
    state_dict = torch.load(path, map_location=lambda storage, loc: storage)
    dnn = utils.parse_attr(config.get('model', 'dnn'))(model.ConfigChannels(config, state_dict), anchors, len(category))
    dnn.load_state_dict(state_dict)
    height, width = tuple(map(int, config.get('image', 'size').split()))
    resize = transform.parse_transform(config, config.get('transform', 'resize_test'))
    transform_image = transform.get_transform(config, config.get('transform', 'image_test').split())
    transform_tensor = transform.get_transform(config, config.get('transform', 'tensor').split())
    # load image
    image_bgr = cv2.imread('image.jpg')
    image_resized = resize(image_bgr, height, width)
    image = transform_image(image_resized)
    tensor = transform_tensor(image).unsqueeze(0)
    # Checksum every weight tensor, then the intermediate arrays, so two
    # environments can be diffed layer by layer.
    for key, var in dnn.state_dict().items():
        a = var.cpu().numpy()
        print('\t'.join(map(str, [key, a.shape, utils.abs_mean(a), hashlib.md5(a.tobytes()).hexdigest()])))
    with torch.no_grad():  # inference only; replaces the deprecated volatile=True
        output = dnn(tensor)
    for key, a in [
        ('image_bgr', image_bgr),
        ('image_resized', image_resized),
        ('tensor', tensor.cpu().numpy()),
        ('output', output.cpu().numpy()),
    ]:
        print('\t'.join(map(str, [key, a.shape, utils.abs_mean(a), hashlib.md5(a.tobytes()).hexdigest()])))
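# --- Standalone sketch: the fingerprinting pattern above, lifted into a
# helper for diffing arrays across frameworks. Assumption: utils.abs_mean
# computes the mean absolute value (its definition is not shown here).
import hashlib

import numpy as np

def fingerprint(name, a):
    # shape + abs-mean + md5 is enough to tell two arrays apart at a glance
    print('\t'.join(map(str, [name, a.shape, np.abs(a).mean(),
                              hashlib.md5(a.tobytes()).hexdigest()])))

fingerprint('example', np.ones((2, 3), dtype=np.float32))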
def get_loader(self):
    paths = [os.path.join(self.cache_dir, phase + '.pkl')
             for phase in self.config.get('eval', 'phase').split()]
    dataset = utils.data.Dataset(utils.data.load_pickles(paths))
    logging.info('num_examples=%d' % len(dataset))
    size = tuple(map(int, self.config.get('image', 'size').split()))
    try:
        workers = self.config.getint('data', 'workers')
    except configparser.NoOptionError:
        workers = multiprocessing.cpu_count()
    collate_fn = utils.data.Collate(
        [size],
        resize=transform.parse_transform(self.config, self.config.get('transform', 'resize_eval')),
        transform_image=transform.get_transform(self.config, self.config.get('transform', 'image_test').split()),
        transform_tensor=transform.get_transform(self.config, self.config.get('transform', 'tensor').split()),
    )
    return torch.utils.data.DataLoader(dataset, batch_size=self.args.batch_size, num_workers=workers, collate_fn=collate_fn)
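# --- Standalone sketch: utils.data.Collate plays the role of DataLoader's
# collate_fn. A toy stand-in that just stacks tensors shows the contract:
# collate_fn receives a list of samples and returns one batch.
import torch
import torch.utils.data

dataset = [torch.full((2,), float(i)) for i in range(4)]
loader = torch.utils.data.DataLoader(dataset, batch_size=2, collate_fn=torch.stack)
for batch in loader:
    print(batch.shape)  # torch.Size([2, 2])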