def __init__(self, args, config):
    """Load a trained detector and set up capture/visualization state.

    args: parsed command-line namespace (uses .colors, .thickness, .keys).
    config: ConfigParser with model/image/transform sections.
    """
    self.args = args
    self.config = config
    self.cache_dir = utils.get_cache_dir(config)
    self.model_dir = utils.get_model_dir(config)
    # Category names come from the cache only when the cache directory exists.
    self.category = utils.get_category(config, self.cache_dir if os.path.exists(self.cache_dir) else None)
    self.draw_bbox = utils.visualize.DrawBBox(self.category, colors=args.colors, thickness=args.thickness)
    self.anchors = torch.from_numpy(utils.get_anchors(config)).contiguous()
    self.height, self.width = tuple(map(int, config.get('image', 'size').split()))
    self.path, self.step, self.epoch = utils.train.load_model(self.model_dir)
    # map_location keeps the checkpoint on CPU regardless of where it was saved
    state_dict = torch.load(self.path, map_location=lambda storage, loc: storage)
    self.dnn = utils.parse_attr(config.get('model', 'dnn'))(model.ConfigChannels(config, state_dict), self.anchors, len(self.category))
    self.dnn.load_state_dict(state_dict)
    self.inference = model.Inference(config, self.dnn, self.anchors)
    self.inference.eval()
    if torch.cuda.is_available():
        self.inference.cuda()
    # log the total parameter size of the restored model
    logging.info(humanize.naturalsize(sum(var.cpu().numpy().nbytes for var in self.inference.state_dict().values())))
    self.create_cap()
    self.create_cap_size()
    self.writer = self.create_writer()
    self.keys = set(args.keys)
    self.resize = transform.parse_transform(config, config.get('transform', 'resize_test'))
    self.transform_image = transform.get_transform(config, config.get('transform', 'image_test').split())
    self.transform_tensor = transform.get_transform(config, config.get('transform', 'tensor').split())
def main():
    """Export the trained detection DNN to ONNX.

    Reads the config, restores the latest checkpoint and writes
    <model_dir>.onnx using a random image batch as the example input.
    """
    args = make_args()
    config = configparser.ConfigParser()
    utils.load_config(config, args.config)
    for cmd in args.modify:
        utils.modify_config(config, cmd)
    with open(os.path.expanduser(os.path.expandvars(args.logging)), 'r') as f:
        # FIX: safe_load — yaml.load without an explicit Loader is unsafe and
        # no longer accepted by PyYAML >= 6; the logging config is plain data.
        logging.config.dictConfig(yaml.safe_load(f))
    height, width = tuple(map(int, config.get('image', 'size').split()))
    cache_dir = utils.get_cache_dir(config)
    model_dir = utils.get_model_dir(config)
    category = utils.get_category(config, cache_dir if os.path.exists(cache_dir) else None)
    anchors = utils.get_anchors(config)
    anchors = torch.from_numpy(anchors).contiguous()
    dnn = utils.parse_attr(config.get('model', 'dnn'))(config, anchors, len(category))
    inference = model.Inference(config, dnn, anchors)
    inference.eval()
    logging.info(humanize.naturalsize(sum(var.cpu().numpy().nbytes for var in inference.state_dict().values())))
    path, step, epoch = utils.train.load_model(model_dir)
    # load on CPU; ONNX export does not need the GPU
    checkpoint = torch.load(path, map_location=lambda storage, loc: storage)
    dnn.load_state_dict(checkpoint['dnn'])
    image = torch.autograd.Variable(torch.randn(args.batch_size, 3, height, width))
    path = model_dir + '.onnx'
    logging.info('save ' + path)
    torch.onnx.export(dnn, image, path, export_params=True, verbose=args.verbose)  # PyTorch's bug
def __init__(self, args, config):
    """Set up an evaluation run: data loader, restored model and mappers.

    args: parsed command-line namespace.
    config: ConfigParser describing model, cache and eval settings.
    """
    self.args = args
    self.config = config
    self.model_dir = utils.get_model_dir(config)
    self.cache_dir = utils.get_cache_dir(config)
    self.category = utils.get_category(config, self.cache_dir)
    self.draw_bbox = utils.visualize.DrawBBox(config, self.category)
    self.loader = self.get_loader()
    self.anchors = torch.from_numpy(utils.get_anchors(config)).contiguous()
    dnn = utils.parse_attr(config.get('model', 'dnn'))(config, self.anchors, len(self.category))
    path, self.step, self.epoch = utils.train.load_model(self.model_dir)
    # load on CPU first; moved to GPU below only if available
    checkpoint = torch.load(path, map_location=lambda storage, loc: storage)
    dnn.load_state_dict(checkpoint['dnn'])
    logging.info(humanize.naturalsize(sum(var.cpu().numpy().nbytes for var in dnn.state_dict().values())))
    self.inference = model.Inference(config, dnn, self.anchors)
    self.inference.eval()
    if torch.cuda.is_available():
        self.inference.cuda()
    # the .ini written at training time, when present, records the exact
    # configuration the checkpoint was trained with
    path = self.model_dir + '.ini'
    if os.path.exists(path):
        self._config = configparser.ConfigParser()
        self._config.read(path)
    else:
        logging.warning('training config (%s) not found' % path)
    self.now = datetime.datetime.now()
    self.mapper = utils.load_functions(self.config.get('eval', 'mapper'))
def run(self):
    """Summary worker loop: write the model graph once, then drain the queue.

    Queue items are (name, kwargs) pairs dispatched to self.summary_<name>;
    an item with name None stops the loop.
    """
    self.writer = SummaryWriter(os.path.join(self.env.model_dir, self.env.args.run))
    try:
        height, width = tuple(map(int, self.config.get('image', 'size').split()))
        tensor = torch.randn(1, 3, height, width)
        step, epoch, dnn, stages = self.env.load()
        inference = model.Inference(self.config, dnn, stages)
        forward = inference.forward
        # add_graph cannot trace dict outputs; expose the last stage's values
        # as a flat list instead
        inference.forward = lambda self, *x: list(forward(self, *x)[-1].values())
        self.writer.add_graph(inference, (torch.autograd.Variable(tensor),))
    except Exception:
        # FIX: was a bare "except:" which also swallowed
        # KeyboardInterrupt/SystemExit; graph export stays best-effort
        traceback.print_exc()
    while True:
        name, kwargs = self.queue.get()
        if name is None:
            break
        func = getattr(self, 'summary_' + name)
        try:
            func(**kwargs)
        except Exception:
            # FIX: narrowed from a bare "except:" — a failed summary is
            # logged, but Ctrl-C is no longer silently ignored
            traceback.print_exc()
def __init__(self, args, config):
    """Prepare a probe of the DNN's centre output cell.

    Runs a zero image through the backbone to discover the output grid size,
    records the centre cell's activations, and builds a data loader.
    (Pattern suggests receptive-field estimation — TODO confirm with caller.)
    """
    self.args = args
    self.config = config
    self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    self.model_dir = utils.get_model_dir(config)
    _, self.num_parts = utils.get_dataset_mappers(config)
    self.limbs_index = utils.get_limbs_index(config)
    self.step, self.epoch, self.dnn, self.stages = self.load()
    self.inference = model.Inference(self.config, self.dnn, self.stages)
    self.inference.eval()
    logging.info(humanize.naturalsize(sum(var.cpu().numpy().nbytes for var in self.dnn.state_dict().values())))
    if torch.cuda.is_available():
        self.inference.cuda()
    self.height, self.width = tuple(map(int, config.get('image', 'size').split()))
    # probe with an all-zero image to find the output spatial dimensions
    t = torch.zeros(1, 3, self.height, self.width).to(self.device)
    output = self.dnn(t)
    _, _, self.rows, self.cols = output.size()
    # remember the spatially central cell of the feature map
    self.i, self.j = self.rows // 2, self.cols // 2
    self.output = output[:, :, self.i, self.j]
    dataset = Dataset(self.height, self.width)
    try:
        workers = self.config.getint('data', 'workers')
    except configparser.NoOptionError:
        # default: one loader worker per CPU core
        workers = multiprocessing.cpu_count()
    self.loader = torch.utils.data.DataLoader(dataset, batch_size=self.args.batch_size, num_workers=workers)
def main():
    """Export the pose-estimation network (backbone DNN + stages) to ONNX."""
    args = make_args()
    config = configparser.ConfigParser()
    utils.load_config(config, args.config)
    for cmd in args.modify:
        utils.modify_config(config, cmd)
    with open(os.path.expanduser(os.path.expandvars(args.logging)), 'r') as f:
        # FIX: safe_load — yaml.load without an explicit Loader is unsafe and
        # no longer accepted by PyYAML >= 6
        logging.config.dictConfig(yaml.safe_load(f))
    height, width = tuple(map(int, config.get('image', 'size').split()))
    model_dir = utils.get_model_dir(config)
    _, num_parts = utils.get_dataset_mappers(config)
    limbs_index = utils.get_limbs_index(config)
    path, step, epoch = utils.train.load_model(model_dir)
    state_dict = torch.load(path, map_location=lambda storage, loc: storage)
    # ConfigChannels infers layer channel counts from the saved weights
    config_channels_dnn = model.ConfigChannels(config, state_dict['dnn'])
    dnn = utils.parse_attr(config.get('model', 'dnn'))(config_channels_dnn)
    config_channels_stages = model.ConfigChannels(config, state_dict['stages'], config_channels_dnn.channels)
    channel_dict = model.channel_dict(num_parts, len(limbs_index))
    stages = nn.Sequential(*[utils.parse_attr(s)(config_channels_stages, channel_dict, config_channels_dnn.channels, str(i)) for i, s in enumerate(config.get('model', 'stages').split())])
    dnn.load_state_dict(config_channels_dnn.state_dict)
    stages.load_state_dict(config_channels_stages.state_dict)
    inference = model.Inference(config, dnn, stages)
    inference.eval()
    logging.info(humanize.naturalsize(sum(var.cpu().numpy().nbytes for var in inference.state_dict().values())))
    image = torch.autograd.Variable(torch.randn(args.batch_size, 3, height, width), volatile=True)
    path = model_dir + '.onnx'
    logging.info('save ' + path)
    forward = inference.forward
    # ONNX export cannot handle dict outputs; flatten each stage's output
    # into [parts, limbs] lists
    inference.forward = lambda self, *x: [[output[name] for name in 'parts, limbs'.split(', ')] for output in forward(self, *x)]
    torch.onnx.export(inference, image, path, export_params=True, verbose=args.verbose)
def main():
    """Run semantic segmentation over 2.mp4 and write 2_seg.mp4."""
    config = configparser.ConfigParser()
    config.read('config.ini')
    model_dir = utils.get_model_dir(config)
    dnn = utils.parse_attr(config.get('model', 'dnn'))()
    inference = model.Inference(dnn)
    inference.eval()
    inference.cuda()
    state_dict = torch.load(os.path.join(model_dir, 'latest.pth'))
    dnn.load_state_dict(state_dict)
    draw_masks = utils.visualize.DrawMasks()
    height, width = tuple(map(int, config.get('image', 'size').split()))
    trans = torchvision.transforms.Compose([
        torchvision.transforms.ToTensor(),
        torchvision.transforms.Normalize((0.5, 0.5, 0.5), (1, 1, 1)),
    ])
    cap = cv2.VideoCapture('2.mp4')
    env = {}
    try:
        while cap.isOpened():
            ret, image_bgr = cap.read()
            if not ret:
                break
            _height, _width = image_bgr.shape[:2]
            # scale so the frame's shorter side matches the network's shorter side
            scale = min(height, width) / min(_height, _width)
            image_resized = cv2.resize(image_bgr, (int(_width * scale), int(_height * scale)))
            tensor = trans(image_resized)
            tensor = tensor.unsqueeze(0).cuda()
            print(tensor.size())
            feature = inference(torch.autograd.Variable(tensor, volatile=True)).contiguous()
            print(feature.size())
            # resize each feature channel back to the original frame size
            feature = torch.autograd.Variable(torch.from_numpy(np.array([[cv2.resize(f.data.cpu().numpy(), (_width, _height)) for f in b] for b in feature])), volatile=True)
            prob, pred = torch.max(F.softmax(feature, -1), -1)
            print(pred.size())
            image_result = draw_masks(image_bgr, pred[0].data.numpy())
            cv2.waitKey(1)
            # FIX: the writer used to be created *instead of* writing the first
            # frame, so the first frame was silently dropped; create lazily,
            # then write every frame including the first
            if 'writer' not in env:
                env['writer'] = cv2.VideoWriter('2_seg.mp4', int(cap.get(cv2.CAP_PROP_FOURCC)), int(cap.get(cv2.CAP_PROP_FPS)), (_width, _height))
            env['writer'].write(image_result)
    finally:
        cv2.destroyAllWindows()
        # FIX: guard release — the writer never exists if no frame was read,
        # so the unconditional release() raised KeyError here
        if 'writer' in env:
            env['writer'].release()
        cap.release()
def __call__(self):
    """Run the training loop under an exclusive model-directory file lock.

    Restores the model, iterates epochs/batches, forwards summary data to
    the background summary worker, saves/evaluates on timers, and dumps the
    last batch to data.pkl on an unexpected crash for post-mortem debugging.
    """
    with filelock.FileLock(os.path.join(self.model_dir, 'lock'), 0):
        try:
            loader = self.get_loader()
            logging.info('num_workers=%d' % loader.num_workers)
            dnn = utils.parse_attr(self.config.get('model', 'dnn'))(self.config, self.anchors, len(self.category))
            inference = model.Inference(self.config, dnn, self.anchors)
            logging.info(humanize.naturalsize(sum(var.cpu().numpy().nbytes for var in inference.state_dict().values())))
            step, epoch = self.restore(dnn)
            inference = ensure_model(inference)
            inference.train()
            optimizer = utils.train.get_optimizer(self.config, self.args.optimizer)(filter(lambda p: p.requires_grad, inference.parameters()), self.args.learning_rate)
            scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.1)
            # resume from the restored epoch when there is one
            for epoch in range(0 if epoch is None else epoch, self.args.epoch):
                scheduler.step(epoch)
                for data in loader if self.args.quiet else tqdm.tqdm(loader, desc='epoch=%d/%d' % (epoch, self.args.epoch)):
                    kwargs = self.step(inference, optimizer, data)
                    step += 1
                    kwargs = {**kwargs, **dict(
                        dnn=dnn, inference=inference, optimizer=optimizer,
                        step=step, epoch=epoch,
                    )}
                    self.summary_worker('scalar', **kwargs)
                    self.summary_worker('image', **kwargs)
                    self.summary_worker('histogram', **kwargs)
                    if self.timer_save():
                        self.save(**kwargs)
                    if self.timer_eval():
                        self.eval(**kwargs)
            logging.info('finished')
        except KeyboardInterrupt:
            logging.warning('interrupted')
            self.save(**kwargs)
        except:
            traceback.print_exc()
            # keep the offending batch for reproduction, then re-raise
            with open(os.path.join(self.model_dir, 'data.pkl'), 'wb') as f:
                pickle.dump(data, f)
            raise
        finally:
            self.stop()
def __init__(self, args, config):
    """Set up the pose-estimation demo: visualizers, inference backend
    (PyTorch or Caffe2, per args.caffe) and input transforms.
    """
    self.args = args
    self.config = config
    self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    self.cache_dir = utils.get_cache_dir(config)
    self.model_dir = utils.get_model_dir(config)
    _, self.num_parts = utils.get_dataset_mappers(config)
    self.limbs_index = utils.get_limbs_index(config)
    if args.debug is None:
        self.draw_cluster = utils.visualize.DrawCluster(colors=args.colors, thickness=args.thickness)
    else:
        # debug spec like "<stage><name><channel>" (e.g. "0parts3"):
        # selects a single feature channel to visualize
        self.draw_feature = utils.visualize.DrawFeature()
        s = re.search('(-?[0-9]+)([a-z]+)(-?[0-9]+)', args.debug)
        stage = int(s.group(1))
        name = s.group(2)
        channel = int(s.group(3))
        self.get_feature = lambda outputs: outputs[stage][name][0][channel]
    self.height, self.width = tuple(map(int, config.get('image', 'size').split()))
    if args.caffe:
        # Caffe2 backend: load serialized init/predict nets and wrap the
        # predictor so it yields the same [{'parts', 'limbs'}] structure as
        # the PyTorch path
        init_net = caffe2_pb2.NetDef()
        with open(os.path.join(self.model_dir, 'init_net.pb'), 'rb') as f:
            init_net.ParseFromString(f.read())
        predict_net = caffe2_pb2.NetDef()
        with open(os.path.join(self.model_dir, 'predict_net.pb'), 'rb') as f:
            predict_net.ParseFromString(f.read())
        p = workspace.Predictor(init_net, predict_net)
        # outputs come back as a flat sequence; pair them up as (parts, limbs)
        self.inference = lambda tensor: [{'parts': torch.from_numpy(parts), 'limbs': torch.from_numpy(limbs)} for parts, limbs in zip(*[iter(p.run([tensor.detach().cpu().numpy()]))] * 2)]
    else:
        self.step, self.epoch, self.dnn, self.stages = self.load()
        self.inference = model.Inference(config, self.dnn, self.stages)
        self.inference.eval()
        if torch.cuda.is_available():
            self.inference.cuda()
        logging.info(humanize.naturalsize(sum(var.cpu().numpy().nbytes for var in self.inference.state_dict().values())))
    self.cap = self.create_cap()
    self.keys = set(args.keys)
    self.resize = transform.parse_transform(config, config.get('transform', 'resize_test'))
    self.transform_image = transform.get_transform(config, config.get('transform', 'image_test').split())
    self.transform_tensor = transform.get_transform(config, config.get('transform', 'tensor').split())
def main():
    """Train the segmentation network, resuming from latest.pth if present,
    and checkpoint (numbered .pth plus a latest.pth copy) as training runs.
    """
    args = make_args()
    config = configparser.ConfigParser()
    config.read('config.ini')
    model_dir = utils.get_model_dir(config)
    cache_dir = utils.get_cache_dir(config)
    os.makedirs(model_dir, exist_ok=True)
    dnn = utils.parse_attr(config.get('model', 'dnn'))()
    inference = model.Inference(dnn)
    inference.train()
    inference.cuda()
    try:
        # best-effort resume; a missing/corrupt checkpoint means a fresh start
        state_dict = torch.load(os.path.join(model_dir, 'latest.pth'))
        dnn.load_state_dict(state_dict)
        print('latest model loaded')
    except:
        pass
    optimizer = torch.optim.Adam(inference.parameters(), args.learning_rate)
    paths = [os.path.join(cache_dir, phase + '.pkl') for phase in ['train']]
    dataset = utils.data.Dataset(utils.data.load_pickles(paths))
    try:
        workers = config.getint('data', 'workers')
    except configparser.NoOptionError:
        # default: one loader worker per CPU core
        workers = multiprocessing.cpu_count()
    size = tuple(map(int, config.get('data', 'size').split()))
    _size = tuple(map(int, config.get('data', '_size').split()))
    loader = torch.utils.data.DataLoader(dataset, batch_size=args.batch_size, num_workers=workers, collate_fn=utils.data.Collate(size, _size))
    writer = SummaryWriter(os.path.join(model_dir))
    step = 0
    for epoch in range(args.epoch):
        for data in loader:
            data['tensor'] = data['tensor'].cuda()
            data['masks'] = data['masks'].cuda()
            tensor = torch.autograd.Variable(data['tensor'])
            feature = inference(tensor).contiguous()
            loss = model.loss(data, feature)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            # log the scalar loss every 30 steps
            if step % 30 == 0:
                writer.add_scalar('loss_total', loss, step)
            step += 1
        # NOTE(review): indentation reconstructed from a collapsed source —
        # checkpointing appears to happen once per epoch; confirm against VCS
        try:
            path = os.path.join(model_dir, str(step)) + '.pth'
            torch.save(dnn.state_dict(), path)
            print(path)
            shutil.copy(path, os.path.join(model_dir, 'latest.pth'))
        except:
            traceback.print_exc()
def __call__(self):
    """Run the detector training loop under a model-directory file lock.

    Restores (optionally finetunes) the model, builds optimizer/scheduler
    from config, iterates epochs/batches, sends summaries to the background
    worker, saves/evaluates on timers, and dumps the failing batch to
    data.pkl on an unexpected crash before re-raising.
    """
    with filelock.FileLock(os.path.join(self.model_dir, 'lock'), 0):
        try:
            loader = self.get_loader()
            logging.info('num_workers=%d' % loader.num_workers)
            dnn = utils.parse_attr(self.config.get('model', 'dnn'))(self.config, self.anchors, len(self.category))
            inference = model.Inference(self.config, dnn, self.anchors)
            logging.info(humanize.naturalsize(sum(var.cpu().numpy().nbytes for var in inference.state_dict().values())))
            step, epoch = self.restore(dnn)
            if self.args.finetune:
                path = os.path.expanduser(os.path.expandvars(self.args.finetune))
                logging.info('finetune from ' + path)
                self.finetune(dnn, path)
            inference = ensure_model(inference)
            inference.train()
            # NOTE(review): eval() on config values executes arbitrary code;
            # the config file is assumed to be trusted local input
            optimizer = eval(self.config.get('train', 'optimizer'))(filter(lambda p: p.requires_grad, inference.parameters()), self.args.learning_rate)
            try:
                scheduler = eval(self.config.get('train', 'scheduler'))(optimizer)
            except configparser.NoOptionError:
                # no scheduler configured: train at a constant learning rate
                scheduler = None
            for epoch in range(0 if epoch is None else epoch, self.args.epoch):
                if scheduler is not None:
                    scheduler.step(epoch)
                    logging.info('epoch=%d, lr=%s' % (epoch, str(scheduler.get_lr())))
                for data in loader if self.args.quiet else tqdm.tqdm(loader, desc='epoch=%d/%d' % (epoch, self.args.epoch)):
                    kwargs = self.step(inference, optimizer, data)
                    step += 1
                    kwargs = {**kwargs, **dict(
                        dnn=dnn, inference=inference, optimizer=optimizer,
                        step=step, epoch=epoch,
                    )}
                    self.summary_worker('scalar', **kwargs)
                    self.summary_worker('image', **kwargs)
                    self.summary_worker('histogram', **kwargs)
                    if self.timer_save():
                        self.save(**kwargs)
                    if self.timer_eval():
                        self.eval(**kwargs)
            logging.info('finished')
        except KeyboardInterrupt:
            logging.warning('interrupted')
            self.save(**kwargs)
        except:
            traceback.print_exc()
            try:
                # best effort: keep the failing batch for post-mortem analysis
                with open(os.path.join(self.model_dir, 'data.pkl'), 'wb') as f:
                    pickle.dump(data, f)
            except UnboundLocalError:
                # crash happened before the first batch was fetched
                pass
            raise
        finally:
            self.stop()
def __init__(self, args, config):
    """Build the full training harness: model, optimizer, saver, timers and
    the background summary worker.
    """
    self.args = args
    self.config = config
    self.model_dir = utils.get_model_dir(config)
    self.cache_dir = utils.get_cache_dir(config)
    self.category = utils.get_category(config, self.cache_dir)
    self.anchors = torch.from_numpy(utils.get_anchors(config)).contiguous()
    logging.info('use cache directory ' + self.cache_dir)
    logging.info('tensorboard --logdir ' + self.model_dir)
    if args.delete:
        logging.warning('delete model directory: ' + self.model_dir)
        shutil.rmtree(self.model_dir, ignore_errors=True)
    os.makedirs(self.model_dir, exist_ok=True)
    # snapshot the effective config next to the model directory for later runs
    with open(self.model_dir + '.ini', 'w') as f:
        config.write(f)
    self.step, self.epoch, self.dnn = self.load()
    self.inference = model.Inference(self.config, self.dnn, self.anchors)
    logging.info(humanize.naturalsize(sum(var.cpu().numpy().nbytes for var in self.inference.state_dict().values())))
    if self.args.finetune:
        path = os.path.expanduser(os.path.expandvars(self.args.finetune))
        logging.info('finetune from ' + path)
        self.finetune(self.dnn, path)
    self.inference = ensure_model(self.inference)
    self.inference.train()
    # NOTE(review): eval() on config values executes arbitrary code; the
    # config file is assumed to be trusted local input
    self.optimizer = eval(self.config.get('train', 'optimizer'))(filter(lambda p: p.requires_grad, self.inference.parameters()), self.args.learning_rate)
    self.saver = utils.train.Saver(self.model_dir, config.getint('save', 'keep'))
    self.timer_save = utils.train.Timer(config.getfloat('save', 'secs'), False)
    try:
        self.timer_eval = utils.train.Timer(eval(config.get('eval', 'secs')), config.getboolean('eval', 'first'))
    except configparser.NoOptionError:
        # no eval interval configured: never trigger evaluation
        self.timer_eval = lambda: False
    self.summary_worker = SummaryWorker(self)
    self.summary_worker.start()
def __init__(self, args, config):
    """Set up detection evaluation: loader, restored model and a mapper of
    metric objects loaded from the configured source file.
    """
    self.args = args
    self.config = config
    self.model_dir = utils.get_model_dir(config)
    self.cache_dir = utils.get_cache_dir(config)
    self.category = utils.get_category(config, self.cache_dir)
    self.draw_bbox = utils.visualize.DrawBBox(self.category)
    self.loader = self.get_loader()
    self.anchors = torch.from_numpy(utils.get_anchors(config)).contiguous()
    self.path, self.step, self.epoch = utils.train.load_model(self.model_dir)
    # load on CPU first; moved to GPU below only if available
    state_dict = torch.load(self.path, map_location=lambda storage, loc: storage)
    dnn = utils.parse_attr(config.get('model', 'dnn'))(model.ConfigChannels(config, state_dict), self.anchors, len(self.category))
    dnn.load_state_dict(state_dict)
    logging.info(humanize.naturalsize(sum(var.cpu().numpy().nbytes for var in dnn.state_dict().values())))
    self.inference = model.Inference(config, dnn, self.anchors)
    self.inference.eval()
    if torch.cuda.is_available():
        self.inference.cuda()
    # the .ini written at training time, when present, records the exact
    # configuration the checkpoint was trained with
    path = self.model_dir + '.ini'
    if os.path.exists(path):
        self._config = configparser.ConfigParser()
        self._config.read(path)
    else:
        logging.warning('training config (%s) not found' % path)
    self.now = datetime.datetime.now()
    # import the eval mapper module from its file path and instantiate every
    # class it defines, keyed by the snake_case class name
    self.mapper = dict([(inflection.underscore(name), member()) for name, member in inspect.getmembers(importlib.machinery.SourceFileLoader('', self.config.get('eval', 'mapper')).load_module()) if inspect.isclass(member)])
def main():
    """Print the learning-rate schedule, one tab-separated line per epoch,
    without running any training.
    """
    args = make_args()
    config = configparser.ConfigParser()
    utils.load_config(config, args.config)
    for cmd in args.modify:
        utils.modify_config(config, cmd)
    with open(os.path.expanduser(os.path.expandvars(args.logging)), 'r') as f:
        # FIX: safe_load — yaml.load without an explicit Loader is unsafe and
        # no longer accepted by PyYAML >= 6
        logging.config.dictConfig(yaml.safe_load(f))
    category = utils.get_category(config)
    anchors = torch.from_numpy(utils.get_anchors(config)).contiguous()
    dnn = utils.parse_attr(config.get('model', 'dnn'))(config, anchors, len(category))
    inference = model.Inference(config, dnn, anchors)
    inference.train()
    # NOTE(review): eval() on config values executes arbitrary code; the
    # config file is assumed to be trusted local input
    optimizer = eval(config.get('train', 'optimizer'))(filter(lambda p: p.requires_grad, inference.parameters()), args.learning_rate)
    scheduler = eval(config.get('train', 'scheduler'))(optimizer)
    for epoch in range(args.epoch):
        scheduler.step(epoch)
        lr = scheduler.get_lr()
        print('\t'.join(map(str, [epoch] + lr)))
def main(params):
    """Train a CRF tagger on cached embeddings.

    params keys: num_epoch, batch_size, lr, embed_type, seed.
    Logs train/dev losses under <embed_type>_seed<seed>_log and keeps the
    checkpoints with the lowest dev loss.
    """
    num_epoch = params['num_epoch']
    batch_size = params['batch_size']
    lr = params['lr']
    embed_type = params['embed_type']
    seed = params['seed']
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    crf = CRF.ConditionalRandomField(num_tags=5)
    crf.cuda()
    if embed_type == 'biomed_w2v':
        Model = model.w2v_Model(num_tags=5)
    else:
        Model = model.Model(num_tags=5)
    Model.cuda()
    # jointly optimize the CRF transition parameters and the encoder
    optimizer = torch.optim.Adam([param for param in crf.parameters() if param.requires_grad == True] + list(Model.parameters()), lr=lr)
    iter_num = 1
    t_start = time.time()
    whole_train_set = json.load(open('data/training'))
    whole_train_ids = list(whole_train_set)
    # fixed split of the training file: first 12500 ids train, rest dev
    train_set = {_id: whole_train_set[_id] for _id in whole_train_ids[:12500]}
    dev_set = {_id: whole_train_set[_id] for _id in whole_train_ids[12500:]}
    print('Loading caches...')
    if embed_type == 'biomed_w2v':
        caches = np.load('caches_%s/embedding.npy' % embed_type).item()
    else:
        caches = h5py.File('caches_%s/elmo_embeddings.hdf5' % embed_type, 'r')
    print('Loaded')
    idx2sentid = json.load(open('dict/idx2sentid_%s' % embed_type))
    # w2v caches are keyed by int index, hdf5 caches by string index
    if embed_type == 'biomed_w2v':
        sentid2idx = {v: int(k) for k, v in idx2sentid.items()}
    else:
        sentid2idx = {v: k for k, v in idx2sentid.items()}
    log_dir = '%s_seed%d_log' % (embed_type, seed)
    os.system('rm -rf %s' % log_dir)
    os.system('mkdir %s' % log_dir)
    min_loss = float('inf')
    min_loss_iter = 0
    for epoch in range(num_epoch):
        train_data_loader = Batching(list(train_set), batch_size, shuffle=True)
        for instances in train_data_loader:
            embedding, token_mask = readCache(caches, instances, sentid2idx, embed_type)  # B x 3 x L x 1024
            embedding = Model(embedding, token_mask)
            labels = iobesLabel([train_set[instance] for instance in instances], embedding)
            # the CRF returns log-likelihood; negate for a loss to minimize
            loss = -crf(inputs=embedding, tags=labels, mask=token_mask)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            if iter_num % 25 == 0:
                t_elapsed = time.time() - t_start
                message = ('Epoch %d Iter %d TRAIN loss=%.4e elapsed=%.1f' % (epoch, iter_num, loss.detach(), t_elapsed))
                print(message)
                with open('%s/training_log' % log_dir, 'a') as f:
                    f.write(message + '\n')
            if iter_num % 200 == 0:
                # periodic dev-set evaluation
                dev_data_loader = Batching(list(dev_set), batch_size, shuffle=False)
                total_loss = 0
                step = 0
                for instances in dev_data_loader:
                    embedding, token_mask = readCache(caches, instances, sentid2idx, embed_type)  # B x 3 x L x 1024
                    embedding = Model(embedding, token_mask)
                    labels = iobesLabel([dev_set[instance] for instance in instances], embedding)
                    loss = -crf(inputs=embedding, tags=labels, mask=token_mask).detach()
                    total_loss += loss
                    step += 1
                average_loss = total_loss / step
                message = ('Epoch %d EVAL loss=%.4e' % (epoch, average_loss))
                print(message)
                with open('%s/training_log' % log_dir, 'a') as f:
                    f.write(message + '\n')
                if embed_type != 'biomed_w2v':
                    # report the softmax-normalized layer-mixing weights
                    weights = Model.weight.data.squeeze().detach().cpu().numpy()
                    weights = [np.exp(weight) / np.sum(np.exp(weights)) for weight in weights]
                    message = 'weights: %.4f %.4f %.4f' % (weights[0], weights[1], weights[2])
                    print(message)
                    with open('%s/training_log' % log_dir, 'a') as f:
                        f.write(message + '\n')
                if average_loss <= min_loss:
                    # new best dev loss: checkpoint and dump predictions
                    min_loss_iter = iter_num
                    min_loss = average_loss
                    torch.save(crf.state_dict(), '%s/best_crf' % log_dir)
                    torch.save(Model.state_dict(), '%s/best_model' % log_dir)
                    model.Inference('%s/predictions' % (log_dir), caches, sentid2idx, Model, crf, embed_type)
                message = 'best iter %s loss %.4e' % (min_loss_iter, min_loss)
                print(message)
                with open('%s/training_log' % log_dir, 'a') as f:
                    f.write(message + '\n')
            iter_num += 1
def main():
    """Evaluate the segmentation model on the test split: compute per-image
    IoU against ground-truth masks, save visualizations under ./result and
    append the mean IoU to ./record/eval.txt.
    """
    config = configparser.ConfigParser()
    config.read('config.ini')
    model_dir = utils.get_model_dir(config)
    cache_dir = utils.get_cache_dir(config)
    dnn = utils.parse_attr(config.get('model', 'dnn'))()
    inference = model.Inference(dnn)
    inference.eval()
    inference.cuda()
    path = os.path.join(model_dir, 'latest.pth')
    dnn.load_state_dict(torch.load(path))
    height, width = tuple(map(int, config.get('image', 'size').split()))
    trans = torchvision.transforms.Compose([
        torchvision.transforms.ToTensor(),
        torchvision.transforms.Normalize((0.5, 0.5, 0.5), (1, 1, 1))
    ])
    paths = [os.path.join(cache_dir, phase + '.pkl') for phase in ['test']]
    print(paths)
    draw_masks = utils.visualize.DrawMasks()
    results = []
    for data in utils.data.load_pickles(paths):
        image_bgr = cv2.imread(data['path'])
        _height, _width = image_bgr.shape[:2]
        # scale so the image's shorter side matches the network's shorter side
        scale = min(height, width) / min(_height, _width)
        image_resized = cv2.resize(image_bgr, (int(_width * scale), int(_height * scale)))
        tensor = trans(image_resized)
        tensor = tensor.unsqueeze(0).cuda()
        feature = inference(torch.autograd.Variable(tensor, volatile=True)).contiguous()
        # resize each feature channel back to the original image size
        feature = torch.autograd.Variable(torch.from_numpy(np.array([[cv2.resize(f.data.cpu().numpy(), (_width, _height)) for f in b] for b in feature])), volatile=True)
        prob, pred = torch.max(F.softmax(feature, -1), -1)
        image_result = draw_masks(image_bgr, pred[0].data.numpy())
        pred = pred.data.byte().cuda()
        _size = pred.size()[-2:]
        # cv2.resize expects (width, height), hence the reversal
        _size = _size[::-1]
        # binarize the ground-truth masks at 127 after resizing to pred's size
        masks = torch.from_numpy(np.array([(cv2.resize(cv2.imread(path, cv2.IMREAD_GRAYSCALE), _size) > 127).astype(np.uint8) for path in data['paths']])).unsqueeze(0).cuda()
        iou = calc_iou(masks, pred)
        results.append(iou.cpu().numpy())
        # image file name without directory or extension
        name = data['path'].split('/')[-1].split('.')[-2]
        cv2.imwrite(os.path.join('./result', name + '_seg.png'), image_result)
    print(np.mean(results))
    results = np.mean(results)
    with open('./record/eval.txt', 'a') as f:
        f.write(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
        f.write('\n')
        f.write('the mIOU of artifact is: ' + str(results))
        f.write('\n')
def main():
    """Convert Caffe weights into this project's PyTorch checkpoint format.

    Builds the PyTorch model, loads the Caffe net, copies each mapped blob
    into the PyTorch state dict, saves a checkpoint, and finally prints
    per-stage feature digests so the two implementations can be compared.
    """
    args = make_args()
    config = configparser.ConfigParser()
    utils.load_config(config, args.config)
    for cmd in args.modify:
        utils.modify_config(config, cmd)
    with open(os.path.expanduser(os.path.expandvars(args.logging)), 'r') as f:
        # FIX: safe_load — yaml.load without an explicit Loader is unsafe and
        # no longer accepted by PyYAML >= 6
        logging.config.dictConfig(yaml.safe_load(f))
    torch.manual_seed(args.seed)
    mapper = load_mapper(os.path.expandvars(os.path.expanduser(args.mapper)))
    model_dir = utils.get_model_dir(config)
    _, num_parts = utils.get_dataset_mappers(config)
    limbs_index = utils.get_limbs_index(config)
    height, width = tuple(map(int, config.get('image', 'size').split()))
    tensor = torch.randn(args.batch_size, 3, height, width)
    # PyTorch
    try:
        path, step, epoch = utils.train.load_model(model_dir)
        state_dict = torch.load(path, map_location=lambda storage, loc: storage)
    except (FileNotFoundError, ValueError):
        # no checkpoint yet: build the model from config alone
        state_dict = {name: None for name in ('dnn', 'stages')}
    config_channels_dnn = model.ConfigChannels(config, state_dict['dnn'])
    dnn = utils.parse_attr(config.get('model', 'dnn'))(config_channels_dnn)
    config_channels_stages = model.ConfigChannels(config, state_dict['stages'], config_channels_dnn.channels)
    channel_dict = model.channel_dict(num_parts, len(limbs_index))
    stages = nn.Sequential(*[utils.parse_attr(s)(config_channels_stages, channel_dict, config_channels_dnn.channels, str(i)) for i, s in enumerate(config.get('model', 'stages').split())])
    inference = model.Inference(config, dnn, stages)
    inference.eval()
    state_dict = inference.state_dict()
    # Caffe
    net = caffe.Net(os.path.expanduser(os.path.expandvars(args.prototxt)), os.path.expanduser(os.path.expandvars(args.caffemodel)), caffe.TEST)
    if args.debug:
        # NOTE(review): debug-section nesting reconstructed from a collapsed
        # source — the Caffe forward/digest dump appears to be debug-only
        logging.info('Caffe variables')
        for name, blobs in net.params.items():
            for i, blob in enumerate(blobs):
                val = blob.data
                print('\t'.join(map(str, [
                    '%s/%d' % (name, i),
                    'x'.join(map(str, val.shape)),
                    utils.abs_mean(val),
                    hashlib.md5(val.tostring()).hexdigest(),
                ])))
        logging.info('Caffe features')
        input = net.blobs[args.input]
        input.reshape(*tensor.size())
        input.data[...] = tensor.numpy()
        net.forward()
        for name, blob in net.blobs.items():
            val = blob.data
            print('\t'.join(map(str, [
                name,
                'x'.join(map(str, val.shape)),
                utils.abs_mean(val),
                hashlib.md5(val.tostring()).hexdigest(),
            ])))
    # convert
    saver = utils.train.Saver(model_dir, config.getint('save', 'keep'))
    try:
        for dst in state_dict:
            src, transform = mapper[dst]
            blobs = [b.data for b in net.params[src]]
            blob = transform(blobs)
            if isinstance(blob, np.ndarray):
                state_dict[dst] = torch.from_numpy(blob)
            else:
                # scalar mapping: broadcast-fill the destination tensor
                state_dict[dst].fill_(blob)
            val = state_dict[dst].numpy()
            logging.info('\t'.join(list(map(str, (dst, src, val.shape, utils.abs_mean(val), hashlib.md5(val.tostring()).hexdigest())))))
        inference.load_state_dict(state_dict)
        if args.delete:
            logging.warning('delete model directory: ' + model_dir)
            shutil.rmtree(model_dir, ignore_errors=True)
        saver(dict(
            dnn=inference.dnn.state_dict(),
            stages=inference.stages.state_dict(),
        ), 0)
    finally:
        # always print the PyTorch-side digests for comparison
        for stage, output in enumerate(inference(torch.autograd.Variable(tensor, volatile=True))):
            for name, feature in output.items():
                val = feature.data.numpy()
                print('\t'.join(map(str, [
                    'stage%d/%s' % (stage, name),
                    'x'.join(map(str, val.shape)),
                    utils.abs_mean(val),
                    hashlib.md5(val.tostring()).hexdigest(),
                ])))
def main():
    """Convert TensorFlow weights into this project's PyTorch checkpoint.

    Loads a frozen TF graph, copies each mapped tensor into the PyTorch
    state dict, saves a checkpoint, and (in debug mode) prints tensor
    digests from both frameworks for comparison; finally writes the model
    graph to TensorBoard.
    """
    args = make_args()
    config = configparser.ConfigParser()
    utils.load_config(config, args.config)
    for cmd in args.modify:
        utils.modify_config(config, cmd)
    with open(os.path.expanduser(os.path.expandvars(args.logging)), 'r') as f:
        # FIX: safe_load — yaml.load without an explicit Loader is unsafe and
        # no longer accepted by PyYAML >= 6
        logging.config.dictConfig(yaml.safe_load(f))
    torch.manual_seed(args.seed)
    mapper = load_mapper(os.path.expandvars(os.path.expanduser(args.mapper)))
    model_dir = utils.get_model_dir(config)
    _, num_parts = utils.get_dataset_mappers(config)
    limbs_index = utils.get_limbs_index(config)
    height, width = tuple(map(int, config.get('image', 'size').split()))
    tensor = torch.randn(args.batch_size, 3, height, width)
    # PyTorch
    try:
        path, step, epoch = utils.train.load_model(model_dir)
        state_dict = torch.load(path, map_location=lambda storage, loc: storage)
    except (FileNotFoundError, ValueError):
        # no checkpoint yet: build the model from config alone
        state_dict = {name: None for name in ('dnn', 'stages')}
    config_channels_dnn = model.ConfigChannels(config, state_dict['dnn'])
    dnn = utils.parse_attr(config.get('model', 'dnn'))(config_channels_dnn)
    config_channels_stages = model.ConfigChannels(config, state_dict['stages'], config_channels_dnn.channels)
    channel_dict = model.channel_dict(num_parts, len(limbs_index))
    stages = nn.Sequential(*[utils.parse_attr(s)(config_channels_stages, channel_dict, config_channels_dnn.channels, str(i)) for i, s in enumerate(config.get('model', 'stages').split())])
    inference = model.Inference(config, dnn, stages)
    inference.eval()
    state_dict = inference.state_dict()
    # TensorFlow
    with open(os.path.expanduser(os.path.expandvars(args.path)), 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
    # TF uses NHWC; transpose the NCHW example input accordingly
    image = ops.convert_to_tensor(np.transpose(tensor.cpu().numpy(), [0, 2, 3, 1]), name='image')
    tf.import_graph_def(graph_def, input_map={'image:0': image})
    saver = utils.train.Saver(model_dir, config.getint('save', 'keep'))
    with tf.Session(config=tf.ConfigProto(device_count={'CPU': 1, 'GPU': 0}, allow_soft_placement=True, log_device_placement=False)) as sess:
        try:
            for dst in state_dict:
                src, converter = mapper[dst]
                if src.isdigit():
                    # a numeric "source" means a constant fill value
                    state_dict[dst].fill_(float(src))
                else:
                    op = sess.graph.get_operation_by_name(src)
                    t = op.values()[0]
                    v = sess.run(t)
                    state_dict[dst] = torch.from_numpy(converter(v))
                val = state_dict[dst].numpy()
                print('\t'.join(list(map(str, (dst, src, val.shape, utils.abs_mean(val), hashlib.md5(val.tostring()).hexdigest())))))
            inference.load_state_dict(state_dict)
            if args.delete:
                logging.warning('delete model directory: ' + model_dir)
                shutil.rmtree(model_dir, ignore_errors=True)
            saver(dict(
                dnn=inference.dnn.state_dict(),
                stages=inference.stages.state_dict(),
            ), 0)
        finally:
            if args.debug:
                for op in sess.graph.get_operations():
                    if op.values():
                        logging.info(op.values()[0])
                for name in args.debug:
                    t = sess.graph.get_tensor_by_name(name + ':0')
                    val = sess.run(t)
                    # back to NCHW so digests line up with the PyTorch side
                    val = np.transpose(val, [0, 3, 1, 2])
                    print('\t'.join(map(str, [
                        name,
                        'x'.join(map(str, val.shape)),
                        utils.abs_mean(val),
                        hashlib.md5(val.tostring()).hexdigest(),
                    ])))
            _tensor = torch.autograd.Variable(tensor, volatile=True)
            val = dnn(_tensor).data.numpy()
            print('\t'.join(map(str, [
                'x'.join(map(str, val.shape)),
                utils.abs_mean(val),
                hashlib.md5(val.tostring()).hexdigest(),
            ])))
            for stage, output in enumerate(inference(_tensor)):
                for name, feature in output.items():
                    val = feature.data.numpy()
                    print('\t'.join(map(str, [
                        'stage%d/%s' % (stage, name),
                        'x'.join(map(str, val.shape)),
                        utils.abs_mean(val),
                        hashlib.md5(val.tostring()).hexdigest(),
                    ])))
            # add_graph needs a traceable list output, not per-stage dicts
            forward = inference.forward
            inference.forward = lambda self, *x: list(forward(self, *x)[-1].values())
            with SummaryWriter(model_dir) as writer:
                writer.add_graph(inference, (_tensor,))