Example #1
0
 def load(self):
     """Restore the two-part pose model (dnn backbone + stage modules).

     Tries to resume from the newest checkpoint in self.model_dir; when no
     usable checkpoint exists, starts fresh at step 0 / epoch 0 with no
     pretrained weights.

     Returns:
         (step, epoch, dnn, stages)
     """
     try:
         path, step, epoch = utils.train.load_model(self.model_dir)
         # Keep every tensor on CPU regardless of where it was saved.
         state_dict = torch.load(
             path, map_location=lambda storage, loc: storage)
     except (FileNotFoundError, ValueError):
         # No checkpoint available: empty weight slots for both parts.
         step, epoch = 0, 0
         state_dict = dict(dnn=None, stages=None)
     config_channels_dnn = model.ConfigChannels(
         self.config, state_dict['dnn'])
     dnn = utils.parse_attr(
         self.config.get('model', 'dnn'))(config_channels_dnn)
     config_channels_stages = model.ConfigChannels(
         self.config, state_dict['stages'], config_channels_dnn.channels)
     channel_dict = model.channel_dict(
         self.num_parts, len(self.limbs_index))
     modules = []
     for index, name in enumerate(
             self.config.get('model', 'stages').split()):
         modules.append(utils.parse_attr(name)(
             config_channels_stages, channel_dict,
             config_channels_dnn.channels, str(index)))
     stages = nn.Sequential(*modules)
     # Only load weights that a checkpoint actually supplied.
     if config_channels_dnn.state_dict is not None:
         dnn.load_state_dict(config_channels_dnn.state_dict)
     if config_channels_stages.state_dict is not None:
         stages.load_state_dict(config_channels_stages.state_dict)
     return step, epoch, dnn, stages
def main():
    """Export the pose-estimation model (dnn + stages) to ONNX.

    Loads the latest checkpoint from the configured model directory,
    assembles the backbone and stage modules, and writes <model_dir>.onnx.
    """
    args = make_args()
    config = configparser.ConfigParser()
    utils.load_config(config, args.config)
    for cmd in args.modify:
        utils.modify_config(config, cmd)
    # NOTE(review): yaml.load without an explicit Loader is deprecated in
    # PyYAML >= 5.1 and unsafe on untrusted input; the logging config is
    # presumably trusted here, but yaml.safe_load would be safer.
    with open(os.path.expanduser(os.path.expandvars(args.logging)), 'r') as f:
        logging.config.dictConfig(yaml.load(f))
    height, width = tuple(map(int, config.get('image', 'size').split()))
    model_dir = utils.get_model_dir(config)
    _, num_parts = utils.get_dataset_mappers(config)
    limbs_index = utils.get_limbs_index(config)
    path, step, epoch = utils.train.load_model(model_dir)
    # map_location keeps tensors on CPU regardless of where they were saved.
    state_dict = torch.load(path, map_location=lambda storage, loc: storage)
    config_channels_dnn = model.ConfigChannels(config, state_dict['dnn'])
    dnn = utils.parse_attr(config.get('model', 'dnn'))(config_channels_dnn)
    config_channels_stages = model.ConfigChannels(config, state_dict['stages'], config_channels_dnn.channels)
    channel_dict = model.channel_dict(num_parts, len(limbs_index))
    stages = nn.Sequential(*[utils.parse_attr(s)(config_channels_stages, channel_dict, config_channels_dnn.channels, str(i)) for i, s in enumerate(config.get('model', 'stages').split())])
    dnn.load_state_dict(config_channels_dnn.state_dict)
    stages.load_state_dict(config_channels_stages.state_dict)
    inference = model.Inference(config, dnn, stages)
    inference.eval()
    # Log total parameter size in human-readable form.
    logging.info(humanize.naturalsize(sum(var.cpu().numpy().nbytes for var in inference.state_dict().values())))
    # volatile=True marks the pass as inference-only (pre-0.4 PyTorch API).
    image = torch.autograd.Variable(torch.randn(args.batch_size, 3, height, width), volatile=True)
    path = model_dir + '.onnx'
    logging.info('save ' + path)
    forward = inference.forward
    # Wrap forward so each stage's dict output becomes a [parts, limbs] list
    # the ONNX tracer can handle.  Because the lambda is assigned on the
    # *instance*, Python does not bind it: the parameter named `self`
    # actually receives the input tensor, while `forward` is the
    # already-bound original method — so the call works as intended.
    inference.forward = lambda self, *x: [[output[name] for name in 'parts, limbs'.split(', ')] for output in forward(self, *x)]
    torch.onnx.export(inference, image, path, export_params=True, verbose=args.verbose)
Example #3
0
 def load(self):
     """Load the newest checkpoint when present, else start fresh.

     Returns:
         (step, epoch, config_channels) — config_channels carries the
         restored state dict, or none at all on a fresh start.
     """
     try:
         path, step, epoch = utils.train.load_model(self.model_dir)
         # map_location keeps all tensors on CPU.
         weights = torch.load(
             path, map_location=lambda storage, loc: storage)
         config_channels = model.ConfigChannels(self.config, weights)
     except ValueError:
         # No checkpoint to resume from.
         step, epoch = 0, 0
         config_channels = model.ConfigChannels(self.config)
     return step, epoch, config_channels
def main():
    """Prune channels of one named tensor and save the pruned state dict.

    Ranks channels of state_dict[args.name] by utils.dense, keeps the
    fraction args.keep with the smallest values, propagates the channel
    selection through the autograd graph via utils.channel.Modifier, then
    rebuilds the model from the pruned state dict to validate it.
    """
    args = make_args()
    config = configparser.ConfigParser()
    utils.load_config(config, args.config)
    for cmd in args.modify:
        utils.modify_config(config, cmd)
    # NOTE(review): yaml.load without a Loader is deprecated; safe_load
    # would be preferable for the logging config.
    with open(os.path.expanduser(os.path.expandvars(args.logging)), 'r') as f:
        logging.config.dictConfig(yaml.load(f))
    model_dir = utils.get_model_dir(config)
    category = utils.get_category(config)
    anchors = torch.from_numpy(utils.get_anchors(config)).contiguous()
    path, step, epoch = utils.train.load_model(model_dir)
    state_dict = torch.load(path, map_location=lambda storage, loc: storage)
    _model = utils.parse_attr(config.get('model', 'dnn'))
    dnn = _model(model.ConfigChannels(config, state_dict), anchors,
                 len(category))
    logging.info(
        humanize.naturalsize(
            sum(var.cpu().numpy().nbytes
                for var in dnn.state_dict().values())))
    dnn.load_state_dict(state_dict)
    height, width = tuple(map(int, config.get('image', 'size').split()))
    image = torch.autograd.Variable(
        torch.randn(args.batch_size, 3, height, width))
    # Forward pass on random data; output.grad_fn is walked by the modifier.
    output = dnn(image)
    state_dict = dnn.state_dict()
    # Channel scores; keep the args.keep fraction with the smallest score.
    d = utils.dense(state_dict[args.name])
    keep = torch.LongTensor(np.argsort(d)[:int(len(d) * args.keep)])
    modifier = utils.channel.Modifier(
        args.name,
        state_dict,
        dnn,
        lambda name, var: var[keep],
        lambda name, var, mapper: var[mapper(keep, len(d))],
        debug=args.debug,
    )
    modifier(output.grad_fn)
    if args.debug:
        # Render the traversed graph to a Graphviz file for inspection.
        path = modifier.dot.view(
            '%s.%s.gv' % (os.path.basename(model_dir),
                          os.path.basename(os.path.splitext(__file__)[0])),
            os.path.dirname(model_dir))
        logging.info(path)
    assert len(keep) == len(state_dict[args.name])
    # Rebuild from the pruned state dict and run once to validate shapes.
    dnn = _model(model.ConfigChannels(config, state_dict), anchors,
                 len(category))
    dnn.load_state_dict(state_dict)
    dnn(image)
    if not args.debug:
        # `path` is still the loaded checkpoint path here, so this
        # overwrites the original checkpoint with the pruned weights.
        torch.save(state_dict, path)
Example #5
0
 def __init__(self, args, config):
     """Build the detection model and a data loader sized from the config.

     Runs one zero-tensor forward pass to discover the output grid size
     and caches the activations of the central grid cell (self.output).
     """
     self.args = args
     self.config = config
     self.model_dir = utils.get_model_dir(config)
     self.category = utils.get_category(config)
     self.anchors = torch.from_numpy(utils.get_anchors(config)).contiguous()
     self.dnn = utils.parse_attr(config.get('model', 'dnn'))(
         model.ConfigChannels(config), self.anchors, len(self.category))
     self.dnn.eval()
     # Log total parameter size in human-readable form.
     logging.info(
         humanize.naturalsize(
             sum(var.cpu().numpy().nbytes
                 for var in self.dnn.state_dict().values())))
     if torch.cuda.is_available():
         self.dnn.cuda()
     self.height, self.width = tuple(
         map(int,
             config.get('image', 'size').split()))
     # Probe forward pass (volatile=True: pre-0.4 inference-only flag)
     # to learn the spatial size of the output feature map.
     output = self.dnn(
         torch.autograd.Variable(utils.ensure_device(
             torch.zeros(1, 3, self.height, self.width)),
                                 volatile=True))
     _, _, self.rows, self.cols = output.size()
     # Central grid cell; its response is kept for later use.
     self.i, self.j = self.rows // 2, self.cols // 2
     self.output = output[:, :, self.i, self.j]
     dataset = Dataset(self.height, self.width)
     try:
         workers = self.config.getint('data', 'workers')
     except configparser.NoOptionError:
         # Default to one worker per CPU core when not configured.
         workers = multiprocessing.cpu_count()
     self.loader = torch.utils.data.DataLoader(
         dataset, batch_size=self.args.batch_size, num_workers=workers)
 def __init__(self, args, config):
     """Set up a live detection demo: model, capture device and writer.

     Loads the latest checkpoint, wraps the dnn in model.Inference, and
     prepares the video capture/writer plus test-time transforms.
     """
     self.args = args
     self.config = config
     self.cache_dir = utils.get_cache_dir(config)
     self.model_dir = utils.get_model_dir(config)
     # Fall back to config-only categories when no cache exists yet.
     self.category = utils.get_category(config, self.cache_dir if os.path.exists(self.cache_dir) else None)
     self.draw_bbox = utils.visualize.DrawBBox(self.category, colors=args.colors, thickness=args.thickness)
     self.anchors = torch.from_numpy(utils.get_anchors(config)).contiguous()
     self.height, self.width = tuple(map(int, config.get('image', 'size').split()))
     self.path, self.step, self.epoch = utils.train.load_model(self.model_dir)
     # map_location keeps all tensors on CPU regardless of save device.
     state_dict = torch.load(self.path, map_location=lambda storage, loc: storage)
     self.dnn = utils.parse_attr(config.get('model', 'dnn'))(model.ConfigChannels(config, state_dict), self.anchors, len(self.category))
     self.dnn.load_state_dict(state_dict)
     self.inference = model.Inference(config, self.dnn, self.anchors)
     self.inference.eval()
     if torch.cuda.is_available():
         self.inference.cuda()
     logging.info(humanize.naturalsize(sum(var.cpu().numpy().nbytes for var in self.inference.state_dict().values())))
     # Video capture setup; these helpers are defined on the class elsewhere.
     self.create_cap()
     self.create_cap_size()
     self.writer = self.create_writer()
     self.keys = set(args.keys)
     # Test-time preprocessing pipeline.
     self.resize = transform.parse_transform(config, config.get('transform', 'resize_test'))
     self.transform_image = transform.get_transform(config, config.get('transform', 'image_test').split())
     self.transform_tensor = transform.get_transform(config, config.get('transform', 'tensor').split())
Example #7
0
 def load(self):
     """Build the detection net, restoring checkpoint weights if available.

     Returns:
         (step, epoch, dnn) — step/epoch are zero on a fresh start.
     """
     try:
         path, step, epoch = utils.train.load_model(self.model_dir)
         # Keep tensors on CPU regardless of where they were saved.
         weights = torch.load(
             path, map_location=lambda storage, loc: storage)
         config_channels = model.ConfigChannels(self.config, weights)
     except ValueError:
         # No checkpoint found: build the model with default channels.
         step, epoch = 0, 0
         config_channels = model.ConfigChannels(self.config)
     factory = utils.parse_attr(self.config.get('model', 'dnn'))
     dnn = factory(config_channels, self.anchors, len(self.category))
     if config_channels.state_dict is not None:
         dnn.load_state_dict(config_channels.state_dict)
     return step, epoch, dnn
def main():
    """Debug tool: print shape/mean/md5 checksums of weights and activations.

    Runs the detection model on a fixed image ('image.jpg' in the working
    directory) and prints a checksum line per weight tensor and per
    intermediate array, for comparing implementations or ports.
    """
    args = make_args()
    config = configparser.ConfigParser()
    utils.load_config(config, args.config)
    for cmd in args.modify:
        utils.modify_config(config, cmd)
    with open(os.path.expanduser(os.path.expandvars(args.logging)), 'r') as f:
        logging.config.dictConfig(yaml.load(f))
    cache_dir = utils.get_cache_dir(config)
    model_dir = utils.get_model_dir(config)
    category = utils.get_category(
        config, cache_dir if os.path.exists(cache_dir) else None)
    anchors = utils.get_anchors(config)
    anchors = torch.from_numpy(anchors).contiguous()
    path, step, epoch = utils.train.load_model(model_dir)
    state_dict = torch.load(path, map_location=lambda storage, loc: storage)
    dnn = utils.parse_attr(config.get('model', 'dnn'))(model.ConfigChannels(
        config, state_dict), anchors, len(category))
    dnn.load_state_dict(state_dict)
    height, width = tuple(map(int, config.get('image', 'size').split()))
    resize = transform.parse_transform(config,
                                       config.get('transform', 'resize_test'))
    transform_image = transform.get_transform(
        config,
        config.get('transform', 'image_test').split())
    transform_tensor = transform.get_transform(
        config,
        config.get('transform', 'tensor').split())
    # load image
    image_bgr = cv2.imread('image.jpg')
    image_resized = resize(image_bgr, height, width)
    image = transform_image(image_resized)
    tensor = transform_tensor(image).unsqueeze(0)
    # Checksum
    # One line per weight tensor: name, shape, |mean|, md5 of raw bytes.
    # NOTE(review): ndarray.tostring() is deprecated in modern NumPy
    # (use tobytes()); kept as-is for this legacy codebase.
    for key, var in dnn.state_dict().items():
        a = var.cpu().numpy()
        print('\t'.join(
            map(str, [
                key, a.shape,
                utils.abs_mean(a),
                hashlib.md5(a.tostring()).hexdigest()
            ])))
    output = dnn(torch.autograd.Variable(tensor, volatile=True)).data
    # Checksums of the intermediate arrays along the preprocessing chain.
    for key, a in [
        ('image_bgr', image_bgr),
        ('image_resized', image_resized),
        ('tensor', tensor.cpu().numpy()),
        ('output', output.cpu().numpy()),
    ]:
        print('\t'.join(
            map(str, [
                key, a.shape,
                utils.abs_mean(a),
                hashlib.md5(a.tostring()).hexdigest()
            ])))
Example #9
0
def main():
    """Convert a Darknet-format weights file into a PyTorch checkpoint.

    Builds the model from the config, reads the binary weight blobs from
    args.file in Darknet's serialization order (16-byte header, then raw
    float32 data per layer), transposes the final conv weight/bias into the
    anchor layout expected by the PyTorch model, and saves the result
    through utils.train.Saver.
    """
    args = make_args()
    config = configparser.ConfigParser()
    utils.load_config(config, args.config)
    for cmd in args.modify:
        utils.modify_config(config, cmd)
    # NOTE(review): yaml.load without a Loader is deprecated; safe_load
    # would be preferable for the logging config.
    with open(os.path.expanduser(os.path.expandvars(args.logging)), 'r') as f:
        logging.config.dictConfig(yaml.load(f))
    cache_dir = utils.get_cache_dir(config)
    model_dir = utils.get_model_dir(config)
    category = utils.get_category(config, cache_dir if os.path.exists(cache_dir) else None)
    anchors = utils.get_anchors(config)
    anchors = torch.from_numpy(anchors).contiguous()
    dnn = utils.parse_attr(config.get('model', 'dnn'))(model.ConfigChannels(config), anchors, len(category))
    dnn.eval()
    logging.info(humanize.naturalsize(sum(var.cpu().numpy().nbytes for var in dnn.state_dict().values())))
    state_dict = dnn.state_dict()
    grouped_dict = group_state(state_dict)
    # Pre-bind `remaining` so the finally block below cannot raise a
    # NameError (masking the real error) when an exception occurs before
    # the first assignment inside the loop — e.g. if args.file cannot be
    # opened or its header fails to unpack.
    remaining = 0
    try:
        layers = []
        with open(os.path.expanduser(os.path.expandvars(args.file)), 'rb') as f:
            major, minor, revision, seen = struct.unpack('4i', f.read(16))
            logging.info('major=%d, minor=%d, revision=%d, seen=%d' % (major, minor, revision, seen))
            total = 0
            filesize = os.fstat(f.fileno()).st_size
            for layer in grouped_dict:
                group = grouped_dict[layer]
                # Darknet stores parameters in this fixed per-layer order.
                for suffix in ['conv.bias', 'bn.bias', 'bn.weight', 'bn.running_mean', 'bn.running_var', 'conv.weight']:
                    if suffix in group:
                        var = group[suffix]
                        size = var.size()
                        cnt = np.multiply.reduce(size)
                        total += cnt
                        key = layer + '.' + suffix
                        val = np.array(struct.unpack('%df' % cnt, f.read(cnt * 4)), np.float32)
                        val = np.reshape(val, size)
                        remaining = filesize - f.tell()
                        logging.info('%s.%s: %s=%f (%s), remaining=%d' % (layer, suffix, 'x'.join(list(map(str, size))), utils.abs_mean(val), hashlib.md5(val.tostring()).hexdigest(), remaining))
                        layers.append([key, torch.from_numpy(val)])
                logging.info('%d parameters assigned' % total)
        # The last two entries are assumed to be the final conv weight and
        # bias; rearrange them into the model's anchor-major layout.
        layers[-1][1] = transpose_weight(layers[-1][1], len(anchors))
        layers[-2][1] = transpose_bias(layers[-2][1], len(anchors))
    finally:
        if remaining > 0:
            logging.warning('%d bytes remaining' % remaining)
        # Save whatever was assigned, even on partial failure, so the
        # conversion progress can be inspected.
        state_dict = collections.OrderedDict(layers)
        if args.delete:
            logging.warning('delete model directory: ' + model_dir)
            shutil.rmtree(model_dir, ignore_errors=True)
        saver = utils.train.Saver(model_dir, config.getint('save', 'keep'), logger=None)
        path = saver(state_dict, 0, 0) + saver.ext
        if args.copy is not None:
            _path = os.path.expandvars(os.path.expanduser(args.copy))
            logging.info('copy %s to %s' % (path, _path))
            shutil.copy(path, _path)
Example #10
0
 def load(self):
     """Restore dnn and stages from the latest checkpoint.

     Unlike the guarded variants elsewhere, this expects a checkpoint to
     exist and lets any loading error propagate.

     Returns:
         (step, epoch, dnn, stages)
     """
     path, step, epoch = utils.train.load_model(self.model_dir)
     # Keep every tensor on CPU regardless of where it was saved.
     weights = torch.load(path, map_location=lambda storage, loc: storage)
     config_channels_dnn = model.ConfigChannels(self.config, weights['dnn'])
     dnn = utils.parse_attr(
         self.config.get('model', 'dnn'))(config_channels_dnn)
     config_channels_stages = model.ConfigChannels(
         self.config, weights['stages'], config_channels_dnn.channels)
     channel_dict = model.channel_dict(
         self.num_parts, len(self.limbs_index))
     modules = [
         utils.parse_attr(name)(config_channels_stages, channel_dict,
                                config_channels_dnn.channels, str(index))
         for index, name in enumerate(
             self.config.get('model', 'stages').split())
     ]
     stages = nn.Sequential(*modules)
     dnn.load_state_dict(config_channels_dnn.state_dict)
     stages.load_state_dict(config_channels_stages.state_dict)
     return step, epoch, dnn, stages
Example #11
0
 def __init__(self, args, config):
     """Set up an evaluation harness: model, loader and metric mappers.

     Loads the latest checkpoint, wraps the dnn in model.Inference, reads
     the training-time config saved next to the model (if any), and builds
     a name -> metric-object mapping from the module named in the 'eval'
     config section.
     """
     self.args = args
     self.config = config
     self.model_dir = utils.get_model_dir(config)
     self.cache_dir = utils.get_cache_dir(config)
     self.category = utils.get_category(config, self.cache_dir)
     self.draw_bbox = utils.visualize.DrawBBox(self.category)
     self.loader = self.get_loader()
     self.anchors = torch.from_numpy(utils.get_anchors(config)).contiguous()
     self.path, self.step, self.epoch = utils.train.load_model(
         self.model_dir)
     # map_location keeps all tensors on CPU.
     state_dict = torch.load(self.path,
                             map_location=lambda storage, loc: storage)
     dnn = utils.parse_attr(config.get('model', 'dnn'))(
         model.ConfigChannels(config, state_dict), self.anchors,
         len(self.category))
     dnn.load_state_dict(state_dict)
     logging.info(
         humanize.naturalsize(
             sum(var.cpu().numpy().nbytes
                 for var in dnn.state_dict().values())))
     self.inference = model.Inference(config, dnn, self.anchors)
     self.inference.eval()
     if torch.cuda.is_available():
         self.inference.cuda()
     # The training run leaves its config beside the model directory.
     path = self.model_dir + '.ini'
     if os.path.exists(path):
         self._config = configparser.ConfigParser()
         self._config.read(path)
     else:
         logging.warning('training config (%s) not found' % path)
     self.now = datetime.datetime.now()
     # Instantiate every class found in the module at config 'eval'/'mapper',
     # keyed by its snake_cased class name.
     # NOTE(review): SourceFileLoader(...).load_module() is deprecated in
     # modern Python (use exec_module); kept as-is for this legacy codebase.
     self.mapper = dict([
         (inflection.underscore(name), member())
         for name, member in inspect.getmembers(
             importlib.machinery.SourceFileLoader(
                 '', self.config.get('eval', 'mapper')).load_module())
         if inspect.isclass(member)
     ])
def main():
    """Render the model's autograd computation graph to a Graphviz file.

    Builds the detection model (loading a checkpoint when available), runs
    one forward pass on random data, walks output.grad_fn with
    utils.visualize.Graph, and opens the resulting .gv file.
    """
    args = make_args()
    config = configparser.ConfigParser()
    utils.load_config(config, args.config)
    for cmd in args.modify:
        utils.modify_config(config, cmd)
    with open(os.path.expanduser(os.path.expandvars(args.logging)), 'r') as f:
        logging.config.dictConfig(yaml.load(f))
    model_dir = utils.get_model_dir(config)
    category = utils.get_category(config)
    anchors = torch.from_numpy(utils.get_anchors(config)).contiguous()
    try:
        path, step, epoch = utils.train.load_model(model_dir)
        state_dict = torch.load(path,
                                map_location=lambda storage, loc: storage)
    except (FileNotFoundError, ValueError):
        # Graph rendering works on a freshly built model too.
        logging.warning('model cannot be loaded')
        state_dict = None
    dnn = utils.parse_attr(config.get('model', 'dnn'))(model.ConfigChannels(
        config, state_dict), anchors, len(category))
    logging.info(
        humanize.naturalsize(
            sum(var.cpu().numpy().nbytes
                for var in dnn.state_dict().values())))
    if state_dict is not None:
        dnn.load_state_dict(state_dict)
    height, width = tuple(map(int, config.get('image', 'size').split()))
    image = torch.autograd.Variable(
        torch.randn(args.batch_size, 3, height, width))
    output = dnn(image)
    state_dict = dnn.state_dict()
    graph = utils.visualize.Graph(config, state_dict)
    graph(output.grad_fn)
    # Warn about parameters that never appeared in the traversed graph.
    diff = [key for key in state_dict if key not in graph.drawn]
    if diff:
        logging.warning('variables not shown: ' + str(diff))
    path = graph.dot.view(
        os.path.basename(model_dir) + '.gv', os.path.dirname(model_dir))
    logging.info(path)
def main():
    """Prune channels of a named tensor via utils.walk and save the result.

    Ranks channels of state_dict[args.name] with utils.dense, drops the
    args.remove fraction with the smallest values, and propagates the
    pruning through the autograd graph with utils.walk.Closure/prune.
    """
    args = make_args()
    config = configparser.ConfigParser()
    utils.load_config(config, args.config)
    for cmd in args.modify:
        utils.modify_config(config, cmd)
    with open(os.path.expanduser(os.path.expandvars(args.logging)), 'r') as f:
        logging.config.dictConfig(yaml.load(f))
    model_dir = utils.get_model_dir(config)
    category = utils.get_category(config)
    anchors = torch.from_numpy(utils.get_anchors(config)).contiguous()
    path, step, epoch = utils.train.load_model(model_dir)
    state_dict = torch.load(path, map_location=lambda storage, loc: storage)
    dnn = utils.parse_attr(config.get('model', 'dnn'))(model.ConfigChannels(
        config, state_dict), anchors, len(category))
    logging.info(
        humanize.naturalsize(
            sum(var.cpu().numpy().nbytes
                for var in dnn.state_dict().values())))
    dnn.load_state_dict(state_dict)
    height, width = tuple(map(int, config.get('image', 'size').split()))
    image = torch.autograd.Variable(
        torch.randn(args.batch_size, 3, height, width))
    # Forward pass on random data; grad_fn is walked by the closure below.
    output = dnn(image)
    state_dict = dnn.state_dict()
    closure = utils.walk.Closure(args.name, state_dict,
                                 type(dnn).scope, args.debug)
    closure(output.grad_fn)
    # Keep the channels ranked above the args.remove fraction.
    d = utils.dense(state_dict[args.name])
    channels = torch.LongTensor(np.argsort(d)[int(len(d) * args.remove):])
    utils.walk.prune(closure, channels)
    if args.debug:
        path = closure.dot.view(
            os.path.basename(model_dir) + '.gv', os.path.dirname(model_dir))
        logging.info(path)
    else:
        # `path` is still the loaded checkpoint path, so this overwrites
        # the original checkpoint with the pruned weights.
        torch.save(state_dict, path)
Example #14
0
def main():
    """Print the learning-rate schedule, one tab-separated line per epoch.

    Builds optimizer and scheduler exactly as training would and steps
    through args.epoch epochs without any actual training.
    """
    args = make_args()
    config = configparser.ConfigParser()
    utils.load_config(config, args.config)
    for cmd in args.modify:
        utils.modify_config(config, cmd)
    with open(os.path.expanduser(os.path.expandvars(args.logging)), 'r') as f:
        logging.config.dictConfig(yaml.load(f))
    category = utils.get_category(config)
    anchors = torch.from_numpy(utils.get_anchors(config)).contiguous()
    dnn = utils.parse_attr(config.get('model',
                                      'dnn'))(model.ConfigChannels(config),
                                              anchors, len(category))
    inference = model.Inference(config, dnn, anchors)
    inference.train()
    # NOTE(review): eval() on config values executes arbitrary code from
    # the config file — acceptable only because the config is local and
    # trusted; never feed it untrusted input.
    optimizer = eval(config.get('train',
                                'optimizer'))(filter(lambda p: p.requires_grad,
                                                     inference.parameters()),
                                              args.learning_rate)
    scheduler = eval(config.get('train', 'scheduler'))(optimizer)
    for epoch in range(args.epoch):
        scheduler.step(epoch)
        lr = scheduler.get_lr()
        print('\t'.join(map(str, [epoch] + lr)))
def main():
    """Export the detection dnn to ONNX as <model_dir>.onnx.

    Loads the latest checkpoint and exports the bare dnn (not the
    model.Inference wrapper — see the trailing comment) on a random input.
    """
    args = make_args()
    config = configparser.ConfigParser()
    utils.load_config(config, args.config)
    for cmd in args.modify:
        utils.modify_config(config, cmd)
    with open(os.path.expanduser(os.path.expandvars(args.logging)), 'r') as f:
        logging.config.dictConfig(yaml.load(f))
    height, width = tuple(map(int, config.get('image', 'size').split()))
    cache_dir = utils.get_cache_dir(config)
    model_dir = utils.get_model_dir(config)
    category = utils.get_category(
        config, cache_dir if os.path.exists(cache_dir) else None)
    anchors = utils.get_anchors(config)
    anchors = torch.from_numpy(anchors).contiguous()
    path, step, epoch = utils.train.load_model(model_dir)
    state_dict = torch.load(path, map_location=lambda storage, loc: storage)
    dnn = utils.parse_attr(config.get('model', 'dnn'))(model.ConfigChannels(
        config, state_dict), anchors, len(category))
    inference = model.Inference(config, dnn, anchors)
    inference.eval()
    logging.info(
        humanize.naturalsize(
            sum(var.cpu().numpy().nbytes
                for var in inference.state_dict().values())))
    dnn.load_state_dict(state_dict)
    # volatile=True marks the pass as inference-only (pre-0.4 PyTorch API).
    image = torch.autograd.Variable(torch.randn(args.batch_size, 3, height,
                                                width),
                                    volatile=True)
    path = model_dir + '.onnx'
    logging.info('save ' + path)
    torch.onnx.export(dnn,
                      image,
                      path,
                      export_params=True,
                      verbose=args.verbose)  # PyTorch's bug
Example #16
0
def main():
    """Interactively visualize augmented training samples (parts/limbs).

    Builds the dataset with the configured augmentation pipeline, collates
    batches the same way training would (target feature-map size probed by
    a dry-run forward pass), then shows each sample's mask, keypoints,
    bounding boxes and the parts/limbs label maps in Visualizer dialogs.
    """
    args = make_args()
    config = configparser.ConfigParser()
    utils.load_config(config, args.config)
    for cmd in args.modify:
        utils.modify_config(config, cmd)
    # NOTE(review): yaml.load without a Loader is deprecated; safe_load
    # would be preferable for the logging config.
    with open(os.path.expanduser(os.path.expandvars(args.logging)), 'r') as f:
        logging.config.dictConfig(yaml.load(f))
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    cache_dir = utils.get_cache_dir(config)
    _, num_parts = utils.get_dataset_mappers(config)
    limbs_index = utils.get_limbs_index(config)
    dnn = utils.parse_attr(config.get('model', 'dnn'))(
        model.ConfigChannels(config)).to(device)
    logging.info(
        humanize.naturalsize(
            sum(var.cpu().numpy().nbytes
                for var in dnn.state_dict().values())))
    size = tuple(map(int, config.get('image', 'size').split()))
    draw_points = utils.visualize.DrawPoints(limbs_index,
                                             colors=config.get(
                                                 'draw_points',
                                                 'colors').split())
    # Thin variant for the non-highlighted persons.
    _draw_points = utils.visualize.DrawPoints(limbs_index, thickness=1)
    draw_bbox = utils.visualize.DrawBBox()
    paths = [os.path.join(cache_dir, phase + '.pkl') for phase in args.phase]
    dataset = utils.data.Dataset(
        config,
        utils.data.load_pickles(paths),
        transform=transform.augmentation.get_transform(
            config,
            config.get('transform', 'augmentation').split()),
        shuffle=config.getboolean('data', 'shuffle'),
    )
    logging.info('num_examples=%d' % len(dataset))
    try:
        workers = config.getint('data', 'workers')
    except configparser.NoOptionError:
        workers = multiprocessing.cpu_count()
    # Probe the dnn once to learn the label feature-map size.
    collate_fn = utils.data.Collate(
        config,
        transform.parse_transform(config,
                                  config.get('transform', 'resize_train')),
        [size],
        [dnn(torch.randn(1, 3, *size).to(device)).size()[-2:]],
        maintain=config.getint('data', 'maintain'),
        transform_image=transform.get_transform(
            config,
            config.get('transform', 'image_train').split()),
    )
    loader = torch.utils.data.DataLoader(dataset,
                                         shuffle=True,
                                         num_workers=workers,
                                         collate_fn=collate_fn)
    for data in loader:
        # Unpack the batch; tensors become numpy arrays, other values pass through.
        path, size, image, mask, keypoints, yx_min, yx_max, parts, limbs, index = (
            t.numpy() if hasattr(t, 'numpy') else t for t in
            (data[key] for key in
             'path, size, image, mask, keypoints, yx_min, yx_max, parts, limbs, index'
             .split(', ')))
        for path, size, image, mask, keypoints, yx_min, yx_max, parts, limbs, index in zip(
                *[
                    path, size, image, mask, keypoints, yx_min, yx_max, parts,
                    limbs, index
                ]):
            logging.info(path + ': ' + 'x'.join(map(str, size)))
            image = utils.visualize.draw_mask(image, mask, 1)
            size = yx_max - yx_min
            # Keep only boxes with positive extent on every axis.
            target = np.logical_and(*[
                np.squeeze(a, -1) > 0
                for a in np.split(size, size.shape[-1], -1)
            ])
            keypoints, yx_min, yx_max = (a[target]
                                         for a in (keypoints, yx_min, yx_max))
            # Highlight the selected person (index); draw the rest thin.
            for i, points in enumerate(keypoints):
                if i == index:
                    image = draw_points(image, points)
                else:
                    image = _draw_points(image, points)
            # astype(int) instead of the np.int alias, which was deprecated
            # in NumPy 1.20 and removed in 1.24.
            image = draw_bbox(image, yx_min.astype(int), yx_max.astype(int))
            dialog = Visualizer('parts', image, parts)
            dialog.exec()
            dialog = Visualizer('limbs', image, limbs)
            dialog.exec()
Example #17
0
def main():
    """Port TensorFlow graph weights into the PyTorch pose model.

    Reads a frozen TF GraphDef (args.path) and a mapping file (args.mapper)
    from PyTorch state-dict keys to TF operation names, copies every tensor
    across (with a per-entry converter), saves the result via
    utils.train.Saver, and prints md5 checksums of the activations so the
    two implementations can be compared.
    """
    args = make_args()
    config = configparser.ConfigParser()
    utils.load_config(config, args.config)
    for cmd in args.modify:
        utils.modify_config(config, cmd)
    with open(os.path.expanduser(os.path.expandvars(args.logging)), 'r') as f:
        logging.config.dictConfig(yaml.load(f))
    torch.manual_seed(args.seed)
    # mapper: dst state-dict key -> (src TF op name or numeric literal, converter)
    mapper = load_mapper(os.path.expandvars(os.path.expanduser(args.mapper)))
    model_dir = utils.get_model_dir(config)
    _, num_parts = utils.get_dataset_mappers(config)
    limbs_index = utils.get_limbs_index(config)
    height, width = tuple(map(int, config.get('image', 'size').split()))
    tensor = torch.randn(args.batch_size, 3, height, width)
    # PyTorch
    try:
        path, step, epoch = utils.train.load_model(model_dir)
        state_dict = torch.load(path,
                                map_location=lambda storage, loc: storage)
    except (FileNotFoundError, ValueError):
        state_dict = {name: None for name in ('dnn', 'stages')}
    config_channels_dnn = model.ConfigChannels(config, state_dict['dnn'])
    dnn = utils.parse_attr(config.get('model', 'dnn'))(config_channels_dnn)
    config_channels_stages = model.ConfigChannels(config, state_dict['stages'],
                                                  config_channels_dnn.channels)
    channel_dict = model.channel_dict(num_parts, len(limbs_index))
    stages = nn.Sequential(*[
        utils.parse_attr(s)(config_channels_stages, channel_dict,
                            config_channels_dnn.channels, str(i))
        for i, s in enumerate(config.get('model', 'stages').split())
    ])
    inference = model.Inference(config, dnn, stages)
    inference.eval()
    state_dict = inference.state_dict()
    # TensorFlow
    with open(os.path.expanduser(os.path.expandvars(args.path)), 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
    # TF uses NHWC; the PyTorch tensor is NCHW, hence the transpose.
    image = ops.convert_to_tensor(np.transpose(tensor.cpu().numpy(),
                                               [0, 2, 3, 1]),
                                  name='image')
    tf.import_graph_def(graph_def, input_map={'image:0': image})
    saver = utils.train.Saver(model_dir, config.getint('save', 'keep'))
    # Run on CPU only so the checksums are reproducible across machines.
    with tf.Session(config=tf.ConfigProto(device_count={
            'CPU': 1,
            'GPU': 0
    },
                                          allow_soft_placement=True,
                                          log_device_placement=False)) as sess:
        try:
            for dst in state_dict:
                src, converter = mapper[dst]
                if src.isdigit():
                    # A numeric mapper entry means "fill with this constant".
                    state_dict[dst].fill_(float(src))
                else:
                    op = sess.graph.get_operation_by_name(src)
                    t = op.values()[0]
                    v = sess.run(t)
                    state_dict[dst] = torch.from_numpy(converter(v))
                val = state_dict[dst].numpy()
                print('\t'.join(
                    list(
                        map(str, (dst, src, val.shape, utils.abs_mean(val),
                                  hashlib.md5(val.tostring()).hexdigest())))))
            inference.load_state_dict(state_dict)
            if args.delete:
                logging.warning('delete model directory: ' + model_dir)
                shutil.rmtree(model_dir, ignore_errors=True)
            saver(
                dict(
                    dnn=inference.dnn.state_dict(),
                    stages=inference.stages.state_dict(),
                ), 0)
        finally:
            # Debug aids and checksum printout run even when the copy fails.
            if args.debug:
                for op in sess.graph.get_operations():
                    if op.values():
                        logging.info(op.values()[0])
                for name in args.debug:
                    t = sess.graph.get_tensor_by_name(name + ':0')
                    val = sess.run(t)
                    # Back to NCHW for comparison against PyTorch outputs.
                    val = np.transpose(val, [0, 3, 1, 2])
                    print('\t'.join(
                        map(str, [
                            name,
                            'x'.join(map(str, val.shape)),
                            utils.abs_mean(val),
                            hashlib.md5(val.tostring()).hexdigest(),
                        ])))
            _tensor = torch.autograd.Variable(tensor, volatile=True)
            val = dnn(_tensor).data.numpy()
            print('\t'.join(
                map(str, [
                    'x'.join(map(str, val.shape)),
                    utils.abs_mean(val),
                    hashlib.md5(val.tostring()).hexdigest(),
                ])))
            for stage, output in enumerate(inference(_tensor)):
                for name, feature in output.items():
                    val = feature.data.numpy()
                    print('\t'.join(
                        map(str, [
                            'stage%d/%s' % (stage, name),
                            'x'.join(map(str, val.shape)),
                            utils.abs_mean(val),
                            hashlib.md5(val.tostring()).hexdigest(),
                        ])))
            forward = inference.forward
            # Instance-attribute lambda: `self` actually receives the input
            # tensor (the lambda is not bound), while `forward` is the
            # already-bound original method — flattens the last stage's
            # dict output into a list for the graph writer.
            inference.forward = lambda self, *x: list(
                forward(self, *x)[-1].values())
            with SummaryWriter(model_dir) as writer:
                writer.add_graph(inference, (_tensor, ))
Example #18
0
def main():
    """Convert a Caffe model (prototxt + caffemodel) into this project's
    PyTorch checkpoint format, then dump per-stage feature statistics.

    Flow:
      1. Load/patch the INI config and configure logging from a YAML file.
      2. Build the ``dnn`` and ``stages`` modules (optionally warm-started
         from an existing checkpoint in ``model_dir``).
      3. Load the Caffe net and, if ``--debug``, print parameter/feature
         checksums for cross-framework comparison.
      4. Copy each Caffe blob into the matching PyTorch parameter via the
         user-supplied name ``mapper`` and save with ``utils.train.Saver``.
      5. In ``finally``, run one forward pass and print per-stage feature
         checksums so the conversion can be verified even on failure.
    """
    args = make_args()
    config = configparser.ConfigParser()
    utils.load_config(config, args.config)
    for cmd in args.modify:
        utils.modify_config(config, cmd)
    with open(os.path.expanduser(os.path.expandvars(args.logging)), 'r') as f:
        # safe_load: a logging config must never construct arbitrary Python
        # objects; plain yaml.load without a Loader is also a TypeError on
        # PyYAML >= 6.0.
        logging.config.dictConfig(yaml.safe_load(f))
    torch.manual_seed(args.seed)
    mapper = load_mapper(os.path.expandvars(os.path.expanduser(args.mapper)))
    model_dir = utils.get_model_dir(config)
    _, num_parts = utils.get_dataset_mappers(config)
    limbs_index = utils.get_limbs_index(config)
    height, width = tuple(map(int, config.get('image', 'size').split()))
    # Random input used both to probe the Caffe net and for the final
    # PyTorch forward pass (seeded above for reproducibility).
    tensor = torch.randn(args.batch_size, 3, height, width)
    # PyTorch: build modules, warm-starting from a checkpoint if one exists.
    try:
        path, step, epoch = utils.train.load_model(model_dir)
        state_dict = torch.load(path,
                                map_location=lambda storage, loc: storage)
    except (FileNotFoundError, ValueError):
        # No usable checkpoint: modules are built from the config alone.
        state_dict = {name: None for name in ('dnn', 'stages')}
    config_channels_dnn = model.ConfigChannels(config, state_dict['dnn'])
    dnn = utils.parse_attr(config.get('model', 'dnn'))(config_channels_dnn)
    config_channels_stages = model.ConfigChannels(config, state_dict['stages'],
                                                  config_channels_dnn.channels)
    channel_dict = model.channel_dict(num_parts, len(limbs_index))
    stages = nn.Sequential(*[
        utils.parse_attr(s)(config_channels_stages, channel_dict,
                            config_channels_dnn.channels, str(i))
        for i, s in enumerate(config.get('model', 'stages').split())
    ])
    inference = model.Inference(config, dnn, stages)
    inference.eval()
    # Flat parameter dict to be overwritten blob-by-blob from Caffe below.
    state_dict = inference.state_dict()
    # Caffe
    net = caffe.Net(os.path.expanduser(os.path.expandvars(args.prototxt)),
                    os.path.expanduser(os.path.expandvars(args.caffemodel)),
                    caffe.TEST)
    if args.debug:
        logging.info('Caffe variables')
        for name, blobs in net.params.items():
            for i, blob in enumerate(blobs):
                val = blob.data
                print('\t'.join(
                    map(str, [
                        '%s/%d' % (name, i),
                        'x'.join(map(str, val.shape)),
                        utils.abs_mean(val),
                        hashlib.md5(val.tostring()).hexdigest(),
                    ])))
        logging.info('Caffe features')
        input = net.blobs[args.input]
        input.reshape(*tensor.size())
        input.data[...] = tensor.numpy()
        net.forward()
        for name, blob in net.blobs.items():
            val = blob.data
            print('\t'.join(
                map(str, [
                    name,
                    'x'.join(map(str, val.shape)),
                    utils.abs_mean(val),
                    hashlib.md5(val.tostring()).hexdigest(),
                ])))
    # convert: copy every mapped Caffe blob into the PyTorch state dict.
    saver = utils.train.Saver(model_dir, config.getint('save', 'keep'))
    try:
        for dst in state_dict:
            # mapper: PyTorch parameter name -> (Caffe layer name, transform)
            src, transform = mapper[dst]
            blobs = [b.data for b in net.params[src]]
            blob = transform(blobs)
            if isinstance(blob, np.ndarray):
                state_dict[dst] = torch.from_numpy(blob)
            else:
                # Scalar transforms broadcast-fill the existing tensor.
                state_dict[dst].fill_(blob)
            val = state_dict[dst].numpy()
            logging.info('\t'.join(
                list(
                    map(str, (dst, src, val.shape, utils.abs_mean(val),
                              hashlib.md5(val.tostring()).hexdigest())))))
        inference.load_state_dict(state_dict)
        if args.delete:
            logging.warning('delete model directory: ' + model_dir)
            shutil.rmtree(model_dir, ignore_errors=True)
        # Save the converted weights as checkpoint step 0.
        saver(
            dict(
                dnn=inference.dnn.state_dict(),
                stages=inference.stages.state_dict(),
            ), 0)
    finally:
        # Always dump per-stage checksums so a partial conversion can still
        # be compared against the Caffe feature dump above.
        for stage, output in enumerate(
                inference(torch.autograd.Variable(tensor, volatile=True))):
            for name, feature in output.items():
                val = feature.data.numpy()
                print('\t'.join(
                    map(str, [
                        'stage%d/%s' % (stage, name),
                        'x'.join(map(str, val.shape)),
                        utils.abs_mean(val),
                        hashlib.md5(val.tostring()).hexdigest(),
                    ])))
Exemple #19
0
def main():
    """Visualize augmented training batches.

    Loads the cached dataset(s), applies the configured augmentation and
    collation pipeline, and for each batch displays a ``rows x cols`` grid
    of images with masks, keypoints (highlighted for the indexed person)
    and bounding boxes drawn on them.
    """
    args = make_args()
    config = configparser.ConfigParser()
    utils.load_config(config, args.config)
    for cmd in args.modify:
        utils.modify_config(config, cmd)
    with open(os.path.expanduser(os.path.expandvars(args.logging)), 'r') as f:
        # safe_load: a logging config must never construct arbitrary Python
        # objects; plain yaml.load without a Loader is also a TypeError on
        # PyYAML >= 6.0.
        logging.config.dictConfig(yaml.safe_load(f))
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    cache_dir = utils.get_cache_dir(config)
    _, num_parts = utils.get_dataset_mappers(config)
    limbs_index = utils.get_limbs_index(config)
    # The dnn is only used below to probe output feature-map sizes.
    dnn = utils.parse_attr(config.get('model', 'dnn'))(
        model.ConfigChannels(config)).to(device)
    draw_points = utils.visualize.DrawPoints(limbs_index,
                                             colors=config.get(
                                                 'draw_points',
                                                 'colors').split())
    # Thin variant for the non-selected people in the image.
    _draw_points = utils.visualize.DrawPoints(limbs_index, thickness=1)
    draw_bbox = utils.visualize.DrawBBox()
    batch_size = args.rows * args.cols
    paths = [os.path.join(cache_dir, phase + '.pkl') for phase in args.phase]
    dataset = utils.data.Dataset(
        config,
        utils.data.load_pickles(paths),
        transform=transform.augmentation.get_transform(
            config,
            config.get('transform', 'augmentation').split()),
        shuffle=config.getboolean('data', 'shuffle'),
    )
    logging.info('num_examples=%d' % len(dataset))
    try:
        workers = config.getint('data', 'workers')
    except configparser.NoOptionError:
        # Default: one worker per CPU core.
        workers = multiprocessing.cpu_count()
    sizes = utils.train.load_sizes(config)
    # Probe the dnn once per input size to get its output (H, W).
    feature_sizes = [
        dnn(torch.randn(1, 3, *size).to(device)).size()[-2:] for size in sizes
    ]
    collate_fn = utils.data.Collate(
        config,
        transform.parse_transform(config,
                                  config.get('transform', 'resize_train')),
        sizes,
        feature_sizes,
        maintain=config.getint('data', 'maintain'),
        transform_image=transform.get_transform(
            config,
            config.get('transform', 'image_train').split()),
    )
    loader = torch.utils.data.DataLoader(dataset,
                                         batch_size=batch_size,
                                         shuffle=True,
                                         num_workers=workers,
                                         collate_fn=collate_fn)
    for data in loader:
        # Pull the batch fields, converting tensors to numpy where possible.
        path, size, image, mask, keypoints, yx_min, yx_max, index = (
            t.numpy() if hasattr(t, 'numpy') else t for t in (
                data[key] for key in
                'path, size, image, mask, keypoints, yx_min, yx_max, index'.
                split(', ')))
        fig, axes = plt.subplots(args.rows, args.cols)
        # plt.subplots returns a bare Axes (not an array) when rows*cols == 1.
        axes = axes.flat if batch_size > 1 else [axes]
        for ax, path, size, image, mask, keypoints, yx_min, yx_max, index in zip(
                axes, path, size, image, mask, keypoints, yx_min, yx_max,
                index):
            logging.info(path + ': ' + 'x'.join(map(str, size)))
            image = utils.visualize.draw_mask(image, mask, 1)
            size = yx_max - yx_min
            # Keep only boxes with strictly positive height and width.
            target = np.logical_and(*[
                np.squeeze(a, -1) > 0
                for a in np.split(size, size.shape[-1], -1)
            ])
            keypoints, yx_min, yx_max = (a[target]
                                         for a in (keypoints, yx_min, yx_max))
            for i, points in enumerate(keypoints):
                # Highlight the person selected by `index`; draw others thin.
                if i == index:
                    image = draw_points(image, points)
                else:
                    image = _draw_points(image, points)
            # np.int was removed in NumPy 1.24; the builtin int is the exact
            # equivalent dtype alias.
            image = draw_bbox(image, yx_min.astype(int), yx_max.astype(int))
            ax.imshow(image)
            ax.set_xticks([])
            ax.set_yticks([])
        fig.tight_layout()
        # Maximize the window before blocking on show().
        mng = plt.get_current_fig_manager()
        mng.resize(*mng.window.maxsize())
        plt.show()