Example #1
def get_image_with_bbox(attrs):
    images = parse_file(config.get('deepfashion', 'attributes_file'),
                        val_type=int, key_item_id=None, validate_fields=False)
    attrs = parse_attr(attrs, _PREDEFINED_ATTR)
    filtered = filter_items(images, attrs)

    image_files = append_path(config.get('deepfashion', 'image_dir'), filtered, key='image_name')
    boxes = bbox(filtered)

    return image_files, boxes
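
Example #1 pairs each returned image path with its bounding box. A minimal usage sketch, assuming the surrounding module exposes get_image_with_bbox and a loaded config; the attribute name 'dress' and the (x1, y1, x2, y2) box layout are assumptions about the DeepFashion annotation format, not confirmed by the source:

import cv2

# Hypothetical call: fetch paths and boxes for one attribute, draw the first box.
image_files, boxes = get_image_with_bbox('dress')
x1, y1, x2, y2 = map(int, boxes[0])  # assumed (x1, y1, x2, y2) pixel layout
image = cv2.imread(image_files[0])
cv2.rectangle(image, (x1, y1), (x2, y2), (0, 255, 0), 2)
cv2.imwrite('preview.jpg', image)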
Example #2
def parse_transform(config, method):
    if isinstance(method, str):
        attr = utils.parse_attr(method)
        sig = inspect.signature(attr)
        if len(sig.parameters) == 1:
            return attr(config)
        else:
            return attr()
    else:
        return method
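
parse_transform dispatches on the callable's arity: if inspect.signature reports exactly one parameter, the callable receives config; otherwise it is invoked with no arguments. A self-contained sketch of that rule, with two hypothetical transform classes:

import inspect

class Brightness:
    def __init__(self, config):  # one parameter: parse_transform would pass config
        self.delta = float(config.get('transform', 'brightness'))

class Identity:
    def __init__(self):  # zero parameters: instantiated bare
        pass

for cls in (Brightness, Identity):
    takes_config = len(inspect.signature(cls).parameters) == 1
    print(cls.__name__, 'receives config' if takes_config else 'called with no arguments')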
Example #3
def main():
    args = make_args()
    config = configparser.ConfigParser()
    utils.load_config(config, args.config)
    for cmd in args.modify:
        utils.modify_config(config, cmd)
    with open(os.path.expanduser(os.path.expandvars(args.logging)), 'r') as f:
        logging.config.dictConfig(yaml.safe_load(f))
    cache_dir = utils.get_cache_dir(config)
    model_dir = utils.get_model_dir(config)
    category = utils.get_category(
        config, cache_dir if os.path.exists(cache_dir) else None)
    anchors = utils.get_anchors(config)
    anchors = torch.from_numpy(anchors).contiguous()
    path, step, epoch = utils.train.load_model(model_dir)
    state_dict = torch.load(path, map_location=lambda storage, loc: storage)
    dnn = utils.parse_attr(config.get('model', 'dnn'))(model.ConfigChannels(
        config, state_dict), anchors, len(category))
    dnn.load_state_dict(state_dict)
    height, width = tuple(map(int, config.get('image', 'size').split()))
    resize = transform.parse_transform(config,
                                       config.get('transform', 'resize_test'))
    transform_image = transform.get_transform(
        config,
        config.get('transform', 'image_test').split())
    transform_tensor = transform.get_transform(
        config,
        config.get('transform', 'tensor').split())
    # load image
    image_bgr = cv2.imread('image.jpg')
    image_resized = resize(image_bgr, height, width)
    image = transform_image(image_resized)
    tensor = transform_tensor(image).unsqueeze(0)
    # Checksum
    for key, var in dnn.state_dict().items():
        a = var.cpu().numpy()
        print('\t'.join(
            map(str, [
                key, a.shape,
                utils.abs_mean(a),
                hashlib.md5(a.tobytes()).hexdigest()
            ])))
    output = dnn(torch.autograd.Variable(tensor, volatile=True)).data
    for key, a in [
        ('image_bgr', image_bgr),
        ('image_resized', image_resized),
        ('tensor', tensor.cpu().numpy()),
        ('output', output.cpu().numpy()),
    ]:
        print('\t'.join(
            map(str, [
                key, a.shape,
                utils.abs_mean(a),
                hashlib.md5(a.tobytes()).hexdigest()
            ])))
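
The checksum loop above fingerprints every tensor with three values — shape, mean absolute value, and the MD5 digest of its raw bytes — so two runs or two frameworks can be diffed line by line. A self-contained sketch of the same pattern, with abs_mean standing in for utils.abs_mean:

import hashlib

import numpy as np

def abs_mean(a):
    # stand-in for utils.abs_mean
    return float(np.abs(a).mean())

a = np.arange(12, dtype=np.float32).reshape(3, 4)
print('\t'.join(map(str, [a.shape, abs_mean(a),
                          hashlib.md5(a.tobytes()).hexdigest()])))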
Example #4
def main():
    args = make_args()
    config = configparser.ConfigParser()
    utils.load_config(config, args.config)
    for cmd in args.modify:
        utils.modify_config(config, cmd)
    with open(os.path.expanduser(os.path.expandvars(args.logging)), 'r') as f:
        logging.config.dictConfig(yaml.safe_load(f))
    model_dir = utils.get_model_dir(config)
    category = utils.get_category(config)
    anchors = torch.from_numpy(utils.get_anchors(config)).contiguous()
    path, step, epoch = utils.train.load_model(model_dir)
    state_dict = torch.load(path, map_location=lambda storage, loc: storage)
    _model = utils.parse_attr(config.get('model', 'dnn'))
    dnn = _model(model.ConfigChannels(config, state_dict), anchors,
                 len(category))
    logging.info(
        humanize.naturalsize(
            sum(var.cpu().numpy().nbytes
                for var in dnn.state_dict().values())))
    dnn.load_state_dict(state_dict)
    height, width = tuple(map(int, config.get('image', 'size').split()))
    image = torch.autograd.Variable(
        torch.randn(args.batch_size, 3, height, width))
    output = dnn(image)
    state_dict = dnn.state_dict()
    d = utils.dense(state_dict[args.name])
    keep = torch.LongTensor(np.argsort(d)[:int(len(d) * args.keep)])
    modifier = utils.channel.Modifier(
        args.name,
        state_dict,
        dnn,
        lambda name, var: var[keep],
        lambda name, var, mapper: var[mapper(keep, len(d))],
        debug=args.debug,
    )
    modifier(output.grad_fn)
    if args.debug:
        path = modifier.dot.view(
            '%s.%s.gv' % (os.path.basename(model_dir),
                          os.path.basename(os.path.splitext(__file__)[0])),
            os.path.dirname(model_dir))
        logging.info(path)
    assert len(keep) == len(state_dict[args.name])
    dnn = _model(model.ConfigChannels(config, state_dict), anchors,
                 len(category))
    dnn.load_state_dict(state_dict)
    dnn(image)
    if not args.debug:
        torch.save(state_dict, path)
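
The keep set above comes from ranking channels with utils.dense and slicing the sorted order by args.keep; the resulting index tensor then prunes the named variable and, through the mapper, every dependent one. A minimal sketch of the ranking-and-slicing step, assuming mean absolute weight per output channel as the density score (utils.dense's actual metric is not shown in this listing):

import numpy as np
import torch

weight = torch.randn(16, 8, 3, 3)             # conv weight: out x in x kh x kw
d = weight.abs().mean(dim=(1, 2, 3)).numpy()  # assumed per-channel density score
keep = torch.LongTensor(np.argsort(d)[:int(len(d) * 0.5)])  # same slicing as above
print(weight[keep].shape)                     # torch.Size([8, 8, 3, 3])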
Example #5
def main():
    args = make_args()
    config = configparser.ConfigParser()
    utils.load_config(config, args.config)
    for cmd in args.modify:
        utils.modify_config(config, cmd)
    with open(os.path.expanduser(os.path.expandvars(args.logging)), 'r') as f:
        logging.config.dictConfig(yaml.safe_load(f))
    model_dir = utils.get_model_dir(config)
    category = utils.get_category(config)
    anchors = torch.from_numpy(utils.get_anchors(config)).contiguous()
    try:
        path, step, epoch = utils.train.load_model(model_dir)
        state_dict = torch.load(path,
                                map_location=lambda storage, loc: storage)
    except (FileNotFoundError, ValueError):
        logging.warning('model cannot be loaded')
        state_dict = None
    dnn = utils.parse_attr(config.get('model', 'dnn'))(model.ConfigChannels(
        config, state_dict), anchors, len(category))
    logging.info(
        humanize.naturalsize(
            sum(var.cpu().numpy().nbytes
                for var in dnn.state_dict().values())))
    if state_dict is not None:
        dnn.load_state_dict(state_dict)
    height, width = tuple(map(int, config.get('image', 'size').split()))
    image = torch.autograd.Variable(
        torch.randn(args.batch_size, 3, height, width))
    output = dnn(image)
    state_dict = dnn.state_dict()
    graph = utils.visualize.Graph(config, state_dict)
    graph(output.grad_fn)
    diff = [key for key in state_dict if key not in graph.drawn]
    if diff:
        logging.warning('variables not shown: ' + str(diff))
    path = graph.dot.view(
        os.path.basename(model_dir) + '.gv', os.path.dirname(model_dir))
    logging.info(path)
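
utils.visualize.Graph is driven entirely by the autograd graph: invoked with output.grad_fn, it follows node links to decide what to draw, which is why variables it never reaches end up in the 'not shown' warning. A self-contained sketch of such a traversal (a stand-in, not the library's implementation), using the current tensor API:

import torch

def walk(fn, depth=0, seen=None):
    # depth-first walk over grad_fn.next_functions
    seen = set() if seen is None else seen
    if fn is None or fn in seen:
        return
    seen.add(fn)
    print('  ' * depth + type(fn).__name__)
    for child, _ in fn.next_functions:
        walk(child, depth + 1, seen)

x = torch.randn(1, 3, requires_grad=True)
walk((x * 2).sum().grad_fn)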
Example #6
def main():
    args = make_args()
    config = configparser.ConfigParser()
    utils.load_config(config, args.config)
    for cmd in args.modify:
        utils.modify_config(config, cmd)
    with open(os.path.expanduser(os.path.expandvars(args.logging)), 'r') as f:
        logging.config.dictConfig(yaml.safe_load(f))
    model_dir = utils.get_model_dir(config)
    category = utils.get_category(config)
    anchors = torch.from_numpy(utils.get_anchors(config)).contiguous()
    path, step, epoch = utils.train.load_model(model_dir)
    state_dict = torch.load(path, map_location=lambda storage, loc: storage)
    dnn = utils.parse_attr(config.get('model', 'dnn'))(model.ConfigChannels(
        config, state_dict), anchors, len(category))
    logging.info(
        humanize.naturalsize(
            sum(var.cpu().numpy().nbytes
                for var in dnn.state_dict().values())))
    dnn.load_state_dict(state_dict)
    height, width = tuple(map(int, config.get('image', 'size').split()))
    image = torch.autograd.Variable(
        torch.randn(args.batch_size, 3, height, width))
    output = dnn(image)
    state_dict = dnn.state_dict()
    closure = utils.walk.Closure(args.name, state_dict,
                                 type(dnn).scope, args.debug)
    closure(output.grad_fn)
    d = utils.dense(state_dict[args.name])
    channels = torch.LongTensor(np.argsort(d)[int(len(d) * args.remove):])
    utils.walk.prune(closure, channels)
    if args.debug:
        path = closure.dot.view(
            os.path.basename(model_dir) + '.gv', os.path.dirname(model_dir))
        logging.info(path)
    else:
        torch.save(state_dict, path)
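
This variant phrases the cut as a remove fraction: argsort(d)[int(len(d) * args.remove):] is the complement of the keep-style slice in the earlier pruning script, so the two select from opposite ends of the same ranking (which end survives depends on utils.walk.prune's convention). A tiny sketch of that complement:

import numpy as np

d = np.random.rand(10)
order = np.argsort(d)
k = int(len(d) * 0.3)
low = order[:k]    # slice used with args.keep in the earlier script
high = order[k:]   # slice used with args.remove above
assert sorted(np.concatenate([low, high])) == list(range(len(d)))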
Example #7
def main():
    args = make_args()
    config = configparser.ConfigParser()
    utils.load_config(config, args.config)
    for cmd in args.modify:
        utils.modify_config(config, cmd)
    with open(os.path.expanduser(os.path.expandvars(args.logging)), 'r') as f:
        logging.config.dictConfig(yaml.safe_load(f))
    torch.manual_seed(args.seed)
    mapper = load_mapper(os.path.expandvars(os.path.expanduser(args.mapper)))
    model_dir = utils.get_model_dir(config)
    _, num_parts = utils.get_dataset_mappers(config)
    limbs_index = utils.get_limbs_index(config)
    height, width = tuple(map(int, config.get('image', 'size').split()))
    tensor = torch.randn(args.batch_size, 3, height, width)
    # PyTorch
    try:
        path, step, epoch = utils.train.load_model(model_dir)
        state_dict = torch.load(path,
                                map_location=lambda storage, loc: storage)
    except (FileNotFoundError, ValueError):
        state_dict = {name: None for name in ('dnn', 'stages')}
    config_channels_dnn = model.ConfigChannels(config, state_dict['dnn'])
    dnn = utils.parse_attr(config.get('model', 'dnn'))(config_channels_dnn)
    config_channels_stages = model.ConfigChannels(config, state_dict['stages'],
                                                  config_channels_dnn.channels)
    channel_dict = model.channel_dict(num_parts, len(limbs_index))
    stages = nn.Sequential(*[
        utils.parse_attr(s)(config_channels_stages, channel_dict,
                            config_channels_dnn.channels, str(i))
        for i, s in enumerate(config.get('model', 'stages').split())
    ])
    inference = model.Inference(config, dnn, stages)
    inference.eval()
    state_dict = inference.state_dict()
    # TensorFlow
    with open(os.path.expanduser(os.path.expandvars(args.path)), 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
    image = ops.convert_to_tensor(np.transpose(tensor.cpu().numpy(),
                                               [0, 2, 3, 1]),
                                  name='image')
    tf.import_graph_def(graph_def, input_map={'image:0': image})
    saver = utils.train.Saver(model_dir, config.getint('save', 'keep'))
    with tf.Session(config=tf.ConfigProto(device_count={
            'CPU': 1,
            'GPU': 0
    },
                                          allow_soft_placement=True,
                                          log_device_placement=False)) as sess:
        try:
            for dst in state_dict:
                src, converter = mapper[dst]
                if src.isdigit():
                    state_dict[dst].fill_(float(src))
                else:
                    op = sess.graph.get_operation_by_name(src)
                    t = op.values()[0]
                    v = sess.run(t)
                    state_dict[dst] = torch.from_numpy(converter(v))
                val = state_dict[dst].numpy()
                print('\t'.join(
                    list(
                        map(str, (dst, src, val.shape, utils.abs_mean(val),
                                  hashlib.md5(val.tobytes()).hexdigest())))))
            inference.load_state_dict(state_dict)
            if args.delete:
                logging.warning('delete model directory: ' + model_dir)
                shutil.rmtree(model_dir, ignore_errors=True)
            saver(
                dict(
                    dnn=inference.dnn.state_dict(),
                    stages=inference.stages.state_dict(),
                ), 0)
        finally:
            if args.debug:
                for op in sess.graph.get_operations():
                    if op.values():
                        logging.info(op.values()[0])
                for name in args.debug:
                    t = sess.graph.get_tensor_by_name(name + ':0')
                    val = sess.run(t)
                    val = np.transpose(val, [0, 3, 1, 2])
                    print('\t'.join(
                        map(str, [
                            name,
                            'x'.join(map(str, val.shape)),
                            utils.abs_mean(val),
                            hashlib.md5(val.tobytes()).hexdigest(),
                        ])))
            _tensor = torch.autograd.Variable(tensor, volatile=True)
            val = dnn(_tensor).data.numpy()
            print('\t'.join(
                map(str, [
                    'x'.join(map(str, val.shape)),
                    utils.abs_mean(val),
                    hashlib.md5(val.tobytes()).hexdigest(),
                ])))
            for stage, output in enumerate(inference(_tensor)):
                for name, feature in output.items():
                    val = feature.data.numpy()
                    print('\t'.join(
                        map(str, [
                            'stage%d/%s' % (stage, name),
                            'x'.join(map(str, val.shape)),
                            utils.abs_mean(val),
                            hashlib.md5(val.tobytes()).hexdigest(),
                        ])))
            # the instance attribute bypasses method binding, so when the
            # module is called the lambda's `self` receives the input tensor,
            # which is then forwarded to the original bound method; the last
            # stage's dict is flattened to a list so add_graph can trace it
            forward = inference.forward
            inference.forward = lambda self, *x: list(
                forward(self, *x)[-1].values())
            with SummaryWriter(model_dir) as writer:
                writer.add_graph(inference, (_tensor, ))
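
A recurring move in this conversion script is layout juggling: TensorFlow activations are NHWC while PyTorch uses NCHW, hence the np.transpose calls with [0, 2, 3, 1] going in and [0, 3, 1, 2] coming out. A self-contained round-trip sketch:

import numpy as np

nchw = np.random.randn(1, 3, 8, 8).astype(np.float32)  # PyTorch layout
nhwc = np.transpose(nchw, [0, 2, 3, 1])                # to TensorFlow layout
back = np.transpose(nhwc, [0, 3, 1, 2])                # back to PyTorch layout
assert np.array_equal(nchw, back)
print(nchw.shape, '->', nhwc.shape)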
Example #8
def get_image_files(attrs):
    images = parse_file(config.get('celeba', 'attributes_file'))
    attrs = parse_attr(attrs, _PREDEFINED_ATTR)
    filtered = filter_items(images, attrs)
    return append_path(config.get('celeba', 'image_dir'), filtered)
Example #9
def main():
    args = make_args()
    config = configparser.ConfigParser()
    utils.load_config(config, args.config)
    for cmd in args.modify:
        utils.modify_config(config, cmd)
    with open(os.path.expanduser(os.path.expandvars(args.logging)), 'r') as f:
        logging.config.dictConfig(yaml.safe_load(f))
    torch.manual_seed(args.seed)
    mapper = load_mapper(os.path.expandvars(os.path.expanduser(args.mapper)))
    model_dir = utils.get_model_dir(config)
    _, num_parts = utils.get_dataset_mappers(config)
    limbs_index = utils.get_limbs_index(config)
    height, width = tuple(map(int, config.get('image', 'size').split()))
    tensor = torch.randn(args.batch_size, 3, height, width)
    # PyTorch
    try:
        path, step, epoch = utils.train.load_model(model_dir)
        state_dict = torch.load(path,
                                map_location=lambda storage, loc: storage)
    except (FileNotFoundError, ValueError):
        state_dict = {name: None for name in ('dnn', 'stages')}
    config_channels_dnn = model.ConfigChannels(config, state_dict['dnn'])
    dnn = utils.parse_attr(config.get('model', 'dnn'))(config_channels_dnn)
    config_channels_stages = model.ConfigChannels(config, state_dict['stages'],
                                                  config_channels_dnn.channels)
    channel_dict = model.channel_dict(num_parts, len(limbs_index))
    stages = nn.Sequential(*[
        utils.parse_attr(s)(config_channels_stages, channel_dict,
                            config_channels_dnn.channels, str(i))
        for i, s in enumerate(config.get('model', 'stages').split())
    ])
    inference = model.Inference(config, dnn, stages)
    inference.eval()
    state_dict = inference.state_dict()
    # Caffe
    net = caffe.Net(os.path.expanduser(os.path.expandvars(args.prototxt)),
                    os.path.expanduser(os.path.expandvars(args.caffemodel)),
                    caffe.TEST)
    if args.debug:
        logging.info('Caffe variables')
        for name, blobs in net.params.items():
            for i, blob in enumerate(blobs):
                val = blob.data
                print('\t'.join(
                    map(str, [
                        '%s/%d' % (name, i),
                        'x'.join(map(str, val.shape)),
                        utils.abs_mean(val),
                        hashlib.md5(val.tobytes()).hexdigest(),
                    ])))
        logging.info('Caffe features')
        input = net.blobs[args.input]
        input.reshape(*tensor.size())
        input.data[...] = tensor.numpy()
        net.forward()
        for name, blob in net.blobs.items():
            val = blob.data
            print('\t'.join(
                map(str, [
                    name,
                    'x'.join(map(str, val.shape)),
                    utils.abs_mean(val),
                    hashlib.md5(val.tobytes()).hexdigest(),
                ])))
    # convert
    saver = utils.train.Saver(model_dir, config.getint('save', 'keep'))
    try:
        for dst in state_dict:
            src, transform = mapper[dst]
            blobs = [b.data for b in net.params[src]]
            blob = transform(blobs)
            if isinstance(blob, np.ndarray):
                state_dict[dst] = torch.from_numpy(blob)
            else:
                state_dict[dst].fill_(blob)
            val = state_dict[dst].numpy()
            logging.info('\t'.join(
                list(
                    map(str, (dst, src, val.shape, utils.abs_mean(val),
                              hashlib.md5(val.tobytes()).hexdigest())))))
        inference.load_state_dict(state_dict)
        if args.delete:
            logging.warning('delete model directory: ' + model_dir)
            shutil.rmtree(model_dir, ignore_errors=True)
        saver(
            dict(
                dnn=inference.dnn.state_dict(),
                stages=inference.stages.state_dict(),
            ), 0)
    finally:
        for stage, output in enumerate(
                inference(torch.autograd.Variable(tensor, volatile=True))):
            for name, feature in output.items():
                val = feature.data.numpy()
                print('\t'.join(
                    map(str, [
                        'stage%d/%s' % (stage, name),
                        'x'.join(map(str, val.shape)),
                        utils.abs_mean(val),
                        hashlib.md5(val.tobytes()).hexdigest(),
                    ])))
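
The core of the Caffe conversion is the mapper-driven loop: each destination key in the PyTorch state_dict maps to a (source blob name, transform) pair; ndarray results replace the tensor wholesale, scalar results fill it in place. A self-contained sketch of that loop with hypothetical layer names and shapes:

import numpy as np
import torch

state_dict = {'conv.weight': torch.empty(4, 3, 3, 3), 'conv.bias': torch.empty(4)}
blobs = {'conv1': [np.random.randn(4, 3, 3, 3).astype(np.float32)]}  # fake Caffe params
mapper = {
    'conv.weight': ('conv1', lambda bs: bs[0]),  # copy the blob
    'conv.bias': ('conv1', lambda bs: 0.0),      # constant fill
}
for dst, (src, transform) in mapper.items():
    blob = transform(blobs[src])
    if isinstance(blob, np.ndarray):
        state_dict[dst] = torch.from_numpy(blob)
    else:
        state_dict[dst].fill_(blob)
print({k: tuple(v.shape) for k, v in state_dict.items()})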