def _build_detection_graph(output_collection_name, graph_hook_fn):
    """Construct the SSH face-detection graph for inference.

    Args:
        output_collection_name: Name of the graph collection that the
            output tensors are registered under.
        graph_hook_fn: Optional zero-argument callable invoked after the
            graph is assembled (e.g. to attach extra ops); skipped when
            falsy.

    Returns:
        A ``(outputs, placeholder_tensor)`` tuple: the named output
        tensors and the input image placeholder.
    """
    detection_net = mobilenetv1_ssh()

    # Anchor scales per SSH detection module (M1/M2/M3 handle
    # progressively larger faces); two classes: face vs. background.
    module_anchor_scales = {
        "M1": [1, 2],
        "M2": [4, 8],
        "M3": [16, 32],
    }
    detection_net.create_architecture("TEST",
                                      2,
                                      tag='default',
                                      anchor_scales=module_anchor_scales)

    image_placeholder = detection_net._image
    raw_predictions = detection_net._predictions
    named_outputs = _add_output_tensor_nodes(detection_net,
                                             raw_predictions,
                                             output_collection_name)

    if graph_hook_fn:
        graph_hook_fn()

    return named_outputs, image_placeholder
# Example #2
    # NOTE(review): this span is the interior of a function whose `def`
    # line is outside this chunk — `args`, `imdb`, `tag`, and `cfg` are
    # presumably defined by the enclosing scope; confirm against the
    # full file.

    # Allow TF to fall back to CPU for ops without a GPU kernel, and
    # grow GPU memory on demand instead of grabbing it all up front.
    tfconfig = tf.ConfigProto(allow_soft_placement=True)
    tfconfig.gpu_options.allow_growth = True

    # init session
    sess = tf.Session(config=tfconfig)
    # Select the backbone network from the command-line choice.
    if args.backbone == 'vgg16':
        net = vgg16_ssh()
    elif args.backbone == 'res50':
        net = resnetv1_ssh(num_layers=50)
    elif args.backbone == 'res101':
        net = resnetv1_ssh(num_layers=101)
    elif args.backbone == 'res152':
        net = resnetv1_ssh(num_layers=152)
    elif args.backbone == 'mobile':
        net = mobilenetv1_ssh()
    elif args.backbone == 'mobile_v2':
        net = mobilenetv2_ssh()
    else:
        # Any other backbone string is unsupported.
        raise NotImplementedError

    # Build the inference ("TEST") graph with the dataset's class count
    # and the anchor configuration taken from the global config.
    net.create_architecture("TEST", imdb.num_classes, tag=tag,
                            anchor_scales=cfg.ANCHOR_SCALES,
                            anchor_ratios=cfg.ANCHOR_RATIOS)

    # Restore trained weights from the checkpoint path, if one was given.
    if args.model:
        print(('Loading model check point from {:s}').format(args.model))
        saver = tf.train.Saver()
        saver.restore(sess, args.model)
        print('Loaded.')