def initialFiltering():
    conn = sqlite3.connect('test.db')
    cursor = conn.cursor()
    data = cursor.execute('''SELECT * FROM DATATABLE''')
    data = data.fetchall()
    with open("init_net.pb", 'rb') as f:
        init_net = f.read()
    with open("predict_net.pb", 'rb') as f:
        predict_net = f.read()
    p = workspace.Predictor(init_net, predict_net)

    for row in data:
        tag = row[8]
        #Parse AMP string values back into an array
        ampBytes = row[0]
        ampFloat = parseAmp(ampBytes)

        #Convert values to log scale
        ampLog = numpy.log10(ampFloat)
        #Apply median filter
        ampMedian = medianAmp(ampLog)
        #Normalize between 0 and 1
        ampNormalized = normalizedAmp(ampMedian)
        #Select a narrower sample of 40 values
        ampNarrowSample = ampNormalized[30:70]
        #Cast to float32 and reshape to the 4-D layout the predictor expects
        ampFeatures = ampNarrowSample.astype(numpy.float32)
        ampFeatures = ampFeatures.reshape(1, 1, 1, 40)
        result = p.run([ampFeatures])
        event = int(numpy.argmax(result))  # plain int so sqlite3 can bind it
        cursor.execute('''UPDATE DATATABLE SET EVENT=? WHERE TAG=?''',
                       (event, tag))
        print("Updated Table successfully")
    conn.commit()
    cursor.close()
    conn.close()
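The helpers parseAmp, medianAmp and normalizedAmp are not shown in this example. A minimal sketch of what they might look like, assuming the AMP column stores packed float32 bytes and the comments above describe the intended transforms:

import numpy
from scipy.signal import medfilt

def parseAmp(ampBytes):
    # Assumption: the BLOB column holds a packed array of float32 values
    return numpy.frombuffer(ampBytes, dtype=numpy.float32)

def medianAmp(amp, kernel_size=5):
    # Assumed window size; any odd kernel works with scipy's median filter
    return medfilt(amp, kernel_size)

def normalizedAmp(amp):
    # Min-max normalization into [0, 1]
    return (amp - amp.min()) / (amp.max() - amp.min())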
Example #2
def load_model():
    with open("out/model_init.pb") as f:
        init_net = f.read()
    with open("model.pb") as f:
        predict_net = f.read()

    return workspace.Predictor(init_net, predict_net)
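A hypothetical call site for load_model; the (1, 3, 224, 224) input shape is an assumption about this particular model, and Predictor.run accepts a list of input arrays:

import numpy as np

p = load_model()
x = np.random.rand(1, 3, 224, 224).astype(np.float32)  # assumed input shape
outputs = p.run([x])
print(outputs[0].shape)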
Example #3
    def __init__(self, neuropod_path):
        """
        Load a Caffe2 neuropod

        :param  neuropod_path:      The path to a Caffe2 neuropod package
        """
        super(Caffe2NeuropodExecutor, self).__init__(neuropod_path)

        neuropod_data_path = os.path.join(neuropod_path, "0", "data")

        # Add the model to the neuropod
        init_path = os.path.join(neuropod_data_path, "init_net.pb")
        predict_path = os.path.join(neuropod_data_path, "predict_net.pb")

        workspace.ResetWorkspace()
        with open(init_path, "rb") as f:
            init_net = f.read()
        with open(predict_path, "rb") as f:
            predict_net = f.read()
        workspace.RunNetOnce(init_net)
        workspace.CreateNet(predict_net)

        self.model = workspace.Predictor(init_net, predict_net)
        with open(os.path.join(neuropod_path, "0", "config.json"),
                  "r") as config_file:
            model_config = json.load(config_file)

            # Get the node name mapping and store it
            self.node_name_mapping = model_config["node_name_mapping"]
Example #4
def generate_test_output_data(caffe2_init_net, caffe2_predict_net, inputs):
    p = c2_workspace.Predictor(caffe2_init_net, caffe2_predict_net)
    inputs_map = {name: value for name, value in inputs}

    output = p.run(inputs_map)
    c2_workspace.ResetWorkspace()
    return output
Example #5
def load_caffe2_model():
    """
    Loads the caffe2 model. The function will load the initial network and
    weights from the specified folder, initialize the network, and then
    return a caffe2 predictor.
    """
    MODEL_LOCATION = "model"
    INIT_NAME = "init_net.pb"
    PREDICT_NAME = "predict_net.pb" 
    init_path = os.path.join(MODEL_LOCATION, INIT_NAME)
    predict_path = os.path.join(MODEL_LOCATION, PREDICT_NAME)

    # Check that files exist
    if not os.path.exists(init_path):
        raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), init_path)

    if not os.path.exists(predict_path):
        raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), predict_path)
    
    # initialize the neural net
    with open(init_path, "rb") as f:
        init_net = f.read()
    with open(predict_path, "rb") as f:
        predict_net = f.read()

    return workspace.Predictor(init_net, predict_net)
Example #6
def main():
    args = make_args()
    config = configparser.ConfigParser()
    utils.load_config(config, args.config)
    for cmd in args.modify:
        utils.modify_config(config, cmd)
    with open(os.path.expanduser(os.path.expandvars(args.logging)), 'r') as f:
        logging.config.dictConfig(yaml.safe_load(f))
    torch.manual_seed(args.seed)
    model_dir = utils.get_model_dir(config)
    init_net = caffe2_pb2.NetDef()
    with open(os.path.join(model_dir, 'init_net.pb'), 'rb') as f:
        init_net.ParseFromString(f.read())
    predict_net = caffe2_pb2.NetDef()
    with open(os.path.join(model_dir, 'predict_net.pb'), 'rb') as f:
        predict_net.ParseFromString(f.read())
    p = workspace.Predictor(init_net, predict_net)
    height, width = tuple(map(int, config.get('image', 'size').split()))
    tensor = torch.randn(1, 3, height, width)
    # Checksum
    output = p.run([tensor.numpy()])
    for key, a in [
        ('tensor', tensor.cpu().numpy()),
        ('output', output[0]),
    ]:
        print('\t'.join(
            map(str, [
                key, a.shape,
                utils.abs_mean(a),
                hashlib.md5(a.tobytes()).hexdigest()
            ])))
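utils.abs_mean is project-specific and not shown here; a plausible stand-in, assuming it reports the mean absolute value used as a cheap checksum:

import numpy as np

def abs_mean(a):
    # Order-independent summary of a tensor's magnitude
    return float(np.abs(a).mean())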
Example #7
def main():
    args = make_args()
    config = configparser.ConfigParser()
    utils.load_config(config, args.config)
    for cmd in args.modify:
        utils.modify_config(config, cmd)
    with open(os.path.expanduser(os.path.expandvars(args.logging)), 'r') as f:
        logging.config.dictConfig(yaml.safe_load(f))
    model_dir = utils.get_model_dir(config)
    height, width = tuple(map(int, config.get('image', 'size').split()))
    resize = transform.parse_transform(config, config.get('transform', 'resize_test'))
    transform_image = transform.get_transform(config, config.get('transform', 'image_test').split())
    transform_tensor = transform.get_transform(config, config.get('transform', 'tensor').split())
    # load image
    image_bgr = cv2.imread('image.jpg')
    image_resized = resize(image_bgr, height, width)
    image = transform_image(image_resized)
    tensor = transform_tensor(image).unsqueeze(0)
    # Caffe2
    init_net = caffe2_pb2.NetDef()
    with open(os.path.join(model_dir, 'init_net.pb'), 'rb') as f:
        init_net.ParseFromString(f.read())
    predict_net = caffe2_pb2.NetDef()
    with open(os.path.join(model_dir, 'predict_net.pb'), 'rb') as f:
        predict_net.ParseFromString(f.read())
    p = workspace.Predictor(init_net, predict_net)
    results = p.run([tensor.numpy()])
    logging.info(utils.abs_mean(results[0]))
    logging.info(hashlib.md5(results[0].tobytes()).hexdigest())
Example #8
def load_model(init_net_path, predict_net_path):
    with open(init_net_path, "rb") as f:
        init_net = f.read()
    with open(predict_net_path, "rb") as f:
        predict_net = f.read()
    p = workspace.Predictor(init_net, predict_net)
    return p
Example #9
    def predict(self, input_blob):
        input_blob = input_blob['data']
        p = workspace.Predictor(self.init_net, self.net)
        input_blob = input_blob.transpose((2, 0, 1))
        input_blob = np.expand_dims(input_blob, 0)
        input_blob = input_blob.astype(np.float32)
        return p.run([input_blob])
Example #10
    def classify(self, path):
        input_image_size = self.model[4]

        img = skimage.img_as_float(skimage.io.imread(path)).astype(np.float32)
        img = self.rescale(img, input_image_size, input_image_size)
        img = self.crop_center(img, input_image_size, input_image_size)

        img = img.swapaxes(1, 2).swapaxes(0, 1)
        img = img[(2, 1, 0), :, :]
        img = img * 255 - self.mean

        img = img[np.newaxis, :, :, :].astype(np.float32)

        p = workspace.Predictor(self.init_net, self.predict_net)

        results = p.run([img])
        results = np.asarray(results)

        results = np.delete(results, 1)
        filtered_results = []

        for i, r in enumerate(results):
            if (float(r) > 0.01):
                filtered_results.append(
                    (self.get_category_from_code(i + 1), float(r)))

        return sorted(filtered_results,
                      key=lambda result: result[1],
                      reverse=True)
Example #11
    def test_run(self):
        A = np.ones((2, 3), np.float32)
        B = np.ones((3, 4), np.float32)
        predictor = workspace.Predictor(self.init_net, self.predict_net)
        outputs = predictor.run([A, B])
        self.assertEqual(len(outputs), 1)
        np.testing.assert_almost_equal(np.dot(A, B), outputs[0])
Example #12
    def create_caffe2_predictor(onnx_file_path):
        with open(onnx_file_path, 'rb') as onnx_model:
            onnx_model_proto = ModelProto()
            onnx_model_proto.ParseFromString(onnx_model.read())
            init_net, predict_net = c2.onnx_graph_to_caffe2_net(
                onnx_model_proto)
            predictor = workspace.Predictor(init_net, predict_net)
        return predictor
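The init_net and predict_net produced by the conversion above are NetDef protobufs, so they can also be written to disk for the read-back pattern used elsewhere on this page; a short sketch (file names are arbitrary):

import os

def save_caffe2_nets(init_net, predict_net, out_dir):
    # Serialize both NetDef protobufs for later loading with open(..., 'rb')
    with open(os.path.join(out_dir, 'init_net.pb'), 'wb') as f:
        f.write(init_net.SerializeToString())
    with open(os.path.join(out_dir, 'predict_net.pb'), 'wb') as f:
        f.write(predict_net.SerializeToString())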
Example #13
    def __init__(self):

        with open('init_net.pb', 'rb') as f:
            init_net = f.read()
        with open('predict_net.pb', 'rb') as f:
            predict_net = f.read()

        self.model = workspace.Predictor(init_net, predict_net)
Example #14
def caffe2_predictor_v2(init_net_path, predict_net_path, image_dir,
                        labels_filename):
    '''
    https://github.com/caffe2/tutorials/blob/master/Loading_Pretrained_Models.ipynb
    :param init_net_path:
    :param predict_net_path:
    :param image_dir:
    :param labels_filename:
    :return:
    '''
    resize_height = 224
    resize_width = 224

    labels = np.loadtxt(labels_filename, str, delimiter='\t')
    test_transform = transforms.Compose([
        transforms.Resize(size=(resize_height, resize_width)),
        transforms.ToTensor(),
        # transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])

    # Read the contents of the input protobufs into local variables
    with open(init_net_path, "rb") as f:
        init_net = f.read()
    with open(predict_net_path, "rb") as f:
        predict_net = f.read()

    predict_def = caffe2_pb2.NetDef()
    predict_def.ParseFromString(predict_net)
    print(net_printer.to_string(predict_def))
    # Load the images
    # workspace.RunNetOnce(init_net)
    # workspace.CreateNet(predict_net)
    p = workspace.Predictor(init_net, predict_net)

    images_list = glob.glob(os.path.join(image_dir, '*.jpg'))
    for image_path in images_list:
        print("--------------------------------------")

        image = Image.open(image_path).convert('RGB')
        image_tensor = test_transform(image).float()
        # Add an extra batch dimension since pytorch treats all images as batches
        image_tensor = image_tensor.unsqueeze_(0)

        input = image_tensor.numpy()
        print("input.shape:{}".format(input.shape))
        # output = p.run({'0': input})
        output = p.run([input])
        #
        output = np.asarray(output)
        output = np.squeeze(output, axis=(0, ))
        print(output)
        # print("output shape: ", output.shape)
        pre_score = fun.softmax(output, axis=1)
        pre_index = np.argmax(pre_score, axis=1)
        max_score = pre_score[:, pre_index]
        pre_label = labels[pre_index]
        print("{} is: pre labels:{},name:{} score: {}".format(
            image_path, pre_index, pre_label, max_score))
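fun.softmax above is imported from elsewhere (likely scipy.special); a minimal numerically stable stand-in:

import numpy as np

def softmax(x, axis=None):
    # Subtract the max before exponentiating to avoid overflow
    e = np.exp(x - np.max(x, axis=axis, keepdims=True))
    return e / e.sum(axis=axis, keepdims=True)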
Example #15
    def test_mobile_exporter(self):
        model = ModelHelper(name="mobile_exporter_test_model")
        # Test LeNet
        brew.conv(model, 'data', 'conv1', dim_in=1, dim_out=20, kernel=5)
        brew.max_pool(model, 'conv1', 'pool1', kernel=2, stride=2)
        brew.conv(model, 'pool1', 'conv2', dim_in=20, dim_out=50, kernel=5)
        brew.max_pool(model, 'conv2', 'pool2', kernel=2, stride=2)
        brew.fc(model, 'pool2', 'fc3', dim_in=50 * 4 * 4, dim_out=500)
        brew.relu(model, 'fc3', 'fc3')
        brew.fc(model, 'fc3', 'pred', 500, 10)
        brew.softmax(model, 'pred', 'out')

        # Create our mobile exportable networks
        workspace.RunNetOnce(model.param_init_net)
        init_net, predict_net = mobile_exporter.Export(workspace, model.net,
                                                       model.params)

        # Populate the workspace with data
        np_data = np.random.rand(1, 1, 28, 28).astype(np.float32)
        workspace.FeedBlob("data", np_data)

        workspace.CreateNet(model.net)
        workspace.RunNet(model.net)
        ref_out = workspace.FetchBlob("out")

        # Clear the workspace
        workspace.ResetWorkspace()

        # Populate the workspace with data
        workspace.RunNetOnce(init_net)
        # Fake "data" is populated by init_net, we have to replace it
        workspace.FeedBlob("data", np_data)

        # Overwrite the old net
        workspace.CreateNet(predict_net, True)
        workspace.RunNet(predict_net.name)
        manual_run_out = workspace.FetchBlob("out")
        np.testing.assert_allclose(ref_out,
                                   manual_run_out,
                                   atol=1e-10,
                                   rtol=1e-10)

        # Clear the workspace
        workspace.ResetWorkspace()

        # Predictor interface test (simulates writing to disk)
        predictor = workspace.Predictor(init_net.SerializeToString(),
                                        predict_net.SerializeToString())

        # Output is a vector of outputs but we only care about the first and only result
        predictor_out = predictor.run([np_data])
        assert len(predictor_out) == 1
        predictor_out = predictor_out[0]

        np.testing.assert_allclose(ref_out,
                                   predictor_out,
                                   atol=1e-10,
                                   rtol=1e-10)
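The predictor test above only simulates writing to disk. Persisting the exported nets for real is two SerializeToString calls (paths are arbitrary):

with open('init_net.pb', 'wb') as f:
    f.write(init_net.SerializeToString())
with open('predict_net.pb', 'wb') as f:
    f.write(predict_net.SerializeToString())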
Example #16
def main():

    minsize = 20

    caffe_model_path = "./models/caffe2_models"

    threshold = [0.00006, 0.7, 0.7]
    factor = 0.709

    with open(caffe_model_path + "/det1_init_net.pb", "rb") as f:
        det1_init = f.read()
    with open(caffe_model_path + "/det1_predict_net.pb", "rb") as f:
        det1_predict = f.read()
    with open(caffe_model_path + "/det2_init_net.pb", "rb") as f:
        det2_init = f.read()
    with open(caffe_model_path + "/det2_predict_net.pb", "rb") as f:
        det2_predict = f.read()
    with open(caffe_model_path + "/det3_init_net.pb", "rb") as f:
        det3_init = f.read()
    with open(caffe_model_path + "/det3_predict_net.pb", "rb") as f:
        det3_predict = f.read()
    PNet = workspace.Predictor(det1_init, det1_predict)
    RNet = workspace.Predictor(det2_init, det2_predict)
    ONet = workspace.Predictor(det3_init, det3_predict)

    files = os.listdir(r'./')
    for f in files[:1]:

        img = cv2.imread(r'./ivan.jpg')
        img_matlab = img.copy()
        tmp = img_matlab[:, :, 2].copy()
        img_matlab[:, :, 2] = img_matlab[:, :, 0]
        img_matlab[:, :, 0] = tmp

        boundingboxes, points = detect_face(img_matlab, minsize, PNet, RNet,
                                            ONet, threshold, False, factor)

        for i in range(len(boundingboxes)):
            cv2.rectangle(img,
                          (int(boundingboxes[i][0]), int(boundingboxes[i][1])),
                          (int(boundingboxes[i][2]), int(boundingboxes[i][3])),
                          (0, 255, 0), 1)
        print(boundingboxes)
        img = drawBoxes(img, boundingboxes)
        cv2.imwrite(r'./res_ivan.jpg', img)
Example #17
    def __init__(self, init_net_path, predict_net_path, onnx_model=False):
        if onnx_model:
            # TODO write ONNX specific code
            pass
        with open(init_net_path, 'rb') as f:
            self.init_net = f.read()
        with open(predict_net_path, 'rb') as f:
            self.predict_net = f.read()
        self.model = workspace.Predictor(self.init_net, self.predict_net)
Example #18
    def get_model_from_file(self, file_name: str):
        self._model_file_content = SerializationHelper.get_model_content_from_file(
            file_name, Caffe2Model.get_package_name(), self.get_params())
        if CAFFE2_MODEL_PREDICT_FILE_NAME in self.get_params():
            self._model_file_content[CAFFE2_MODEL_PREDICT_FILE_NAME] = open(
                self.get_params().get(CAFFE2_MODEL_PREDICT_FILE_NAME), 'rb')
        return workspace.Predictor(
            self._model_file_content[CAFFE2_MODEL_INIT_FILE_NAME].read(),
            self._model_file_content[CAFFE2_MODEL_PREDICT_FILE_NAME].read())
Example #19
def run_caffe2_model(predict_net_path, init_net_path, feed_dict):
    from caffe2.python import workspace
    with open(init_net_path, "rb") as f:
        init_net = f.read()
    with open(predict_net_path, "rb") as f:
        predict_net = f.read()

    predictor = workspace.Predictor(init_net, predict_net)
    return [np.array(arr) for arr in predictor.run(feed_dict)]
Example #20
def LoadPredictor(workspace, initNetPath, predictNetPath):
    initNet, predictNet = None, None

    with open(initNetPath, 'rb') as f:
        initNet = f.read()
    with open(predictNetPath, 'rb') as f:
        predictNet = f.read()

    return workspace.Predictor(initNet, predictNet)
Example #21
    def __init__(self, args, config):
        self.args = args
        self.config = config
        self.device = torch.device(
            'cuda' if torch.cuda.is_available() else 'cpu')
        self.cache_dir = utils.get_cache_dir(config)
        self.model_dir = utils.get_model_dir(config)
        _, self.num_parts = utils.get_dataset_mappers(config)
        self.limbs_index = utils.get_limbs_index(config)
        if args.debug is None:
            self.draw_cluster = utils.visualize.DrawCluster(
                colors=args.colors, thickness=args.thickness)
        else:
            self.draw_feature = utils.visualize.DrawFeature()
            s = re.search('(-?[0-9]+)([a-z]+)(-?[0-9]+)', args.debug)
            stage = int(s.group(1))
            name = s.group(2)
            channel = int(s.group(3))
            self.get_feature = lambda outputs: outputs[stage][name][0][channel]
        self.height, self.width = tuple(
            map(int,
                config.get('image', 'size').split()))
        if args.caffe:
            init_net = caffe2_pb2.NetDef()
            with open(os.path.join(self.model_dir, 'init_net.pb'), 'rb') as f:
                init_net.ParseFromString(f.read())
            predict_net = caffe2_pb2.NetDef()
            with open(os.path.join(self.model_dir, 'predict_net.pb'),
                      'rb') as f:
                predict_net.ParseFromString(f.read())
            p = workspace.Predictor(init_net, predict_net)
            self.inference = lambda tensor: [{
                'parts': torch.from_numpy(parts),
                'limbs': torch.from_numpy(limbs)
            } for parts, limbs in zip(
                *[iter(p.run([tensor.detach().cpu().numpy()]))] * 2)]
        else:
            self.step, self.epoch, self.dnn, self.stages = self.load()
            self.inference = model.Inference(config, self.dnn, self.stages)
            self.inference.eval()
            if torch.cuda.is_available():
                self.inference.cuda()
            logging.info(
                humanize.naturalsize(
                    sum(var.cpu().numpy().nbytes
                        for var in self.inference.state_dict().values())))
        self.cap = self.create_cap()
        self.keys = set(args.keys)
        self.resize = transform.parse_transform(
            config, config.get('transform', 'resize_test'))
        self.transform_image = transform.get_transform(
            config,
            config.get('transform', 'image_test').split())
        self.transform_tensor = transform.get_transform(
            config,
            config.get('transform', 'tensor').split())
Example #22
def initFaceDetector():
    caffe_model_path = "./models"
    with open(caffe_model_path+"/12init_net.pb", "rb") as f:
        det1_init = f.read()
    with open(caffe_model_path+"/12predict_net.pb", "rb") as f:
        det1_predict = f.read()
    with open(caffe_model_path+"/24init_net.pb", "rb") as f:
        det2_init = f.read()
    with open(caffe_model_path+"/24predict_net.pb", "rb") as f:
        det2_predict = f.read()
    with open(caffe_model_path+"/48init_net.pb", "rb") as f:
        det3_init = f.read()
    with open(caffe_model_path+"/48predict_net.pb", "rb") as f:
        det3_predict = f.read()

    PNet = workspace.Predictor(det1_init, det1_predict)
    RNet = workspace.Predictor(det2_init, det2_predict)
    ONet = workspace.Predictor(det3_init, det3_predict)
    return (PNet, RNet, ONet)
Example #23
def load_network():
    """Create network, load weights and create a predictor from it."""

    with open(INIT_NET, "rb") as f:
        init_net = f.read()
    with open(PREDICT_NET, "rb") as f:
        predict_net = f.read()

    predictor = workspace.Predictor(init_net, predict_net)
    return predictor
Example #24
    def _load_model(self):
        INIT_NET = os.path.join(CAFFE_MODELS, MODEL[0], MODEL[1])
        PREDICT_NET = os.path.join(CAFFE_MODELS, MODEL[0], MODEL[2])
        with open(INIT_NET, "rb") as f:
            init_net = f.read()
        with open(PREDICT_NET, "rb") as f:
            predict_net = f.read()

        predictor = workspace.Predictor(init_net, predict_net)
        return predictor
Example #25
    def test_predictor_memory_model(self):
        workspace.ResetWorkspace()
        m = self._create_model()
        workspace.FeedBlob("data", np.zeros([4], dtype='float32'))
        self.predictor = workspace.Predictor(
            workspace.StringifyProto(m.param_init_net.Proto()),
            workspace.StringifyProto(m.net.Proto()))

        inputs = np.array([1, 3, 256, 256], dtype='float32')
        outputs = self.predictor.run([inputs])
        np.testing.assert_array_almost_equal(np.array([[516, 516]], dtype='float32'), outputs)
Example #26
def run_caffe2(init_net, predict_net, input_path, output_path=''):
    x = read_input(input_path)
    with open(init_net, 'rb') as f:
        init_net = f.read()

    with open(predict_net, 'rb') as f:
        predict_net = f.read()
    p = workspace.Predictor(init_net, predict_net)
    # TODO get 'data' parameter more universal, blobs contain other names
    results = p.run({'data': x})
    print(results)
    save_result(output_path, results)
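The TODO above can be addressed by reading the input names out of the predict net itself; a sketch, assuming predict_net still holds the serialized bytes:

from caffe2.proto import caffe2_pb2

net_def = caffe2_pb2.NetDef()
net_def.ParseFromString(predict_net)
# external_input lists every blob the net consumes, weights included;
# the data input is typically the first non-parameter entry
print(net_def.external_input)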
Example #27
def initFaceDetector():
    minsize = 20
    caffe_model_path = "./models"
    threshold = [0.6, 0.7, 0.7]
    factor = 0.709
    caffe.set_mode_cpu()
    with open(caffe_model_path + "/det1_init_net.pb", "rb") as f:
        det1_init = f.read()
    with open(caffe_model_path + "/det1_predict_net.pb", "rb") as f:
        det1_predict = f.read()
    with open(caffe_model_path + "/det2_init_net.pb", "rb") as f:
        det2_init = f.read()
    with open(caffe_model_path + "/det2_predict_net.pb", "rb") as f:
        det2_predict = f.read()
    with open(caffe_model_path + "/det3_init_net.pb", "rb") as f:
        det3_init = f.read()
    with open(caffe_model_path + "/det3_predict_net.pb", "rb") as f:
        det3_predict = f.read()
    PNet = workspace.Predictor(det1_init, det1_predict)
    RNet = workspace.Predictor(det2_init, det2_predict)
    ONet = workspace.Predictor(det3_init, det3_predict)
    return (minsize, PNet, RNet, ONet, threshold, factor)
Example #28
def main(argv):
    pycaffe_dir = os.path.dirname(__file__)

    parser = argparse.ArgumentParser()
    # Required arguments: input file.
    parser.add_argument(
        "input_file",
        help="Path to the input image file"
    )


    args = parser.parse_args()

    with open("init_net.pb") as f:
        init_net = f.read()
    with open("predict_net.pb") as f:
    	predict_net = f.read()

    nsfw_net = workspace.Predictor(init_net, predict_net)

    img = skimage.img_as_float(skimage.io.imread(args.input_file)).astype(np.float32)
    img = rescale(img, 256, 256)
    img = crop_center(img, 224, 224)

    img = img.swapaxes(1, 2).swapaxes(0, 1)
    
    #switch to BGR
    img = img[(2, 1, 0), :, :]

    mean = np.empty([3,224,224])

    mean[0] = 104
    mean[1] = 117
    mean[2] = 123
 
    img = img*255 - mean

    img = img[np.newaxis, :, :, :].astype(np.float32)

    #img.shape = (1,) + img.shape

    print "NCHW: ", img.shape


    # Classify.
    outputs = nsfw_net.run({'data':img})
    scores = outputs[0][0].astype(float)

    # Scores is the array containing SFW / NSFW image probabilities
    # scores[1] indicates the NSFW probability
    print "NSFW score:  " , scores[1]
    def test_mobile_exporter_datatypes(self):
        model = ModelHelper(name="mobile_exporter_test_model")
        model.Copy("data_int", "out")
        model.params.append("data_int")

        # Create our mobile exportable networks
        workspace.RunNetOnce(model.param_init_net)
        np_data_int = np.random.randint(100, size=(1, 1, 28, 28), dtype=np.int32)
        workspace.FeedBlob("data_int", np_data_int)

        init_net, predict_net = mobile_exporter.Export(
            workspace, model.net, model.params
        )

        workspace.CreateNet(model.net)
        workspace.RunNet(model.net)
        ref_out = workspace.FetchBlob("out")

        # Clear the workspace
        workspace.ResetWorkspace()

        # Populate the workspace with data
        workspace.RunNetOnce(init_net)

        # Overwrite the old net
        workspace.CreateNet(predict_net, True)
        workspace.RunNet(predict_net.name)
        manual_run_out = workspace.FetchBlob("out")
        np.testing.assert_allclose(
            ref_out, manual_run_out, atol=1e-10, rtol=1e-10
        )

        # Clear the workspace
        workspace.ResetWorkspace()

        # Predictor interface test (simulates writing to disk)
        predictor = workspace.Predictor(
            init_net.SerializeToString(), predict_net.SerializeToString()
        )

        # Output is a vector of outputs but we only care about the first and only result
        predictor_out = predictor.run([])
        assert len(predictor_out) == 1
        predictor_out = predictor_out[0]

        np.testing.assert_allclose(
            ref_out, predictor_out, atol=1e-10, rtol=1e-10
        )
Example #30
    def load_onnx(self):

        init_filename = EXP_PATH + 'init.pb'
        pred_filename = EXP_PATH + 'pred.pb'

        if not (os.path.isfile(init_filename)
                and os.path.isfile(pred_filename)):
            self.save_onnx()

        with open(init_filename, 'rb') as f:
            init_net = f.read()

        with open(pred_filename, 'rb') as f:
            pred_net = f.read()

        self.model_caffe = workspace.Predictor(init_net, pred_net)
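The save_onnx method referenced above is not shown. A minimal sketch of producing the two .pb files from an ONNX model with the Caffe2 ONNX backend; the paths and function shape are assumptions:

import onnx
from caffe2.python.onnx.backend import Caffe2Backend

def save_onnx(onnx_path, init_filename, pred_filename):
    # Convert the ONNX graph to a pair of Caffe2 NetDefs and serialize them
    model_proto = onnx.load(onnx_path)
    init_net, pred_net = Caffe2Backend.onnx_graph_to_caffe2_net(model_proto)
    with open(init_filename, 'wb') as f:
        f.write(init_net.SerializeToString())
    with open(pred_filename, 'wb') as f:
        f.write(pred_net.SerializeToString())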