Example #1
def run_inference():

    model_path = 'resnet50'
    batch_size = 1
    channels = 3
    height = width = 224
    input_shape = {'data': [batch_size, channels, height, width]}
    classes = 1000
    output_shape = [batch_size, classes]
    device = 'cpu'
    model = DLRModel(model_path, input_shape, output_shape, device)

    synset_path = os.path.join(model_path, 'synset.txt')
    with open(synset_path, 'r') as f:
        synset = eval(f.read())

    image = np.load('dog.npy').astype(np.float32)
    input_data = {'data': image}

    for rep in range(4):
        t1 = current_milli_time()
        out = model.run(input_data)
        t2 = current_milli_time()

        logging.debug('done m.run(), time (ms): {}'.format(t2 - t1))

        top1 = np.argmax(out[0])
        logging.debug('Inference result: {}, {}'.format(top1, synset[top1]))

    import resource
    logging.debug(
        "peak memory usage (bytes on OS X, kilobytes on Linux) {}".format(
            resource.getrusage(resource.RUSAGE_SELF).ru_maxrss))

    return {'synset_id': top1, 'prediction': synset[top1], 'time': t2 - t1}
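The timing helper current_milli_time() used here (and again in later examples) is not shown in the listing; a minimal sketch of what it presumably looks like:

import time

def current_milli_time():
    # Wall-clock time in whole milliseconds (assumed definition, not from the listing).
    return int(round(time.time() * 1000))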
Example #2
def run_inference():

    os.system(
        'fswebcam -r 1024x768 --no-banner --scale 224x224 output.jpg -S 7 --save /home/pi/Photos/std.jpg'
    )  # uses fswebcam to take a picture
    image = Image.open('output.jpg')
    #data = np.array(image,dtype='float64')
    #data=data1.reshape((1,data1.shape[2],data1.shape[0],data1.shape[1]))

    #np.save( 'flamingo.npy', data)

    image_data = utils.transform_image(image)
    print(image_data)
    flattened_data = image_data.astype(np.float32).flatten()
    #np.save( 'puppi.npy',flattened_data)
    print("Start Prinring Flattern")
    print(flattened_data)
    #run_inference(image_data)
    #time.sleep(15) # this line creates a 15 second delay before repeating the loop

    model_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                              '../model-rasp3b')
    batch_size = 1
    channels = 3
    height = width = 224
    input_shape = {'input0': [batch_size, channels, height, width]}
    classes = 1000
    output_shape = [batch_size, classes]
    device = 'cpu'
    model = DLRModel(model_path, input_shape, output_shape, device)

    synset_path = os.path.join(model_path, 'imagenet1000_clsidx_to_labels.txt')
    with open(synset_path, 'r') as f:
        synset = eval(f.read())

    #image = np.load(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'dog.npy')).astype(np.float32)
    #input_data = {'data': image_data}

    # Predict
    out = model.run({'input0': flattened_data}).squeeze()
    top1 = np.argmax(out)
    prob = np.max(out)
    print("Class: %s, probability: %f" % (synset[top1], prob))

    for rep in range(4):
        t1 = current_milli_time()
        out = model.run({'input0': flattened_data}).squeeze()
        t2 = current_milli_time()

        logging.debug('done m.run(), time (ms): {}'.format(t2 - t1))

        top1 = np.argmax(out[0])
        logging.debug('Inference result: {}, {}'.format(top1, synset[top1]))

    import resource
    logging.debug(
        "peak memory usage (bytes on OS X, kilobytes on Linux) {}".format(
            resource.getrusage(resource.RUSAGE_SELF).ru_maxrss))

    return {'synset_id': top1, 'prediction': synset[top1], 'time': t2 - t1}
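utils.transform_image() (used here and again in Example #26) is not included in the listing. A plausible sketch, assuming standard ImageNet-style preprocessing of the PIL image into a 1x3x224x224 float32 array; the real helper may differ:

import numpy as np

def transform_image(image):
    # Resize to the network input size and convert to float32 (assumed).
    data = np.asarray(image.resize((224, 224)), dtype=np.float32)
    # Normalize with ImageNet mean/std (assumption).
    data = (data / 255.0 - np.array([0.485, 0.456, 0.406])) / np.array([0.229, 0.224, 0.225])
    # HWC -> CHW and add the batch dimension.
    return data.transpose((2, 0, 1))[np.newaxis, :].astype(np.float32)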
Example #3
    def do_set_property(self, prop: GObject.GParamSpec, value):
        if prop.name == 'model-dir':
            self.model_dir = value
            if self.model_dir != "" and os.path.exists(self.model_dir):
                # Load model
                print("Loading model from %s..." % self.model_dir)
                self.model = DLRModel(self.model_dir, self.device_type)
                print("Done.")

                print("Warming up DLR engine...")
                start_time = time.time()
                x = np.random.rand(1, 3, 320, 320)
                result = self.model.run(x)
                print(len(result))
                print(result[0].shape)
                print('inference time is ' + str((time.time() - start_time)) +
                      ' seconds')
                print("Done.")
        elif prop.name == 'device-type':
            self.device_type = value
        elif prop.name == 'image-size':
            self.image_size = value
        elif prop.name == 'threshold':
            self.threshold = value
        else:
            raise AttributeError('unknown property %s' % prop.name)
Example #4
def load_test():
    # Load the model
    model_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                              'resnet18_v1')
    classes = 1000
    device = 'cpu'
    model = DLRModel(model_path, device)

    results = []
    for i in range(0, 100):
        # Run the model
        image = np.load(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'dog.npy')).astype(np.float32)
        # flatten within an input array
        input_data = {'data': image}
        print('Testing inference on resnet18...')

        start = time.time()
        probabilities = model.run(input_data)  # needs to be a list of input arrays matching input names
        end = time.time()
        results.append(1000 * (end - start))

        assert probabilities[0].argmax() == 151


    results = np.array(results)
    print(results.min(), results.mean(), results.max())
Example #5
def test_mnist():
    model_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'xgboost-mnist')
    data_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'xgboost', 'mnist.libsvm')
    model = DLRModel(model_path, 'cpu', 0)

    X, _ = load_svmlight_file(data_file, zero_based=True)
    print('Testing inference on XGBoost MNIST...')
    assert model.run(_sparse_to_dense(X))[0] == 7.0
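_sparse_to_dense() is referenced by the XGBoost tests (Examples #5, #6 and #12) but not defined in the listing; presumably it just densifies the SciPy matrix returned by load_svmlight_file, e.g.:

def _sparse_to_dense(X):
    # Convert the scipy.sparse matrix to the dense ndarray expected by DLRModel.run()
    # (assumed helper, not the original test fixture).
    return X.toarray()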
Example #6
def test_iris():
    model_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'xgboost-iris')
    data_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'xgboost', 'iris.libsvm')
    model = DLRModel(model_path, 'cpu', 0)

    X, _ = load_svmlight_file(data_file, zero_based=True)
    expected = np.array([2.159504452720284462e-03, 9.946205615997314453e-01, 3.219985403120517731e-03])
    print('Testing inference on XGBoost Iris...')
    assert np.allclose(model.run(_sparse_to_dense(X))[0], expected)
Example #7
def test_multi_input():
    model_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
            '2in1out')
    device = 'cpu'
    model = DLRModel(model_path, device)

    input1 = np.asarray([1., 2.])
    input2 = np.asarray([3., 4.])
    input_map = {'data1': input1, 'data2': input2}
    outputs = model.run(input_map)
    assert outputs[0].tolist() == [4, 6]
Example #8
def test_resnet():
    # Load the model
    model_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
            'resnet18')
    classes = 1000
    device = 'cpu'
    model = DLRModel(model_path, device)

    # Run the model
    image = np.load(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'dog.npy')).astype(np.float32)
    # flatten within an input array
    input_data = {'data': image}
    probabilities = model.run(input_data)  # needs to be a list of input arrays matching input names
    assert probabilities[0].argmax() == 111
Example #9
def test_resnet(data):
    # Load the model
    model_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                        'catdog_model')
    device = 'gpu'
    model = DLRModel(model_path, device)

    # Run the model
    image = np.load(data).astype(np.float32)
    # flatten within an input array
    input_data = {'data': image}
    probabilities = model.run(input_data)  # needs to be a list of input arrays matching input names
    print(probabilities[0])
    print(probabilities[0].argmax())
Example #10
def test_mnist():
    model_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                              'xgboost-mnist-1.10.0')
    data_file = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                             'xgboost', 'mnist.libsvm')
    model = DLRModel(model_path, 'cpu', 0)

    X, y = load_svmlight_file(data_file)
    assert y.shape == (8, )
    print('Testing inference on XGBoost MNIST...')
    res = model.run(X.toarray())[0]
    # mnist model uses multi:softmax objective which outputs one class with the maximum probability
    assert res.shape == (8, 1)
    assert np.allclose(res.flatten(), y)
Example #11
def test_multi_input_multi_output():
    model_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
            '4in2out')
    device = 'cpu'
    model = DLRModel(model_path, device)

    input1 = np.asarray([1., 2.])
    input2 = np.asarray([3., 4.])
    input3 = np.asarray([5., 6.])
    input4 = np.asarray([7., 8.])
    input_map = {'data1': input1, 'data2': input2, 'data3': input3, 'data4': input4}
    outputs = model.run(input_map)

    assert outputs[0].tolist() == [4, 6]
    assert outputs[1].tolist() == [12, 14]
Example #12
def test_letor():
    model_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'xgboost-letor')
    data_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'xgboost', 'letor.libsvm')
    model = DLRModel(model_path, 'cpu', 0)

    X, _ = load_svmlight_file(data_file, zero_based=True)
    expected = np.array([1.372033834457397461e+00, -2.448803186416625977e+00, 8.579480648040771484e-01,
                         1.369985580444335938e+00, -7.058695554733276367e-01, 4.134958684444427490e-01,
                         -2.247941017150878906e+00, -2.461995363235473633e+00, -2.394921064376831055e+00,
                         -1.191793322563171387e+00, 9.672126173973083496e-02, 2.687671184539794922e-01,
                         1.417675256729125977e+00, -1.832636356353759766e+00, -5.582004785537719727e-02,
                         -9.497703313827514648e-01, -1.219825387001037598e+00, 1.512521862983703613e+00,
                         -1.179921030998229980e-01, -2.383430719375610352e+00, -9.094548225402832031e-01])
    expected = expected.reshape((-1, 1))
    print('Testing inference on XGBoost LETOR...')
    assert np.allclose(model.run(_sparse_to_dense(X))[0], expected)
Example #13
def test_mobilenet_v1_0_75_224_quant_wrong_input_type():
    # Load the model
    model_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                              'mobilenet_v1_0.75_224_quant')
    device = 'cpu'
    model = DLRModel(model_path, device)
    # load image (dtype: float32)
    image = np.load(
        os.path.join(os.path.dirname(os.path.abspath(__file__)), 'dog.npy'))
    print(
        'Testing inference on mobilenet_v1_0.75_224_quant with float32 input...'
    )
    try:
        model.run({'input': image})
        assert False, "ValueError is expected"
    except ValueError as e:
        assert str(
            e
        ) == "input data with name input should have dtype uint8 but float32 is provided"
Example #14
def test_multi_input_multi_output():
    model_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
            '4in2out')
    device = 'cpu'
    model = DLRModel(model_path, device)

    assert model._impl._get_output_size_dim(0) == (2, 1)
    assert model._impl._get_output_size_dim(1) == (3, 1)

    input1 = np.asarray([1., 2.])
    input2 = np.asarray([3., 4.])
    input3 = np.asarray([5., 6., 7])
    input4 = np.asarray([8., 9., 10])
    input_map = {'data1': input1, 'data2': input2, 'data3': input3, 'data4': input4}
    print('Testing multi_input/multi_output support...')
    outputs = model.run(input_map)

    assert outputs[0].tolist() == [4, 6]
    assert outputs[1].tolist() == [13, 15, 17]
Example #15
    def load_model(self):
        if self.model is not None:
            return
        if self.model_dir == "":
            return
        if os.path.exists(self.model_dir):
            # Load model
            print("Loading model from %s..." % self.model_dir)
            self.model = DLRModel(self.model_dir, self.device_type)
            print("Done.")

            print("Warming up DLR engine...")
            start_time = time.time()
            x = np.random.rand(1, 3, self.image_size, self.image_size)
            result = self.model.run(x)
            print(len(result))
            print(result[0].shape)
            print('inference time is ' + str((time.time() - start_time)) +
                  ' seconds')
            print("Done.")
Example #16
def test_assign_op():
    model_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                              'assign_op')
    device = 'cpu'
    model = DLRModel(model_path, device)

    print('Testing _assign() operator...')
    # Example from https://github.com/dmlc/tvm/blob/bb87f044099ba61ba4782d17dd9127b869936373/nnvm/tests/python/compiler/test_top_assign.py
    np.random.seed(seed=0)
    input1 = np.random.random(size=(5, 3, 18, 18))
    model.run({'w': input1})
    input1_next = model.get_input('w2', shape=(5, 3, 18, 18))
    assert np.allclose(input1_next, input1 + 2)

    model.run({})
    input1_next = model.get_input('w2', shape=(5, 3, 18, 18))
    assert np.allclose(input1_next, input1 + 3)
Example #17
def test_mobilenet_v1_0_75_224_quant():
    # Load the model
    model_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                              'mobilenet_v1_0.75_224_quant')
    device = 'cpu'
    model = DLRModel(model_path, device)
    # load image (dtype: uint8)
    image = np.load(
        os.path.join(os.path.dirname(os.path.abspath(__file__)),
                     'cat_224_uint8.npy'))
    print('Testing inference on mobilenet_v1_0.75_224_quant...')
    probabilities = model.run({'input': image})
    assert probabilities[0].argmax() == 282
    assert model.get_input_names() == ["input"]
    assert model.get_input_dtypes() == ["uint8"]
    assert model.get_output_dtypes() == ["uint8"]
    assert model.get_input_dtype(0) == "uint8"
    assert model.get_output_dtype(0) == "uint8"
    input2 = model.get_input("input")
    assert input2.dtype == 'uint8'
    assert input2.shape == (1, 224, 224, 3)
    assert (input2 == image).all()
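For reference, an NHWC uint8 input like cat_224_uint8.npy could be produced from a JPEG roughly as follows (a sketch only; the file name cat.jpg is hypothetical):

from PIL import Image
import numpy as np

img = Image.open('cat.jpg').resize((224, 224))
arr = np.asarray(img, dtype=np.uint8)[np.newaxis, :]  # shape (1, 224, 224, 3), dtype uint8
np.save('cat_224_uint8.npy', arr)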
Example #18
def test_resnet():
    # Load the model
    model_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
            'resnet18_v1')
    device = 'cpu'
    model = DLRModel(model_path, device)

    # Run the model
    image = np.load(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'dog.npy')).astype(np.float32)
    # flatten within an input array
    input_data = {'data': image}
    print('Testing inference on resnet18...')
    probabilities = model.run(input_data)  # needs to be a list of input arrays matching input names
    assert probabilities[0].argmax() == 151
    assert model.get_input_names() == ["data"]
    assert model.get_input_dtypes() == ["float32"]
    assert model.get_output_dtypes() == ["float32"]
    assert model.get_input_dtype(0) == "float32"
    assert model.get_output_dtype(0) == "float32"
Example #19
def test_ssd_mobilenet_v2_model():
    model = DLRModel(MODEL_PATH.as_posix())
    data = np.load(DATA_PATH)
    assert model.get_input_names() == ['image_tensor']
    assert model.get_output_names() == [
        'detection_scores:0', 'detection_classes:0', 'num_detections:0'
    ]
    assert model.get_input_dtypes() == ['uint8']
    assert model.get_output_dtypes() == ['float32', 'float32', 'float32']
    outputs = model.run({"image_tensor": data})
    assert outputs[0].shape == (1, 100, 4)
    assert outputs[1].shape == (1, 100)
    assert outputs[2].shape == (1, 100)
    detections = np.multiply(np.ceil(outputs[1]), outputs[2])
    expected = np.zeros(detections.shape)
    expected[:, :6] = np.array([[1., 1., 1., 2., 3., 1]])
    comparison = detections == expected
    assert comparison.all()
Example #20
def test_get_set_input():
    model_path = get_models(model_name='4in2out', arch=get_arch(), kind='tvm')
    device = 'cpu'
    model = DLRModel(model_path, device)

    input1 = np.asarray([1., 2.])
    input2 = np.asarray([3., 4.])
    input3 = np.asarray([5., 6., 7])
    input4 = np.asarray([8., 9., 10])
    
    model.run({'data1': input1, 'data2': input2, 'data3': input3, 'data4': input4})

    assert np.array_equal(model.get_input('data1'), input1)
    assert np.array_equal(model.get_input('data2'), input2)
    assert np.array_equal(model.get_input('data3'), input3)
    assert np.array_equal(model.get_input('data4'), input4)
Example #21
def test_ssd_mobilenet_v2_model():
    model = DLRModel(MODEL_PATH.as_posix())
    data = np.load(DATA_PATH)
    assert model.get_input_names() == ['image_tensor']
    assert model.get_output_names() == [
        'detection_classes:0', 'num_detections:0', 'detection_boxes:0',
        'detection_scores:0'
    ]
    assert model.get_input_dtypes() == ['uint8']
    assert model.get_output_dtypes() == [
        'float32', 'float32', 'float32', 'float32'
    ]
    outputs = model.run({"image_tensor": data})
    assert outputs[0].shape == (1, 100)
    assert outputs[1].shape == (1, )
    assert outputs[2].shape == (1, 100, 4)
    assert outputs[3].shape == (1, 100)
    detections = np.multiply(np.ceil(outputs[3]), outputs[0])
    expected = np.zeros(detections.shape)
    assert np.count_nonzero(detections) == outputs[1][0]
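With this output ordering (classes, num_detections, boxes, scores), the raw tensors can be turned into per-detection tuples; a small sketch, not part of the test:

def print_detections(outputs):
    # outputs[0]: classes (1, 100), outputs[1]: num_detections (1,),
    # outputs[2]: boxes (1, 100, 4), outputs[3]: scores (1, 100)
    n = int(outputs[1][0])
    for cls, score, box in zip(outputs[0][0][:n], outputs[3][0][:n], outputs[2][0][:n]):
        print(int(cls), float(score), box.tolist())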
Example #22
def test_tflite_model():
    _generate_tflite_file()

    m = DLRModel(TFLITE_FILE_PATH)
    inp_names = m.get_input_names()
    assert sorted(inp_names) == ['input1', 'input2']

    out_names = m.get_output_names()
    assert sorted(out_names) == ['preproc/output1', 'preproc/output2']

    inp1 = np.array([[4., 1.], [3., 2.]]).astype("float32")
    inp2 = np.array([[0., 1.], [1., 0.]]).astype("float32")

    res = m.run({'input1': inp1, 'input2': inp2})
    assert res is not None
    assert len(res) == 2
    exp_out0 = np.array([[36., 361.], [49., 324.]]).astype("float32")
    assert np.alltrue(res[0] == exp_out0)
    assert res[1] == 1

    m_inp1 = m.get_input('input1')
    m_inp2 = m.get_input('input2')
    assert np.alltrue(m_inp1 == inp1)
    assert np.alltrue(m_inp2 == inp2)
Example #23
def test_tf_model(dev_type=None, dev_id=None):
    _generate_frozen_graph()
    model = DLRModel(FROZEN_GRAPH_PATH, dev_type, dev_id)
    inp_names = model.get_input_names()
    assert inp_names == ['import/input1:0', 'import/input2:0']

    out_names = model.get_output_names()
    assert out_names == [
        'import/preproc/output1:0', 'import/preproc/output2:0'
    ]

    inp1 = [[4., 1.], [3., 2.]]
    inp2 = [[0., 1.], [1., 0.]]

    res = model.run({'import/input1:0': inp1, 'import/input2:0': inp2})
    assert res is not None
    assert len(res) == 2
    assert np.alltrue(res[0] == [[36., 361.], [49., 324.]])
    assert res[1] == 1

    m_inp1 = model.get_input('import/input1:0')
    m_inp2 = model.get_input('import/input2:0')
    assert np.alltrue(m_inp1 == inp1)
    assert np.alltrue(m_inp2 == inp2)
Example #24
class GstNeoDLR(GstBase.BaseTransform):

    GST_PLUGIN_NAME = 'neodlr'

    __gstmetadata__ = (
        "NeoDLR",  # Name
        "Filter",  # Transform
        "ML Inference with SageMaker Neo",  # Description
        "Bartek Pawlik <*****@*****.**>")  # Author

    __gsttemplates__ = (
        Gst.PadTemplate.new(
            "src", Gst.PadDirection.SRC, Gst.PadPresence.ALWAYS,
            Gst.Caps.from_string(f"video/x-raw,format={FORMATS}")),
        Gst.PadTemplate.new(
            "sink", Gst.PadDirection.SINK, Gst.PadPresence.ALWAYS,
            Gst.Caps.from_string(f"video/x-raw,format={FORMATS}")))

    __gproperties__ = {
        "model-dir": (
            GObject.TYPE_STRING,  # type
            "Model directory",  # nick
            "SageMaker Neo model directory",  # blurb
            DEFAULT_MODEL_DIR,  # default
            GObject.ParamFlags.READWRITE  # flags
        ),
        "device-type": (
            GObject.TYPE_STRING,  # type
            "Device type",  # nick
            "Device type: cpu or gpu",  # blurb
            DEFAULT_DEVICE_TYPE,  # default
            GObject.ParamFlags.READWRITE  # flags
        ),
        "image-size": (
            GObject.TYPE_INT64,  # type
            "Image Size",  # nick
            "Image size for inference, width or height",  # blurb
            1,  # min
            GLib.MAXINT,  # max
            DEFAULT_IMAGE_SIZE,  # default
            GObject.ParamFlags.READWRITE  # flags
        ),
        "threshold": (
            GObject.TYPE_FLOAT,
            "Detection threshold",
            "Detection threshold under which detection result will be ignored",
            0.0,  # min
            1.0,  # max
            DEFAULT_THRESHOLD,  # default
            GObject.ParamFlags.READWRITE),
    }

    def __init__(self):

        super(GstNeoDLR, self).__init__()

        # Initialize properties before Base Class initialization
        self.model_dir = DEFAULT_MODEL_DIR
        self.device_type = DEFAULT_DEVICE_TYPE
        self.image_size = DEFAULT_IMAGE_SIZE
        self.threshold = DEFAULT_THRESHOLD
        self.model = None

    def do_get_property(self, prop: GObject.GParamSpec):
        if prop.name == 'model-dir':
            return self.model_dir
        elif prop.name == 'device-type':
            return self.device_type
        elif prop.name == 'image-size':
            return self.image_size
        elif prop.name == 'threshold':
            return self.threshold
        else:
            raise AttributeError('unknown property %s' % prop.name)

    def load_model(self):
        if self.model is not None:
            return
        if self.model_dir == "":
            return
        if os.path.exists(self.model_dir):
            # Load model
            print("Loading model from %s..." % self.model_dir)
            self.model = DLRModel(self.model_dir, self.device_type)
            print("Done.")

            print("Warming up DLR engine...")
            start_time = time.time()
            x = np.random.rand(1, 3, self.image_size, self.image_size)
            result = self.model.run(x)
            print(len(result))
            print(result[0].shape)
            print('inference time is ' + str((time.time() - start_time)) +
                  ' seconds')
            print("Done.")

    def do_set_property(self, prop: GObject.GParamSpec, value):
        if prop.name == 'model-dir':
            self.model_dir = value
        elif prop.name == 'device-type':
            self.device_type = value
        elif prop.name == 'image-size':
            self.image_size = value
        elif prop.name == 'threshold':
            self.threshold = value
        else:
            raise AttributeError('unknown property %s' % prop.name)

    def do_transform_ip(self, buffer: Gst.Buffer) -> Gst.FlowReturn:
        try:
            # Load the model
            if self.model is None:
                self.load_model()

            # Check if model has been loaded
            if self.model is None:
                return Gst.FlowReturn.OK

            # convert Gst.Buffer to np.ndarray
            image = gst_buffer_with_caps_to_ndarray(
                buffer, self.sinkpad.get_current_caps())

            print('Testing inference...')
            start_time = time.time()
            # img_rand = np.random.rand(1, 3, 320, 320)

            # Prepare input
            image_3 = image[:, :, :3]
            img_small = cv2.resize(image_3, (self.image_size, self.image_size))
            # NOTE: np.reshape only reinterprets the buffer; it does not move the
            # channel axis to the front the way a transpose would.
            img_reshaped = np.reshape(img_small,
                                      (1, 3, self.image_size, self.image_size))

            # # Normalize & transpose
            # mean_vec = np.array([0.485, 0.456, 0.406])
            # stddev_vec = np.array([0.229, 0.224, 0.225])
            # img_reshaped = (img_reshaped/255 - mean_vec)/stddev_vec
            # img_reshaped = np.rollaxis(img_reshaped, axis=2, start=0)[np.newaxis, :]

            # Run inference
            result = self.model.run(img_reshaped)
            print('inference time is ' + str((time.time() - start_time)) +
                  ' seconds')

            # Process inference output
            temp = []
            for r in result:
                r = np.squeeze(r)
                temp.append(r.tolist())
            idx, score, bbox = temp
            bbox = np.asarray(bbox)
            res = np.hstack((np.column_stack((idx, score)), bbox))
            l = list()
            for r in res:
                (class_id, score, x0, y0, x1, y1) = r
                if score < self.threshold:
                    continue
                d = {
                    "bounding_box": (int(x0), int(y0), int(x1), int(y1)),
                    "confidence": score,
                    "class_name": "class_name",
                    "track_id": int(class_id)
                }
                l.append(d)
            print(l)
            gst_meta_write(buffer, l)

        except Exception as e:
            logging.error(e)

        return Gst.FlowReturn.OK
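Once registered, an element like this would typically be dropped into a pipeline from Python; a sketch (the pipeline string, file name, and property values are assumptions):

import gi
gi.require_version('Gst', '1.0')
from gi.repository import Gst

Gst.init(None)
# Assumes the 'neodlr' plugin has already been registered with GStreamer.
pipeline = Gst.parse_launch(
    "filesrc location=sample.mp4 ! decodebin ! videoconvert ! "
    "neodlr model-dir=/path/to/compiled-model device-type=cpu image-size=320 threshold=0.5 ! "
    "fakesink")
pipeline.set_state(Gst.State.PLAYING)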
Example #25
    img = np.rollaxis(img, axis=2, start=0)[np.newaxis, :]
    img = img.astype(np.float32)
    return img


class_map = None
parser = argparse.ArgumentParser(description="Xavier Work")
parser.add_argument(
    '--classmap',
    type=str,
    help="Name of the classmap to use, options are currently 'VOC' or 'CARDBOT'"
)
parser.add_argument('--modelpath', type=str, help="Path to the model files")
args = parser.parse_args()

dlr_model = DLRModel(model_path=args.modelpath, dev_type='gpu')
val_path = 'observations'
out_path = 'results'

cardbot_map = [
    'AH', 'KH', 'QH', 'JH', '10H', '9H', '8H', '7H', '6H', '5H', '4H', '3H',
    '2H', 'AD', 'KD', 'QD', 'JD', '10D', '9D', '8D', '7D', '6D', '5D', '4D',
    '3D', '2D', 'AC', 'KC', 'QC', 'JC', '10C', '9C', '8C', '7C', '6C', '5C',
    '4C', '3C', '2C', 'AS', 'KS', 'QS', 'JS', '10S', '9S', '8S', '7S', '6S',
    '5S', '4S', '3S', '2S'
]
voc_map = [
    "aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat",
    "chair", "cow", "diningtable", "dog", "horse", "motorbike", "person",
    "pottedplant", "sheep", "sofa", "train", "tvmonitor"
]
Example #26
def run_inference():
    now = datetime.now()
    dt_string = now.strftime("%d/%m/%Y %H:%M:%S")
    print("date and time =", dt_string)

    locate = ""  # fall back to an empty string if no TPV report is received
    for x in range(4):

        rep = session.next()
        try:
            if rep["class"] == "TPV":
                print(str(rep.lat) + "," + str(rep.lon))
                locate = ("Timestamp " + dt_string + ", Latitude: " +
                          str(rep.lat) + ", Longitude: " + str(rep.lon))

        except Exception as e:
            print("Got exception " + str(e))
    print(time)
    #os.system('fswebcam -r 1024x768 --no-banner --scale 224x224 output.jpg -S 7 --save /home/pi/Photos/std.jpg') # uses Fswebcam to take picture
    image = Image.open('output.jpg')
    #data = np.array(image,dtype='float64')
    #data=data1.reshape((1,data1.shape[2],data1.shape[0],data1.shape[1]))

    #np.save( 'flamingo.npy', data)

    image_data = utils.transform_image(image)
    #print(image_data)
    flattened_data = image_data.astype(np.float32).flatten()
    #np.save( 'puppi.npy',flattened_data)
    #print("Start Prinring Flattern")
    #print(flattened_data)
    #run_inference(image_data)
    #time.sleep(15) # this line creates a 15 second delay before repeating the loop

    model_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                              '../model-rasp3b')
    batch_size = 1
    channels = 3
    height = width = 224
    input_shape = {'input0': [batch_size, channels, height, width]}
    classes = 1000
    output_shape = [batch_size, classes]
    device = 'cpu'
    model = DLRModel(model_path, input_shape, output_shape, device)

    synset_path = os.path.join(model_path, 'imagenet1000_clsidx_to_labels.txt')
    with open(synset_path, 'r') as f:
        synset = eval(f.read())

    #image = np.load(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'dog.npy')).astype(np.float32)
    #input_data = {'data': image_data}

    # Predict
    out = model.run({'input0': flattened_data}).squeeze()
    top1 = np.argmax(out)
    prob = np.max(out)

    # Check whether the predicted label contains any of the target substrings
    # ('list' here is assumed to be a list of keywords defined elsewhere in the script)
    for i in list:
        if synset[top1].find(i) != -1:
            print("Contains given substring ")
            GPIO.output(12, GPIO.HIGH)  # Turn on
            sleep(10)  # Sleep for 10 seconds
            GPIO.output(12, GPIO.LOW)  # Turn off
        #sleep(10)

        #else:
        #print ("Doesn't contains given substring")
        #print(i)
    print("Class: %s, probability: %f" % (synset[top1], prob))
    #while True: # Run forever
    #GPIO.output(8, GPIO.HIGH) # Turn on
    #sleep(10) # Sleep for 1 second
    #GPIO.output(8, GPIO.LOW) # Turn off
    #sleep(10)

    #for rep in range(4):
    #t1 = current_milli_time()
    #out = model.run({'input0' : flattened_data}).squeeze()
    #t2 = current_milli_time()

    #logging.debug('done m.run(), time (ms): {}'.format(t2 - t1))

    #top1 = np.argmax(out)

    print(locate)
    logging.debug('Inference result: {}, {}'.format(locate, synset[top1]))
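The GPIO calls above rely on setup code that is not shown; presumably something along these lines runs earlier in the script:

import RPi.GPIO as GPIO
from time import sleep

GPIO.setmode(GPIO.BOARD)   # or GPIO.BCM, depending on how pin 12 is numbered (assumption)
GPIO.setup(12, GPIO.OUT, initial=GPIO.LOW)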
Example #27
from cv2 import VideoCapture, destroyAllWindows, imdecode, imread, resize
from dlr import DLRModel
from numpy import argsort, fromstring, load, uint8

config_utils.logger.info("Using dlr from '{}'.".format(
    modules[DLRModel.__module__].__file__))
config_utils.logger.info("Using np from '{}'.".format(
    modules[argsort.__module__].__file__))
config_utils.logger.info("Using cv2 from '{}'.".format(
    modules[VideoCapture.__module__].__file__))

# Read synset file
with open(config_utils.LABELS, "r") as f:
    synset = literal_eval(f.read())

dlr_model = DLRModel(config_utils.MODEL_DIR, config_utils.DEFAULT_ACCELERATOR)


def enable_camera():
    if platform.machine() == "armv7l":  # RaspBerry Pi
        import picamera

        config_utils.CAMERA = picamera.PiCamera()
    elif platform.machine() == "aarch64":  # Nvidia Jetson TX
        config_utils.CAMERA = VideoCapture(
            "nvarguscamerasrc ! video/x-raw(memory:NVMM)," +
            "width=(int)1920, height=(int)1080, format=(string)NV12," +
            "framerate=(fraction)30/1 ! nvvidconv flip-method=2 !" +
            "video/x-raw, width=(int)1920, height=(int)1080," +
            "format=(string)BGRx ! videoconvert ! appsink")
    elif platform.machine() == "x86_64":  # Deeplens
Example #28
#!/usr/bin/env python3
import time
import datetime
import numpy as np
import boto3
from dlr import DLRModel
import greengrasssdk
import PIL.Image

mqtt_client = greengrasssdk.client('iot-data')
model_resource_path = ('/ml_model')
dlr_model = DLRModel(model_resource_path, 'gpu')

cloudwatch = boto3.client('cloudwatch')
prev_class = -1

dino_names = [
    'Spinosaurus', 'Dilophosaurus', 'Stegosaurus', 'Triceratops',
    'Brachiosaurus', 'Unknown'
]


def push_to_cloudwatch(name, value):
    try:
        response = cloudwatch.put_metric_data(Namespace='dino-detect',
                                              MetricData=[
                                                  {
                                                      'MetricName': name,
                                                      'Value': value,
                                                      'Unit': 'Percent'
                                                  },
Example #29
    def main(self):
        # SagemakerNeo init
        self.model = DLRModel(self.model_path, 'cpu')

        # Gstreamer Init
        Gst.init(None)

        pipeline1_cmd="v4l2src device="+self.videosrc+" do-timestamp=True ! videoconvert ! \
            videoscale n-threads=4 method=nearest-neighbour ! \
            video/x-raw,format=RGB,width="+str(WIDTH)+",height="+str(HEIGHT)+" ! \
            queue leaky=downstream max-size-buffers=1 ! appsink name=sink \
            drop=True max-buffers=1 emit-signals=True max-lateness=8000000000"

        pipeline2_cmd = "appsrc name=appsource1 is-live=True block=True ! \
            video/x-raw,format=RGB,width="+str(WIDTH)+",height="+ \
            str(HEIGHT)+",framerate=20/1,interlace-mode=(string)progressive ! \
            videoconvert ! waylandsink" #v4l2sink max-lateness=8000000000 device=/dev/video14"

        self.pipeline1 = Gst.parse_launch(pipeline1_cmd)
        appsink = self.pipeline1.get_by_name('sink')
        appsink.connect("new-sample", self.on_new_frame, appsink)

        self.pipeline2 = Gst.parse_launch(pipeline2_cmd)
        self.appsource = self.pipeline2.get_by_name('appsource1')

        self.pipeline1.set_state(Gst.State.PLAYING)
        bus1 = self.pipeline1.get_bus()
        self.pipeline2.set_state(Gst.State.PLAYING)
        bus2 = self.pipeline2.get_bus()

        # Main Loop
        while True:
            message = bus1.timed_pop_filtered(10000, Gst.MessageType.ANY)
            if message:
                if message.type == Gst.MessageType.ERROR:
                    err,debug = message.parse_error()
                    print("ERROR bus 1:",err,debug)
                    self.pipeline1.set_state(Gst.State.NULL)
                    self.pipeline2.set_state(Gst.State.NULL)
                    quit()

                if message.type == Gst.MessageType.WARNING:
                    err,debug = message.parse_warning()
                    print("WARNING bus 1:",err,debug)

                if message.type == Gst.MessageType.STATE_CHANGED:
                    old_state, new_state, pending_state = message.parse_state_changed()
                    print("INFO: state on bus 2 changed from ",old_state," To: ",new_state)
            message = bus2.timed_pop_filtered(10000, Gst.MessageType.ANY)
            if message:
                if message.type == Gst.MessageType.ERROR:
                    err,debug = message.parse_error()
                    print("ERROR bus 2:",err,debug)
                    self.pipeline1.set_state(Gst.State.NULL)
                    self.pipeline2.set_state(Gst.State.NULL)
                    quit()

                if message.type == Gst.MessageType.WARNING:
                    err,debug = message.parse_warning()
                    print("WARNING bus 2:",err,debug)

                if message.type == Gst.MessageType.STATE_CHANGED:
                    old_state, new_state, pending_state = message.parse_state_changed()
                    print("INFO: state on bus 2 changed from ",old_state," To: ",new_state)
Example #30
import os
import numpy as np
from PIL import Image
from dlr import DLRModel

# Load the compiled model
input_shape = {'data': [1, 3, 224, 224]}  # A single RGB 224x224 image
output_shape = [1, 8]  # The probability for each of the model's 8 classes
device = 'cpu'  # Go, Raspberry Pi, go!
model = DLRModel('resnet50-custom', input_shape, output_shape, device)

# Load names for ImageNet classes
synset_path = os.path.join('resnet50-custom', 'synset.txt')
with open(synset_path, 'r') as f:
    synset = eval(f.read())

# Load the image
image = Image.open('image.jpeg')
image.load()

# Resize the image
new_width = 224
new_height = 224
image = image.resize((new_width, new_height), Image.ANTIALIAS)
image.save('image224.jpeg')

# Create image numpy array
image = np.array(image) - np.array([123.68, 116.779, 103.939])
image /= np.array([58.395, 57.12, 57.375])
image = image.transpose((2, 0, 1))
image = image[np.newaxis, :]
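The listing stops after preprocessing; the natural next step, assuming the input tensor is named 'data' as declared in input_shape above, would be something like:

# Run the model and report the top class (sketch; mirrors the pattern of Example #1).
out = model.run({'data': image.astype(np.float32)})
top1 = int(np.argmax(out[0]))
print('Class: %s, probability: %f' % (synset[top1], float(np.max(out[0]))))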