def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--checkpoint-path', required=True)
    parser.add_argument('--image', nargs='+', required=True)
    parser.add_argument('--num-classes', type=int, required=True)
    args = parser.parse_args()

    model = SqueezeNet(weights=None, classes=args.num_classes)
    model.load_weights(args.checkpoint_path)

    xs = []
    for path in args.image:
        img = image.load_img(path, target_size=(SIZE, SIZE))
        x = image.img_to_array(img)
        xs.append(x)

    xs = np.array(xs)
    xs = preprocess_input(xs)

    probs = model.predict(xs)

    print('')
    for i, path in enumerate(args.image):
        print('%s' % path)
        print('    Prediction: %s' % np.argmax(probs[i]))
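The script above defines main() but the entry-point guard falls outside the excerpt; a minimal sketch of how it would be wired up and invoked (the file name and class count below are hypothetical):

if __name__ == '__main__':
    main()

# example invocation:
#   python predict.py --checkpoint-path checkpoints/weights.h5 \
#       --image cat.jpg dog.jpg --num-classes 5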
Example #2
class TLClassifier:
    def __init__(self, is_site):
        # TODO: load the site (real-world) classifier
        assert not is_site
        weights_file = 'light_classification/models/squeezenet_weights.h5'  # replace with a real-world classifier

        image_shape = (224, 224, 3)

        self.states = (TrafficLight.RED, TrafficLight.YELLOW,
                       TrafficLight.GREEN, TrafficLight.UNKNOWN)

        print('Loading model..')
        self.model = SqueezeNet(len(self.states), *image_shape)
        self.model.load_weights(weights_file, by_name=True)
        # build the predict function up front so predict() is safe to call from another thread
        self.model._make_predict_function()
        print('Loaded weights: %s' % weights_file)

    def get_classification(self, image):
        """Determines the color of the traffic light in the image

        Args:
            image (cv::Mat): image containing the traffic light

        Returns:
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)

        """
        # resize to the network input, convert BGR -> RGB, add a batch axis and scale to [0, 1]
        mini_batch = cv2.resize(
            image, (224, 224),
            interpolation=cv2.INTER_AREA).astype('float')[np.newaxis, ..., ::-1] / 255.
        light = self.states[np.argmax(self.model.predict(mini_batch))]

        return light
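A minimal usage sketch for the classifier above, assuming a BGR frame already read with OpenCV (the file name is hypothetical):

classifier = TLClassifier(is_site=False)
frame = cv2.imread('camera_frame.png')        # BGR image from the camera
state = classifier.get_classification(frame)  # TrafficLight.RED / YELLOW / GREEN / UNKNOWN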
def predict(img_local_path):
    model = SqueezeNet(weights='imagenet')
    img = image.load_img(img_local_path, target_size=(227, 227))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    preds = model.predict(x)
    result = decode_predictions(preds)
    return result
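Called on a local file, the helper above returns the usual decode_predictions structure; for example (the file name is hypothetical):

for _, label, prob in predict('cat.jpg')[0]:
    print('%.2f%% => %s' % (100 * prob, label))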
Example #4
import numpy as np
from keras.applications.imagenet_utils import preprocess_input, decode_predictions
from keras.preprocessing import image
from squeezenet import SqueezeNet
import matplotlib.pyplot as plt
# import matplotlib.image as mpimg
model = SqueezeNet()

img = image.load_img('2.jpg', target_size=(227, 227))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)

preds = model.predict(x)
all_results = decode_predictions(preds)
for results in all_results:
    for result in results:
        print('Probability %0.2f%% => [%s]' % (100 * result[2], result[1]))
        result_text = 'Probability %0.2f%% => [%s]' % (100 * result[2],
                                                       result[1])
        break

plt.figure(num=1, figsize=(8, 6), dpi=80)
plt.imshow(img)
plt.text(130,
         90,
         result_text,
         horizontalalignment='center',
         verticalalignment='center',
         fontsize=16,
         color='black')
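Run as a standalone script, the figure above is built but never shown on screen; the two extra calls (commented out in the variant further below) would be:

plt.axis('off')
plt.show()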
Example #5
class DNNModel:
    def __init__(self, image_path):
        self.IMAGE_SIZE = 64
        self.data = []
        self.labels = []
        self.model = self.build_model()

        if image_path is not None:
            self.image_path = image_path
        else:
            self.image_path = "/home/madi/deeplearning/raspberry-pi/datasets"

    def gen_training_image_set(self):
        imagePaths = os.listdir(self.image_path)
        # loop over the input images
        for imagePath in imagePaths:
            # load the image, pre-process it, and store it in the data list
            imagePath = self.image_path + "/" + imagePath
            print(imagePath)
            image = cv2.imread(imagePath)
            image = cv2.resize(image, (self.IMAGE_SIZE, self.IMAGE_SIZE))
            image = img_to_array(image)

            self.data.append(image)

            # extract the class label from the image file name and update the
            # labels list (e.g. "left_001.jpg" -> 1, "right_001.jpg" -> 2)
            if "left" in imagePath.split(os.path.sep)[-1]:
                label = 1
            elif "right" in imagePath.split(os.path.sep)[-1]:
                label = 2
            else:
                label = 0

            self.labels.append(label)

        # scale the raw pixel intensities to the range [0, 1]
        self.data = np.array(self.data, dtype="float") / 255.0
        self.labels = np.array(self.labels)

    def add_training_sample(self, data, label):
        image = cv2.resize(data, (self.IMAGE_SIZE, self.IMAGE_SIZE))
        image = img_to_array(image)
        self.data.append(image)
        self.labels.append(label)

    def scale_and_norm_training_samples(self):
        # scale the raw pixel intensities to the range [0, 1]
        self.data = np.array(self.data, dtype="float") / 255.0
        self.labels = np.array(self.labels)

    def build_model(self):
        self.model = SqueezeNet(include_top=True,
                                weights=None,
                                classes=3,
                                input_shape=(self.IMAGE_SIZE, self.IMAGE_SIZE,
                                             3))
        self.model.summary()
        opt = Adam()
        # three output classes with one-hot labels, so categorical cross-entropy is the right loss
        self.model.compile(loss="categorical_crossentropy",
                           optimizer=opt,
                           metrics=["accuracy"])
        return self.model

    def train(self):
        # split train and test set
        (trainX, testX, trainY, testY) = train_test_split(self.data,
                                                          self.labels,
                                                          test_size=0.25,
                                                          random_state=42)

        # convert the labels from integers to vectors
        trainY = to_categorical(trainY, num_classes=3)
        testY = to_categorical(testY, num_classes=3)

        print(trainX.shape)
        print(trainY.shape)
        self.model.fit(trainX,
                       trainY,
                       batch_size=1,
                       epochs=50,
                       verbose=1,
                       validation_data=(testX, testY))
        self.test()

    def predict(self, img_frame):
        img_frame = cv2.resize(img_frame, (self.IMAGE_SIZE, self.IMAGE_SIZE))
        img_frame = img_to_array(img_frame)
        data = np.array([img_frame])
        # scale the raw pixel intensities to the range [0, 1]
        data = np.array(data, dtype="float") / 255.0

        ret = self.model.predict(data)

        if len(ret) > 0:
            return ret[0]
        return None

    def save_model(self):
        self.model.save("greenball_squeezenet_local.h5")

    def load_model(self, path):
        self.model = load_model(path)

    def test(self):
        cnt = 0
        for i in range(len(self.data)):
            # model.predict expects a batch dimension, so wrap each single sample
            ret = self.model.predict(np.expand_dims(self.data[i], axis=0))
            pred = np.argmax(ret)
            if pred == self.labels[i]:
                cnt += 1
        print("total correct number is %d" % cnt)
Example #6
# images = np.array([cv2.resize(cv2.cvtColor(im, cv2.COLOR_GRAY2RGB), (227, 227)) for im in images])
# images = np.array(images)
# print images.shape
# classes = to_categorical(classes, nb_classes=nr_classes)

print('Loading model..')
model = SqueezeNet(nb_classes, input_shape=input_shape)
adam = Adam(lr=0.0040)
model.compile(loss="categorical_crossentropy",
              optimizer=adam,
              metrics=['accuracy'])
if os.path.isfile(weights_file):
    print('Loading weights: %s' % weights_file)
    model.load_weights(weights_file, by_name=True)

total = 0
correct = 0

for images, classes in training_data:
    for i in range(len(images)):
        img = np.expand_dims(images[i], axis=0)
        cl = classes[i]
        cl = cl.argsort()[-1:][::-1]
        result = model.predict(img)
        res = result[0].argsort()[-1:][::-1]
        if res[0] == cl[0]:
            correct += 1
        total += 1
        print('result: %s / %s, correct: %s/%s' % (res, cl, correct, total))
Example #7
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--checkpoint-path', required=True)
    parser.add_argument('--test-dir', default='data/test')
    parser.add_argument('--output-file', default='confusion_matrix.png')
    parser.add_argument('--batch-size', type=int, default=32)
    parser.add_argument('--limit', type=int, default=0)
    parser.add_argument('--num-classes', type=int, required=True)
    args = parser.parse_args()

    model = SqueezeNet(weights=None, classes=args.num_classes)
    model.load_weights(args.checkpoint_path)

    data = []
    classes = sorted(os.listdir(args.test_dir))
    if len(classes) != args.num_classes:
        raise ValueError('expecting %d classes, found %d in %s' % (
            args.num_classes,
            len(classes),
            args.test_dir
        ))

    for ic in range(len(classes)):
        directory = os.path.join(args.test_dir, classes[ic])
        for path in os.listdir(directory):
            full_path = os.path.join(directory, path)
            data.append((full_path, ic))

    rng = random.Random(0)
    rng.shuffle(data)
    if args.limit > 0:
        data = data[:args.limit]

    chunked = list(chunks(data, args.batch_size))
    gstart = time.time()
    cmat = np.zeros((len(classes), len(classes)), dtype=int)
    last_print = 0
    for i, chunk in enumerate(chunked):
        start = time.time()
        paths, ys = zip(*chunk)
        xs = []
        for path in paths:
            img = image.load_img(path, target_size=(SIZE, SIZE))
            x = image.img_to_array(img)
            xs.append(x)
        xs = np.array(xs)
        xs = preprocess_input(xs)

        probs = model.predict(xs, batch_size=args.batch_size)
        preds = probs.argmax(axis=1)
        for actual, predicted in zip(ys, preds):
            cmat[actual][predicted] += 1

        diff = time.time() - start
        gdiff = time.time() - gstart
        if time.time() - last_print > 1 or i == len(chunked)-1:
            last_print = time.time()
            print('batch %d/%d (in %.3fs, %.1fs elapsed, %.1fs remaining)' % (
                i+1,
                len(chunked),
                diff,
                gdiff,
                gdiff / (i+1) * (len(chunked)-i-1)
            ))

    print(cmat)
    plot_cmat(cmat, classes, args.output_file)
    print('saved figure to %s' % args.output_file)
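The batching loop above relies on a chunks() helper that is not included in the excerpt; a minimal sketch, assuming it simply yields fixed-size slices of the shuffled (path, class) list:

def chunks(items, size):
    # yield successive slices of at most `size` elements
    for i in range(0, len(items), size):
        yield items[i:i + size]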
def predict(path, model_path, index_file_path, MainUI):

    try:
        result_string = " Detected Object : Probability \n \n"

        if not MainUI.squeezenet_model_loaded:
            wx.CallAfter(
                pub.sendMessage,
                "report101",
                message=
                "Loading SqueezeNet model for the first time. This may take few minutes or less than a minute. Please wait. \nLoading....."
            )
            model = SqueezeNet(model_path=model_path)
            wx.CallAfter(
                pub.sendMessage,
                "report101",
                message=
                "SqueezeNet model loaded.. Picture about to be processed.. \nLoading......"
            )
            MainUI.model_collection_squeezenet.append(model)
            MainUI.squeezenet_model_loaded = True
        else:
            wx.CallAfter(pub.sendMessage,
                         "report101",
                         message="Retrieving loaded model. \nLoading........")
            model = MainUI.model_collection_squeezenet[0]
            wx.CallAfter(
                pub.sendMessage,
                "report101",
                message=
                "ResNet model loaded.. Picture about to be processed.. \nLoading......"
            )

        img = image.load_img(path, target_size=(227, 227))
        img = image.img_to_array(img, data_format="channels_last")
        img = np.expand_dims(img, axis=0)

        img = preprocess_input(img, data_format="channels_last")
        wx.CallAfter(
            pub.sendMessage,
            "report101",
            message="Picture is transformed for prediction. \nLoading........")

        prediction = model.predict(img, steps=1)
        wx.CallAfter(
            pub.sendMessage,
            "report101",
            message=
            "Picture prediction is done. Sending in results. \nLoading......")

        predictiondata = decode_predictions(prediction,
                                            top=10,
                                            index_file_path=index_file_path)

        for results in predictiondata:
            countdown = 0
            for result in results:
                countdown += 1
                result_string += "(" + str(countdown) + ") " + str(
                    result[1]) + " : " + str(100 * result[2])[0:4] + "% \n"

        return result_string
    except Exception as e:
        return getattr(e, "message", repr(e))
import numpy as np
from keras.applications.imagenet_utils import preprocess_input, decode_predictions
from keras.preprocessing import image
from squeezenet import SqueezeNet
#import matplotlib.pyplot as plt
#import matplotlib.image as mpimg


model = SqueezeNet()

img = image.load_img('pexels-photo-280207.jpeg', target_size=(227, 227))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)

preds = model.predict(x)
all_results = decode_predictions(preds)
for results in all_results:
    for result in results:
        print('Probability %0.2f%% => [%s]' % (100*result[2], result[1]))
        #result_text= 'Probability %0.2f%% => [%s]' % (100*result[2], result[1])
        #break
#plt.figure(num=1,figsize=(8, 6), dpi=80)
#plt.imshow(img)
#plt.text(130,90,result_text,horizontalalignment='center', verticalalignment='center',fontsize=16,color='black')
#plt.axis('off')
#plt.show()

Example #10
nr_classes = len(decode)
images = np.array(images)

print('Loading model..')
model = SqueezeNet(nr_classes)
model.compile(loss="categorical_crossentropy", optimizer="adam")
if os.path.isfile(weights_file):
    print('Loading weights...')
    model.load_weights(weights_file)

print("Classifying images...")
# predictions = model.predict(images, batch_size=100, verbose=1)
# print('Predicted %s images' % len(predictions))
for i in range(len(images)):
    img = np.expand_dims(images[i], axis=0)
    res = model.predict(img)
    results = res[0].argsort()[-5:][::-1]
    print('%s: ' % paths[i])
    for j in range(len(results)):
        result = decode[results[j]]
        text = '%.3f: %s' % (res[0][results[j]], result)
        print(text)

    # confidences = predictions[i].argsort()[-5:][::-1]
    # result_classes = [(decode_dict[c]) for c in confidences]
    # prediction = model.predict(img)
    # print('%s. prediction: %s' % (i, prediction))
    # result_classes = [(decode[j], prediction[j]) for j in xrange(len(prediction))]
    # path = paths[i]
    # print('%s: %s ' % (path, result_classes))