Code Example #1
    def save(self, *args, **kwargs):
        try:
            img = load_img(self.picture, target_size=(299,299))
            img_array = img_to_array(img)
            ## The model expects a 4D array because it can take a batch of images,
            ## but we only have one, so we add a batch axis at the front:
            ## (299, 299, 3) -> (1, 299, 299, 3), using numpy.
            to_pred = np.expand_dims(img_array, axis=0)  # (1, 299, 299, 3)

            # preprocess input for InceptionResNetV2 (scales pixels to [-1, 1])
            preprocess = preprocess_input(to_pred)
            ## load the ImageNet-pretrained model
            model = InceptionResNetV2(weights='imagenet')
            prediction = model.predict(preprocess)  ## (1, 1000) array of class probabilities
            
            # this decodes the prediction
            decoded = decode_predictions(prediction)[0][0]
            thingName = decoded[1]
            probab = decoded[2]
            #print(decoded)
            #print(thingName)
            #print(probab)

            self.classification = thingName
            self.probability = probab
            print('success')
        except Exception as e:
            print('classification failed', e)
        super().save(*args, **kwargs)
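Code Example #1 is a Django Model.save() override, so it relies on module-level imports and model fields that the snippet does not show. Below is a minimal sketch of the surrounding models.py, assuming TensorFlow's bundled Keras; the field names picture, classification, and probability come from the code above, while the class name Picture and the field options are illustrative only.

import numpy as np
from django.db import models
from tensorflow.keras.applications.inception_resnet_v2 import (
    InceptionResNetV2, preprocess_input, decode_predictions)
from tensorflow.keras.preprocessing.image import load_img, img_to_array


class Picture(models.Model):  # hypothetical model name
    picture = models.ImageField(upload_to='images/')
    classification = models.CharField(max_length=200, blank=True)
    probability = models.FloatField(null=True, blank=True)

    def save(self, *args, **kwargs):
        # self.picture.path is used here, as in Code Example #5; some of the
        # examples pass the field object itself.
        img = load_img(self.picture.path, target_size=(299, 299))
        # (299, 299, 3) -> (1, 299, 299, 3): predict() expects a batch axis
        to_pred = np.expand_dims(img_to_array(img), axis=0)
        prediction = InceptionResNetV2(weights='imagenet').predict(
            preprocess_input(to_pred))
        # decode_predictions yields (wordnet_id, class_name, score) tuples
        _, self.classification, self.probability = decode_predictions(prediction)[0][0]
        super().save(*args, **kwargs)

Note that constructing the model inside save(), as these examples do, reloads the weights on every call; caching it at module level avoids that cost.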
Code Example #2
    def saveImg(self, frame, x, y, w, h):
        global helmet_count, no_helmet_count, current_video, ref_area, the_line
        area = w * h
        # ref_area = area if ref_area == 0 else ref_area
        # if area < ref_area*0.5:
        #     return
        y0 = y - extra_top if y - extra_top > 0 else 0
        bike_img = frame[y0:y + h, x:x + w]
        sqr_img = cv2.resize(bike_img, (299, 299))

        # img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        sqr_img = image.img_to_array(sqr_img)
        imgx = np.expand_dims(sqr_img, axis=0)
        imgx = preprocess_input(imgx)  # scales pixels to [-1, 1]; use /255. instead if results look wrong
        preds = general_model.predict(imgx)

        top = decode_predictions(preds, top=5)[0]

        for result in top:
            if result[1] == 'motor_scooter':  #and result[2] > 0.1:
                self.detectToken = 0
                the_line = int(the_line * 0.5 + self.y * 0.5)
                img = cv2.resize(bike_img, (299, 299))
                img = img / 255.
                if not self.rightward:
                    img = cv2.flip(img, 1)

                preds = helmet_model.predict(np.expand_dims(img, axis=0))  # batch of one
                helmet = preds[0][1]

                # derive the base name of the current video (filename without extension)
                spt = current_video.split('/')
                spt = spt[-1]
                spt = spt.split('.')
                spt = spt[0]
                result_path = ''
                if helmet > 0.5:  #helmet
                    helmet_count += 1
                    result_path = "extracted/helmet/" + spt + "#" + str(
                        helmet_count)
                    self.status = (0, 255, 0)
                else:
                    helmet = 1 - helmet
                    no_helmet_count += 1
                    result_path = "extracted/no_helmet/" + spt + "#" + str(
                        no_helmet_count)
                    self.status = (0, 0, 255)

                helmet = str(helmet * 100)
                helmet = helmet.split('.')
                a = helmet[0]
                b = helmet[1][:2]
                result_path += " [" + a + "." + b + "%].jpg"
                cv2.imwrite(result_path, bike_img)
                break
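The confidence formatting at the end of saveImg (stringify, split on '.', rejoin) can be written with a format specification instead. A minimal, self-contained sketch; note that '{:.2f}' rounds where the split/slice above truncates, and the prefix value is hypothetical.

helmet = 0.93724                              # example confidence value
result_path = "extracted/helmet/video#12"     # hypothetical prefix
result_path += " [{:.2f}%].jpg".format(helmet * 100)
print(result_path)  # extracted/helmet/video#12 [93.72%].jpg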
Code Example #3
File: models.py  Project: daquesada/images
    def save(self, *args, **kwargs):
        try:
            img = load_img(self.picture, target_size=(299, 299))
            img_array = img_to_array(img)
            to_pred = np.expand_dims(img_array, axis=0)
            prep = preprocess_input(to_pred)
            model = InceptionResNetV2(weights="imagenet")
            prediction = model.predict(prep)
            decoded = decode_predictions(prediction)[0][0][1]
            self.classified = str(decoded)
        except Exception as e:
            print("error", e)
        super().save(*args, **kwargs)
Code Example #4
    def saveImg(self, frame, x, y, w, h):
        global helmet_count, no_helmet_count
        global current_video
        y0 = y - extra_top if y - extra_top > 0 else 0
        bike_img = frame[y0:y + h, x:x + w]
        sqr_img = cv2.resize(bike_img, (299, 299))

        # img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        sqr_img = image.img_to_array(sqr_img)
        imgx = np.expand_dims(sqr_img, axis=0)
        imgx = preprocess_input(imgx)  # scales pixels to [-1, 1]; use /255. instead if results look wrong
        preds = general_model.predict(imgx)

        top = decode_predictions(preds, top=5)[0]

        for result in top:
            if result[1] == 'motor_scooter':  #and result[2] > 0.1:
                img = cv2.resize(bike_img, (299, 299))
                img = img / 255.
                flip = ''
                if not self.rightward:
                    img = cv2.flip(img, 1)
                    flip = 'flip'

                preds = helmet_model.predict(np.expand_dims(img, axis=0))  # batch of one
                helmet = preds[0][1]

                spt = current_video.split('/')
                spt = spt[-1]
                spt = spt.split('.')
                spt = spt[0]
                result_path = ''
                if helmet > 0.5:  #helmet
                    helmet_count += 1
                    result_path = path + "/helmet/" + spt + "#" + str(
                        helmet_count)
                else:
                    helmet = 1 - helmet
                    no_helmet_count += 1
                    result_path = path + "/no_helmet/" + spt + "#" + str(
                        no_helmet_count)

                helmet = str(helmet * 100)
                helmet = helmet.split('.')
                a = helmet[0]
                b = helmet[1][:2]
                result_path += flip + " [" + a + "." + b + "%].jpg"
                cv2.imwrite(result_path, bike_img)
                break
        self.isSaved = True
Code Example #5
    def save(self, *args, **kwargs):
        try:
            img = load_img(self.picture.path, target_size=(299, 299))
            img_arr = img_to_array(img)
            to_pred = np.expand_dims(img_arr, axis=0)  # (1, 299, 299, 3)
            prep = preprocess_input(to_pred)
            model = InceptionResNetV2(weights='imagenet')
            prediction = model.predict(prep)
            decoded = decode_predictions(prediction)[0][0][1]
            self.classified = str(decoded)
            print('Success')
        except Exception as e:
            print(f"Classification Failed {e}")
        super().save(*args, **kwargs)
Code Example #6
File: models.py  Project: vikas080889/react-django-ML
    def save(self, *args, **kwargs):
        try:
            # print(self.picture)
            img = load_img(self.picture, target_size=(299, 299))
            img_array = img_to_array(img)
            to_pred = np.expand_dims(img_array, axis=0)
            prep = preprocess_input(to_pred)
            model = InceptionResNetV2(weights='imagenet')
            prediction = model.predict(prep)
            decoded = decode_predictions(prediction)[0][0][1]
            self.classified = decoded
            print("success")
        except Exception as e:
            print('classification failed', e)
        super().save(*args, **kwargs)
Code Example #7
def predict(img_path):
    img = image.load_img(img_path, target_size=(299, 299))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)

    preds = model.predict(x)
    # decode the results into a list of tuples (class, description, probability)
    # (one such list for each sample in the batch)

    return [{
        'probability': float(p[2]),
        'name': p[1],
        'categories': synset.tree(p[0])
    } for p in decode_predictions(preds, top=2)[0]]
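The float(p[2]) cast in predict() is worth noting: decode_predictions returns numpy.float32 scores, which the standard json module cannot serialize, so the cast matters if the result is returned as JSON (the web-endpoint use is an assumption, not shown in the source). A quick demonstration:

import json
import numpy as np

score = np.float32(0.8266)  # stand-in for p[2] from decode_predictions
try:
    json.dumps({'probability': score})
except TypeError as err:
    print('without float():', err)
print('with float():', json.dumps({'probability': float(score)}))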
Code Example #8
    def save(self, *args, **kwargs):
        try:

            img = load_img(self.picture, target_size=(299, 299))
            img_array = img_to_array(img)
            to_predict = np.expand_dims(img_array, axis=0)
            preprocessed = preprocess_input(to_predict)
            model = InceptionResNetV2(weights='imagenet')
            prediction = model.predict(preprocessed)
            decode = decode_predictions(prediction)[0][0][1]
            self.classified = str(decode)
            print('success')

        except Exception as e:
            print("classification failed", e)
        super().save(*args, **kwargs)
Code Example #9
import matplotlib.pyplot as plt

model = InceptionResNetV2(weights='imagenet')
count = 0
for i in range(60):
    img_path = 'crop_image2/bike_'+str(i)+'.jpg'
    img = image.load_img(img_path, target_size=(299, 299))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)

    preds = model.predict(x)

# decode the results into a list of tuples (class, description, probability)
# (one such list for each sample in the batch)
    t3 = decode_predictions(preds, top=3)[0]
    found = False
    for result in t3:
        
        if result[1] == 'motor_scooter':
            # plt.imshow(img)
            # plt.show()
            count+=1
            print(i, 'is motor scooter')
            found = True
    if not found:
        print(i, 'is something else', t3)
print('scooter found',count)

# Predicted: [(u'n02504013', u'Indian_elephant', 0.82658225), (u'n01871265', u'tusker', 0.1122357), (u'n02504458', u'African_elephant', 0.061040461)]
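The found-flag loop above can be condensed with any() over the decoded (class_id, class_name, score) tuples. A small helper sketch; the function name is hypothetical.

from tensorflow.keras.applications.inception_resnet_v2 import decode_predictions


def is_motor_scooter(preds, top=3):
    # preds: model output of shape (1, 1000) from model.predict(x)
    return any(name == 'motor_scooter'
               for _, name, _ in decode_predictions(preds, top=top)[0])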
Code Example #10
    def set_model(self, model_name, top_n=5):
        if model_name == 'densenet':
            self.model = densenet.DenseNet121(include_top=True,
                                              weights='imagenet',
                                              input_tensor=None,
                                              input_shape=None,
                                              pooling=None,
                                              classes=1000)
            self.target_size = (224, 224)
            self.decoder = lambda x: densenet.decode_predictions(x, top=top_n)
            self.ref = """
                <ul>
                <li><a href='https://arxiv.org/abs/1608.06993' target='_blank'>
                Densely Connected Convolutional Networks</a> (CVPR 2017 Best Paper Award)</li>
                </ul>
                """

        elif model_name == 'inception_resnet_v2':
            self.model = inception_resnet_v2.InceptionResNetV2(
                include_top=True,
                weights='imagenet',
                input_tensor=None,
                input_shape=None,
                pooling=None,
                classes=1000)
            self.target_size = (299, 299)
            self.decoder = lambda x: inception_resnet_v2.decode_predictions(
                x, top=top_n)
            self.ref = """
                <ul>
                <li><a href='https://arxiv.org/abs/1602.07261' target='_blank'>
                Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning</a></li>
                </ul>
                """

        elif model_name == 'inception_v3':
            self.model = inception_v3.InceptionV3(include_top=True,
                                                  weights='imagenet',
                                                  input_tensor=None,
                                                  input_shape=None,
                                                  pooling=None,
                                                  classes=1000)
            self.target_size = (299, 299)
            self.decoder = lambda x: inception_v3.decode_predictions(x,
                                                                     top=top_n)
            self.ref = """<ul>
                <li><a href='https://arxiv.org/abs/1512.00567' target='_blank'>
                Rethinking the Inception Architecture for Computer Vision</a></li>
                </ul>
                """

        elif model_name == 'mobilenet':
            self.model = mobilenet.MobileNet(input_shape=None,
                                             alpha=1.0,
                                             depth_multiplier=1,
                                             dropout=1e-3,
                                             include_top=True,
                                             weights='imagenet',
                                             input_tensor=None,
                                             pooling=None,
                                             classes=1000)
            self.target_size = (224, 224)
            self.decoder = lambda x: mobilenet.decode_predictions(x, top=top_n)
            self.ref = """<ul>
                <li><a href='https://arxiv.org/abs/1704.04861' target='_blank'>
                MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications</a></li>
                </ul>
                """

        elif model_name == 'mobilenet_v2':
            self.model = mobilenet_v2.MobileNetV2(input_shape=None,
                                                  alpha=1.0,
                                                  include_top=True,
                                                  weights='imagenet',
                                                  input_tensor=None,
                                                  pooling=None,
                                                  classes=1000)
            self.target_size = (224, 224)
            self.decoder = lambda x: mobilenet_v2.decode_predictions(x,
                                                                     top=top_n)
            self.ref = """<ul>
                <li><a href='https://arxiv.org/abs/1801.04381' target='_blank'>
                MobileNetV2: Inverted Residuals and Linear Bottlenecks</a></li>
                </ul>
                """

        elif model_name == 'nasnet':
            self.model = nasnet.NASNetLarge(input_shape=None,
                                            include_top=True,
                                            weights='imagenet',
                                            input_tensor=None,
                                            pooling=None,
                                            classes=1000)
            self.target_size = (331, 331)  # NASNetLarge expects 331x331 inputs
            self.decoder = lambda x: nasnet.decode_predictions(x, top=top_n)
            self.ref = """<ul>
                <li><a href='https://arxiv.org/abs/1707.07012' target='_blank'>
                Learning Transferable Architectures for Scalable Image Recognition</a></li>
                </ul>
                """

        elif model_name == 'resnet50':
            self.model = resnet50.ResNet50(include_top=True,
                                           weights='imagenet',
                                           input_tensor=None,
                                           input_shape=None,
                                           pooling=None,
                                           classes=1000)
            self.target_size = (224, 224)
            self.decoder = lambda x: resnet50.decode_predictions(x, top=top_n)
            self.ref = """<ul>
                <li>ResNet : 
                <a href='https://arxiv.org/abs/1512.03385' target='_blank'>Deep Residual Learning for Image Recognition
                </a></li>
                </ul>
                """

        elif model_name == 'vgg16':
            self.model = vgg16.VGG16(include_top=True,
                                     weights='imagenet',
                                     input_tensor=None,
                                     input_shape=None,
                                     pooling=None,
                                     classes=1000)
            self.target_size = (224, 224)
            self.decoder = lambda x: vgg16.decode_predictions(x, top=top_n)
            self.ref = """<ul>
            <li><a href='https://arxiv.org/abs/1409.1556' target='_blank'>
            Very Deep Convolutional Networks for Large-Scale Image Recognition</a></li>
            </ul>"""

        elif model_name == 'vgg19':
            self.model = vgg19.VGG19(include_top=True,
                                     weights='imagenet',
                                     input_tensor=None,
                                     input_shape=None,
                                     pooling=None,
                                     classes=1000)
            self.target_size = (224, 224)
            self.decoder = lambda x: vgg19.decode_predictions(x, top=top_n)
            self.ref = """<ul>
            <li><a href='https://arxiv.org/abs/1409.1556' target='_blank'>Very Deep Convolutional Networks for Large-Scale Image Recognition</a></li>
            </ul>"""

        elif model_name == 'xception':
            self.model = xception.Xception(include_top=True,
                                           weights='imagenet',
                                           input_tensor=None,
                                           input_shape=None,
                                           pooling=None,
                                           classes=1000)
            self.target_size = (299, 299)
            self.decoder = lambda x: xception.decode_predictions(x, top=top_n)
            self.ref = """<ul>
            <li><a href='https://arxiv.org/abs/1610.02357' target='_blank'>Xception: Deep Learning with Depthwise Separable Convolutions</a></li>
            </ul>"""

        else:
            logger.error('Unknown model name: %s', model_name)
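Code Example #10 only configures self.model, self.target_size, and self.decoder; classification is presumably done elsewhere in the class. A hedged sketch of how those attributes might be used, assuming TensorFlow's bundled Keras; the classify helper is illustrative only, and the matching preprocess_input has to be supplied per model family because set_model() does not store it.

import numpy as np
from tensorflow.keras.preprocessing import image


def classify(clf, img_path, preprocess):
    # clf: an object on which set_model() has already been called
    img = image.load_img(img_path, target_size=clf.target_size)
    x = preprocess(np.expand_dims(image.img_to_array(img), axis=0))
    preds = clf.model.predict(x)
    return clf.decoder(preds)[0]  # [(class_id, class_name, score), ...]

# e.g. (hypothetical usage):
# from tensorflow.keras.applications import inception_v3
# clf.set_model('inception_v3')
# print(classify(clf, 'elephant.jpg', inception_v3.preprocess_input))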