Example #1
    def saveImg(self, frame, x, y, w, h):
        global helmet_count, no_helmet_count, current_video, ref_area, the_line
        area = w * h
        # ref_area = area if ref_area == 0 else ref_area
        # if area < ref_area*0.5:
        #     return
        y0 = y - extra_top if y - extra_top > 0 else 0
        bike_img = frame[y0:y + h, x:x + w]
        sqr_img = cv2.resize(bike_img, (299, 299))

        # img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        sqr_img = image.img_to_array(sqr_img)
        imgx = np.expand_dims(sqr_img, axis=0)
        imgx = preprocess_input(imgx)  # may use /255. if something goes wrong
        preds = general_model.predict(imgx)

        top = decode_predictions(preds, top=5)[0]

        for result in top:
            if result[1] == 'motor_scooter':  #and result[2] > 0.1:
                self.detectToken = 0
                the_line = int(the_line * 0.5 + self.y * 0.5)
                img = cv2.resize(bike_img, (299, 299))
                img = img / 255.
                if not self.rightward:
                    img = cv2.flip(img, 1)

                preds = helmet_model.predict([[img]])
                helmet = preds[0][1]

                spt = current_video.split('/')
                spt = spt[-1]
                spt = spt.split('.')
                spt = spt[0]
                result_path = ''
                if helmet > 0.5:  #helmet
                    helmet_count += 1
                    result_path = "extracted/helmet/" + spt + "#" + str(
                        helmet_count)
                    self.status = (0, 255, 0)
                else:
                    helmet = 1 - helmet
                    no_helmet_count += 1
                    result_path = "extracted/no_helmet/" + spt + "#" + str(
                        no_helmet_count)
                    self.status = (0, 0, 255)

                helmet = str(helmet * 100)
                helmet = helmet.split('.')
                a = helmet[0]
                b = helmet[1][:2]
                result_path += " [" + a + "." + b + "%].jpg"
                cv2.imwrite(result_path, bike_img)
                break
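Example #1 relies on several module-level names that are not shown (general_model, helmet_model, extra_top, the_line, the counters). A minimal sketch of that setup, assuming a stock ImageNet InceptionResNetV2 for general_model and a saved Keras binary classifier for helmet_model; the file and video paths are placeholders:

import cv2
import numpy as np
from keras.applications.inception_resnet_v2 import (
    InceptionResNetV2, preprocess_input, decode_predictions)
from keras.preprocessing import image
from keras.models import load_model

general_model = InceptionResNetV2(weights='imagenet')  # ImageNet classifier
helmet_model = load_model('helmet_model.h5')           # placeholder file name

extra_top = 50        # assumed: extra pixels kept above the detection box
the_line = 0
helmet_count = 0
no_helmet_count = 0
current_video = 'videos/sample.mp4'                    # placeholder path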
Example #2

def load_and_preprocess_multiclass_data_with_numpy(config):
    # TODO: this would be better handled by the dataloader
    # NB: `config['num_classes'] * [[]]` would alias a single list,
    # so build independent lists instead.
    images_by_class = [[] for _ in range(config['num_classes'])]
    #
    print('load images')
    for class_nr, class_path in enumerate(os.listdir(config['dataset_dir'])):
        print(class_nr)
        for it, file in enumerate(
                os.listdir(config['dataset_dir'] + '/' + class_path)):
            img = Image.open(config['dataset_dir'] + '/' + class_path + '/' +
                             file)
            img = img.resize([224, 224])
            images_by_class[class_nr].append(img)

    print('preprocess data')
    # code.interact(local=dict(globals(), **locals()))  # debugging hook
    #
    train_images_by_class = list(
        map(
            lambda class_nr: images_by_class[class_nr][:-int(config[
                'val_split'] * len(images_by_class[class_nr]))],
            range(config['num_classes'])))
    val_images_by_class = list(
        map(
            lambda class_nr: images_by_class[class_nr][-int(config[
                'val_split'] * len(images_by_class[class_nr])):],
            range(config['num_classes'])))
    #
    train_images = np.concatenate(
        list(map(lambda elem: np.stack(elem), train_images_by_class)))
    val_images = np.concatenate(
        list(map(lambda elem: np.stack(elem), val_images_by_class)))
    #
    train_labels = np.concatenate(
        list(
            map(
                lambda class_nr: class_nr * np.ones(
                    len(train_images_by_class[class_nr]), dtype=np.int32),
                range(config['num_classes']))))
    val_labels = np.concatenate(
        list(
            map(
                lambda class_nr: class_nr * np.ones(
                    len(val_images_by_class[class_nr]), dtype=np.int32),
                range(config['num_classes']))))
    #
    train_images = preprocess_input(train_images)
    val_images = preprocess_input(val_images)
    #
    train_images = np.transpose(train_images, [0, 3, 1, 2])
    val_images = np.transpose(val_images, [0, 3, 1, 2])
    #
    return train_images, val_images, train_labels, val_labels
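A hedged usage sketch for the loader above. The directory layout (one subdirectory per class under dataset_dir) and the config keys follow from how the function reads them; the concrete values are placeholder assumptions:

config = {
    'dataset_dir': 'data/train',  # placeholder path; one subfolder per class
    'num_classes': 2,
    'val_split': 0.2,             # fraction of each class held out for validation
}
train_x, val_x, train_y, val_y = load_and_preprocess_multiclass_data_with_numpy(config)
print(train_x.shape)  # (n_train, 3, 224, 224) after the NCHW transpose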
Example #3

    def saveImg(self, frame, x, y, w, h):
        global helmet_count, no_helmet_count
        global current_video
        y0 = y - extra_top if y - extra_top > 0 else 0
        bike_img = frame[y0:y + h, x:x + w]
        sqr_img = cv2.resize(bike_img, (299, 299))

        # img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        sqr_img = image.img_to_array(sqr_img)
        imgx = np.expand_dims(sqr_img, axis=0)
        imgx = preprocess_input(imgx)  # may use /255. if something goes wrong
        preds = general_model.predict(imgx)

        top = decode_predictions(preds, top=5)[0]

        for result in top:
            if result[1] == 'motor_scooter':  #and result[2] > 0.1:
                img = cv2.resize(bike_img, (299, 299))
                img = img / 255.
                flip = ''
                if not self.rightward:
                    img = cv2.flip(img, 1)
                    flip = 'flip'

                preds = helmet_model.predict([[img]])
                helmet = preds[0][1]

                spt = current_video.split('/')
                spt = spt[-1]
                spt = spt.split('.')
                spt = spt[0]
                result_path = ''
                if helmet > 0.5:  #helmet
                    helmet_count += 1
                    result_path = path + "/helmet/" + spt + "#" + str(
                        helmet_count)
                else:
                    helmet = 1 - helmet
                    no_helmet_count += 1
                    result_path = path + "/no_helmet/" + spt + "#" + str(
                        no_helmet_count)

                helmet = str(helmet * 100)
                helmet = helmet.split('.')
                a = helmet[0]
                b = helmet[1][:2]
                result_path += "" + flip + " [" + a + "." + b + "%].jpg"
                cv2.imwrite(result_path, bike_img)
                break
        self.isSaved = True
Example #4
    def __getitem__(self, idx):
        i = idx * batch_size

        length = min(batch_size, (len(self.samples) - i))
        batch_inputs = np.empty((3, length, img_size, img_size, channel), dtype=np.float32)
        batch_dummy_target = np.zeros((length, embedding_size * 3), dtype=np.float32)

        for i_batch in range(length):
            sample = self.samples[i + i_batch]
            for j, role in enumerate(['a', 'p', 'n']):
                image_name = sample[role]
                filename = os.path.join(self.image_folder, image_name)
                image = cv.imread(filename)  # BGR
                image = image[:, :, ::-1]  # RGB
                dets = self.detector(image, 1)

                num_faces = len(dets)
                if num_faces > 0:
                    # Find the 5 face landmarks we need to do the alignment.
                    faces = dlib.full_object_detections()
                    for detection in dets:
                        faces.append(self.sp(image, detection))
                    image = dlib.get_face_chip(image, faces[0], size=img_size)
                else:
                    image = cv.resize(image, (img_size, img_size), cv.INTER_CUBIC)

                if self.usage == 'train':
                    image = aug_pipe.augment_image(image)

                batch_inputs[j, i_batch] = preprocess_input(image)

        return [batch_inputs[0], batch_inputs[1], batch_inputs[2]], batch_dummy_target
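The __getitem__ above builds anchor/positive/negative batches for triplet training and assumes it lives in a keras.utils.Sequence subclass holding a dlib detector and shape predictor. A minimal sketch of that wrapper; the class name and the landmark-model file name are assumptions:

import dlib
from tensorflow.keras.utils import Sequence

class TripletSequence(Sequence):  # hypothetical name
    def __init__(self, samples, image_folder, usage='train'):
        self.samples = samples            # list of {'a': ..., 'p': ..., 'n': ...} file names
        self.image_folder = image_folder
        self.usage = usage
        self.detector = dlib.get_frontal_face_detector()
        # 5-point landmark model used for alignment; the file name is a placeholder
        self.sp = dlib.shape_predictor('shape_predictor_5_face_landmarks.dat')

    def __len__(self):
        return (len(self.samples) + batch_size - 1) // batch_size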
Example #5
def calc_inception_embedding(img, inception):
    rgb_img = color.gray2rgb(color.rgb2gray(img))
    rgb_img_resize = resize(rgb_img, (299, 299, 3), mode="constant")
    rgb_img_resize = np.array([rgb_img_resize])
    rgb_img_resize = inception_resnet_v2.preprocess_input(rgb_img_resize)
    embed = inception.predict(rgb_img_resize)
    return embed[0]
Example #6
    def save(self, *args, **kwargs):
        try:
            img = load_img(self.picture, target_size=(299,299))
            img_array = img_to_array(img)
            ## the model takes a 4D array because it can score multiple images,
            ## but we have only one, so we add a leading batch dimension:
            ## (299, 299, 3) -> (1, 299, 299, 3) using numpy
            to_pred = np.expand_dims(img_array, axis=0) #(1,299,299, 3)

            # preprocess input
            preprocess = preprocess_input(to_pred)
            ## assign model
            model = InceptionResNetV2(weights='imagenet')
            prediction = model.predict(preprocess)  ## returns an array of class probabilities
            
            # this decodes the prediction
            decoded = decode_predictions(prediction)[0][0]
            thingName = decoded[1]
            probab = decoded[2]
            #print(decoded)
            #print(thingName)
            #print(probab)

            self.classification = thingName
            self.probability = probab
            print('success')
        except Exception as e:
            print('classification failed', e)
        super().save(*args, **kwargs)
Example #7
    def call(self, x):
        # Split out the ground truth. During inference this is None,
        # during training it contains labels used for autoregression
        input_image, ground_truth = x

        # Encode the image using resnet
        f_views = [self.feature_enc(preprocess_input(x)) for x in tf.split(
            input_image, self.num_views, axis=2)]

        # Add the spatial coords
        f_views_enc = [self.encode_coords(f_view) for f_view in f_views]
        f_pool = self.pool_views(f_views_enc)

        # Generate the logits from the sequential model
        if ground_truth is not None:
            ground_truth = self.character_mapper.get_ids(ground_truth)
        chars_logit, _ = self.rnn((f_pool, ground_truth))
        # Interpret the logits
        predicted_chars, chars_log_prob, predicted_scores = (
            self.char_predictions(chars_logit))
        predicted_text = self.character_mapper.get_text(predicted_chars)

        return OutputEndpoints(
            chars_logit=chars_logit,
            chars_log_prob=chars_log_prob,
            predicted_chars=predicted_chars,
            predicted_scores=predicted_scores,
            predicted_text=predicted_text)
Example #8

def build_model(classes=2):
    inputs = Input(shape=(IMAGE_SIZE, IMAGE_SIZE, 3))
    x = preprocess_input(inputs)
    x = InceptionResNetV2(weights=None, classes=classes)(x)
    model = Model(inputs=inputs, outputs=x)
    model.compile(loss='categorical_crossentropy', metrics=['accuracy'])
    return model
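A short usage sketch for build_model. Note the snippet compiles without an explicit optimizer, so Keras falls back to its default ('rmsprop'); IMAGE_SIZE is assumed to be a module-level constant:

IMAGE_SIZE = 299  # assumed value

model = build_model(classes=2)
model.summary()
# model.fit(train_images, train_labels, ...) once data is prepared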
Example #9

    def predict(self, X, features_names, **kwargs):
        """Return a prediction.

        Parameters
        ----------
        X : array-like
        features_names : array of feature names (optional)
        """
        result = []
        print(X)
        for x in X:
            print(x)
            filename = wget.download(x)
            print(filename)
            cls_list = ['cats', 'dogs']
            img = image.load_img(filename, target_size=(299, 299))
            if img is None:
                return 'unknown'
            x = image.img_to_array(img)
            x = preprocess_input(x)
            x = np.expand_dims(x, axis=0)
            with self.graph.as_default():
                set_session(self.sess)
                pred = self.net.predict(x)[0]
            top_inds = pred.argsort()[::-1][:5]
            result.append(cls_list[top_inds[0]])
            # result.append(self.evaluate(filename))
        print("Predict called - will run identity function")
        return result
Example #10
def predict_save(event, context):
    if event.get("source") == "serverless-plugin-warmup":
        print("WarmUp - Lambda is warm!")
        return {}
    es = make_connect()
    body = json.loads(event['body'])
    imageLink = body['imageLink']
    userId = body['userId']
    entryId = body['entryId']
    index_type = 'found' if entryId.startswith('found') else 'lost'
    index_env = os.getenv('LAMBDA_ENV', 'local')
    index = f'{index_env}-{index_type}'
    array = url_to_array(imageLink, IMAGE_SHAPE)
    array = preprocess_input(array[None])
    prediction = model(array).numpy().tolist()[0]
    document = {
        "image-vector": prediction,
        "image-url": imageLink,
        "user-id": userId,
        "entry-id": entryId,
    }
    result = es.index(index=index, body=document)
    body = {'result': result}
    response = {"statusCode": 200, "body": json.dumps(body)}
    return response
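Examples #10-#12 (and #18 below) all call a url_to_array helper that is not shown. A plausible sketch, under the assumption that it downloads the image and resizes it to the model's input shape; only the call signature is taken from the snippets:

import numpy as np
import requests
from io import BytesIO
from PIL import Image

IMAGE_SHAPE = (299, 299)  # assumed InceptionResNetV2 input size

def url_to_array(url, shape):
    # Fetch the image, force RGB, and resize to the expected input shape.
    img = Image.open(BytesIO(requests.get(url).content)).convert('RGB')
    return np.asarray(img.resize(shape), dtype=np.float32)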
Example #11
def update_vectors(event, context):
    index_types = ['found', 'lost']
    index_env = os.getenv('LAMBDA_ENV', 'local')
    es = make_connect()
    counts = defaultdict(int)
    for index_type in index_types:

        index = f'{index_env}-{index_type}'
        results = scan(es,
                       query={"query": {
                           "match_all": {}
                       }},
                       index=index,
                       preserve_order=True)
        for item in results:
            _id = item['_id']
            url = item['_source']['image-url']
            array = url_to_array(url, IMAGE_SHAPE)
            array = preprocess_input(array[None])
            prediction = model(array).numpy().tolist()[0]
            document = {
                "doc": {
                    "image-vector": prediction,
                }
            }
            result = es.update(index=index, id=_id, body=document)
            print(result)
            counts[index_type] += 1

    response = {"statusCode": 200, "body": json.dumps(counts)}
    return response
Example #12
def search(event, context):
    if event.get("source") == "serverless-plugin-warmup":
        print("WarmUp - Lambda is warm!")
        return {}
    es = make_connect()
    body = json.loads(event['body'])
    url = body['url']
    index_type = body['index']
    index_env = os.getenv('LAMBDA_ENV', 'local')
    index = f'{index_env}-{index_type}'
    size = body['size']
    array = url_to_array(url, IMAGE_SHAPE)
    array = preprocess_input(array[None])
    prediction = model(array).numpy().tolist()[0]
    results = es.search(index=index,
                        body={
                            "size": size,
                            "query": {
                                "knn": {
                                    "image-vector": {
                                        "vector": prediction,
                                        "k": size
                                    }
                                }
                            },
                            "_source": ["image-url", "user-id", "entry-id"],
                        })
    body = {'result': results}
    response = {"statusCode": 200, "body": json.dumps(body)}
    return response
Example #13
def InceptionResNetV2(image_bytes):

    image_batch = np.expand_dims(image_bytes, axis=0)
    processed_imgs = inception_resnet_v2.preprocess_input(image_batch)
    inception_resnet_v2_features = inception_resnet_v2_extractor.predict(
        processed_imgs)
    flattened_features = inception_resnet_v2_features.flatten()
    # normalized_features = flattened_features / norm(flattened_features)
    return flattened_features
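The inception_resnet_v2_extractor used above (and in Example #17) is assumed to be a headless InceptionResNetV2. A plausible construction; the pooling choice is an assumption:

from tensorflow.keras.applications import inception_resnet_v2

# Feature extractor: ImageNet weights, no classification head.
inception_resnet_v2_extractor = inception_resnet_v2.InceptionResNetV2(
    weights='imagenet', include_top=False, pooling='avg')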
Example #14
    def __getitem__(self, idx):
        i = idx * batch_size  # index of the first sample in this batch

        # compute the size of the current batch (the last batch may be smaller)
        length = min(batch_size, (len(self.samples) - i))
        # allocate the batch inputs
        batch_inputs = np.empty((3, length, img_size, img_size, channel),
                                dtype=np.float32)
        # allocate the (dummy) labels
        batch_dummy_target = np.zeros((length * 3, embedding_size),
                                      dtype=np.float32)

        # assemble the batch sample by sample
        for i_batch in range(length):
            sample = self.samples[i + i_batch]
            # print(sample)
            for j, role in enumerate(['a', 'p', 'n']):
                image_name = sample[role]  # image file name
                filename = os.path.join(self.image_folder, image_name)
                img = cv2.imread(filename)  # read image (BGR)
                img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # convert to RGB

                # face detection
                rectangles = self.mtcnn_model.detectFace(img, self.threshold)

                num_faces = len(rectangles)
                if num_faces > 0:
                    # make the detected boxes square
                    rectangles = rect2square(np.array(rectangles))
                    rectangle = rectangles[0]
                    # record the 5 facial landmarks
                    landmark = (np.reshape(rectangle[5:15], (5, 2)) - np.array(
                        [int(rectangle[0]),
                         int(rectangle[1])])) / (rectangle[3] -
                                                 rectangle[1]) * 160
                    # crop the face region
                    crop_img = img[int(rectangle[1]):int(rectangle[3]),
                                   int(rectangle[0]):int(rectangle[2])]

                    if crop_img.shape[0] > 0 and crop_img.shape[1] > 1:
                        crop_img = cv2.resize(crop_img, (160, 160))
                        # align the face
                        image, _ = Alignment(crop_img, landmark)
                    else:
                        image = cv2.resize(img, (img_size, img_size),
                                           cv2.INTER_CUBIC)

                else:
                    image = cv2.resize(img, (img_size, img_size),
                                       cv2.INTER_CUBIC)

                # preprocess the input image
                batch_inputs[j, i_batch] = preprocess_input(image)
        return np.vstack((batch_inputs[0], batch_inputs[1],
                          batch_inputs[2])), batch_dummy_target
Example #15

    def evaluate(self, file):
        cls_list = ['cats', 'dogs']
        img = image.load_img(file, target_size=(299, 299))
        if img is None:
            return 'unknown'
        x = image.img_to_array(img)
        x = preprocess_input(x)
        x = np.expand_dims(x, axis=0)
        pred = self.net.predict(x)[0]
        top_inds = pred.argsort()[::-1][:5]
        return cls_list[top_inds[0]]
Example #16

def back_mean_gen(df, flip=True):
    while True:
        sample = df.sample(n=8)
        mean_imgs = np.array([
            inception_resnet_v2.preprocess_input(
                cv2.resize(cv2.imread(p),
                           (target_size[1], target_size[0]))[:, :, ::-1])
            for p in sample.mean_file_name
        ])
        back_imgs = np.array([
            inception_resnet_v2.preprocess_input(
                cv2.resize(cv2.imread(p),
                           (target_size[1], target_size[0]))[:, :, ::-1])
            for p in sample.back_file_name
        ])
        if flip and random.choice([True, False]):
            mean_imgs = mean_imgs[:, :, ::-1, :]
            back_imgs = back_imgs[:, :, ::-1, :]
        labels = sample[label_columns].values
        yield ((back_imgs, mean_imgs), labels)
Example #17
def inception_resnet_v2_feature_extractor(preprocess_image):
    # preprocess the image input for the extractor
    # preprocess_image = nn_image_preprocessing(image_bytes)
    processed_imgs = inception_resnet_v2.preprocess_input(preprocess_image)
    # predicting the image
    inception_resnet_v2_features = inception_resnet_v2_extractor.predict(
        processed_imgs)
    # flatten and reshape the features
    flattened_features = inception_resnet_v2_features.flatten()
    flattened_features = np.array(flattened_features)
    flattened_features = flattened_features.reshape(1, -1)
    return flattened_features
Example #18
def predict(event, context):
    if event.get("source") == "serverless-plugin-warmup":
        print("WarmUp - Lambda is warm!")
        return {}
    body = json.loads(event['body'])
    url = body['url']
    array = url_to_array(url, IMAGE_SHAPE)
    array = preprocess_input(array[None])
    prediction = model(array)
    body = {'prediction': prediction.numpy().tolist()}
    response = {"statusCode": 200, "body": json.dumps(body)}
    return response
Example #19
def create_inception_embedding(filenames, inception):
    grayscaled_rgb_resized = []
    for name in filenames:
        i = np.array(PIL.Image.open(name))
        i = gray2rgb(rgb2gray(i))
        i = resize(i, (299, 299, 3), mode='constant')
        grayscaled_rgb_resized.append(i)
    grayscaled_rgb_resized = np.array(grayscaled_rgb_resized)
    grayscaled_rgb_resized = preprocess_input(grayscaled_rgb_resized)
    with inception.graph.as_default():
        embed = inception.predict(grayscaled_rgb_resized)
    return embed
Example #20
def Home():
    if request.method == 'POST':
        f = request.files['img_file']
        test_image = img_to_array(load_img(f, target_size=(299, 299)))
        test_image = np.array(test_image, dtype=float)
        test_image = preprocess_input(test_image)
        test_features = IR2.predict(np.array([test_image]))
        # html=generate_desc(loaded_model, tokenizer, np.array(test_features), 100)

        return render_template('index.html', url=test_features)

    return render_template('index.html')
Example #21
def __calculate_embedding(img_path):

    model = __get_model()

    # Reshape
    img = image.load_img(img_path, target_size=(IMG_WIDTH, IMG_HEIGHT))
    # img to Array
    x = image.img_to_array(img)
    # Expand dims: (w, h, 3) -> (1, w, h, 3)
    x = np.expand_dims(x, axis=0)
    # Pre process Input
    x = preprocess_input(x)
    return model.predict(x).reshape(-1)
Example #22
    def save(self, *args, **kwargs):
        try:
            img = load_img(self.picture, target_size=(299, 299))
            img_array = img_to_array(img)
            to_pred = np.expand_dims(img_array, axis=0)
            prep = preprocess_input(to_pred)
            model = InceptionResNetV2(weights="imagenet")
            prediction = model.predict(prep)
            decoded = decode_predictions(prediction)[0][0][1]
            self.classified = str(decoded)
        except Exception as e:
            print("error", e)
        super().save(*args, **kwargs)
Example #23

    def pre_process_built_cnn(x, type_cnn):
        """
            Method that pre-processes images based on the specified built convolutional neural network.
            Some of the supported CNNs include: VGG16, InceptionV3, DenseNet, ResNet50, and InceptionResNetV2
                :param x:           (np_array)  matrix with all image data with shape [n_samples, img_width, img_height]
                :param type_cnn:    (str)       type of CNN to pre-process the data. Currently supported strings are:
                                                'inceptionv3', 'densenet', 'inception_resnet_v2', 'resnet50',
                                                and 'vgg16'
                :return: (np_array)  pre-processed data.
        """

        valid_cnn_types = [
            'inceptionv3', 'densenet', 'inception_resnet_v2', 'resnet50',
            'vgg16'
        ]
        if type_cnn not in valid_cnn_types:
            raise ValueError('You entered an invalid type of CNN: {}. '
                             'Valid CNN type arguments include: {}'.format(
                                 type_cnn, valid_cnn_types))

        if len(x.shape) < 3 or len(x.shape) > 4:
            raise DimensionalityError(
                'Cannot pre-process image data with shape {} provided for the'
                'training data "x".\n'
                'Valid shapes are: (n_samples, img_width, img_height)'
                ' and (n_samples, img_width, img_height, n_channels)'.format(
                    x.shape))

        # Transform gray scale data to RGB
        if len(x.shape) == 3:
            try:
                x = np.array([
                    np.array(Image.fromarray(x_i).convert('RGB')) for x_i in x
                ])
            except TypeError:
                x = np.array([
                    np.array(Image.fromarray(np.uint8(x_i)).convert('RGB'))
                    for x_i in x
                ])

        if type_cnn == 'vgg16':
            x = vgg16.preprocess_input(x)
        elif type_cnn == 'resnet50':
            x = resnet50.preprocess_input(x)
        elif type_cnn == 'inception_resnet_v2':
            x = inception_resnet_v2.preprocess_input(x)
        elif type_cnn == 'densenet':
            x = densenet.preprocess_input(x)
        elif type_cnn == 'inceptionv3':
            x = inception_v3.preprocess_input(x)
        return x
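A brief usage sketch for pre_process_built_cnn, exercising the grayscale-to-RGB branch with synthetic data (shown here as a plain function call; in the source it is a method):

import numpy as np

# 10 synthetic grayscale images: (n_samples, img_width, img_height)
gray = np.random.randint(0, 256, size=(10, 64, 64), dtype=np.uint8)
rgb_ready = pre_process_built_cnn(gray, 'inception_resnet_v2')
print(rgb_ready.shape)  # (10, 64, 64, 3), values scaled to [-1, 1]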
Example #24

    def save(self, *args, **kwargs):
        try:
            img = load_img(self.picture.path, target_size=(299, 299))
            img_arr = img_to_array(img)
            to_pred = np.expand_dims(img_arr, axis=0)  # (1, 299, 299, 3)
            prep = preprocess_input(to_pred)
            model = InceptionResNetV2(weights='imagenet')
            prediction = model.predict(prep)
            decoded = decode_predictions(prediction)[0][0][1]
            self.classified = str(decoded)
            print('Success')
        except Exception as e:
            print(f"Classification Failed {e}")
        super().save(*args, **kwargs)
Example #25

def predict(img_path):
    img = image.load_img(img_path, target_size=(299, 299))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)

    preds = model.predict(x)
    # decode the results into a list of tuples (class, description, probability)
    # (one such list for each sample in the batch)

    return [{
        'probability': float(p[2]),
        'name': p[1],
        'categories': synset.tree(p[0])
    } for p in decode_predictions(preds, top=2)[0]]
Example #26
    def save(self, *args, **kwargs):
        try:
            # print(self.picture)
            img = load_img(self.picture, target_size=(299, 299))
            img_array = img_to_array(img)
            to_pred = np.expand_dims(img_array, axis=0)
            prep = preprocess_input(to_pred)
            model = InceptionResNetV2(weights='imagenet')
            prediction = model.predict(prep)
            decoded = decode_predictions(prediction)[0][0][1]
            self.classified = decoded
            print("success")
        except Exception as e:
            print('classification failed', e)
        super().save(*args, **kwargs)
Example #27
    def get_predictions_from_url(self, img_url, class_name=None):
        if class_name is None:
            class_name = "test"
        timestamp = datetime.now().timestamp()
        img_path = tf.keras.utils.get_file(
            f"{class_name}-{timestamp}",
            origin=img_url)  # need new filename each time
        img = tf.keras.preprocessing.image.load_img(img_path,
                                                    target_size=self.img_dims)
        img_array = tf.keras.preprocessing.image.img_to_array(img)
        img_array = tf.expand_dims(img_array, 0)
        img_processed = preprocess_input(img_array)

        prediction = self.model.predict(img_processed)
        # score = tf.nn.softmax(prediction[0])
        return prediction
Example #28
    def save(self, *args, **kwargs):
        try:

            img = load_img(self.picture, target_size=(299, 299))
            img_array = img_to_array(img)
            to_predict = np.expand_dims(img_array, axis=0)
            preprocessed = preprocess_input(to_predict)
            model = InceptionResNetV2(weights='imagenet')
            prediction = model.predict(preprocessed)
            decode = decode_predictions(prediction)[0][0][1]
            self.classified = str(decode)
            print('success')

        except Exception as e:
            print("classification failed", e)
        super().save(*args, **kwargs)
Example #29

    def __init__(self, root, transforms_=None, mode='train'):
        self.transform = transforms.Compose(transforms_)
        print("in here")

        images_gray = np.load(root + 'A/gray_scale.npy')
        images_lab = np.load(root + 'B/ab1.npy')

        self.files_A = self.pipe_line_img(
            images_gray,
            batch_size=images_gray.shape[0]).transpose(0, 3, 1, 2)
        self.files_B = preprocess_input(
            self.get_rbg_from_lab(gray_imgs=images_gray,
                                  ab_imgs=images_lab,
                                  n=images_gray.shape[0])).transpose(
                                      0, 3, 1, 2)
        print("rearranged", self.files_A.shape)
Example #30
def predict_testimg(args, test_img, model):

    target = [
        'chihuahua', 'collie', 'dachshunds', 'golden_retriever', 'jindodog',
        'maltese', 'poodle', 'pug', 'shih-tzu', 'welshcorgi'
    ]
    # test_path = '../images/dog1.jpg'
    # test_img = load_img(test_path, target_size = (299,299))
    # test_img = img_to_array(test_img)
    test_img = cv2.resize(test_img, (299, 299))
    test_img = np.expand_dims(test_img, axis=0)
    test_img = preprocess_input(test_img)

    predict = model.predict(test_img)
    max_index = np.argmax(predict, 1)
    dog_pred = target[max_index[0]]
    # print(dog_pred, np.max(predict))
    return dog_pred, np.max(predict)