Code Example #1
def predict(image1):
    model = VGG16()
    image = load_img(image1, target_size=(224, 224))
    # convert the image pixels to a numpy array
    image = img_to_array(image)
    # reshape data for the model
    image = image.reshape((1, image.shape[0], image.shape[1], image.shape[2]))
    # prepare the image for the VGG model
    image = preprocess_input(image)
    # predict the probability across all output classes
    yhat = model.predict(image)
    # convert the probabilities to class labels
    label = decode_predictions(yhat)
    # retrieve the most likely result, e.g. highest probability
    label = label[0][0]
    return label
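
The snippet above assumes the usual Keras VGG16 helpers are already in scope. A minimal sketch of those imports and a call, assuming the tensorflow.keras module layout (the image path is a placeholder):

from tensorflow.keras.applications.vgg16 import VGG16, preprocess_input, decode_predictions
from tensorflow.keras.preprocessing.image import load_img, img_to_array

label = predict('example.jpg')  # hypothetical image file
print(label[1], label[2])       # ImageNet class name and its probability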
Code Example #2
    def run(self, image):
        """Computes feature relevance maps for a single image.

        Args:
            image: array of shape (W, H, C)

        Returns:
            RGB or grayscale relevance map.

        """
        f = K.function(inputs=self.model.input, outputs=self.relevance)
        image = preprocess_input(image)
        image = tf.expand_dims(image, axis=0)
        relevance_scores = f(inputs=image)
        relevance_scores = self.postprocess(relevance_scores)
        return np.squeeze(relevance_scores)
Code Example #3
def extract_features(filename):
    # load the model
    model = VGG16()
    # re-structure the model
    model = Model(inputs=model.inputs, outputs=model.layers[-2].output)
    # load the photo
    image = load_img(filename, target_size=(224, 224))
    # convert the image pixels to a numpy array
    image = img_to_array(image)
    # reshape data for the model
    image = image.reshape((1, image.shape[0], image.shape[1], image.shape[2]))
    # prepare the image for the VGG model
    image = preprocess_input(image)
    # get features
    feature = model.predict(image, verbose=0)
    return feature
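
This variant strips VGG16's final classification layer, so it returns the activations of the penultimate fully connected layer: a (1, 4096) array commonly used as an image embedding. A hedged usage sketch, assuming the same imports as above plus Model from tensorflow.keras.models:

from tensorflow.keras.models import Model

feature = extract_features('photo.jpg')  # hypothetical image file
print(feature.shape)  # (1, 4096)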
Code Example #4
    def preprocess_image(self, img_path):
        if self.method == "v0":
            FIXED_SIZE = (96, 96)  # self-adjusted vgg16
            #FIXED_SIZE = (224, 224) # vgg16-default
        elif self.method == "v1":
            FIXED_SIZE = (299, 299)  # xception-default
        elif self.method == "v2":
            FIXED_SIZE = (224, 224)  # resnet-default

        cv_img = cv2.imread(img_path)
        cv_img = cv2.resize(cv_img,
                            FIXED_SIZE,
                            interpolation=cv2.INTER_NEAREST)
        cv_img = np.expand_dims(cv_img, axis=0)
        cv_img = preprocess_input(cv_img)
        return cv_img
Code Example #5
    def build_data(self):
        print(self.n, " Image Loading start... ", self.img_dirpath)
        for i, img_file in enumerate(self.img_dir):

            img = image.load_img(self.img_dirpath + img_file,
                                 target_size=SIZE[::-1],
                                 interpolation='bicubic')
            img = image.img_to_array(img)
            img = preprocess_input(img)
            self.imgs[i] = img
            if self.labels is not None:
                self.texts.append(self.labels[img_file][:MAX_LEN])
            else:
                #valid mode
                self.texts.append(img_file)
        print("Image Loading finish...")
Code Example #6
    def extract(self, img):
        """
        Extract a deep feature from an input image
        Args:
            img: from PIL.Image.open(path) or tensorflow.keras.preprocessing.image.load_img(path)

        Returns:
            feature (np.ndarray): deep feature with the shape=(4096, )
        """
        img = img.resize((224, 224))  # VGG must take a 224x224 img as an input
        img = img.convert('RGB')  # Make sure img is color
        x = image.img_to_array(img)  # To np.array. Height x Width x Channel. dtype=float32
    x = np.expand_dims(x, axis=0)  # (H, W, C) -> (1, H, W, C): add a batch axis
        x = preprocess_input(x)  # Subtracting avg values for each pixel
        feature = self.model.predict(x)[0]  # (1, 4096) -> (4096, )
        return feature / np.linalg.norm(feature)  # Normalize
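
The snippet does not show how self.model is built. A plausible construction (an assumption, not confirmed by this excerpt) that yields the documented (4096,) feature is VGG16 truncated at its fc2 layer:

from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.models import Model

base = VGG16(weights='imagenet')  # full model, including the fc1/fc2 head
model = Model(inputs=base.input, outputs=base.get_layer('fc2').output)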
Code Example #7
File: Mark_func.py Project: scchy/My_Competition
    def get_pred_df(self):
        """
        预测数据
        """
        te_dt, _ = get_data(self.te_jpg)
        te_dt = te_dt.astype('float32') / 255
        if self.vgg16:
            print('Start dealing with vgg16')
            te_dt = preprocess_input(te_dt)  # preprocessing (subtract the channel means)
            te_dt = self.vgg16_model.predict(te_dt)

        print('start predict')
        pred_te = np.argmax(self.model.predict(te_dt), axis=1)
        df = pd.DataFrame({'ID': self.te_id, 'Label': pred_te})
        df.Label = df.Label.map({1: 'pos', 0: 'neg'})
        return df
Code Example #8
File: bot.py Project: Hadar933/InstaBot
def get_image_description(image_path):
    """
    uses VGG16 neural-network to get image description.
    the description will be used to generate more suited comments.
    :param image_path: media item to get description of
    :return: image description (string)
    """
    im_array = img_to_array(
        load_img(image_path, color_mode="rgb", target_size=(224, 224)))
    shape = (1, ) + im_array.shape
    image_data = preprocess_input(im_array.reshape(shape))
    prediction = VGG16().predict(image_data)
    labels = decode_predictions(prediction)
    labels = [(label[1], label[2]) for label in labels[0]]
    print("description confident=", labels[0][1])
    return (labels[0][0]).replace("_", " ")
Code Example #9
def parse_function(example_proto):
    feature = {
        'image/height': tf.FixedLenFeature((), tf.int64, -1),
        'image/width': tf.FixedLenFeature((), tf.int64, -1),
        'image/filename': tf.FixedLenFeature((), tf.string, default_value=''),
        'image/source_id': tf.FixedLenFeature((), tf.string, default_value=''),
        'image/key/sha256': tf.FixedLenFeature((), tf.string,
                                               default_value=''),
        'image/encoded': tf.FixedLenFeature((), tf.string, default_value=''),
        'image/format': tf.FixedLenFeature((), tf.string,
                                           default_value='jpeg'),
        'image/object/bbox/xmin': tf.VarLenFeature(dtype=tf.float32),
        'image/object/bbox/xmax': tf.VarLenFeature(dtype=tf.float32),
        'image/object/bbox/ymin': tf.VarLenFeature(dtype=tf.float32),
        'image/object/bbox/ymax': tf.VarLenFeature(dtype=tf.float32),
        'image/object/class/text': tf.VarLenFeature(tf.string),
        'image/object/class/label': tf.VarLenFeature(tf.int64),
        'image/object/difficult': tf.VarLenFeature(tf.int64),
    }
    parsed_features = tf.parse_single_example(example_proto, feature)
    file = tf.string_join([dir_head, parsed_features["image/filename"]])
    x = parsed_features['image/object/bbox/xmax'].values - parsed_features[
        'image/object/bbox/xmin'].values
    y = parsed_features['image/object/bbox/ymax'].values - parsed_features[
        'image/object/bbox/ymin'].values
    area = tf.math.multiply(x, y)
    index = tf.math.argmax(area)
    image_string = tf.read_file(file)
    image = tf.image.decode_image(image_string, channels=3, dtype=tf.uint8)
    image.set_shape([None, None, None])
    fwidth = tf.to_float(parsed_features["image/width"])
    fheight = tf.to_float(parsed_features["image/height"])
    hgap = (500 - fwidth) / 2
    vgap = (500 - fheight) / 2
    nxmin = (parsed_features["image/object/bbox/xmin"].values[index] * fwidth +
             hgap) / 500.0
    nxmax = (parsed_features["image/object/bbox/xmax"].values[index] * fwidth +
             hgap) / 500.0
    nymin = (parsed_features["image/object/bbox/ymin"].values[index] * fheight
             + vgap) / 500.0
    nymax = (parsed_features["image/object/bbox/ymax"].values[index] * fheight
             + vgap) / 500.0
    image = tf.image.resize_image_with_crop_or_pad(image, 500, 500)
    image = preprocess_input(image)
    #label = tf.cast(parsed_features["image/object/class/label"], tf.float32)
    #label = tf.cast(tf.reshape(parsed_features["image/object/class/label"], shape=[]), dtype=tf.int32)
    return {'vgg16_input': image}, [nxmin, nxmax, nymin, nymax]
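
A hedged sketch of how a parse function like this is typically wired into a TF1-style input pipeline; the TFRecord filename is a placeholder, and dir_head is assumed to be defined elsewhere in the module:

dataset = tf.data.TFRecordDataset('train.tfrecord')  # hypothetical file
dataset = dataset.map(parse_function).shuffle(256).batch(32)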
Code Example #10
File: test.py Project: xiszishu/tensorflow_examples
def main(argv):
    image_path = '../ILSVRC2012_devkit_t12/image/'
    model = VGG16(weights='imagenet', include_top=True)
    #test_dog_picture()
    result_file = open("val.txt", "r")
    correct_num = 0
    num_images = 1000
    topk = 5
    filename = tf.placeholder(tf.string, name="inputFile")
    fileContent = tf.read_file(filename, name="loadFile")
    image_file = tf.image.decode_jpeg(fileContent,
                                      channels=3,
                                      name="decodeJpeg")
    resize_nearest_neighbor = tf.image.resize_images(
        image_file,
        size=[224, 224],
        method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
    sess = tf.Session()
    suffix = '%(index)d/%(max)d [%(elapsed)d / %(eta)d / %(eta_td)s]'
    bar = IncrementalBar('Processing', max=num_images, suffix=suffix)

    rram_crossbar = Rram_weights(8, 300)
    l_weights = model.get_weights()
    iterate_list(l_weights, rram_crossbar)
    model.set_weights(l_weights)

    for i in range(1, num_images + 1):
        img_file = "{}{}{:0>8d}{}".format(image_path, "ILSVRC2012_val_", i,
                                          ".JPEG")
        feed_dict = {filename: img_file}
        with sess.as_default():
            x = resize_nearest_neighbor.eval(feed_dict)
        x = np.expand_dims(x, axis=0)
        x = preprocess_input(x)
        features = model.predict(x)
        #result = np.argmax(features)
        result = np.argsort(features)[0][-topk:]
        result_line = result_file.readline()
        correct_result = int(result_line.split()[1])
        #print(correct_result, result)
        if correct_result in result:
            correct_num += 1
        #else: print(img_file)
        bar.next()

    bar.finish()
    #resized_images.save("picture_resized.jpeg", "JPEG", optimize=True)
    print("Accuracy: {0:.2f}%".format(float(correct_num) / num_images * 100))
Code Example #11
def extract_features():
    vgg_model = VGG16()
    model = Sequential()
    for layer in vgg_model.layers[:-1]:  # copy every layer except the final classifier
        model.add(layer)
    for layer in model.layers:
        layer.trainable = False

    #model.layers.pop()
    model = Model(inputs=model.inputs, outputs=model.layers[-1].output)
    image = load_img('test2.jpg', target_size=(224, 224))
    image = img_to_array(image)
    image = image.reshape((1, image.shape[0], image.shape[1], image.shape[2]))
    image = preprocess_input(image)
    feature = model.predict(image, verbose=0)
    feature = np.reshape(feature, (1, 4096))
    return feature
Code Example #12
File: classify.py Project: tylerwolf35/NotHotdog
def predict(image1):
    model = VGG16()
    image = load_img(image1, target_size=(224, 224))
    # pixels -> numpy array
    image = img_to_array(image)
    # reshape data
    image = image.reshape((1, image.shape[0], image.shape[1], image.shape[2]))
    # prepare for VGG model
    image = preprocess_input(image)
    # predict
    yhat = model.predict(image)
    # convert
    label = decode_predictions(yhat)
    # retrieve result (hotdog or not hotdog)
    label = label[0][0]
    return label
Code Example #13
def make_prediction(image_ids, model, word_to_ind, ind_to_word):
    """
    Function to make a caption prediction for a list of image ids

    Parameters
    ----------
    image_ids: list
        list of path to images
    model: Keras Model
        trained model to make the caption prediction
    word_to_ind: dict
        mapping of word to index
    ind_to_word: dict
        mapping of index to word

    Returns
    -------
    predictions: list
        List of lists corresponding to captions for each image

    """
    predictions = []
    vgg_model = preprocess_images.load_prebuilt_model()
    maxlen = model.input_shape[1][1]
    for ii in image_ids:
        prediction = []
        print('Predicting image: ' + str(ii))
        example = image.load_img(ii, target_size=(224, 224))
        example_arr = image.img_to_array(example, dtype='float32')
        example_arr = np.expand_dims(example_arr, axis=0)
        example_arr = preprocess_input(example_arr)
        example_features = vgg_model.predict(example_arr)
        example_features = np.array(example_features).reshape(-1, 4096)

        start_string = ['*start*']
        start_ind = list(map(word_to_ind.get, start_string))
        for i in range(maxlen):
            start_seq = pad_sequences([start_ind], maxlen)
            yhat = model.predict([example_features, start_seq])
            yhat = np.argmax(yhat)
            if ind_to_word[yhat] == '*end*':
                break
            prediction.append(ind_to_word[yhat])
            start_ind.append(yhat)
        predictions.append(prediction)
    return predictions
Code Example #14
    def process_stimuli_dir(stimuli_dir):
        image_files = []
        for f in listdir(stimuli_dir):
            if isdir("{}/{}".format(stimuli_dir, f)):
                cat_images = []
                for f2 in listdir("{}/{}".format(stimuli_dir, f)):
                    if isfile("{}/{}/{}".format(stimuli_dir, f, f2)):
                        img = image.load_img("{}/{}/{}".format(
                            stimuli_dir, f, f2),
                                             target_size=(224, 224))
                        x = image.img_to_array(img)
                        x = np.expand_dims(x, axis=0)
                        x = preprocess_input(x)
                        cat_images.append(x[0])
                image_files.append(cat_images)

        return np.array(image_files)
Code Example #15
def model_predict(img, model):
    img = img.resize((224, 224))

    # Preprocessing the image
    x = image.img_to_array(img)
    # x = np.true_divide(x, 255)
    # x = np.expand_dims(x, axis=0)

    # Be careful how your trained model deals with the input
    # otherwise, it won't make correct prediction!
    #x = preprocess_input(x, mode='tf')
    x = preprocess_input(x)

    x = x.reshape(1,224,224,3)
    
    preds = model.predict(x)
    return preds
Code Example #16
def get_data(start=0, end=10):
    # ***************************
    # per file
    # ***************************
    feature_list = []
    file_paths_in = []
    file_names_in = []
    file_classes = []  # the subdir name i.e. C7-Maine_coon

    for subdir in SUBDIRS:
        try:
            file_names = os.listdir(f"{DATA_BASEPATH}/{subdir}")

            for file_name in file_names[start:end]:
                image_path = f"{DATA_BASEPATH}/{subdir}/{file_name}"

                # **********
                # corresponding lists for later prediction/evaluation (not necessary for train)
                # **********
                file_paths_in.append(image_path)
                file_names_in.append(file_name)
                file_classes.append(subdir)

                # **********
                # get array representation of image
                # **********
                img = image.load_img(
                    image_path, target_size=SHAPE[:2]
                )  # corresponds with input_shape of vgg16 model
                img_data = image.img_to_array(img)
                img_data = np.expand_dims(img_data, axis=0)
                img_data = preprocess_input(img_data)

                # **********
                # get vgg16 features of image
                # **********
                features = model_vgg16.predict(img_data)
                features = np.array(features)

                feature_list.append(features.flatten())
        except Exception:
            # skip classes whose directory or files fail to load
            continue

    feature_list = np.array(feature_list)
    return feature_list, file_paths_in, file_names_in, file_classes
Code Example #17
File: augments.py Project: hegu2692/HistomicsML2
    def prepare_image(self, aurl, mean, std):

        img = np.array(
            Image.open(cStringIO.StringIO(urllib.urlopen(aurl).read())))

        # wsi_mean_std = self.find_mean_std(slide)
        img_norm = self.reinhard(img, self.REFERENCE_MU_LAB,
                                 self.REFERENCE_STD_LAB, mean, std)

        img_norm = img_to_array(imresize(img_norm, self.IMAGE_SHAPE))

        image_dim = np.expand_dims(img_norm, axis=0)
        batch_angle = self.generator(image_dim, rotation=60)
        batch_angle = batch_angle.reshape(2, self.IMAGE_WIDTH,
                                          self.IMAGE_HEIGHT, 3)
        batches = np.round(batch_angle).astype(np.uint8)
        image = preprocess_input(batches)
        return image
Code Example #18
    def __getitem__(self, idx):
        batch_paths = self.paths[idx * BATCH_SIZE:(idx + 1) * BATCH_SIZE]
        x1 = self.x1[idx * BATCH_SIZE:(idx + 1) * BATCH_SIZE]
        x2 = self.x2[idx * BATCH_SIZE:(idx + 1) * BATCH_SIZE]
        y1 = self.y1[idx * BATCH_SIZE:(idx + 1) * BATCH_SIZE]
        y2 = self.y2[idx * BATCH_SIZE:(idx + 1) * BATCH_SIZE]

        batch_masks = np.zeros((len(batch_paths), GRID_SIZE, GRID_SIZE))
        batch_images = np.zeros((len(batch_paths), IMAGE_SIZE, IMAGE_SIZE, 3),
                                dtype=np.float32)
        for i, f in enumerate(batch_paths):
            img = Image.open(f)
            img = img.resize((IMAGE_SIZE, IMAGE_SIZE))
            img = img.convert('RGB')
            batch_masks[i, y1[i]:y2[i], x1[i]:x2[i]] = 1
            batch_images[i] = preprocess_input(np.array(img, dtype=np.float32))
            img.close()
        return batch_images, batch_masks[:, :, :, np.newaxis]
Code Example #19
File: app.py Project: devarajit/Flask-App---Example
def predict_model(image, model):
    # Pick a test image, run model, show image, and show predicted bounding box overlaid on the image
    feat_scaled = preprocess_input(np.array(image, dtype=np.float32))
    pred = model.predict(x=np.array([feat_scaled]))[0]  # Predict the BBox
    predClass = int(np.argmax(pred))
    print(pred)
    print(predClass)
    x0 = int(pred[0])
    y0 = int(pred[1])
    x1 = int((pred[0] + pred[2]))
    y1 = int((pred[1] + pred[3]))
    print("predition Bounding Box co-ordinates are :", x0, y0, x1, y1)
    cv2.rectangle(image, (x0, y0), (x1, y1), (0, 0, 255), 1)  # Show the BBox
    img = Image.fromarray(image.astype('uint8'))
    file_object = io.BytesIO()
    img.save(file_object, 'PNG')
    file_object.seek(0)
    return file_object
Code Example #20
def preprocess_image_crop(image_path, img_size):
    '''
    Preprocess the image scaling it so that its smaller size is img_size.
    The larger size is then cropped in order to produce a square image.
    '''
    img = load_img(image_path)
    scale = float(img_size) / min(img.size)
    new_size = (int(np.ceil(scale * img.size[0])),
                int(np.ceil(scale * img.size[1])))
    # print('old size: %s,new size: %s' %(str(img.size), str(new_size)))
    img = img.resize(new_size, resample=Image.BILINEAR)
    img = img_to_array(img)
    crop_h = img.shape[0] - img_size
    crop_v = img.shape[1] - img_size
    img = img[crop_h:img_size + crop_h, crop_v:img_size + crop_v, :]
    img = np.expand_dims(img, axis=0)
    img = vgg16.preprocess_input(img)
    return img
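
A hedged usage sketch: with img_size=224 the returned batch is ready for VGG16 (the path is a placeholder):

img = preprocess_image_crop('photo.jpg', 224)  # hypothetical image file
print(img.shape)  # (1, 224, 224, 3)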
Code Example #21
File: model.py Project: anandhupvr/Mining-Objects
def vgg16(im_dir):

    base_model = VGG16(weights='imagenet')
    model = Model(inputs=base_model.input,
                  outputs=(base_model.get_layer('block5_pool').output,
                           base_model.get_layer('block5_conv3').output,
                           base_model.layers[-1].output))
    img = image.load_img(im_dir, target_size=(224, 224))

    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    # all_wg = base_model.layers[-1].get_weights()[0]
    pool, relu, pred = model.predict(x)
    pool_resized = tf.image.resize(pool, (14, 14))
    features = tf.concat([relu, pool_resized], axis=0)

    return features, pred
Code Example #22
def crop_imgs_and_prepare(img, roilist, param):
  """ Crop the image on the region of interest and preprocess the crops before the
    CNN network. The function is used in get_proposals, during the refinement step.
        Args:
            img: image.
            roilist: Regions of interest.
            param: parameters of the model.
        Returns: Ip: Preprocessed crops of the image
  """
  Ip = []
  if len(roilist.shape) == 1:
    roilist = roilist.reshape(-1, 1)
  for i in range(roilist.shape[1]):
    roi = roilist[:, i]
    img_cropped = img[int(roi[1]) : int(roi[3]) + 1, int(roi[0]) : int(roi[2]) + 1, :]
    img_cropped = preprocess_input(img_cropped, mode='caffe')
    Ip.append(cv2.resize(img_cropped, (param['height'], param['width']), interpolation=cv2.INTER_LINEAR))
  return np.array(Ip)
Code Example #23
    def __getitem__(self, index):
        'Generate one batch of data'
        # selects indices of data for next batch (chooses an image using 'index' provided by shuffling the dataset at the end of each pass)
        indexes = self.indexes[index * self.batch_size:(index + 1) *
                               self.batch_size]

        images = [
            cv2.cvtColor(
                cv2.imread(os.path.join(TEST_PATH, self.images_paths[k])),
                cv2.COLOR_BGR2RGB) for k in indexes
        ]
        images = np.array(images)
        images = images.astype(np.float32)
        images = np.array([preprocess_input(img) for img in images])

        labels = [self.labels[k] for k in indexes]
        labels = to_categorical(labels, num_classes=NUM_CLASSES, dtype='int8')

        return images, labels
Code Example #24
def classify_image():
    original = Image.open(image_data)
    original = original.resize((224, 224), Image.ANTIALIAS)

    numpy_image = img_to_array(original)
    image_batch = np.expand_dims(numpy_image, axis=0)

    processed_image = vgg16.preprocess_input(image_batch.copy())
    predictions = vgg_model.predict(processed_image)

    label = decode_predictions(predictions)
    tk.Label(
        frame, text="Top image class predictions and confidences\n").pack()

    for i in range(len(label[0])):
        tk.Label(frame,
                 text=str(label[0][i][1]).upper() + ': ' +
                 str(round(float(label[0][i][2]) * 100, 3)) +
                 '%').pack()
Code Example #25
def load_data(dir_path, img_size=IMG_SIZE):
    X = []
    y = []
    i = 0
    for path in tqdm(sorted(os.listdir(dir_path))):
        if not path.startswith('.'):
            for file in os.listdir(dir_path + path):
                if not file.startswith('.'):
                    img = cv2.imread(dir_path + path + '/' + file)
                    img = cv2.resize(img,
                                     dsize=img_size,
                                     interpolation=cv2.INTER_CUBIC)
                    X.append(preprocess_input(img))
                    y.append(i)
            i += 1
    X = np.array(X)
    y = np.array(y)
    print(f'{len(X)} images loaded from {dir_path} directory.')
    return X, y
Code Example #26
def generate_features_to_disk(folder_path):
    #folder_path = '/home/josemiki/vision/cropssmt/crops/'
    matrix_features_images = []
    for infolder in sorted(glob.glob(os.path.join(folder_path, '*'))):
        list_features_per_image = []
        #list_name_image=[]
        for infile in sorted(glob.glob(os.path.join(infolder, '*.png'))):
            #    list_name_image.append(infile)
            img = image.load_img(infile, target_size=(224, 224))
            img_data = image.img_to_array(img)
            img_data = np.expand_dims(img_data, axis=0)
            img_data = preprocess_input(img_data)
            vgg16_feature = model.predict(img_data)
            vgg16_feature_np = np.array(vgg16_feature)
            list_features_per_image.append(vgg16_feature_np.flatten())

        with open(infolder + "/features.txt", "wb") as fp:
            pickle.dump(list_features_per_image, fp)
Code Example #27
def embed_image_vgg(img_path, model):
    """
    Args:
        - img_path: path to image
        
    Return:
        - (4096,) vector embedding of image
    """     
    img = image.load_img(img_path, target_size=(224, 224))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    
    # build a sub-model that outputs the fc2 layer activations
    fc2_features_extractor_model = Model(inputs=model.input, outputs=model.get_layer('fc2').output)
    
    fc2_features = fc2_features_extractor_model.predict(x)
    fc2_features = fc2_features.reshape((4096,))
    
    return fc2_features
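
A hedged usage sketch, assuming the model passed in is the full VGG16, whose classifier head provides the 'fc2' layer:

from tensorflow.keras.applications.vgg16 import VGG16

model = VGG16(weights='imagenet')
vec = embed_image_vgg('query.jpg', model)  # hypothetical image file
print(vec.shape)  # (4096,)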
Code Example #28
def imagenet_generator(dataset,
                       batch_size=32,
                       num_classes=1000,
                       is_training=False):
    images = np.zeros((batch_size, 224, 224, 3))
    labels = np.zeros((batch_size, num_classes))
    while True:
        count = 0
        for sample in tfds.as_numpy(dataset):
            image = sample["image"]
            label = sample["label"]

            images[count % batch_size] = preprocess_input(
                np.expand_dims(cv.resize(image, (224, 224)), 0))
            labels[count % batch_size] = np.expand_dims(
                to_categorical(label, num_classes=num_classes), 0)

            count += 1
            if count % batch_size == 0:
                # yield copies: these buffers are overwritten in place on the
                # next pass, which would mutate a batch already handed out
                yield images.copy(), labels.copy()
Code Example #29
def getNNEmbedding(imCollection, modelType='vgg16'):
	from tensorflow.keras.preprocessing import image
	from tensorflow.keras.applications.vgg16 import preprocess_input
	print("Initiating model: "+modelType)
	if modelType=='vgg16':
		from tensorflow.keras.applications.vgg16 import VGG16
		model = VGG16(weights='imagenet', include_top=False)
	if modelType=='inceptionv3':
		from tensorflow.keras.applications.inception_v3 import InceptionV3
		model = InceptionV3(weights='imagenet', include_top=False)
	if modelType=='efficientnet':
		from tensorflow.keras.applications import EfficientNetB3
		model = EfficientNetB3(weights='imagenet',include_top=False)
	imlist = [im['arrays'] for im in imCollection]
	predictions = []
	for thisim in tqdm.tqdm(imlist):
		# preserve_range keeps pixel values in 0-255; skimage's resize otherwise
		# rescales to [0, 1], which would defeat preprocess_input's mean subtraction
		x = np.expand_dims(transform.resize(thisim, (224, 224), preserve_range=True), axis=0)
		x = preprocess_input(x)
		predictions.append(model.predict(x).flatten())
	return predictions
Code Example #30
    def predict():

        if request.method == "POST":

            data = request.files["file"]
            data.save("img.jpg")
            
            img = load_img('img.jpg',target_size=(128, 128))
            x = img_to_array(img)
            x = np.expand_dims(x, axis=0)
            img_data = preprocess_input(x)
            classes = model.predict(img_data)

            A = np.squeeze(np.asarray(classes))
            if A == 1:
                return render_template('index.html', predicted_label="Xray contains Pneumonia")
            else:
                return render_template('index.html', predicted_label="Xray doesn't contain Pneumonia")

        return render_template("index.html")