Example #1
import operator

def predict_ml_yolo(img_path, model, lang_db, dictionary, k_closest=1):
    img = read_img(img_path)
    boxes = load_image_boxes_yolo(img, dictionary, img_path)
    if len(boxes) == 0:
        return []
    # Predict on the boxes; each prediction is a 300-d semantic vector.
    pred_vects = model.predict_on_batch(boxes)
    # Map each box vector to its k closest labels in the language space.
    predictions = lang_db.closest_labels(pred_vects, k_closest=k_closest)

    # Pool the k closest labels from every box into one candidate list.
    preds = []
    for k in range(k_closest):
        for i in range(len(predictions)):
            preds.append(predictions[i][k])

    # Vote: count how often each label appears across all boxes.
    dict_counter = {}
    for pred in preds:
        if pred in dict_counter:
            dict_counter[pred] += 1
        else:
            dict_counter[pred] = 1
    sorted_dict = sorted(dict_counter.items(),
                         key=operator.itemgetter(1),
                         reverse=True)
    # Return up to the 25 most frequent labels.
    return [x[0] for x in sorted_dict[:25]]
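
The voting step at the end of predict_ml_yolo can be shown in isolation. The sketch below is a minimal stand-in: the predictions list is a made-up example shaped like the output of lang_db.closest_labels (one list of k candidate labels per box), and collections.Counter reproduces the dict_counter / sorted() ranking above.

from collections import Counter

# Hypothetical closest_labels output for three boxes with k_closest=2:
# one inner list of candidate labels per detected box.
predictions = [["dog", "cat"], ["dog", "fox"], ["cat", "dog"]]

# Pool every candidate label and rank by vote count, mirroring the
# dict_counter / sorted() logic in predict_ml_yolo.
votes = Counter(label for per_box in predictions for label in per_box)
top_labels = [label for label, _ in votes.most_common(25)]
print(top_labels)  # ['dog', 'cat', 'fox']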
Example #2
def load_val_data(self, lang_db):
    val_file_set = osp.join(self._devkit_path, 'ImageSets/DET/val_bts.txt')
    val_folder_path = osp.join(self._devkit_path, 'Data', 'DET', 'val')
    with open(val_file_set) as f:
        images_data = [line.split() for line in f]
        # Loop forever so the generator can feed validation indefinitely.
        while True:
            for img_data in images_data:
                img_path = osp.join(val_folder_path,
                                    img_data[0] + self._image_ext[0])
                x = read_img(img_path)
                word_vec = lang_db.word_vector(img_data[1])
                # Yield single-sample batches: (1, H, W, C) and (1, 300).
                yield np.expand_dims(x, axis=0), np.expand_dims(word_vec, axis=0)
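
The generator yields one sample at a time; np.expand_dims only adds the leading batch axis that Keras expects. A minimal shape check, assuming the 299x299x3 image size and 300-d word vectors used in the other examples:

import numpy as np

img = np.zeros((299, 299, 3), dtype=np.float32)   # stand-in for read_img output
word_vec = np.zeros(300, dtype=np.float32)        # stand-in for lang_db.word_vector output
print(np.expand_dims(img, axis=0).shape)          # (1, 299, 299, 3)
print(np.expand_dims(word_vec, axis=0).shape)     # (1, 300)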
Example #3
from random import shuffle

import numpy as np

def load_data(imdb, lang_db, batch_size=32):
    total_batch = imdb.num_images // batch_size
    index = list(range(imdb.num_images))

    while True:
        # Reshuffle the image order at the start of every epoch.
        shuffle(index)
        i = 0
        for _ in range(total_batch):
            X_train = []
            y_train = []
            for _ in range(batch_size):
                # Each sample is an image plus its label's word vector.
                x_path, y = imdb.get_image_data_clean(index[i], lang_db)
                x = read_img(x_path)
                X_train.append(x)
                y_train.append(y)
                i += 1
            yield (np.array(X_train, dtype=np.float32),
                   np.array(y_train, dtype=np.float32))
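
The same infinite, shuffled batch-generator pattern can be exercised without the imdb wrapper. The sketch below substitutes toy NumPy arrays for read_img and get_image_data_clean; the array shapes and the batch_generator helper are assumptions for illustration only.

import numpy as np
from random import shuffle

def batch_generator(features, labels, batch_size=4):
    # Infinite, shuffled batch generator mirroring load_data above.
    index = list(range(len(features)))
    while True:
        shuffle(index)
        for start in range(0, len(index) - batch_size + 1, batch_size):
            batch_idx = index[start:start + batch_size]
            yield features[batch_idx], labels[batch_idx]

# Toy data standing in for (image, word-vector) pairs.
X = np.random.rand(10, 299, 299, 3).astype(np.float32)
y = np.random.rand(10, 300).astype(np.float32)
xb, yb = next(batch_generator(X, y))
print(xb.shape, yb.shape)  # (4, 299, 299, 3) (4, 300)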
Example #4
import operator

def predict_ml_random(img_path, model, lang_db, extra):
    # Generate candidate boxes for the image.
    img = read_img(img_path, target_size=(299, 299))
    boxes = generate_n_boxes(img, pool=extra)
    # Predict on the boxes; each prediction is a 300-d semantic vector.
    pred_vects = model.predict_on_batch(boxes)
    # Retrieve the closest label in the semantic space for each box vector.
    predictions = lang_db.closest_labels(pred_vects, k_closest=1)

    # Vote: count how often each label appears across the boxes.
    dict_counter = {}
    for pred in predictions:
        pred = pred[0]
        if pred in dict_counter:
            dict_counter[pred] += 1
        else:
            dict_counter[pred] = 1

    sorted_dict = sorted(dict_counter.items(),
                         key=operator.itemgetter(1),
                         reverse=True)
    # Return up to the 20 most frequent labels.
    return [x[0] for x in sorted_dict[:20]]
Example #5
import numpy as np

def predict_image_singlelabel(img_path, model, lang_db, extra=None):
    img = read_img(img_path, target_size=(299, 299))
    # Add the batch axis expected by model.predict: (1, 299, 299, 3).
    img = np.expand_dims(img, axis=0)
    pred_vector = model.predict(img)
    # Return the 20 labels whose word vectors are closest to the predicted vector.
    predictions = lang_db.closest_labels(pred_vector, k_closest=20)
    return predictions
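
Several of these examples call lang_db.closest_labels to map a predicted 300-d vector back to labels, but its internals are not shown here. The sketch below is one plausible implementation based on cosine similarity over a label-embedding matrix; the function signature, label names, and embeddings are invented for illustration and may differ from the project's actual lang_db.

import numpy as np

def closest_labels(pred_vectors, label_names, label_matrix, k_closest=20):
    # Rank labels by cosine similarity between predicted and label vectors.
    label_norm = label_matrix / np.linalg.norm(label_matrix, axis=1, keepdims=True)
    pred_norm = pred_vectors / np.linalg.norm(pred_vectors, axis=1, keepdims=True)
    sims = pred_norm @ label_norm.T                     # (n_preds, n_labels)
    top_idx = np.argsort(-sims, axis=1)[:, :k_closest]
    return [[label_names[j] for j in row] for row in top_idx]

# Toy 300-d embedding space with three labels.
labels = ["dog", "cat", "car"]
emb = np.random.rand(3, 300)
pred = emb[0:1] + 0.01 * np.random.rand(1, 300)         # a vector near "dog"
print(closest_labels(pred, labels, emb, k_closest=2))   # e.g. [['dog', 'cat']]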