Example #1
def run(path):
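    """Walk the transcript .txt files under {path}/videos/<video_id>/ and store
    each lowercased transcript in the tal_db.transcription entity."""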
    video_filelist = sorted(get_all_filesname(f"{path}/videos"))[100:]

    for videonr in tqdm(video_filelist):
        try:
            for filename in tqdm(
                    get_all_filesname(f"{path}/videos/{videonr}/")):
                if filename.endswith(".txt"):
                    with open(f"{path}/videos/{videonr}/{filename}") as f:
                        data = f.read().lower()
                        with CottontailDBClient('localhost', 1865) as client:
                            entry = {
                                'video_id': Literal(stringData=videonr),
                                'audio_transcription': Literal(stringData=data)
                            }
                            client.insert('tal_db', 'transcription', entry)
        except (OSError, UnicodeError):
            # UnicodeDecodeError and UnicodeEncodeError are subclasses of UnicodeError.
            continue
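The 'transcription' entity this example writes to is not defined in the schema snippet at the end of this file; below is a minimal sketch of a matching definition, where the column set and nullability are assumptions inferred from the insert above:

from cottontaildb_client import CottontailDBClient, Type, column_def

with CottontailDBClient('localhost', 1865) as client:
    # Columns inferred from the Literal(stringData=...) values inserted above.
    transcription_columns = [
        column_def('video_id', Type.STRING, nullable=False),
        column_def('audio_transcription', Type.STRING, nullable=True)
    ]
    client.create_entity('tal_db', 'transcription', transcription_columns)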
Example #2
def store_color_sketch_from_masks(image, video_id, keyframe_id, counter, start_time):
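    """Run a detectron2 Mask R-CNN over one keyframe image and, for every detected
    instance, store its bounding box, the dominant colour of the cropped region,
    the class label and the keyframe start time in the tal_db.sketch entity."""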
    detectron2_img = cv2.imread(image)
    # Build a COCO-pretrained Mask R-CNN predictor (constructing cfg and the
    # predictor on every call is expensive; callers may want to hoist this).
    cfg = get_cfg()
    cfg.merge_from_file(model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5  # set threshold for this model
    # Find a model from detectron2's model zoo. You can use the https://dl.fbaipublicfiles... url as well
    cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml")
    predictor = DefaultPredictor(cfg)
    outputs = predictor(detectron2_img)
    boxes = outputs["instances"].pred_boxes.tensor.cpu().numpy()
    classes = outputs["instances"].pred_classes.cpu().numpy()
    
    im = Image.open(image)
    for data, box in zip(classes, boxes):
        num = data.item()
        object_label = MetadataCatalog.get(cfg.DATASETS.TRAIN[0]).thing_classes[num]
        im_crop = im.crop(tuple(box))
        dominant_color = list(find_dominant_color(im_crop))
        counter += 1
        keyframe_nr = int(keyframe_id) - 1
        ###### cottontail db logic ######
        with CottontailDBClient('localhost', 1865) as client:
            # Insert entry
            entry = {
                'box_id': Literal(intData=counter),
                'video_id': Literal(stringData=str(video_id)),
                'keyframe_id': Literal(intData=int(keyframe_id)),
                'sketch_vector': float_vector(box.tolist()),
                'color_vector': float_vector(dominant_color),
                'object': Literal(stringData=object_label),
                'start_time': Literal(floatData=float(start_time.iloc[keyframe_nr]["starttime"]))
            }
            client.insert('tal_db', 'sketch', entry)
    return counter
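A minimal driver sketch for this function (an assumption, not part of the original snippet), reusing the directory layout, the get_all_filesname/get_keyframe_id helpers and the per-video msb TSV that the other examples use:

import pandas as pd
from tqdm import tqdm

def run(path):
    # Global box counter threaded through every call, as returned by the function above.
    counter = 0
    for videonr in tqdm(sorted(get_all_filesname(f"{path}/keyframes_filtered"))):
        # Per-video shot table with the "starttime" column indexed above.
        start_time = pd.read_csv(f"{path}/msb/{videonr}.tsv", delimiter="\t")
        for filename in get_all_filesname(f"{path}/keyframes_filtered/{videonr}"):
            if filename in ("Thumbs.db", ".DAV"):
                continue
            keyframe_id = get_keyframe_id(filename, videonr, path)
            image = f"{path}/keyframes_filtered/{videonr}/{filename}"
            counter = store_color_sketch_from_masks(image, videonr, keyframe_id, counter, start_time)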
Example #3
def run(path):
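    """For each filtered keyframe, request an image caption from the captioning
    service on localhost:5000 and run Tesseract OCR over the frame, then store
    both texts with the keyframe start time in the tal_db.text_search entity."""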
    video_filelist = sorted(
        get_all_filesname(f"{path}/keyframes_filtered"))[700:750]

    for videonr in tqdm(video_filelist):
        #f = open(f"D:\\Video Retrieval System\\info\\{videonr}.json")
        try:
            with CottontailDBClient('localhost', 1865) as client:
                start_time = pd.read_csv(f"{path}/msb/{videonr}.tsv", delimiter="\t")

                for filename in tqdm(
                        get_all_filesname(
                            f"{path}/keyframes_filtered/{videonr}")):
                    if filename not in ("Thumbs.db", ".DAV"):
                        keyframe_id = get_keyframe_id(filename, videonr, path)
                        keyframe_nr = int(keyframe_id) - 1
                        image = f"{path}/keyframes_filtered/{videonr}/{filename}"
                        with open(image, "rb") as file:
                            file_form = {"image": (image, file, "image/png")}
                            text_url = "http://localhost:5000/model/predict"
                            r = requests.post(url=text_url, files=file_form)
                            response = r.json()
                            capture_text = response["predictions"][0][
                                "caption"].lower()
                        img = Image.open(image)
                        text = tess.image_to_string(img).strip("\n\x0c")
                        # Only store keyframes where Tesseract actually found text.
                        if text.strip():
                            text = text.replace("\n", " ").lower()
                            entry = {
                                'video_id': Literal(stringData=videonr),
                                'keyframe_id': Literal(intData=int(keyframe_id)),
                                'tesseract_text': Literal(stringData=text),
                                'start_time': Literal(floatData=float(start_time.iloc[keyframe_nr]["starttime"])),
                                'image_capture_text': Literal(stringData=capture_text)
                            }
                            client.insert('tal_db', 'text_search', entry)

        except (OSError, UnicodeError):
            # UnicodeDecodeError and UnicodeEncodeError are subclasses of UnicodeError.
            continue
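The 'text_search' entity this example inserts into is also missing from the schema snippet at the end of this file; a hedged sketch of a matching definition, with column types inferred from the Literal types used above:

from cottontaildb_client import CottontailDBClient, Type, column_def

with CottontailDBClient('localhost', 1865) as client:
    # Column set and nullability are assumptions based on the entry dict above.
    text_search_columns = [
        column_def('video_id', Type.STRING, nullable=False),
        column_def('keyframe_id', Type.INTEGER, nullable=False),
        column_def('tesseract_text', Type.STRING, nullable=True),
        column_def('start_time', Type.FLOAT, nullable=False),
        column_def('image_capture_text', Type.STRING, nullable=True)
    ]
    client.create_entity('tal_db', 'text_search', text_search_columns)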
Example #4
def run(path):
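    """Split each filtered keyframe into a 4x3 grid, compute the dominant colour of
    every cell and store the flattened 36-dimensional colour vector together with
    the keyframe start time in the tal_db.color_image entity."""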
    #video_filelist = sorted(get_all_filesname(f"{path}/home/keyframes_filtered"))[10:]
    video_filelist = sorted(
        get_all_filesname(f"{path}/keyframes_filtered"))[400:]
    failed = {}  # note: initialised per video below but never populated or returned in this snippet

    for videonr in tqdm(video_filelist):
        failed[videonr] = []
        #f = open(f"{path}/home/msb/{videonr}.tsv")
        start_times = pd.read_csv(f"{path}/msb/{videonr}.tsv", delimiter="\t")
        #for filename in tqdm(get_all_filesname(f"{path}/home/keyframes_filtered/{videonr}")):
        for filename in tqdm(
                get_all_filesname(f"{path}/keyframes_filtered/{videonr}")):
            if filename not in ("Thumbs.db", ".DAV"):
                keyframe_id = get_keyframe_id(filename, videonr, path)
                keyframe_nr = int(keyframe_id) - 1
                #image = f"{path}/home/keyframes_filtered/{videonr}/{filename}"
                image = f"{path}/keyframes_filtered/{videonr}/{filename}"
                xPieces = 4
                yPieces = 3
                colors = []
                im = Image.open(image)
                imgwidth, imgheight = im.size
                height = imgheight // yPieces
                width = imgwidth // xPieces
                for i in range(0, yPieces):
                    for j in range(0, xPieces):
                        box = (j * width, i * height, (j + 1) * width,
                               (i + 1) * height)
                        a = im.crop(box)
                        color = find_dominant_color(a)
                        colors.append(color)

                # Flatten the 12 RGB tuples into one 36-dimensional colour vector.
                color_list = list(sum(colors, ()))
                with CottontailDBClient('localhost', 1865) as client:
                    # Insert entry
                    entry = {
                        'video_id': Literal(stringData=str(videonr)),
                        'keyframe_id': Literal(intData=int(keyframe_id)),
                        'dominant_color_vector': float_vector(color_list),
                        'start_time': Literal(floatData=float(start_times.iloc[keyframe_nr]["starttime"]))
                    }
                    client.insert('tal_db', 'color_image', entry)

def run(path):
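    """Load the per-video JSON metadata and store the vimeo id, title, description
    and tags in the tal_db.video_search and tal_db.video_tags entities."""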
    video_filelist = sorted(
        get_all_filesname(f"{path}/keyframes_filtered"))[200:]

    for videonr in tqdm(video_filelist):
        try:
            with open(f"{path}/info/{videonr}.json") as f:
                data = json.load(f)

            with CottontailDBClient('localhost', 1865) as client:

                entry = {
                    'video_id': Literal(stringData=videonr),
                    'vimeo_id': Literal(stringData=data["vimeoId"].lower()),
                    'title': Literal(stringData=data["title"].lower()),
                    'description': Literal(stringData=cleanhtml(data["description"]).lower())
                }
                client.insert('tal_db', 'video_search', entry)

                for tag in data["tags"]:
                    entry = {
                        'video_id': Literal(stringData=videonr),
                        'tags': Literal(stringData=tag.lower())
                    }
                    client.insert('tal_db', 'video_tags', entry)

        except (OSError, UnicodeError):
            # UnicodeDecodeError and UnicodeEncodeError are subclasses of UnicodeError.
            continue
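The 'video_search' and 'video_tags' entities are likewise not defined in the schema snippet below; a hedged sketch of how they could be created, with all columns as strings to match the Literal(stringData=...) inserts above:

from cottontaildb_client import CottontailDBClient, Type, column_def

with CottontailDBClient('localhost', 1865) as client:
    # Column sets are assumptions inferred from the entry dicts above.
    video_search_columns = [
        column_def('video_id', Type.STRING, nullable=False),
        column_def('vimeo_id', Type.STRING, nullable=True),
        column_def('title', Type.STRING, nullable=True),
        column_def('description', Type.STRING, nullable=True)
    ]
    client.create_entity('tal_db', 'video_search', video_search_columns)

    video_tags_columns = [
        column_def('video_id', Type.STRING, nullable=False),
        column_def('tags', Type.STRING, nullable=False)
    ]
    client.create_entity('tal_db', 'video_tags', video_tags_columns)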
from cottontaildb_client import CottontailDBClient, Type, column_def
from PIL import Image
import numpy as np

with CottontailDBClient('localhost', 1865) as client:
    # Create schema
    client.create_schema('tal_db')

    # Define entity sketch columns
    sketch_columns = [
        column_def('box_id', Type.INTEGER, nullable=False),
        column_def('video_id', Type.STRING, nullable=False),
        column_def('keyframe_id', Type.INTEGER, nullable=False),
        column_def('sketch_vector', Type.FLOAT_VEC, nullable=True, length=4),
        column_def('color_vector', Type.FLOAT_VEC, nullable=True, length=3),
        column_def('object', Type.STRING, nullable=False),
        column_def('start_time', Type.FLOAT, nullable=False)
    ]
    # Create entity color sketch
    client.create_entity('tal_db', 'sketch', sketch_columns)

    # Define entity color_image columns
    color_image_columns = [
        column_def('video_id', Type.STRING, nullable=False),
        column_def('keyframe_id', Type.INTEGER, nullable=False),
        column_def('dominant_color_vector',
                   Type.FLOAT_VEC,
                   nullable=True,
                   length=36),
        column_def('start_time', Type.FLOAT, nullable=False)
    ]
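    # Create entity color_image (assumed to mirror the create_entity call for 'sketch' above)
    client.create_entity('tal_db', 'color_image', color_image_columns)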