Code example #1
def test_load_drawings():
    qd = QuickDrawData()
    qd.load_drawings(["anvil", "ant"])
    assert qd.loaded_drawings == ["anvil", "ant"]

    qd.get_drawing("angel")
    assert qd.loaded_drawings == ["anvil", "ant", "angel"]
Code example #2
def pegar_desenho(Escolha="flower"):
    qd = QuickDrawData()  # gives access to the whole available dataset
    arquivo_str = ''  # start the string the information will be written to

    if Escolha != "random":
        desenho = qd.get_drawing(Escolha)  # specific drawing
    else:
        desenho = qd.get_drawing(random.choice(
            qd.drawing_names))  # random drawing

    desenho.image.save("Desenho.png")
    data_array = desenho.image_data  # raw data

    print(data_array)
    print(len(data_array))

    # number of strokes
    arquivo_str += "{} ".format(len(data_array))

    for stroke in desenho.strokes:
        # length of the stroke
        arquivo_str += "{} ".format(len(stroke))
        print(len(stroke))

        for x, y in stroke:
            # stroke points
            arquivo_str += "{} {} ".format(x, y)

    return arquivo_str  # return the string that was built
Code example #3
def find(different_object, the_object, closer, K):
    # closer: if True, return the K closest drawings; if False, the K farthest.
    the_point = the_dict.get(the_object)
    distance = []
    coordinates = latent_dictionary.get(different_object)

    for i in range(len(coordinates[0])):
        xy = [coordinates[0][i], coordinates[1][i]]
        dist = dstnc.euclidean(the_point, xy)
        distance.append(dist)

    for M in range(K):
        if closer:
            the_distance = min(distance)
        else:
            the_distance = max(distance)

        for i in range(len(distance)):
            if distance[i] == the_distance:
                index = i
                break

        coordinate = [coordinates[0][index], coordinates[1][index]]
        for key in id_vector:
            if id_vector.get(key) == coordinate:
                the_id = key
                break

        qd = QuickDrawData()
        doodle = qd.get_drawing(different_object)
        found = False
        while not doodle.key_id == the_id:
            doodle = qd.get_drawing(different_object)
        found = True  # the loop only exits once the drawing with the matching key_id is retrieved
        if found:
            DEFAULT_SIZE_WHITE_CHANNEL = (1000, 1000, 1)
            canvas = np.ones(DEFAULT_SIZE_WHITE_CHANNEL, dtype="uint8") * 255
            cv2.namedWindow('Window')
            my_stroke_list = doodle.image_data
            for N in my_stroke_list:
                x_list = N[0]
                y_list = N[1]
                for point in range((len(x_list) - 1)):
                    cv2.line(canvas, (x_list[point], y_list[point]),
                             (x_list[point + 1], y_list[point + 1]), (0, 0, 0),
                             2)
            cv2.imwrite(
                "./png_sketches/latent_png/" + different_object + "_" +
                the_object + "_" + str(closer) + "_" + str(M) + ".png", canvas)

            distance.pop(index)
            coordinates[0].pop(index)
            coordinates[1].pop(index)

        else:
            print("Image cannot be found.")
Code example #4
class Quickdraw(Dataset):
    def __init__(self, datasize, classname=None, transforms=None):
        self.datasize = datasize
        self.qd = QuickDrawData(max_drawings=self.datasize)

        if classname is None:
            self.classname = self.qd.drawing_names
        else:
            self.classname = classname
        self.transforms = transforms

        self.label_ids = self.getLabelID(self.classname, self.datasize)
        self.img_ids = self.getImageID(self.classname, self.datasize)

    def getLabelID(self, classname, datasize):
        label_ids = []
        for i in range(len(classname)):
            label_id = [i for _ in range(datasize)]
            label_ids.append(label_id)
        label_ids = [element for sublist in label_ids for element in sublist]
        return label_ids

    def getImageID(self, classname, datasize):
        img_ids = []
        for i in range(len(classname)):
            for j in range(datasize):
                img = self.qd.get_drawing(classname[i], index=j)
                img_ids.append(img.image)
        return img_ids

    def __getitem__(self, idx):
        return self.img_ids[idx], self.label_ids[idx]

    def __len__(self):
        return self.datasize * len(self.classname)
Code example #5
def test_get_specific_drawing():
    qd = QuickDrawData()

    # get the first anvil drawing and test the values
    d = qd.get_drawing("anvil", 0)
    assert d.name == "anvil"
    assert d.key_id == 5355190515400704
    assert d.recognized == True
    assert d.countrycode == "PL"
    assert d.timestamp == 1488368345

    # 1 stroke, 2 x,y coords, 33 points
    assert len(d.image_data) == 1
    assert len(d.image_data[0]) == 2
    assert len(d.image_data[0][0]) == 33
    assert len(d.image_data[0][1]) == 33

    assert d.no_of_strokes == 1
    assert len(d.strokes) == 1
    assert len(d.strokes[0]) == 33
    assert len(d.strokes[0][0]) == 2

    assert isinstance(d.image, Image)
    assert isinstance(
        d.get_image(stroke_color=(10, 10, 10),
                    stroke_width=4,
                    bg_color=(200, 200, 200)), Image)
Code example #6
def test_anvil():
    qd = QuickDrawData()
    anvil = qd.get_drawing("anvil")
    # anvil.image.show()
    trans = transforms.Compose([transforms.Resize(64), transforms.ToTensor()])
    anvil_tensor = transforms.ToTensor()(anvil.image)
    down_sized_anvil = trans(anvil.image)
    plt.imshow(anvil_tensor.permute(1, 2, 0))
    plt.show()
    plt.imshow(down_sized_anvil.permute(1, 2, 0))
    plt.show()
Code example #7
File: util.py Project: rickzx/Doodle
def loadNdjson(tag, scale, pos, index=None, immutable=False):
    try:
        if tag not in Status.cache:
            qd = QuickDrawData(True)
            anvil = qd.get_drawing(tag, index)
            Status.cache[tag] = list(anvil.strokes)
            if not immutable:
                Status.objects[tag] = [scale, pos[0], pos[1]]
        if tag in Status.cache and not immutable:
            Status.objects[tag] = [scale, pos[0], pos[1]]
    except:
        pass
Code example #8
File: guess_drawing.py Project: Jahn16/JBoTSeries
def get_drawing(self, recognized=True):
    dirname = os.path.dirname(__file__)
    cache_dir = os.path.join(dirname, '.quickdrawcache')
    qd = QuickDrawData(max_drawings=1, cache_dir=cache_dir)
    group_name = random.choice(qd.drawing_names)
    qd.load_drawings([group_name])
    drawing_data = qd.get_drawing(group_name)
    while drawing_data.recognized != recognized:
        group_name = random.choice(qd.drawing_names)
        qd.load_drawings([group_name])
        drawing_data = qd.get_drawing(group_name)
    drawing_size = drawing_data.get_image().size
    drawing = Image.new('RGB', drawing_size, 'white')
    draw = ImageDraw.Draw(drawing)
    frame = copy.deepcopy(drawing)
    frames = []
    for stroke in drawing_data.strokes:
        draw.line(stroke, 'black')
        frame = copy.deepcopy(drawing)
        frames.append(frame)
    return frames, drawing_data
Code example #9
File: draw.py Project: zarifmahmud/DoodleMyWorld
def bad_sketch(keyword: str) -> str:
    """
    Input a noun you want a sketch of, and if Google Quickdraw finds it,
    it will save a random doodle of it to keyword.gif, and return the filepath.
    """
    qd = QuickDrawData()
    if keyword is not None:
        keyword = keyword.lower()
        if keyword == "person":
            keyword = "smiley face"
    try:
        key = qd.get_drawing(keyword)
        filepath = "keyword.gif"
        key.image.save(filepath)
        return filepath
    except ValueError:
        return "blank.png"
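A minimal usage sketch for the function above (the "cat" keyword is just an illustrative choice; which categories exist depends on the Quick, Draw! dataset):

filepath = bad_sketch("cat")
print(filepath)  # "keyword.gif" on success, "blank.png" if the category was not found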
Code example #10
class QuickDrawDataset(data.Dataset):
    def __init__(self, root, classes, transform):
        self.classes = classes
        self.labels = torch.arange(len(classes))
        self.transform = transform
        self.qdd = QuickDrawData(recognized=True, max_drawings=10000, cache_dir=root)
        self.qdd.load_drawings(classes)

    def __getitem__(self, idx):
        c = self.classes[idx%len(self.classes)]
        label = self.labels[idx%len(self.classes)]
        img = self.qdd.get_drawing(c).image
        if self.transform:
            img = self.transform(img)
        return img, label

    def __len__(self):
        return 10000
Code example #11
def test_get_random_drawing():
    qd = QuickDrawData()

    d = qd.get_drawing("anvil", 0)
    assert d.name == "anvil"
    assert isinstance(d.key_id, int)
    assert isinstance(d.recognized, bool)
    assert isinstance(d.timestamp, int)
    assert isinstance(d.countrycode, str)

    assert isinstance(d.image_data, list)
    assert len(d.image_data) == d.no_of_strokes

    assert isinstance(d.strokes, list)
    assert len(d.strokes) == d.no_of_strokes
    for stroke in d.strokes:
        for point in stroke:
            assert len(point) == 2

    assert isinstance(d.image, Image)
    assert isinstance(
        d.get_image(stroke_color=(10, 10, 10),
                    stroke_width=4,
                    bg_color=(200, 200, 200)), Image)
Code example #12
    return object_list


qd_categories = open_and_read(file_path)
number_of_categories_to_subsample = 60

qd_categories_subsample = random.sample(qd_categories,
                                        number_of_categories_to_subsample)
print("The selected categories are:")
print(qd_categories_subsample)

for category in qd_categories_subsample:
    sketch_name = category
    number_of_drawings = 20
    qd = QuickDrawData()
    doodle = qd.get_drawing(sketch_name)
    drawing_list = []
    with open("./ndjson_files/" + sketch_name + "_simplified_qd.ndjson",
              'w') as f:
        writer = ndjson.writer(f, ensure_ascii=False)
        for i in range(number_of_drawings):
            while doodle.recognized is False or doodle in drawing_list:
                doodle = qd.get_drawing(sketch_name)
            drawing_list.append(doodle)
            drawing_map = {
                "word": sketch_name,
                "key_id": doodle.key_id,
                "drawing": doodle.image_data
            }
            writer.writerow(drawing_map)
Code example #13
from quickdraw import QuickDrawData

qd = QuickDrawData()

anvil = qd.get_drawing("anvil")
stroke_no = 0
for stroke in anvil.strokes:
    for x, y in stroke:
        # x = xy[0]
        # y = xy[1]
        print("stroke={} x={} y={}".format(stroke_no, x, y))
    stroke_no += 1
Code example #14
File: quickDraw.py Project: sriramNV1301/Project1
import quickDraw
from quickdraw import QuickDrawData
import SptoText as st

tag = st.out
qd = QuickDrawData()
ext = '.jpg'
verb = []

for i in tag:
    if i[1] == 'NN' or i[1] == 'NNP':
        try:
            obj = qd.get_drawing(i[0].lower())
            name = i[0].lower()
            obj.image.save(name + ext)
        except:
            continue

    elif i[1] == 'VBG':
        verb.append(i[0])

    else:
        continue

for v in verb:
    print(v)
Code example #15
File: main.py Project: jumahe/story-draw
qd = QuickDrawData(max_drawings=1000,
                   refresh_data=False,
                   jit_loading=True,
                   print_messages=True,
                   cache_dir='./quickdrawcache')

data_out = {}
data_out["categories"] = {}
data_categories = data_out["categories"]

cat_list_f = open(cat_file, encoding="utf-8")
lines = cat_list_f.readlines()
print(len(lines))

for line in lines:
    line = line.rstrip('\n')
    data_categories[line] = {}
    cat_obj = data_categories[line]
    cat_obj["drawings"] = []
    drawings = cat_obj["drawings"]
    for i in range(3):
        content = qd.get_drawing(line)
        drawing = {}
        drawing["id"] = content.key_id
        drawing["nb_strokes"] = content.no_of_strokes
        drawing["strokes"] = content.strokes
        drawings.append(drawing)

with io.open('quickdraw.json', 'w', encoding='utf8') as outfile:
    data = json.dumps(data_out, ensure_ascii=False, sort_keys=True, indent=1)
    outfile.write(data)
Code example #16
from quickdraw import QuickDrawData

qd = QuickDrawData()

anvil = qd.get_drawing("anvil")
print(anvil)

ant = qd.get_drawing("ant")
print(ant)
Code example #17
import pgzrun
from random import randint
from quickdraw import QuickDrawData
qd = QuickDrawData()

rnd_raccoon = qd.get_drawing("raccoon")
rnd_raccoon.image.save("images/raccoon.png")
raccoon = Actor("raccoon")
beep = tone.create('A3', 0.5)
def draw():
    screen.clear()
    screen.fill('white')
    raccoon.draw()

def place_raccoon():
    raccoon.x = randint(100, 750)
    raccoon.y = randint(100, 550)

def set_raccoon_normal():
    images.cache.clear()
    rnd_raccoon = qd.get_drawing("raccoon")
    rnd_raccoon.image.save("images/raccoon.png")
    raccoon.image = "raccoon.png"

def set_skull():
    rnd_skull = qd.get_drawing("skull")
    rnd_skull.image.save("images/skull.png")
    raccoon.image = "skull.png"

def on_mouse_down(pos):
    if raccoon.collidepoint(pos):
Code example #18
from quickdraw import QuickDrawData

if __name__ == '__main__':
    quickdraw = QuickDrawData()
    print(quickdraw.drawing_names)
    n = 10
    for i in range(n * 2):
        draw = quickdraw.get_drawing("bicycle")
        draw.image.save("datasets/trainA/train_bicycle{}.gif".format(i))
    for i in range(n):
        draw = quickdraw.get_drawing("bicycle")
        draw.image.save("datasets/testA/test_bicycle{}.gif".format(i))
    for i in range(n * 2):
        draw = quickdraw.get_drawing("campfire")
        draw.image.save("datasets/trainB/train_campfire{}.gif".format(i))
    for i in range(n):
        draw = quickdraw.get_drawing("campfire")
        draw.image.save("datasets/testB/test_campfire{}.gif".format(i))
Code example #19
import streamlit as st
import pandas as pd
from quickdraw import QuickDrawData

qd = QuickDrawData(recognized=True, jit_loading=False, max_drawings=1000)
qd.load_all_drawings()
drawings = qd.loaded_drawings
n_strokes = [d.no_of_strokes for d in drawings]

anvil = qd.get_drawing(name="dolphin")
st.image(anvil.image)
st.write(f"Groups loaded: {qd.drawing_names}")
Code example #20
from quickdraw import QuickDrawData
from PIL import Image, ImageDraw, ImageOps
import cv2
import numpy as np

qd = QuickDrawData()
anvil = qd.get_drawing("circle")

anvil_image = Image.new("RGB", (600, 600), color=(255, 255, 255))
anvil_drawing = ImageDraw.Draw(anvil_image)

for stroke in anvil.strokes:
    # anvil_drawing.line(stroke, fill=(0,0,0), width=2)

    for coordinate in range(len(stroke) - 1):
        x1 = stroke[coordinate][0] * 2
        y1 = stroke[coordinate][1] * 2
        x2 = stroke[coordinate + 1][0] * 2
        y2 = stroke[coordinate + 1][1] * 2
        anvil_drawing.line((x1, y1, x2, y2), fill=(0, 0, 0), width=2)

old_size = anvil_image.size  # (width, height) of the original image

new_im = ImageOps.expand(anvil_image, border=10, fill=(255, 255, 255))

new_im.show()

anvil_image.show()
anvil_image = new_im
#image processing
open_cv_image = np.array(anvil_image)
Code example #21
class Detector:
    def __init__(self):
        # Load a model imported from Tensorflow.
        frozen_ssd_model = os.path.join(os.path.dirname(__file__), '../models',
                                        'ssd_mobilenet_model',
                                        'frozen_inference_graph.pb')
        ssd_config = os.path.join(os.path.dirname(__file__), '../models',
                                  'ssd_mobilenet_model',
                                  'ssd_mobilenet_v1_coco_2017_11_17.pbtxt')
        self.tensorflowNet = cv2.dnn.readNetFromTensorflow(
            frozen_ssd_model, ssd_config)

        # List of dictionaries containing detected object information.
        self.detected_objects = []

        # QuickDraw object
        cache_path = os.path.join(os.path.dirname(__file__), 'quickdrawcache')
        self.qd = QuickDrawData(recognized=True,
                                max_drawings=1000,
                                cache_dir=cache_path)

        # Label HashMap
        self.classNames = {
            0: 'background',
            1: 'person',
            2: 'bicycle',
            3: 'car',
            4: 'motorcycle',
            5: 'airplane',
            6: 'bus',
            7: 'train',
            8: 'truck',
            9: 'boat',
            10: 'traffic light',
            11: 'fire hydrant',
            13: 'stop sign',
            14: 'parking meter',
            15: 'bench',
            16: 'bird',
            17: 'cat',
            18: 'dog',
            19: 'horse',
            20: 'sheep',
            21: 'cow',
            22: 'elephant',
            23: 'bear',
            24: 'zebra',
            25: 'giraffe',
            27: 'backpack',
            28: 'umbrella',
            31: 'handbag',
            32: 'tie',
            33: 'suitcase',
            34: 'frisbee',
            35: 'skis',
            36: 'snowboard',
            37: 'sports ball',
            38: 'kite',
            39: 'baseball bat',
            40: 'baseball glove',
            41: 'skateboard',
            42: 'surfboard',
            43: 'tennis racket',
            44: 'bottle',
            46: 'wine glass',
            47: 'cup',
            48: 'fork',
            49: 'knife',
            50: 'spoon',
            51: 'bowl',
            52: 'banana',
            53: 'apple',
            54: 'sandwich',
            55: 'orange',
            56: 'broccoli',
            57: 'carrot',
            58: 'hot dog',
            59: 'pizza',
            60: 'donut',
            61: 'cake',
            62: 'chair',
            63: 'couch',
            64: 'potted plant',
            65: 'bed',
            67: 'dining table',
            70: 'toilet',
            72: 'tv',
            73: 'laptop',
            74: 'mouse',
            75: 'remote',
            76: 'keyboard',
            77: 'cell phone',
            78: 'microwave',
            79: 'oven',
            80: 'toaster',
            81: 'sink',
            82: 'refrigerator',
            84: 'book',
            85: 'clock',
            86: 'vase',
            87: 'scissors',
            88: 'teddy bear',
            89: 'hair drier',
            90: 'toothbrush'
        }
        # Tensorflow to QuickDraw HashMap
        self.tensorflow_to_quickdraw_hash = {
            'background': '',
            'person': 'face',
            'bicycle': 'bicycle',
            'car': 'car',
            'motorcycle': 'motorbike',
            'airplane': 'airplane',
            'bus': 'bus',
            'train': 'train',
            'truck': 'truck',
            'boat': 'sailboat',
            'traffic light': 'traffic light',
            'fire hydrant': 'fire hydrant',
            'stop sign': 'stop sign',
            'parking meter': '',
            'bench': 'bench',
            'bird': 'bird',
            'cat': 'cat',
            'dog': 'dog',
            'horse': 'horse',
            'sheep': 'sheep',
            'cow': 'cow',
            'elephant': 'elephant',
            'bear': 'bear',
            'zebra': 'zebra',
            'giraffe': 'giraffe',
            'backpack': 'backpack',
            'umbrella': 'umbrella',
            'handbag': 'purse',
            'tie': 'bowtie',
            'suitcase': 'suitcase',
            'frisbee': 'circle',
            'skis': '',
            'snowboard': '',
            'sports ball': 'soccer ball',
            'kite': 'scissors',
            'baseball bat': 'baseball bat',
            'baseball glove': '',
            'skateboard': 'skateboard',
            'surfboard': '',
            'tennis racket': 'tennis racquet',
            'bottle': 'wine bottle',
            'wine glass': 'wine glass',
            'cup': 'cup',
            'fork': 'fork',
            'knife': 'knife',
            'spoon': 'spoon',
            'bowl': '',
            'banana': 'banana',
            'apple': 'apple',
            'sandwich': 'sandwich',
            'orange': '',
            'broccoli': 'broccoli',
            'carrot': 'carrot',
            'hot dog': 'hot dog',
            'pizza': 'pizza',
            'donut': 'donut',
            'cake': 'cake',
            'chair': 'chair',
            'couch': 'couch',
            'potted plant': 'house plant',
            'bed': 'bed',
            'dining table': 'table',
            'toilet': 'toilet',
            'tv': 'television',
            'laptop': 'laptop',
            'mouse': 'mouse',
            'remote': 'remote control',
            'keyboard': 'keyboard',
            'cell phone': 'cell phone',
            'microwave': 'microwave',
            'oven': 'oven',
            'toaster': 'toaster',
            'sink': 'sink',
            'refrigerator': '',
            'book': 'book',
            'clock': 'clock',
            'vase': 'vase',
            'scissors': 'scissors',
            'teddy bear': 'teddy-bear',
            'hair drier': '',
            'toothbrush': 'toothbrush'
        }

    def detect_object(self, img):
        # Input image
        img = cv2.cvtColor(numpy.array(img), cv2.COLOR_BGR2RGB)
        img_height, img_width, channels = img.shape

        # Use the given image as input, which needs to be blob(s).
        self.tensorflowNet.setInput(
            cv2.dnn.blobFromImage(img,
                                  size=(300, 300),
                                  swapRB=True,
                                  crop=False))

        # Runs a forward pass to compute the net output.
        networkOutput = self.tensorflowNet.forward()

        # For mask rcnn model
        # networkOutput, mask = self.tensorflowNet.forward(["detection_out_final", "detection_masks"])
        '''
        Loop over the detected objects
            Detection Indexes:
            0: (not used)
            1: the identifier of the object's class ex. 5 = 'airplane'
            2: the accuracy (score) of the object detected
            3: dist of object from the left
            4: dist of object from the top
            5: dist of object from the right
            6: dist of object from the bottom
        '''

        for detection in networkOutput[0, 0]:

            score = float(detection[2])
            if score > 0.5:

                # Object dimensions
                obj_left = detection[3] * img_width
                obj_top = detection[4] * img_height
                obj_right = detection[5] * img_width
                obj_bottom = detection[6] * img_height

                # Object name, scale, and x,y offset
                name_of_object = self.classNames[detection[1]]
                xy_scale = self.get_object_scale(obj_left, obj_right, obj_top,
                                                 obj_bottom, img_width,
                                                 img_height)
                xy_normalized = self.normalize_object_coordinates(
                    obj_left, obj_top, img_width, img_height)
                strokes = self.get_quickdraw_drawing(
                    self.tensorflow_to_quickdraw_hash[name_of_object])

                if strokes is not None:
                    self.detected_objects.append({
                        "name": name_of_object,
                        "scale": xy_scale,
                        "normalized": xy_normalized,
                        "img_width": img_width,
                        "img_height": img_height,
                        "strokes": strokes
                    })

                # Check for a person to be detected
                if detection[1] == 1:
                    self.person_detected(obj_left, obj_right, obj_top,
                                         obj_bottom, img_width, img_height)

                # draw a red rectangle around detected objects
                cv2.rectangle(img, (int(obj_left), int(obj_top)),
                              (int(obj_right), int(obj_bottom)), (0, 0, 255),
                              thickness=8)

        # Resize the image
        # scaled_width = 1000
        # scaled_height = int(scaled_width * img_height / img_width)
        # img = cv2.resize(img, (scaled_width, scaled_height), interpolation = cv2.INTER_AREA)

        object_info = self.detected_objects

        # Empty the detected objects for the next call
        self.detected_objects = []

        return img, object_info

    def person_detected(self, obj_left, obj_right, obj_top, obj_bottom,
                        img_width, img_height):
        # Remove face from detected objects
        object_data = self.detected_objects.pop()

        # Calculate new top and bottom heights
        face_bottom = obj_bottom - ((obj_bottom - obj_top) * (2 / 3))
        shirt_bottom = obj_bottom - ((obj_bottom - obj_top) * (1 / 3))
        pants_bottom = obj_bottom
        face_top = obj_top
        shirt_top = obj_top + ((obj_bottom - obj_top) * (1 / 3))
        pants_top = obj_top + ((obj_bottom - obj_top) * (2 / 3))

        # Scale
        face_scale = self.get_object_scale(obj_left, obj_right, face_top,
                                           face_bottom, img_width, img_height)
        shirt_scale = self.get_object_scale(obj_left, obj_right, shirt_top,
                                            shirt_bottom, img_width,
                                            img_height)
        pants_scale = self.get_object_scale(obj_left, obj_right, pants_top,
                                            pants_bottom, img_width,
                                            img_height)

        # Normalize
        face_normalize = self.normalize_object_coordinates(
            obj_left, face_top, img_width, img_height)
        shirt_normalize = self.normalize_object_coordinates(
            obj_left, shirt_top, img_width, img_height)
        pants_normalize = self.normalize_object_coordinates(
            obj_left, pants_top, img_width, img_height)

        # Strokes
        face_strokes = object_data["strokes"]
        shirt_strokes = self.get_quickdraw_drawing("t-shirt")
        pants_strokes = self.get_quickdraw_drawing("pants")

        # Add objects
        self.detected_objects.append({
            "name": "face",
            "scale": face_scale,
            "normalized": face_normalize,
            "img_width": img_width,
            "img_height": img_height,
            "strokes": face_strokes
        })
        self.detected_objects.append({
            "name": "t-shirt",
            "scale": shirt_scale,
            "normalized": shirt_normalize,
            "img_width": img_width,
            "img_height": img_height,
            "strokes": shirt_strokes
        })
        self.detected_objects.append({
            "name": "pants",
            "scale": pants_scale,
            "normalized": pants_normalize,
            "img_width": img_width,
            "img_height": img_height,
            "strokes": pants_strokes
        })

    def get_object_scale(self, obj_left, obj_right, obj_top, obj_bottom,
                         img_width, img_height):
        scale_x = (obj_right - obj_left) / img_width
        scale_y = (obj_bottom - obj_top) / img_height
        return [scale_x, scale_y]

    def normalize_object_coordinates(self, obj_left, obj_top, img_width,
                                     img_height):
        x_normalized = obj_left / img_width
        y_normalized = obj_top / img_height
        return [x_normalized, y_normalized]

    '''
    Example Input:
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~
    Lets say we want to normalize the cartoon point of (100,100) ->
        cartoon_img_width = 255
        cartoon_img_height = 255
        img_width = 2000
        img_height = 1300
        canvas_width = 1200
        canvas_height = 700
        img_scale_x = found w/ get_object_scale() = 0.30
        img_scale_y = found w/ get_object_scale() = 0.98
        obj_left = 1000
        obj_top = 20
        obj_bottom = 1300
        obj_right = 1600
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~
        Determining the normalized coordinates for the cartoon drawing:
            - Call normalize_object_coordinates() to get the normalized x and y coordinates for the object: x_norm = 0.5 | y_norm = 0.015
            - x point scaled and normalized = ((cartoon_point_x / cartoon_img_width) * (img_scale_x * img_width)) + (x_norm*canvas_width) -> ((100/255)*.30*1200) + (.5*1200)
            - y point scaled and normalized = ((cartoon_point_y / cartoon_img_height) * (img_scale_y * img_height)) + (y_norm*canvas_height) -> ((100/255)*0.98*700) + (.015*700)

    '''
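
    # A minimal sketch of the arithmetic described in the example above. This
    # helper is not part of the original class; it only illustrates how a cartoon
    # point could be scaled and placed on the canvas, assuming `img_scale` comes
    # from get_object_scale() and `xy_normalized` from normalize_object_coordinates().
    def _scale_cartoon_point(self, cartoon_x, cartoon_y, cartoon_size, img_scale,
                             img_size, xy_normalized, canvas_size):
        # cartoon_size, img_size and canvas_size are (width, height) pairs.
        x = (cartoon_x / cartoon_size[0]) * (img_scale[0] * img_size[0]) \
            + xy_normalized[0] * canvas_size[0]
        y = (cartoon_y / cartoon_size[1]) * (img_scale[1] * img_size[1]) \
            + xy_normalized[1] * canvas_size[1]
        return [x, y]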

    def get_quickdraw_drawing(self, name):
        # Fetch a drawing for the mapped QuickDraw category via the shared QuickDrawData instance
        if name != "":
            cur_object = self.qd.get_drawing(name)
        else:
            print("Not a valid QuickDraw image!")
            return None
        return cur_object.strokes
Code example #22
def visualize_the_matrix(number_of_categories):
    for obj in matrix_info:
        row_source = obj[0]
        for i in range(3):
            column_target = (obj[1])[i]
            the_id = (obj[2])[i]
            most_similar_source_id = (obj[3])[i]

            # Find the most similar target sketches.
            qd = QuickDrawData()
            doodle = qd.get_drawing(column_target)
            while not doodle.key_id == the_id:
                doodle = qd.get_drawing(column_target)
            found = True
            if found:
                DEFAULT_SIZE_WHITE_CHANNEL = (300, 300, 1)
                canvas = np.ones(DEFAULT_SIZE_WHITE_CHANNEL,
                                 dtype="uint8") * 255
                cv2.namedWindow('Window')
                my_stroke_list = doodle.image_data
                for N in my_stroke_list:
                    x_list = N[0]
                    y_list = N[1]
                    for point in range((len(x_list) - 1)):
                        cv2.line(canvas, (x_list[point], y_list[point]),
                                 (x_list[point + 1], y_list[point + 1]),
                                 (0, 0, 0), 2)
                cv2.imwrite(
                    "./png_sketches/trial_pngs" + "source_" + row_source +
                    "_" + str(i) + ".png", canvas)

        for i in range(3):
            column_target = (obj[1])[i]
            the_id = (obj[2])[i]
            most_similar_source_id = (obj[3])[i]

            # Find the most similar target sketches.
            qd = QuickDrawData()
            doodle = qd.get_drawing(row_source)
            while not doodle.key_id == most_similar_source_id:
                doodle = qd.get_drawing(row_source)
            found = True
            if found:
                DEFAULT_SIZE_WHITE_CHANNEL = (300, 300, 1)
                canvas = np.ones(DEFAULT_SIZE_WHITE_CHANNEL,
                                 dtype="uint8") * 255
                cv2.namedWindow('Window')
                my_stroke_list = doodle.image_data
                for N in my_stroke_list:
                    x_list = N[0]
                    y_list = N[1]
                    for point in range((len(x_list) - 1)):
                        cv2.line(canvas, (x_list[point], y_list[point]),
                                 (x_list[point + 1], y_list[point + 1]),
                                 (0, 0, 0), 2)
                cv2.imwrite(
                    "./png_sketches/trial_pngs" + "source_" + row_source +
                    "_" + str(i) + "_sourceimage.png", canvas)

    excel = xlsxwriter.Workbook("./png_sketches/" + "matrix_six_with_targets" +
                                ".xlsx")
    worksheet = excel.add_worksheet()
    worksheet.set_default_row(300)
    worksheet.set_column('A:ZZ', 50)

    row_index = -1
    column_empty = True

    for source_categ in reference_dict.keys():
        row_index += 1
        column_index = -1
        for ordered in range(6):
            column_index += 1
            if ordered < 3:
                pic = "./png_sketches/trial_pngs" + "source_" + source_categ + "_" + str(
                    ordered) + ".png"
                worksheet.insert_image(row_index + 1, column_index + 1, pic)

                if column_empty:
                    worksheet.write(0, column_index + 1,
                                    ("target_similar" + str(ordered + 1)))
            else:
                pic = "./png_sketches/trial_pngs" + "source_" + source_categ + "_" + str(
                    ordered - 3) + "_sourceimage.png"
                worksheet.insert_image(row_index + 1, column_index + 1, pic)

                if column_empty:
                    worksheet.write(0, column_index + 1,
                                    ("source_similar_to" + str(ordered - 2)))

        column_empty = False
        worksheet.write(row_index + 1, 0, source_categ)

    excel.close()
Code example #23
def getTree():
    qd = QuickDrawData()
    return qd.get_drawing("tree")
Code example #24
    for l in lines:
        ax.plot(l["x"], l["y"])
    return fig, ax


ad = AxiDraw()
ad.interactive()
ad.connect()
ad.options.speed_pendown = 5
ad.options.speed_penup = 100
ad.options.units = 2
ad.update()
try:
    # 6435186029887488
    drawing = qd.get_drawing(name="key")

    st.write(f"strokes : {drawing.no_of_strokes}")
    lines = reshape_strokes(drawing, scale=6)
    fig, ax = draw_pic_from_lines(lines)
    fig.savefig("test.png")

    reference_xy = (
        np.random.uniform(low=0, high=250, size=1)[0],
        np.random.uniform(low=0, high=150, size=1)[0],
    )
    # draw_lines(ad,lines,reference_xy=reference_xy)
#     st.pyplot(fig,figsize=(2, 2))
#    st.write(lines)

finally: