Пример #1
0
def pegar_desenho(Escolha="flower"):
    """Fetch a QuickDraw drawing and serialise its strokes to a string.

    Escolha: drawing name, or "random" for a random category.
    Side effects: saves the drawing as "Desenho.png" and prints debug info.
    Returns a space-separated string: stroke count, then for each stroke its
    point count followed by the x y pairs.
    """
    qd = QuickDrawData()  # handle onto the whole available dataset

    # Pick either the requested category or a random one.
    if Escolha == "random":
        desenho = qd.get_drawing(random.choice(qd.drawing_names))
    else:
        desenho = qd.get_drawing(Escolha)

    desenho.image.save("Desenho.png")
    data_array = desenho.image_data  # raw stroke data

    print(data_array)
    print(len(data_array))

    # Header: number of strokes.
    partes = ["{} ".format(len(data_array))]

    for stroke in desenho.strokes:
        # Per-stroke header: number of points.
        partes.append("{} ".format(len(stroke)))
        print(len(stroke))
        # The points themselves.
        partes.extend("{} {} ".format(x, y) for x, y in stroke)

    return "".join(partes)  # assembled output string
Пример #2
0
def test_get_specific_drawing():
    """Fetch the first 'anvil' drawing and pin its known attribute values."""
    qd = QuickDrawData()

    d = qd.get_drawing("anvil", 0)

    # Known metadata of drawing #0 in the anvil group.
    expected_attrs = {
        "name": "anvil",
        "key_id": 5355190515400704,
        "recognized": True,
        "countrycode": "PL",
        "timestamp": 1488368345,
    }
    for attr, value in expected_attrs.items():
        assert getattr(d, attr) == value

    # 1 stroke, 2 x,y coordinate lists, 33 points each.
    assert len(d.image_data) == 1
    assert len(d.image_data[0]) == 2
    assert len(d.image_data[0][0]) == 33
    assert len(d.image_data[0][1]) == 33

    assert d.no_of_strokes == 1
    assert len(d.strokes) == 1
    assert len(d.strokes[0]) == 33
    assert len(d.strokes[0][0]) == 2

    # Both the default and a customised render must be PIL Images.
    assert isinstance(d.image, Image)
    custom = d.get_image(stroke_color=(10, 10, 10),
                         stroke_width=4,
                         bg_color=(200, 200, 200))
    assert isinstance(custom, Image)
Пример #3
0
def find(different_object, the_object, closer, K):
    """Save the K closest (closer=True) or farthest (closer=False) doodles of
    ``different_object`` relative to ``the_object``'s latent point as PNGs
    under ./png_sketches/latent_png/.

    Relies on the module-level mappings ``the_dict`` (object -> latent point),
    ``latent_dictionary`` (object -> [xs, ys]) and ``id_vector``
    (key_id -> coordinate), plus ``dstnc`` for the distance metric.
    """
    the_point = the_dict.get(the_object)
    coordinates = latent_dictionary.get(different_object)

    # Distance from the reference point to every candidate coordinate.
    distance = [
        dstnc.euclidean(the_point, [coordinates[0][i], coordinates[1][i]])
        for i in range(len(coordinates[0]))
    ]

    for M in range(K):
        the_distance = min(distance) if closer else max(distance)
        # list.index replaces the manual linear scan for the matching entry.
        index = distance.index(the_distance)

        coordinate = [coordinates[0][index], coordinates[1][index]]
        the_id = None  # stays None if no id_vector entry matches — TODO confirm callers guarantee a match
        for key in id_vector:
            if id_vector.get(key) == coordinate:
                the_id = key
                break

        qd = QuickDrawData()
        doodle = qd.get_drawing(different_object)
        # BUG FIX: ``found`` used to be assigned only inside the retry loop,
        # so a first-try key_id match raised NameError. It is now always set.
        found = doodle.key_id == the_id
        while not found:
            doodle = qd.get_drawing(different_object)
            found = doodle.key_id == the_id

        if found:
            DEFAULT_SIZE_WHITE_CHANNEL = (1000, 1000, 1)
            canvas = np.ones(DEFAULT_SIZE_WHITE_CHANNEL, dtype="uint8") * 255
            cv2.namedWindow('Window')
            # Each stroke holds parallel x and y lists; draw consecutive
            # point pairs as line segments.
            for stroke in doodle.image_data:
                x_list = stroke[0]
                y_list = stroke[1]
                for point in range(len(x_list) - 1):
                    cv2.line(canvas, (x_list[point], y_list[point]),
                             (x_list[point + 1], y_list[point + 1]), (0, 0, 0),
                             2)
            cv2.imwrite(
                "./png_sketches/latent_png/" + different_object + "_" +
                the_object + "_" + str(closer) + "_" + str(M) + ".png", canvas)

            # Drop the processed candidate so the next iteration picks the
            # next closest/farthest one.
            distance.pop(index)
            coordinates[0].pop(index)
            coordinates[1].pop(index)

        else:
            print("Image cannot be found.")
def test_anvil():
    """Visual smoke test: show an anvil drawing at full and reduced size."""
    qd = QuickDrawData()
    anvil = qd.get_drawing("anvil")
    pipeline = transforms.Compose([transforms.Resize(64), transforms.ToTensor()])
    full_tensor = transforms.ToTensor()(anvil.image)
    small_tensor = pipeline(anvil.image)
    # Tensors are CHW; matplotlib wants HWC, hence the permute.
    for tensor in (full_tensor, small_tensor):
        plt.imshow(tensor.permute(1, 2, 0))
        plt.show()
Пример #5
0
def loadNdjson(tag, scale, pos, index=None, immutable=False):
    """Best-effort load of a QuickDraw drawing's strokes into Status.cache.

    On success (or if already cached) also records [scale, x, y] placement in
    Status.objects unless ``immutable`` is set. Any failure is swallowed on
    purpose — callers treat a missing entry as "not loaded".
    """
    try:
        if tag not in Status.cache:
            qd = QuickDrawData(True)
            anvil = qd.get_drawing(tag, index)
            Status.cache[tag] = list(anvil.strokes)
        # Single placement update replaces the original duplicated
        # assignments (the tag is guaranteed to be cached at this point).
        if not immutable:
            Status.objects[tag] = [scale, pos[0], pos[1]]
    except Exception:
        # Was a bare ``except:``, which also swallowed SystemExit and
        # KeyboardInterrupt; narrowed while keeping the best-effort contract.
        pass
Пример #6
0
    def __init__(self, datasize, classname=None, transforms=None):
        """Initialise the dataset wrapper.

        datasize: drawings to load per class.
        classname: list of class names; defaults to every available name.
        transforms: optional image transform pipeline (stored, not applied here).
        """
        self.datasize = datasize
        self.qd = QuickDrawData(max_drawings=datasize)

        # Fall back to the full set of drawing names when none are given.
        self.classname = self.qd.drawing_names if classname is None else classname
        # NOTE(review): attribute name keeps the original (misspelled) public
        # spelling in case external code reads it.
        self.transfroms = transforms

        self.label_ids = self.getLabelID(self.classname, self.datasize)
        self.img_ids = self.getImageID(self.classname, self.datasize)
Пример #7
0
class Quickdraw(Dataset):
    """Torch-style dataset yielding (PIL image, integer label) pairs pulled
    from the Google QuickDraw dataset, one label per class name."""

    def __init__(self, datasize, classname=None, transforms=None):
        """datasize: drawings per class; classname: class list (defaults to
        all available names); transforms: stored but not applied here."""
        self.datasize = datasize
        self.qd = QuickDrawData(max_drawings=self.datasize)

        if classname is None:
            self.classname = self.qd.drawing_names
        else:
            self.classname = classname
        # NOTE: attribute keeps the original (misspelled) public name.
        self.transfroms = transforms

        self.label_ids = self.getLabelID(self.classname, self.datasize)
        self.img_ids = self.getImageID(self.classname, self.datasize)

    def getLabelID(self, classname, datasize):
        """Return a flat list with each class index repeated datasize times."""
        # Single comprehension replaces the original build-then-flatten loops.
        return [i for i in range(len(classname)) for _ in range(datasize)]

    def getImageID(self, classname, datasize):
        """Download datasize drawings per class; return their PIL images in
        the same order as the labels from getLabelID."""
        return [
            self.qd.get_drawing(name, index=j).image
            for name in classname
            for j in range(datasize)
        ]

    def __getitem__(self, idx):
        """Return the (image, label) pair at position idx."""
        return self.img_ids[idx], self.label_ids[idx]

    def __len__(self):
        """Total number of samples: datasize drawings per class."""
        return self.datasize * len(self.classname)
Пример #8
0
def bad_sketch(keyword: str) -> str:
    """
    Input a noun you want a sketch of, and if Google Quickdraw finds it,
    it will save a random doodle of it to keyword.gif, and return the filepath.
    """
    qd = QuickDrawData()
    if keyword is not None:
        keyword = keyword.lower()
        # QuickDraw has no "person" category; substitute a friendly face.
        if keyword == "person":
            keyword = "smiley face"
    try:
        drawing = qd.get_drawing(keyword)
    except ValueError:
        # Unknown category — fall back to the placeholder image.
        return "blank.png"
    filepath = "keyword.gif"
    drawing.image.save(filepath)
    return filepath
Пример #9
0
class QuickDrawDataset(data.Dataset):
    """Torch dataset serving random QuickDraw drawings, cycling through the
    given classes by index (labels are the class positions)."""

    def __init__(self, root, classes, transform):
        self.classes = classes
        self.labels = torch.arange(len(classes))
        self.transform = transform
        # Pre-load up to 10000 recognized drawings per class into `root`.
        self.qdd = QuickDrawData(recognized=True, max_drawings=10000, cache_dir=root)
        self.qdd.load_drawings(classes)

    def __getitem__(self, idx):
        # idx wraps around the class list; the drawing itself is random.
        pos = idx % len(self.classes)
        img = self.qdd.get_drawing(self.classes[pos]).image
        if self.transform:
            img = self.transform(img)
        return img, self.labels[pos]

    def __len__(self):
        # Nominal epoch length, not the true number of distinct drawings.
        return 10000
Пример #10
0
def get_array_numpy_images():
    """Build and save label + grayscale-image arrays from QuickDraw.

    Saves "images-new.npy" (shape (~, 28, 28), uint8) and "labels-new.npy"
    (matching drawing names). Also prints both shapes.
    """

    # max_drawings -- maximum number of drawings to be loaded into memory,
    # defaults to 1000 (3 minutes)
    qd = QuickDrawData(max_drawings=1000)

    # All available category names.
    drawing_names = qd.drawing_names

    list_of_labels = []

    # Append to plain lists and convert once at the end — simpler and faster
    # than growing an ndarray.
    list_of_np = []

    for label in drawing_names:

        # QuickDrawDataGroup for this category.
        qd_data_group = qd.get_drawing_group(label)

        for drawing in qd_data_group.drawings:

            list_of_labels.append(label)

            # BUG FIX: Image.ANTIALIAS was removed in Pillow 10;
            # Image.LANCZOS is the long-standing equivalent filter.
            PIL_im = drawing.image.convert('L').resize((28, 28),
                                                       Image.LANCZOS)
            nump_im = np.array(PIL_im)
            list_of_np.append(nump_im)

    # Convert to ndarrays in one pass.
    array_of_np = np.asarray(list_of_np)
    array_of_labels = np.asarray(list_of_labels)
    print("IMAGES SHAPE ", array_of_np.shape)
    print("LABELS SHAPE ", array_of_labels.shape)

    np.save("images-new.npy", array_of_np)
    np.save("labels-new.npy", array_of_labels)
Пример #11
0
def test_load_drawings():
    """Loading groups records them; get_drawing lazily loads new ones."""
    qd = QuickDrawData()

    qd.load_drawings(["anvil", "ant"])
    assert qd.loaded_drawings == ["anvil", "ant"]

    # Fetching an unloaded category appends it to the loaded list.
    qd.get_drawing("angel")
    assert qd.loaded_drawings == ["anvil", "ant", "angel"]
Пример #12
0
def test_get_random_drawing():
    """Sanity-check the types and structure of a fetched drawing."""
    qd = QuickDrawData()
    d = qd.get_drawing("anvil", 0)

    assert d.name == "anvil"
    # Metadata fields carry the expected primitive types.
    for attr, typ in (("key_id", int), ("recognized", bool),
                      ("timestamp", int), ("countrycode", str)):
        assert isinstance(getattr(d, attr), typ)

    # Raw data and stroke list agree on the stroke count.
    assert isinstance(d.image_data, list)
    assert len(d.image_data) == d.no_of_strokes

    assert isinstance(d.strokes, list)
    assert len(d.strokes) == d.no_of_strokes
    # Every point is an (x, y) pair.
    for stroke in d.strokes:
        for point in stroke:
            assert len(point) == 2

    assert isinstance(d.image, Image)
    rendered = d.get_image(stroke_color=(10, 10, 10),
                           stroke_width=4,
                           bg_color=(200, 200, 200))
    assert isinstance(rendered, Image)
Пример #13
0
def test_unrecognized_data():
    """recognized=False must yield only unrecognized drawings."""
    qdg = QuickDrawData(recognized=False).get_drawing_group("anvil")
    assert qdg.drawing_count == 1000

    # Single pass over the (possibly lazy) drawings iterable.
    flags = [drawing.recognized for drawing in qdg.drawings]

    assert flags.count(True) == 0
    assert flags.count(False) == qdg.drawing_count
Пример #14
0
def test_search_drawings():
    """Exercise every search criterion, alone and in combination."""
    qd = QuickDrawData()

    # No criteria: the full group of 1000 results.
    results = qd.search_drawings("anvil")
    assert len(results) == 1000

    # recognized flag filtering, both polarities.
    assert all(d.recognized for d in qd.search_drawings("anvil", recognized=True))
    assert all(not d.recognized
               for d in qd.search_drawings("anvil", recognized=False))

    # Country filtering.
    results = qd.search_drawings("anvil", countrycode="US")
    assert all(d.countrycode == "US" for d in results)

    # Take the first US result as the reference for exact-match searches.
    key_id = results[0].key_id
    timestamp = results[0].timestamp

    assert all(d.key_id == key_id
               for d in qd.search_drawings("anvil", key_id=key_id))

    assert all(d.timestamp == timestamp
               for d in qd.search_drawings("anvil", timestamp=timestamp))

    # Compound criteria: recognized AND country code.
    for d in qd.search_drawings("anvil", recognized=True, countrycode="US"):
        assert d.recognized
        assert d.countrycode == "US"
Пример #15
0
    def __init__(self,
                 recognized: Optional[bool] = None,
                 transform: Callable[[QuickDrawing], torch.Tensor] = None):
        """Set up one QuickDrawDataGroup per class name.

        recognized: forwarded to each group (None keeps both kinds).
        transform: drawing -> tensor hook; identity when omitted.
        """
        self.qd = QuickDrawData()
        self.qd_class_names = self.qd.drawing_names

        # One data group per possible class; each loads 1000 examples by
        # default (tunable via max_drawings on QuickDrawDataGroup).
        self.qd_DataGroups = {
            name: QuickDrawDataGroup(name, recognized=recognized)
            for name in self.qd_class_names
        }

        # Identity fallback keeps downstream code branch-free.
        self.transform = (lambda x: x) if transform is None else transform
Пример #16
0
 def get_drawing(self, recognized=True):
     """Pick a random QuickDraw drawing whose ``recognized`` flag matches,
     then replay its strokes into a list of cumulative PIL frames.

     Returns (frames, drawing_data). NOTE(review): loops until a matching
     drawing appears — no retry cap.
     """
     cache_dir = os.path.join(os.path.dirname(__file__), '.quickdrawcache')
     qd = QuickDrawData(max_drawings=1, cache_dir=cache_dir)

     def _random_drawing():
         # One random category, freshly loaded, then its drawing.
         name = random.choice(qd.drawing_names)
         qd.load_drawings([name])
         return qd.get_drawing(name)

     drawing_data = _random_drawing()
     while drawing_data.recognized != recognized:
         drawing_data = _random_drawing()

     # Replay strokes one by one, snapshotting the canvas after each.
     canvas = Image.new('RGB', drawing_data.get_image().size, 'white')
     draw = ImageDraw.Draw(canvas)
     frames = []
     for stroke in drawing_data.strokes:
         draw.line(stroke, 'black')
         frames.append(copy.deepcopy(canvas))
     return frames, drawing_data
Пример #17
0
def find_doodle():
    """Flask view: look up a QuickDraw doodle by the ``label`` and ``index``
    query parameters, save it as a PNG under TEMP_DIR and serve the file.

    Returns a JSON failure payload when the lookup fails for any reason.
    """
    label = request.args.get('label')
    index = request.args.get('index')

    # Fetch the doodle matching the label/index; any failure (unknown label,
    # non-numeric index, download error) yields the JSON failure response.
    try:
        doodle = QuickDrawData().get_drawing(label, int(index))
    except Exception:
        # Was a bare ``except:``, which also swallowed SystemExit and
        # KeyboardInterrupt; narrowed while keeping the same fallback.
        return jsonify({'success': False})

    print(TEMP_DIR)
    filename = '%s_%s.png' % (
        label,
        index,
    )
    doodle.image.save(os.path.join(TEMP_DIR, filename))

    return send_from_directory(TEMP_DIR, filename)
Пример #18
0
    def draw_doodle(self, img, label, d_w0, d_h0, d_w, d_h):
        """Draw a random QuickDraw doodle of ``label`` onto ``img`` and record
        the rescaled stroke points in ``self.d_strokes``.

        img: OpenCV image (drawn on in place).
        label: QuickDraw category name.
        d_w0, d_h0: x/y offset of the doodle's drawing area on the image.
        d_w, d_h: width/height of the drawing area (QuickDraw coords are 0-255).

        NOTE(review): the appended d_x/d_y values look like robot-arm
        (Dobot) coordinates derived from the image position — the /2, +180
        and -80 constants appear to be calibration values; confirm against
        the consumer of ``self.d_strokes``.
        """

        # Random drawing index within the standard 1000-drawing group.
        idx = random.randrange(0, 999)
        self.random_Key = idx
        doodle = QuickDrawData().get_drawing(label, idx)

        # While drawing the doodle, also build the dobot coordinate list.
        if self.d_strokes is None:
            self.d_strokes = []

        # with open(self.OUTPUT_FILE, 'w') as f:
        for stroke in doodle.strokes:
            d_points = []
            # Walk consecutive point pairs; each pair becomes one line segment.
            for i in range(0, len(stroke) - 1):
                # Current point scaled from the 0-255 QuickDraw space into the
                # d_w x d_h area at offset (d_w0, d_h0).
                x = int(d_w * stroke[i][0] / 255) + d_w0
                y = int(d_h * stroke[i][1] / 255) + d_h0

                # Next point (despite the "pre_" name) — segment end.
                pre_x = int(d_w * stroke[i + 1][0] / 255) + d_w0
                pre_y = int(d_h * stroke[i + 1][1] / 255) + d_h0

                cv2.line(img, (pre_x, pre_y), (x, y), (0, 0, 0), 10)

                # Map the image point into the device coordinate system.
                d_x = int((x / self.width * 255) / 2 + 180)
                d_y = int((y / self.height * 255) / 2 - 80)
                d_points.append((d_x, d_y))
                # f.write('%d %d\n' % (d_x, d_y,))

                # Last pair: also emit the final point of the stroke, which
                # the loop above would otherwise skip.
                if i == len(stroke) - 2:
                    x = int(d_w * stroke[i + 1][0] / 255) + d_w0
                    y = int(d_h * stroke[i + 1][1] / 255) + d_h0
                    d_x = int((x / self.width * 255) / 2 + 180)
                    d_y = int((y / self.height * 255) / 2 - 80)
                    d_points.append((d_x, d_y))
                    # f.write('%d %d\n' % (d_x, d_y,))

            # f.write('\n')
            self.d_strokes.append(d_points)
from quickdraw import QuickDrawDataGroup
from quickdraw import QuickDrawData
import sys
import os

qd = QuickDrawData()

## Batch mode over every category, kept for reference:
##for drawing_num, drawing_name in enumerate(qd.drawing_names):
##    if drawing_num < 86:
##        continue
##    print("Saving " + drawing_name + " images")
##    qdg = QuickDrawDataGroup(drawing_name, max_drawings=20)
##    directory_name = 'images/' + drawing_name
##    if not os.path.isdir(os.path.join(os.getcwd(), directory_name)):
##        os.mkdir(directory_name)
##    for drawing_count, drawing in enumerate(qdg.drawings, 1):
##        drawing.image.save(directory_name + "/" + drawing_name + "{:0>4d}.png".format(drawing_count))

# Save 300 "face" drawings as zero-padded PNGs under images/face/.
drawing_name = "face"
print("Saving " + drawing_name + " images")
qdg = QuickDrawDataGroup(drawing_name, max_drawings=300)
directory_name = 'images/' + drawing_name
# BUG FIX: os.mkdir failed when the parent 'images' directory was missing and
# the isdir-then-mkdir pair was racy; makedirs(exist_ok=True) handles both.
os.makedirs(directory_name, exist_ok=True)
for drawing_count, drawing in enumerate(qdg.drawings, 1):
    drawing.image.save(directory_name + "/" + drawing_name +
                       "{:0>4d}.png".format(drawing_count))
Пример #20
0
class Detector:
    """Detect objects in an image with a TensorFlow SSD MobileNet model and
    attach a matching Google QuickDraw sketch (stroke data) to each detection.
    """

    def __init__(self):
        """Load the frozen SSD model, prepare the QuickDraw client and build
        the class-id -> name and name -> QuickDraw-category lookup tables."""
        # Load a model imported from Tensorflow.
        frozen_ssd_model = os.path.join(os.path.dirname(__file__), '../models',
                                        'ssd_mobilenet_model',
                                        'frozen_inference_graph.pb')
        ssd_config = os.path.join(os.path.dirname(__file__), '../models',
                                  'ssd_mobilenet_model',
                                  'ssd_mobilenet_v1_coco_2017_11_17.pbtxt')
        self.tensorflowNet = cv2.dnn.readNetFromTensorflow(
            frozen_ssd_model, ssd_config)

        # List of dictionaries containing detected objects' information.
        self.detected_objects = []

        # QuickDraw client; caches up to 1000 recognized drawings per class.
        cache_path = os.path.join(os.path.dirname(__file__), 'quickdrawcache')
        self.qd = QuickDrawData(recognized=True,
                                max_drawings=1000,
                                cache_dir=cache_path)

        # COCO class-id -> label map (note: some ids, e.g. 12 and 26, are
        # intentionally absent from the COCO label set).
        self.classNames = {
            0: 'background',
            1: 'person',
            2: 'bicycle',
            3: 'car',
            4: 'motorcycle',
            5: 'airplane',
            6: 'bus',
            7: 'train',
            8: 'truck',
            9: 'boat',
            10: 'traffic light',
            11: 'fire hydrant',
            13: 'stop sign',
            14: 'parking meter',
            15: 'bench',
            16: 'bird',
            17: 'cat',
            18: 'dog',
            19: 'horse',
            20: 'sheep',
            21: 'cow',
            22: 'elephant',
            23: 'bear',
            24: 'zebra',
            25: 'giraffe',
            27: 'backpack',
            28: 'umbrella',
            31: 'handbag',
            32: 'tie',
            33: 'suitcase',
            34: 'frisbee',
            35: 'skis',
            36: 'snowboard',
            37: 'sports ball',
            38: 'kite',
            39: 'baseball bat',
            40: 'baseball glove',
            41: 'skateboard',
            42: 'surfboard',
            43: 'tennis racket',
            44: 'bottle',
            46: 'wine glass',
            47: 'cup',
            48: 'fork',
            49: 'knife',
            50: 'spoon',
            51: 'bowl',
            52: 'banana',
            53: 'apple',
            54: 'sandwich',
            55: 'orange',
            56: 'broccoli',
            57: 'carrot',
            58: 'hot dog',
            59: 'pizza',
            60: 'donut',
            61: 'cake',
            62: 'chair',
            63: 'couch',
            64: 'potted plant',
            65: 'bed',
            67: 'dining table',
            70: 'toilet',
            72: 'tv',
            73: 'laptop',
            74: 'mouse',
            75: 'remote',
            76: 'keyboard',
            77: 'cell phone',
            78: 'microwave',
            79: 'oven',
            80: 'toaster',
            81: 'sink',
            82: 'refrigerator',
            84: 'book',
            85: 'clock',
            86: 'vase',
            87: 'scissors',
            88: 'teddy bear',
            89: 'hair drier',
            90: 'toothbrush'
        }
        # COCO label -> QuickDraw category name; empty string means there is
        # no usable QuickDraw category for that label.
        self.tensorflow_to_quickdraw_hash = {
            'background': '',
            'person': 'face',
            'bicycle': 'bicycle',
            'car': 'car',
            'motorcycle': 'motorbike',
            'airplane': 'airplane',
            'bus': 'bus',
            'train': 'train',
            'truck': 'truck',
            'boat': 'sailboat',
            'traffic light': 'traffic light',
            'fire hydrant': 'fire hydrant',
            'stop sign': 'stop sign',
            'parking meter': '',
            'bench': 'bench',
            'bird': 'bird',
            'cat': 'cat',
            'dog': 'dog',
            'horse': 'horse',
            'sheep': 'sheep',
            'cow': 'cow',
            'elephant': 'elephant',
            'bear': 'bear',
            'zebra': 'zebra',
            'giraffe': 'giraffe',
            'backpack': 'backpack',
            'umbrella': 'umbrella',
            'handbag': 'purse',
            'tie': 'bowtie',
            'suitcase': 'suitcase',
            'frisbee': 'circle',
            'skis': '',
            'snowboard': '',
            'sports ball': 'soccer ball',
            'kite': 'scissors',
            'baseball bat': 'baseball bat',
            'baseball glove': '',
            'skateboard': 'skateboard',
            'surfboard': '',
            'tennis racket': 'tennis racquet',
            'bottle': 'wine bottle',
            'wine glass': 'wine glass',
            'cup': 'cup',
            'fork': 'fork',
            'knife': 'knife',
            'spoon': 'spoon',
            'bowl': '',
            'banana': 'banana',
            'apple': 'apple',
            'sandwich': 'sandwich',
            'orange': '',
            'broccoli': 'broccoli',
            'carrot': 'carrot',
            'hot dog': 'hot dog',
            'pizza': 'pizza',
            'donut': 'donut',
            'cake': 'cake',
            'chair': 'chair',
            'couch': 'couch',
            'potted plant': 'house plant',
            'bed': 'bed',
            'dining table': 'table',
            'toilet': 'toilet',
            'tv': 'television',
            'laptop': 'laptop',
            'mouse': 'mouse',
            'remote': 'remote control',
            'keyboard': 'keyboard',
            'cell phone': 'cell phone',
            'microwave': 'microwave',
            'oven': 'oven',
            'toaster': 'toaster',
            'sink': 'sink',
            'refrigerator': '',
            'book': 'book',
            'clock': 'clock',
            'vase': 'vase',
            'scissors': 'scissors',
            'teddy bear': 'teddy-bear',
            'hair drier': '',
            'toothbrush': 'toothbrush'
        }

    def detect_object(self, img):
        """Run the SSD network on ``img``.

        Returns (annotated image, list of detection dicts). Each dict holds
        the object's name, scale, normalized position, image dimensions and
        QuickDraw strokes. Detections below 0.5 confidence are discarded.
        """
        # Input image
        img = cv2.cvtColor(numpy.array(img), cv2.COLOR_BGR2RGB)
        img_height, img_width, channels = img.shape

        # Use the given image as input, which needs to be blob(s).
        self.tensorflowNet.setInput(
            cv2.dnn.blobFromImage(img,
                                  size=(300, 300),
                                  swapRB=True,
                                  crop=False))

        # Runs a forward pass to compute the net output.
        networkOutput = self.tensorflowNet.forward()

        # For mask rcnn model
        # networkOutput, mask = self.tensorflowNet.forward(["detection_out_final", "detection_masks"])
        '''
        Loop over the detected objects
            Detection Indexes:
            0: (not used)
            1: the identifier of the object's class ex. 5 = 'airplane'
            2: the accuracy (score) of the object detected
            3: dist of object from the left
            4: dist of object from the top
            5: dist of object from the right
            6: dist of object from the bottom
        '''

        for detection in networkOutput[0, 0]:

            score = float(detection[2])
            if score > 0.5:

                # Object dimensions
                obj_left = detection[3] * img_width
                obj_top = detection[4] * img_height
                obj_right = detection[5] * img_width
                obj_bottom = detection[6] * img_height

                # Object name, scale, and x,y offet
                name_of_object = self.classNames[detection[1]]
                xy_scale = self.get_object_scale(obj_left, obj_right, obj_top,
                                                 obj_bottom, img_width,
                                                 img_height)
                xy_normalized = self.normalize_object_coordinates(
                    obj_left, obj_top, img_width, img_height)
                strokes = self.get_quickdraw_drawing(
                    self.tensorflow_to_quickdraw_hash[name_of_object])

                # Objects with no QuickDraw mapping (strokes is None) are
                # skipped entirely.
                if strokes is not None:
                    self.detected_objects.append({
                        "name": name_of_object,
                        "scale": xy_scale,
                        "normalized": xy_normalized,
                        "img_width": img_width,
                        "img_height": img_height,
                        "strokes": strokes
                    })

                # Check for a person to be detected
                if detection[1] == 1:
                    self.person_detected(obj_left, obj_right, obj_top,
                                         obj_bottom, img_width, img_height)

                # draw a red rectangle around detected objects
                cv2.rectangle(img, (int(obj_left), int(obj_top)),
                              (int(obj_right), int(obj_bottom)), (0, 0, 255),
                              thickness=8)

        # Resize the image
        # scaled_width = 1000
        # scaled_height = int(scaled_width * img_height / img_width)
        # img = cv2.resize(img, (scaled_width, scaled_height), interpolation = cv2.INTER_AREA)

        object_info = self.detected_objects

        # Empty the detected objects for the next call
        self.detected_objects = []

        return img, object_info

    def person_detected(self, obj_left, obj_right, obj_top, obj_bottom,
                        img_width, img_height):
        """Replace the person's single 'face' detection with three stacked
        parts (face / t-shirt / pants), each one third of the body height.

        NOTE(review): pop() assumes the person's entry was the most recently
        appended detection — confirm this holds when strokes was None for it.
        """
        # Remove face from detected objects
        object_data = self.detected_objects.pop()

        # Split the body box into vertical thirds.
        face_bottom = obj_bottom - ((obj_bottom - obj_top) * (2 / 3))
        shirt_bottom = obj_bottom - ((obj_bottom - obj_top) * (1 / 3))
        pants_bottom = obj_bottom
        face_top = obj_top
        shirt_top = obj_top + ((obj_bottom - obj_top) * (1 / 3))
        pants_top = obj_top + ((obj_bottom - obj_top) * (2 / 3))

        # Scale
        face_scale = self.get_object_scale(obj_left, obj_right, face_top,
                                           face_bottom, img_width, img_height)
        shirt_scale = self.get_object_scale(obj_left, obj_right, shirt_top,
                                            shirt_bottom, img_width,
                                            img_height)
        pants_scale = self.get_object_scale(obj_left, obj_right, pants_top,
                                            pants_bottom, img_width,
                                            img_height)

        # Normalize
        face_normalize = self.normalize_object_coordinates(
            obj_left, face_top, img_width, img_height)
        shirt_normalize = self.normalize_object_coordinates(
            obj_left, shirt_top, img_width, img_height)
        pants_normalize = self.normalize_object_coordinates(
            obj_left, pants_top, img_width, img_height)

        # Strokes: reuse the popped face strokes; fetch clothing sketches.
        face_strokes = object_data["strokes"]
        shirt_strokes = self.get_quickdraw_drawing("t-shirt")
        pants_strokes = self.get_quickdraw_drawing("pants")

        # Add objects
        self.detected_objects.append({
            "name": "face",
            "scale": face_scale,
            "normalized": face_normalize,
            "img_width": img_width,
            "img_height": img_height,
            "strokes": face_strokes
        })
        self.detected_objects.append({
            "name": "t-shirt",
            "scale": shirt_scale,
            "normalized": shirt_normalize,
            "img_width": img_width,
            "img_height": img_height,
            "strokes": shirt_strokes
        })
        self.detected_objects.append({
            "name": "pants",
            "scale": pants_scale,
            "normalized": pants_normalize,
            "img_width": img_width,
            "img_height": img_height,
            "strokes": pants_strokes
        })

    def get_object_scale(self, obj_left, obj_right, obj_top, obj_bottom,
                         img_width, img_height):
        """Return the object's [width, height] as fractions of the image."""
        scale_x = (obj_right - obj_left) / img_width
        scale_y = (obj_bottom - obj_top) / img_height
        return [scale_x, scale_y]

    def normalize_object_coordinates(self, obj_left, obj_top, img_width,
                                     img_height):
        """Return the object's top-left corner as [x, y] image fractions."""
        x_normalized = obj_left / img_width
        y_normalized = obj_top / img_height
        return [x_normalized, y_normalized]

    '''
    Example Input:
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~
    Lets say we want to normalize the cartoon point of (100,100) ->
        cartoon_img_width = 255
        cartoon_img_height = 255
        img_width = 2000
        img_height = 1300
        canvas_width = 1200
        canvas_height = 700
        img_scale_x = found w/ get_object_scale() = 0.30
        img_scale_y = found w/ get_object_scale() = 0.98
        obj_left = 1000
        obj_top = 20
        obj_bottom = 1300
        obj_right = 1600
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~
        Determining the normalized coordinates for the cartoon drawing:
            - Call normalize_object_coordinates() to get the normalized x and y coordinates for the object: x_norm = 0.5 | y_norm = 0.015
            - x point scaled and normalized = ((cartoon_point_x / cartoon_img_width) * (img_scale_x * img_width)) + (x_norm*canvas_width) -> ((100/255)*.30*1200) + (.5*1200)
            - y point scaled and normalized = ((cartoon_point_y / cartoon_img_height) * (img_scale_y * img_height)) + (y_norm*canvas_height) -> ((100/255)*0.98*700) + (.015*700)

    '''

    def get_quickdraw_drawing(self, name):
        """Return the QuickDraw strokes for ``name``, or None when the name
        is empty (no QuickDraw category mapped for the detected class)."""
        # Initialize a QuickDraw object to access the API
        if name != "":
            cur_object = self.qd.get_drawing(name)
        else:
            print("Not a valid QuickDraw image!")
            return None
        return cur_object.strokes
Пример #21
0
    def __init__(self):
        """Load the frozen SSD MobileNet model, prepare the QuickDraw client
        and build the class-id -> name and name -> QuickDraw lookup tables."""
        # Load a model imported from Tensorflow.
        frozen_ssd_model = os.path.join(os.path.dirname(__file__), '../models',
                                        'ssd_mobilenet_model',
                                        'frozen_inference_graph.pb')
        ssd_config = os.path.join(os.path.dirname(__file__), '../models',
                                  'ssd_mobilenet_model',
                                  'ssd_mobilenet_v1_coco_2017_11_17.pbtxt')
        self.tensorflowNet = cv2.dnn.readNetFromTensorflow(
            frozen_ssd_model, ssd_config)

        # List of dictionaries containing detected objects' information.
        self.detected_objects = []

        # QuickDraw client; caches up to 1000 recognized drawings per class.
        cache_path = os.path.join(os.path.dirname(__file__), 'quickdrawcache')
        self.qd = QuickDrawData(recognized=True,
                                max_drawings=1000,
                                cache_dir=cache_path)

        # COCO class-id -> label map (some ids are intentionally absent).
        self.classNames = {
            0: 'background',
            1: 'person',
            2: 'bicycle',
            3: 'car',
            4: 'motorcycle',
            5: 'airplane',
            6: 'bus',
            7: 'train',
            8: 'truck',
            9: 'boat',
            10: 'traffic light',
            11: 'fire hydrant',
            13: 'stop sign',
            14: 'parking meter',
            15: 'bench',
            16: 'bird',
            17: 'cat',
            18: 'dog',
            19: 'horse',
            20: 'sheep',
            21: 'cow',
            22: 'elephant',
            23: 'bear',
            24: 'zebra',
            25: 'giraffe',
            27: 'backpack',
            28: 'umbrella',
            31: 'handbag',
            32: 'tie',
            33: 'suitcase',
            34: 'frisbee',
            35: 'skis',
            36: 'snowboard',
            37: 'sports ball',
            38: 'kite',
            39: 'baseball bat',
            40: 'baseball glove',
            41: 'skateboard',
            42: 'surfboard',
            43: 'tennis racket',
            44: 'bottle',
            46: 'wine glass',
            47: 'cup',
            48: 'fork',
            49: 'knife',
            50: 'spoon',
            51: 'bowl',
            52: 'banana',
            53: 'apple',
            54: 'sandwich',
            55: 'orange',
            56: 'broccoli',
            57: 'carrot',
            58: 'hot dog',
            59: 'pizza',
            60: 'donut',
            61: 'cake',
            62: 'chair',
            63: 'couch',
            64: 'potted plant',
            65: 'bed',
            67: 'dining table',
            70: 'toilet',
            72: 'tv',
            73: 'laptop',
            74: 'mouse',
            75: 'remote',
            76: 'keyboard',
            77: 'cell phone',
            78: 'microwave',
            79: 'oven',
            80: 'toaster',
            81: 'sink',
            82: 'refrigerator',
            84: 'book',
            85: 'clock',
            86: 'vase',
            87: 'scissors',
            88: 'teddy bear',
            89: 'hair drier',
            90: 'toothbrush'
        }
        # COCO label -> QuickDraw category name; empty string means there is
        # no usable QuickDraw category for that label.
        self.tensorflow_to_quickdraw_hash = {
            'background': '',
            'person': 'face',
            'bicycle': 'bicycle',
            'car': 'car',
            'motorcycle': 'motorbike',
            'airplane': 'airplane',
            'bus': 'bus',
            'train': 'train',
            'truck': 'truck',
            'boat': 'sailboat',
            'traffic light': 'traffic light',
            'fire hydrant': 'fire hydrant',
            'stop sign': 'stop sign',
            'parking meter': '',
            'bench': 'bench',
            'bird': 'bird',
            'cat': 'cat',
            'dog': 'dog',
            'horse': 'horse',
            'sheep': 'sheep',
            'cow': 'cow',
            'elephant': 'elephant',
            'bear': 'bear',
            'zebra': 'zebra',
            'giraffe': 'giraffe',
            'backpack': 'backpack',
            'umbrella': 'umbrella',
            'handbag': 'purse',
            'tie': 'bowtie',
            'suitcase': 'suitcase',
            'frisbee': 'circle',
            'skis': '',
            'snowboard': '',
            'sports ball': 'soccer ball',
            'kite': 'scissors',
            'baseball bat': 'baseball bat',
            'baseball glove': '',
            'skateboard': 'skateboard',
            'surfboard': '',
            'tennis racket': 'tennis racquet',
            'bottle': 'wine bottle',
            'wine glass': 'wine glass',
            'cup': 'cup',
            'fork': 'fork',
            'knife': 'knife',
            'spoon': 'spoon',
            'bowl': '',
            'banana': 'banana',
            'apple': 'apple',
            'sandwich': 'sandwich',
            'orange': '',
            'broccoli': 'broccoli',
            'carrot': 'carrot',
            'hot dog': 'hot dog',
            'pizza': 'pizza',
            'donut': 'donut',
            'cake': 'cake',
            'chair': 'chair',
            'couch': 'couch',
            'potted plant': 'house plant',
            'bed': 'bed',
            'dining table': 'table',
            'toilet': 'toilet',
            'tv': 'television',
            'laptop': 'laptop',
            'mouse': 'mouse',
            'remote': 'remote control',
            'keyboard': 'keyboard',
            'cell phone': 'cell phone',
            'microwave': 'microwave',
            'oven': 'oven',
            'toaster': 'toaster',
            'sink': 'sink',
            'refrigerator': '',
            'book': 'book',
            'clock': 'clock',
            'vase': 'vase',
            'scissors': 'scissors',
            'teddy bear': 'teddy-bear',
            'hair drier': '',
            'toothbrush': 'toothbrush'
        }
Пример #22
0
from flask import Flask, render_template, request, jsonify
from flask_sqlalchemy import SQLAlchemy
from quickdraw import QuickDrawData
from sqlalchemy import func, cast, Float, ForeignKey
from sqlalchemy.dialects.postgresql import JSONB

# --- Application-wide setup ---------------------------------------------

config = configparser.ConfigParser()
# Use a context manager so the config-file handle is closed promptly
# (the original `config.read_file(open(...))` leaked the handle until GC).
with open("config.ini") as _config_file:
    config.read_file(_config_file)
z = config["general"].getfloat("confidence_z")  # 1.96 => 0.975 confidence

app = Flask(__name__)
app.config.from_pyfile("app.cfg")
# Fresh random secret per process: existing sessions are invalidated on restart.
app.secret_key = os.urandom(32)

db = SQLAlchemy(app)
# Local on-disk cache of the Quick, Draw! dataset, shared by all handlers.
qd = QuickDrawData(cache_dir=config["general"]["dataset_dir"],
                   print_messages=False)


def uuid_gen() -> str:
    """Return a random UUID4 as a 32-character lowercase hex string."""
    # UUID.hex is exactly str(uuid4()) with the dashes removed.
    return uuid.uuid4().hex


def ci_lower_bound(pos, neg):
    """Lower bound of the Wilson score interval for a pos/neg vote pair.

    Uses the module-level ``z`` (confidence z-value loaded from config.ini).
    Reference: https://www.evanmiller.org/how-not-to-sort-by-average-rating.html
    """
    n = pos + neg
    if n == 0:
        return 0
    center = (pos + z ** 2 / 2) / n
    margin = z * ((pos * neg) / n + z ** 2 / 4) ** 0.5 / n
    return (center - margin) / (1 + z ** 2 / n)

Пример #23
0
from quickdraw import QuickDrawData

if __name__ == '__main__':
    quickdraw = QuickDrawData()
    print(quickdraw.drawing_names)
    n = 10

    def save_drawings(name, directory, prefix, count):
        """Fetch `count` random drawings of `name` and save them as GIFs
        under datasets/<directory>/<prefix>_<name><i>.gif."""
        for i in range(count):
            draw = quickdraw.get_drawing(name)
            draw.image.save(
                "datasets/{}/{}_{}{}.gif".format(directory, prefix, name, i))

    # Same train/test split for both domains (A: bicycle, B: campfire),
    # previously four copy-pasted loops.
    save_drawings("bicycle", "trainA", "train", n * 2)
    save_drawings("bicycle", "testA", "test", n)
    save_drawings("campfire", "trainB", "train", n * 2)
    save_drawings("campfire", "testB", "test", n)
Пример #24
0
def draw_objects(request):
    """Render the drawing page for the first requested object.

    Expects a GET parameter ``objects`` holding a stringified list,
    e.g. "['cat', 'dog']"; only the first entry is passed to the template.

    NOTE(review): dropped the unused ``qd = QuickDrawData()`` local; its
    only effect was initializing the dataset cache as a side effect —
    confirm nothing relied on that.
    """
    objects = request.GET.get('objects')
    objects = objects.replace("'", "")
    # Turn the "[a, b, c]" string back into a list of names
    # (renamed from `list`, which shadowed the builtin).
    object_names = objects.strip("][").split(', ')
    return render(request, 'polls/drawing.html', {'object': object_names[0]})
def main():
    """Train a 2-layer MLP on 10 QuickDraw animal categories.

    Loads im_res x im_res drawings via ``load_data``, flattens each image
    to an im_res**2 vector, optimizes with plain SGD, prints train
    loss/accuracy every 10 iterations and test accuracy after each epoch.

    Removed dead code from the original: unused ``QuickDrawData`` handle,
    ``label_names``/``total_num_labels`` locals and the unused ``Reshape``
    module (``nn.Flatten`` already does the job).
    """
    im_res = 255  # drawings are im_res x im_res => 65025 inputs flattened

    batch_size = 100
    label_list = ["ant", "bear", "bee", "bird", "butterfly", "cat", "cow",
                  "crab", "crocodile", "dog"]
    train_iter, test_iter = load_data(label_list=label_list,
                                      batch_size=batch_size)

    # Model: flatten -> 1000-unit hidden layer -> one logit per label.
    net = nn.Sequential(nn.Flatten(),
                        nn.Linear(im_res * im_res, 1000),
                        nn.ReLU(),
                        nn.Linear(1000, len(label_list)))

    def init_weights(m):
        # Small gaussian init on the linear layers only.
        if type(m) == nn.Linear:
            torch.nn.init.normal_(m.weight, std=0.01)

    net.apply(init_weights)
    device = torch.device("cuda:0")
    net.to(device)
    optimizer = torch.optim.SGD(net.parameters(), lr=0.5)
    loss = nn.CrossEntropyLoss()
    num_epochs = 20

    for epoch in range(num_epochs):
        train_loss_sum = train_num_correct_preds = num_examples = 0
        # Hoisted out of the batch loop; eval_test_acc may switch the net
        # to eval mode between epochs, so re-arm training mode here.
        net.train()
        for i, (X, y) in enumerate(train_iter):
            optimizer.zero_grad()
            X, y = X.to(device), y.to(device)
            y_hat = net(X)
            l = loss(y_hat, y)
            l.backward()
            optimizer.step()
            with torch.no_grad():
                # Running epoch aggregates (loss weighted by batch size).
                train_loss_sum += l * X.shape[0]
                preds = y_hat
                if len(preds.shape) > 1 and preds.shape[1] > 1:
                    preds = preds.argmax(axis=1)
                cmp = preds.type(y.dtype) == y
                train_num_correct_preds += float(cmp.type(y.dtype).sum())
                num_examples += X.shape[0]
            train_loss = train_loss_sum / num_examples
            train_acc = train_num_correct_preds / num_examples
            if (i + 1) % 10 == 0:
                print(f"epoch {epoch + 1}, iteration {i + 1}: train_loss={train_loss:.3f}, train_acc={train_acc:.3f}")

        test_acc = eval_test_acc(net, test_iter, device)
        print(f"epoch {epoch + 1}: test_acc={test_acc:.3f}")
    print(f"Finished training: train_loss={train_loss:.3f}, train_acc={train_acc:.3f}")
from quickdraw import QuickDrawData

qd = QuickDrawData()

anvil = qd.get_drawing("anvil")
# Print every point of the drawing, tagged with its stroke index
# (enumerate replaces the hand-maintained stroke_no counter).
for stroke_no, stroke in enumerate(anvil.strokes):
    for x, y in stroke:
        print("stroke={} x={} y={}".format(stroke_no, x, y))
def visualize_the_matrix(number_of_categories):
    """Render PNGs of the matched sketches and assemble them into a matrix
    spreadsheet: one row per source category, six image columns (three
    most-similar target sketches, then the three source sketches most
    similar to them).

    Relies on the module-level ``matrix_info`` (rows of
    [source, targets, target_ids, similar_source_ids]) and
    ``reference_dict`` for the row ordering.

    NOTE(review): ``number_of_categories`` is unused — kept only for
    interface compatibility; confirm callers before removing.
    """

    def _render_sketch(category, wanted_key_id, out_path):
        """Draw the `category` sketch with the given key_id and save it as PNG.

        Replaces ~30 lines duplicated verbatim in the two loops below; also
        drops the original's no-op ``found = True; if found:`` guard.
        """
        qd = QuickDrawData()
        # get_drawing returns a random sample; keep re-sampling until the
        # drawing with the wanted key_id comes up (as the original did).
        doodle = qd.get_drawing(category)
        while not doodle.key_id == wanted_key_id:
            doodle = qd.get_drawing(category)
        DEFAULT_SIZE_WHITE_CHANNEL = (300, 300, 1)
        canvas = np.ones(DEFAULT_SIZE_WHITE_CHANNEL,
                         dtype="uint8") * 255
        cv2.namedWindow('Window')
        # image_data is a list of strokes: [x-coords, y-coords] per stroke.
        for N in doodle.image_data:
            x_list = N[0]
            y_list = N[1]
            for point in range(len(x_list) - 1):
                cv2.line(canvas, (x_list[point], y_list[point]),
                         (x_list[point + 1], y_list[point + 1]),
                         (0, 0, 0), 2)
        cv2.imwrite(out_path, canvas)

    for obj in matrix_info:
        row_source = obj[0]
        # Three most similar target sketches for this source category.
        for i in range(3):
            _render_sketch(obj[1][i], obj[2][i],
                           "./png_sketches/trial_pngs" + "source_" +
                           row_source + "_" + str(i) + ".png")
        # Three source sketches most similar to those targets.
        for i in range(3):
            _render_sketch(row_source, obj[3][i],
                           "./png_sketches/trial_pngs" + "source_" +
                           row_source + "_" + str(i) + "_sourceimage.png")

    excel = xlsxwriter.Workbook("./png_sketches/" + "matrix_six_with_targets" +
                                ".xlsx")
    worksheet = excel.add_worksheet()
    worksheet.set_default_row(300)   # tall rows so the 300px images fit
    worksheet.set_column('A:ZZ', 50)

    row_index = -1
    column_empty = True  # header row is written only during the first row

    for source_categ in reference_dict.keys():
        row_index += 1
        column_index = -1
        for ordered in range(6):
            column_index += 1
            if ordered < 3:
                # Columns 1-3: target sketches rendered above.
                pic = "./png_sketches/trial_pngs" + "source_" + source_categ + "_" + str(
                    ordered) + ".png"
                worksheet.insert_image(row_index + 1, column_index + 1, pic)

                if column_empty:
                    worksheet.write(0, column_index + 1,
                                    ("target_similar" + str(ordered + 1)))
            else:
                # Columns 4-6: matching source sketches.
                pic = "./png_sketches/trial_pngs" + "source_" + source_categ + "_" + str(
                    ordered - 3) + "_sourceimage.png"
                worksheet.insert_image(row_index + 1, column_index + 1, pic)

                if column_empty:
                    worksheet.write(0, column_index + 1,
                                    ("source_similar_to" + str(ordered - 2)))

        column_empty = False
        worksheet.write(row_index + 1, 0, source_categ)

    excel.close()
Пример #28
0
def test_get_drawing_group():
    """The per-category accessor must hand back a QuickDrawDataGroup."""
    client = QuickDrawData()
    group = client.get_drawing_group("anvil")
    assert isinstance(group, QuickDrawDataGroup)
def getTree():
    """Fetch one random "tree" drawing from the Quick, Draw! dataset."""
    return QuickDrawData().get_drawing("tree")
Пример #30
0
def test_drawing_names():
    """The Quick, Draw! dataset ships exactly 345 drawing categories."""
    names = QuickDrawData().drawing_names
    assert len(names) == 345