def test_flip_data(self):
     """
     In the toy dataset we have only 7 pictures classified
     as "right". This test checks if the flip function is working
     adding new 7 images with label "left" (2) to the dataset.
     """
     aug_data, aug_labels = extend_dataset_flip_axis(self.data, self.labels)
     data_expected_shape = (25 + 7,
                            self.width * self.height * self.channels)
     self.assertEqual(aug_data.shape, data_expected_shape)
     self.assertEqual(aug_labels.shape, (25 + 7, 1))
     self.assertEqual(np.uint8, aug_labels.dtype)
     self.assertEqual(np.uint8, aug_data.dtype)
     one_right_image = 0
     one_left_image = 25
     original_image = get_image(self.data[one_right_image])
     original_image = np.flip(original_image, axis=1)
     flipped_image = get_image(aug_data[one_left_image])
     condition = np.all(np.equal(original_image, flipped_image))
     msg = "images: {} (orignal) and {} (augmentaded) are not equal".format(
         one_right_image, one_left_image)  # noqa
     self.assertTrue(condition, msg=msg)
     only_left = aug_labels[25:25 + 7]
     only_left = only_left.flatten()
     self.assertEqual(np.min(only_left), np.max(only_left))
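The test above pins down the expected contract of extend_dataset_flip_axis. A minimal sketch of a function that would satisfy it, assuming labels have shape (N, 1) and that "right" is encoded as 1 (only the "left" code, 2, is stated in the docstring):

import numpy as np

def extend_dataset_flip_axis_sketch(data, labels,
                                    height=90, width=160, channels=3,
                                    right_label=1, left_label=2):
    # Append a horizontally mirrored copy of every "right" image,
    # relabelled as "left". right_label=1 is an assumption.
    flipped, new_labels = [], []
    for flat, label in zip(data, labels.flatten()):
        if label == right_label:
            image = flat.reshape((height, width, channels))
            mirror = np.flip(image, axis=1)  # mirror left/right, as the test checks
            flipped.append(mirror.reshape(-1))
            new_labels.append(left_label)
    aug_data = np.concatenate((data, np.array(flipped, dtype=np.uint8)))
    aug_labels = np.concatenate(
        (labels, np.array(new_labels, dtype=np.uint8).reshape(-1, 1)))
    return aug_data, aug_labels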
Example #2
 def __init__(self, text, func, topleft):
     self.text = text
     self.func = func
     self.image = util.get_image(util.BUTTON)
     self.pushed_image = util.get_image(util.PUSHED_BUTTON)
     self.rect = self.image.get_rect()
     self.rect.topleft = topleft
     self.pushed = 0
Example #3
 def __init__(self,text,func,topleft):
     self.text = text
     self.func = func
     self.image = util.get_image(util.BUTTON)
     self.pushed_image = util.get_image(util.PUSHED_BUTTON)
     self.rect = self.image.get_rect()
     self.rect.topleft = topleft
     self.pushed = 0
	def get_items(self):
		for story in self.stories:
			# type check: 1 original, 2 repost, 3 comment, 4 dialogue, 5 video, 6 music, 9 mood, 12 like
			self.type=1
			try:
				isquotation = story.find_element_by_class_name('replyBox')
				self.type=2
			except:
				pass
			try:
				isComm = story.find_element_by_class_name('feedComm')
				self.type=3
			except:
				pass
			self.cid = story.get_attribute("id")
			self.tid = story.get_attribute("rel")
			try:
				self.author = story.find_element_by_class_name('userName').find_element_by_tag_name('a').get_attribute('title')
			except:
				print('Unexplained anomaly?')
				print('----------------------------------------------------------------------------------')
				util.add_log('Unexplained anomaly ' + self.cid)
				continue
			self.time, self.qtime = util.get_time(story)
			
			self.content = util.analyse_content_html(self, story)
			
			# print the info
			print('CID:', self.cid)
			print('TID:', self.tid)
			if self.type == 1:
				print('Type: original')
			elif self.type == 2:
				print('Type: repost')
			else:
				print('Type: comment')
			print('Author:', self.author)
			print('Content:', self.content)
			print('Time:', self.time)
			# mood module
			self.mood = util.get_mood(self, story)
			# video module
			util.analyze_video(self, story)
			if self.type != 1:  # repost/comment: quoted author, content, time
				self.qauthor, self.qcontent = util.get_quotation_html(self, story)
				if self.qtime != "":
					print('Original post time:', self.qtime)
			else:
				self.qauthor = ""
				self.qcontent = ""
			util.get_image(self, story)
			self.location, self.longitude, self.latitude = util.get_loc(story)  # location
			util.sql_insert(self)
			print('----------------------------------------------------------------------------------')
Example #5
 def __init__(self, id, topleft):
     self.id = id
     self.row = string.atoi(id[0])
     self.col = id[1]
     self.set_color(util.EMPTY)
     self.depth = 0
     self.rect = self.image.get_rect()
     self.highlight = util.get_image(util.HIGHLIGHT)
     self.target = util.get_image(util.TARGET)
     self.dvonn_marker = util.get_image(util.DVONN)
     self.rect.topleft = topleft
     self.dvonn = 0
Example #6
 def __init__(self, id, topleft):
     self.id = id
     self.row = string.atoi(id[0])
     self.col = id[1]
     self.set_color(util.EMPTY)
     self.depth = 0
     self.rect = self.image.get_rect()
     self.highlight = util.get_image(util.HIGHLIGHT)
     self.target = util.get_image(util.TARGET)
     self.dvonn_marker = util.get_image(util.DVONN)
     self.rect.topleft = topleft
     self.dvonn = 0
Example #7
    def __getitem__(self, index):

        # Get z_test noise
        z_test = self.z_tests[index]
        
        # Get target image
        target_name = self.targets[index]
        target = util.get_image(self.target_dir,
                                target_name)

        # Mask image
        mask_name = self.masks[index]
        mask = util.get_image(self.mask_dir,
                              mask_name)
        
        return z_test, target, mask
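This reads like the __getitem__ of a PyTorch map-style dataset. A hedged usage sketch, where InpaintingDataset is a hypothetical name standing in for whatever torch.utils.data.Dataset subclass defines the method above:

from torch.utils.data import DataLoader

# Hypothetical: InpaintingDataset is assumed to hold the z_tests, targets,
# and masks fields that __getitem__ reads; DataLoader batches the triples.
loader = DataLoader(InpaintingDataset(), batch_size=4, shuffle=True)
z_test, target, mask = next(iter(loader))
masked_target = target * mask  # typical use: compare outputs only where unmasked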
Example #8
 def create_labeltest_data(self):
     i = 0
     imgdatas = []
     imglabels = []
     imgs = glob.glob(
         self.test_path + "/*" +
         self.img_type)  # get a list of images' full names (including path)
     for imgname in imgs:
         midname = imgname[imgname.rindex("/") + 1:-4]  # get image name
         #            img=img_to_array(load_img(self.data_path+"/"+midname))
         img, img_h, img_w = util.get_image(self.test_path + "/" + midname +
                                            self.img_type)
         #            label=img_to_array(load_img(self.label_path+"/"+midname))
         label, y_h, y_w = util.get_test_label(self.test_label_path + "/" +
                                               midname +
                                               self.annot_img_type)
         imgdatas.append(img)
         imglabels.append(label)
         if i % 100 == 0:
             print('Done:{0}/{1} images'.format(i, len(imgs)))
         i += 1
     imgdatas = np.array(imgdatas, dtype=np.uint8)
     imglabels = np.array(imglabels, dtype=np.uint8)
     print("loading done")
     np.save(self.npy_path + '/imgs_labeltest.npy', imgdatas)
     np.save(self.npy_path + '/imgs_mask_test.npy', imglabels)
     print('Saving to npy files done.')
def get_checkerboard_interactive(camera, cols, rows):
    """
    Draws chessboard corners live. Returns when space (" ") is pressed.

    Returns:
    image of the checkerboard,
    grayscale image of the checkerboard,
    list of corners of the checkerboard in the image.
    """
    corners = None
    found_checkerboard = False
    # Get checkerboard interactively
    while True:
        image = util.get_image(camera)
        gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        found_checkerboard, corners = cv2.findChessboardCorners(
            gray_image, (cols, rows), None
        )
        if found_checkerboard:
            cv2.drawChessboardCorners(image, (cols, rows), corners, True)
        cv2.imshow("Checkerboard Calibration", image)
        if cv2.waitKey(1) & 0xFF == ord(" ") and found_checkerboard:
            break
        
    return image, gray_image, corners
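A possible continuation, assuming the standard OpenCV calibration flow and that util.get_image(camera) reads a frame from a cv2.VideoCapture: refine the returned corners and feed them to calibrateCamera (cols=9, rows=6 are placeholder inner-corner counts).

import cv2
import numpy as np

camera = cv2.VideoCapture(0)
image, gray, corners = get_checkerboard_interactive(camera, cols=9, rows=6)

# Sub-pixel refinement of the detected corners.
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
corners = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)

# One (x, y, 0) grid point per inner corner.
objp = np.zeros((9 * 6, 3), np.float32)
objp[:, :2] = np.mgrid[0:9, 0:6].T.reshape(-1, 2)
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(
    [objp], [corners], gray.shape[::-1], None, None)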
Example #10
def transfor_dataset_with_one_channel(data,
                                      transformation,
                                      height=90,
                                      width=160,
                                      channels=3):
    """
    Create a new dataset by applying a function "transformation"
    available at vision.image_manipulation.
    Returns a new dataset and the new shape of its contents.
    The new shape will have only height and width.

    :param transformation: function
    :type transformation: np.array -> np.array
    :param data: dataset
    :type data: np.array
    :param height: image height
    :type height: int
    :param width: image width
    :type width: int
    :param channels: image channels
    :type channels: int
    :return: transformed dataset, shape
    :rtype: np.array, tuple
    """
    new_dataset = []
    new_shape = ()
    for i in range(data.shape[0]):
        image = get_image(data[i], height, width, channels)
        new_image = transformation(image)
        if new_shape == ():
            new_shape = new_image.shape
        new_image = new_image.reshape(get_flat_shape(new_image))
        new_dataset.append(new_image)
    new_dataset = np.array(new_dataset).astype('uint8')
    return new_dataset, new_shape
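A hedged usage sketch: converting the whole dataset to grayscale with OpenCV, assuming images are stored flat as uint8 RGB rows as above.

import cv2

def to_gray(img):
    # single-channel output, so the returned shape has only height and width
    return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)

gray_data, gray_shape = transfor_dataset_with_one_channel(data, to_gray)
# with the defaults, gray_shape == (90, 160)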
Example #11
def get_image_on_keypress(camera):
    image = None
    while True:
        image = util.get_image(camera)
        cv2.imshow("Raw Image", image)
        if cv2.waitKey(1) & 0xFF == ord("q"):
            break
    return image
Example #12
def find_search_bar():
    """
    :return: pair of coordinates to click on search bar.
    """

    # set threshold
    #binarized_img = binarize_image(search_bar, THRESHOLD)
    #sample_text = pytesseract.image_to_string(binarized_img)
    #print('sample text: ' + sample_text)
    #print('sample text similarity: ' + str(get_text_similarity(sample_text)))

    return get_coordinates(util.get_image('search_bar_es.png'))
Example #13
def download_and_resize_image(url, new_width=512, new_height=512, save_path=None,
                              show=False):
    if save_path is None:
        _, save_path = tempfile.mkstemp(suffix=".jpg")
    else:
        os.makedirs(os.path.dirname(save_path), exist_ok=True)
    pil_image = get_image(url, rotate='auto')
    pil_image = ImageOps.fit(pil_image, (new_width, new_height), Image.ANTIALIAS)
    pil_image_rgb = pil_image.convert("RGB")
    pil_image_rgb.save(save_path, format="JPEG", quality=90)
    if show:
        display_image(pil_image)
    return save_path
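A short usage sketch (the URL is a placeholder):

path = download_and_resize_image('https://example.com/photo.jpg',
                                 new_width=256, new_height=256)
print('saved to', path)  # a temp .jpg path, since save_path was omitted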
Example #14
    def __getitem__(self, index):

        # Get input noise
        input_noise = self.input_noises[index]

        # Get target image
        target_name = self.targets[index]
        target = util.get_image(self.target_dir,
                                target_name)

        # Mask image
        mask_name = self.masks[index]
        mask = util.get_image(self.mask_dir,
                              mask_name)

        # Get z-test vec and targets
        z_test_index = self.num_z_test % self.num_targets
        z_test = self.z_tests[z_test_index]
        z_test_target_name = self.z_test_targets[z_test_index]
        z_test_target = util.get_image(self.z_test_target_dir,
                                       z_test_target_name)

        return input_noise, target, mask, z_test_target, z_test
Example #15
def get_checkerboard_interactive(camera, cols, rows):
    corners = None
    found_checkerboard = False
    # Get checkerboard interactively
    while True:
        image = util.get_image(camera)
        gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        found_checkerboard, corners = cv2.findChessboardCorners(
            gray_image, (cols, rows), None)
        if found_checkerboard:
            cv2.drawChessboardCorners(image, (cols, rows), corners, True)
        cv2.imshow("Checkerboard Calibration", image)
        if cv2.waitKey(1) & 0xFF == ord("q"):
            break
    return image, gray_image, corners
Example #16
    def preprocessing(self, lst, phase):
        labels = []
        images = []

        for file in lst:
            person = re.split('[/_.]+', file)[2]

            labels.append(self.labels_dic[person])

            img = util.get_image(file, 112, phase=phase)
            images.append(img)

        labels = np.array(labels)
        images = np.array(images)

        return images, labels
Example #17
def main():

    if len(sys.argv) > 1 and \
       sys.argv[1] == "--about":
        about()
        return

    pygame.init()
    screen = pygame.display.set_mode(LAUNCH_SCREEN_RECT.size,0)
    util.initialize()
    pygame.display.set_caption("DVONN for One")
    icon = pygame.transform.scale(util.get_image("icon"),(32,32))
    pygame.display.set_icon(icon)
    clock = pygame.time.Clock()

    l = Launcher(screen,clock)
    l.run()
Example #18
def main():

    if len(sys.argv) > 1 and \
       sys.argv[1] == "--about":
        about()
        return

    pygame.init()
    screen = pygame.display.set_mode(LAUNCH_SCREEN_RECT.size, 0)
    util.initialize()
    pygame.display.set_caption("DVONN for One")
    icon = pygame.transform.scale(util.get_image("icon"), (32, 32))
    pygame.display.set_icon(icon)
    clock = pygame.time.Clock()

    l = Launcher(screen, clock)
    l.run()
Example #19
    def create_test_data(self):
        i = 0
        f = open(self.npy_path + '/test_image_name.txt', 'w')
        imgdatas = []
        imgs = glob.glob(self.test_path + "/*." + self.img_type)  # get a list of images' full names (including path)
        for imgname in imgs:
            midname = imgname[imgname.rindex("/") + 1:]  # get image name
            img, img_h, img_w = util.get_image(self.test_path + "/" + midname)
            f.write(midname + " " + str(img_h) + " " + str(img_w) + '\n')
#            img=img_to_array(load_img(self.test_path+"/"+midname))
            imgdatas.append(img)
            if i % 100 == 0:
                print('Done:{0}/{1} images'.format(i, len(imgs)))
            i += 1
        imgdatas = np.array(imgdatas, dtype=np.uint8)
        print("loading done")
        np.save(self.npy_path + '/imgs_test.npy', imgdatas)
        print('Saving to npy files done.')
        f.close()
Example #20
def evaluate_inversion(args, inverted_net_path):
    # Load saved inverted net
    device = 'cuda:{}'.format(
        args.gpu_ids[0]) if len(args.gpu_ids) > 0 else 'cpu'
    ckpt_dict = torch.load(inverted_net_path, map_location=device)

    # Build model, load parameters
    model_args = ckpt_dict['model_args']
    inverted_net = models.ResNet18(**model_args)
    inverted_net = nn.DataParallel(inverted_net, args.gpu_ids)
    inverted_net.load_state_dict(ckpt_dict['model_state'])

    # Get test images (CelebA)
    initial_generated_image_dir = '/deep/group/sharonz/generator/z_test_images/'
    initial_generated_image_name = '058004_crop.jpg'
    initial_generated_image = util.get_image(initial_generated_image_dir,
                                             initial_generated_image_name)
    initial_generated_image = initial_generated_image / 255.
    initial_generated_image = initial_generated_image.cuda()

    inverted_noise = inverted_net(initial_generated_image)

    if 'BigGAN' in args.model:
        class_vector = one_hot_from_int(207, batch_size=batch_size)
        class_vector = torch.from_numpy(class_vector)

        num_params = int(''.join(filter(str.isdigit, args.model)))
        generator = BigGAN.from_pretrained(f'biggan-deep-{num_params}')

        generator = generator.to(args.device)
        generated_image = generator.forward(inverted_noise, class_vector,
                                            args.truncation)

    # Get difference btw initial and subsequent generated image
    # Save both

    return
Example #21
def porn_image():
    mode = request.query.mode
    url = request.query.url
    begin = time.time()
    filename = get_image(url)
    if filename == '00000.jpg':
        return 'Image is too large!'
    try:
        image_data = open(filename, 'rb').read()
    except (HTTPError, URLError):
        return "Image can not be found!"

    after_download_image = time.time()
    # Classify.
    try:
        scores = caffe_preprocess_and_compute(
            image_data,
            caffe_transformer=caffe_transformer,
            caffe_net=nsfw_net,
            output_layers=['prob'])
    except IOError:
        return "The URL is not a image file!"

    after_mode = time.time()
    # Scores is the array containing SFW / NSFW image probabilities
    # scores[1] indicates the NSFW probability
    if mode == 'simple':
        return json.dumps({
            'sfw_score': scores[0],
            'nsfw_score': scores[1],
            'ats_image_download': int((after_download_image - begin) * 1000),
            'ats_model_predict': int((after_mode - after_download_image) * 1000)
        })
    else:
        return render(url, scores, (after_download_image - begin) * 1000,
                      (after_mode - after_download_image) * 1000)
Example #22
async def on_message(message):
    if "pump-signal" in str(message.channel) and len(message.attachments) > 0:
        text = util.ocr_image(util.get_image(message.attachments[0].url))
        if (name := util.get_coin_name(text)) is not None:
            if name == "xxx" or name == "XXX":
                print("test name")
                return
            print(f"got coin {name} at {datetime.datetime.now()}")
            symbol = f"{name}BTC"
            if funds := await binance_util.get_remaining_amount(
                    binance_client, "BTC"):

                binance_util.make_market_buy(binance_client, funds, symbol)
            else:
                print("No BTC in wallet")
                return

            if buy := binance_util.get_most_recent_buy_for(
                    binance_client, symbol):
                util.print_for_web_only(float(buy["price"]))
                binance_util.make_multiplier_sell(binance_client, symbol,
                                                  float(buy["price"]), 3)
Example #23
 def __init__(self):
     self.spaces = {}
     tempImgRect = util.get_image(util.EMPTY).get_rect()
     top = INITIAL_TOP
     for row in ROW_RANGE:
         left = INITIAL_LEFT + (INITIAL_LEFT_MULT[row - 1] * tempImgRect.width)
         for col in COL_RANGE:
             if ((row == 5 and col in ("A","B")) or
                 (row == 4 and col == "A") or
                 (row == 2 and col == "K") or
                 (row == 1 and col in ("J","K"))):
                 continue
             
             id = str(row) + col
             s = Space(id, (left,top))
             self.spaces[id] = s
             left = left + tempImgRect.width
         top = top + tempImgRect.height
     self.selected = None
     self.targeted = None
     self.is_valid_id = self.spaces.has_key
     self.all_spaces = self.spaces.values()
Example #24
def dataset_augmentation(data, labels, height=120, width=160, channels=3):
    """
    Augment a dataset by inserting a vertical random shadow and
    by blurring with a Gaussian convolution.

    :param data: dataset
    :type data: np.array
    :param labels: labels
    :type labels: np.array
    :param height: image height
    :type height: int
    :param width: image width
    :type width: int
    :param channels: image channels
    :type channels: int
    :return: extended images, extended labels
    :rtype: np.array, np.array
    """
    all_images = []
    all_labels = []
    size = data.shape[0]
    flat_shape = data.shape[1]
    for i in range(size):
        image = get_image(data[i], height, width, channels)
        new_image = img_mani.random_shadow(image)
        new_image = new_image.reshape(flat_shape)
        new_label = labels[i]
        all_images.append(new_image)
        all_labels.append(new_label)
        new_image = img_mani.gaussian_blur(image)
        new_image = new_image.reshape(flat_shape)
        all_images.append(new_image)
        all_labels.append(new_label)
    all_labels = np.array(all_labels).astype('uint8')
    all_labels = all_labels.reshape((all_labels.shape[0], 1))
    extended_images = np.concatenate((data, all_images), axis=0)
    extended_labels = np.concatenate((labels, all_labels), axis=0)
    return extended_images, extended_labels
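A minimal usage sketch: every input image contributes two augmented copies (one shadowed, one blurred), so the returned dataset is three times the original, assuming labels arrive with shape (N, 1).

ext_data, ext_labels = dataset_augmentation(data, labels)
assert ext_data.shape[0] == 3 * data.shape[0]   # original + shadowed + blurred
assert ext_labels.shape == (3 * labels.shape[0], 1)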
Example #25
    def __init__(self):
        self.spaces = {}
        tempImgRect = util.get_image(util.EMPTY).get_rect()
        top = INITIAL_TOP
        for row in ROW_RANGE:
            left = INITIAL_LEFT + (INITIAL_LEFT_MULT[row - 1] *
                                   tempImgRect.width)
            for col in COL_RANGE:
                if ((row == 5 and col in ("A", "B"))
                        or (row == 4 and col == "A")
                        or (row == 2 and col == "K")
                        or (row == 1 and col in ("J", "K"))):
                    continue

                id = str(row) + col
                s = Space(id, (left, top))
                self.spaces[id] = s
                left = left + tempImgRect.width
            top = top + tempImgRect.height
        self.selected = None
        self.targeted = None
        self.is_valid_id = self.spaces.has_key
        self.all_spaces = self.spaces.values()
Example #26
def create(input_image, output_path):
    """Create an SVG from the input image."""
    conf = config.get()
    im = util.get_image(input_image)
    (width, height) = im.size
    pix = im.load()

    dwg = svgwrite.Drawing(
        output_path,
        profile='full',
        width=width,
        height=height,
        viewBox='0 0 {0} {1}'.format(width * conf['svg']['ratio'], height),
        style='font-family:\'{0}\';font-weight:900;font-size:{1}'.format(
            conf['svg']['font_family'], conf['svg']['font_size']))
    dwg.attribs['xml:space'] = 'preserve'

    for h in range(0, height):
        colors = []
        for w in range(0, width):
            try:
                colors.append(util.to_hex(pix[w, h]))
            except:
                colors.append('#FFFFFF')
        colors = [(len(list(g)), k) for k, g in groupby(colors)]
        x = 0
        for c in colors:
            t = ''
            for l in range(0, c[0]):
                t = t + code.get_char()
            text = dwg.text(t, fill='{0}'.format(c[1]))
            text.attribs['y'] = str(h)
            text.attribs['x'] = x * conf['svg']['ratio']
            dwg.add(text)
            x = x + len(t)
    dwg.save()
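Hedged usage, with placeholder paths; config.get() supplies the font and ratio settings.

create('input.png', 'output.svg')  # renders input.png as colored text in an SVG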
Example #27
 def set_color(self, color):
     self.color = color
     self.image = util.get_image(self.color)
Example #28
    for vector in b:
        counter += 1
        if counter in (286, 631, 1101, 1601, 2001):
            played_voice = 0
        print(counter)
        frames += 1

        # Get images
        # video_cv(freenect.sync_get_video()[0] gets single frame of the kinect
        #    webcam_img = get_image(video_cv(freenect.sync_get_video()[0]), args.cam_width, args.cam_height)
        webcam_img = transform_image(video_cv(freenect.sync_get_video()[0]),
                                     args.cam_width, args.cam_height)
        #        webcam_img = transform_image(video_cv(freenect.sync_get_video()[0]), args.cam_height, args.cam_width)
        #    target_img = get_image(target, args.cam_width, args.cam_height)
        #        webcam_img = video_cv(freenect.sync_get_video()[0])
        skeleton_img = get_image(skeleton, args.cam_width, args.cam_height)
        #        print(skeleton_img.shape)
        if webcam_img is None:
            continue

        # Label images
        webcam_datum = label_img(opWrapper, webcam_img)

        # Check if OpenPose managed to label
        ordinal_score = ('', 0.0, (0, 0, 0))
        if type(webcam_datum.poseKeypoints) == np.ndarray and \
           webcam_datum.poseKeypoints.shape == (1, 25, 3):

            # Scale, transform, normalize, reshape, predict
            coords_vec = make_vector(webcam_datum.poseKeypoints)
            input_vec = np.concatenate([coords_vec, vector]).flatten()
Example #29
 def get_image(self, path):
     if type(path) != str:
         return pygame.image.load(path)
     path = self.get_image_path(path)
     return util.get_image(path)
Example #30
 def clear(self):
     self.background.fill(util.get_image(util.EMPTY).get_at((0, 0)))
Example #31
def main():
    args = get_args()
    BOARD_TAG_SIZE = args["board"]
    ORIGIN_TAG_SIZE = args["origin"]
    calib_file_name = args["file"]

    # offsets to reposition where (0,0) is
    x_offset = 0
    y_offset = 0

    camera = VideoCapture(0)  # Open the camera and set camera params
    if not VideoCapture.isOpened(camera):
        print("Failed to open video capture device")
        exit(0)
    camera.set(CAP_PROP_FRAME_WIDTH, 1280)
    camera.set(CAP_PROP_FRAME_HEIGHT, 720)
    camera.set(CAP_PROP_FPS, 30)

    # Get matrices from file
    calib_file, camera_matrix, dist_coeffs = get_matrices_from_file(
        calib_file_name)
    calib_file.close()
    print("Read from calibration file")
    print("CAMERA MATRIX: {}".format(camera_matrix))
    print("DIST COEFFS: {}".format(dist_coeffs))
    print()
    # Initialize the detector
    detector = apriltag.Detector(searchpath=apriltag._get_demo_searchpath())
    frame = []
    gray = []
    img_points = np.ndarray((4 * NUM_DETECTIONS, 2))
    obj_points = np.ndarray((4 * NUM_DETECTIONS, 3))
    detections = []

    print(
        "The program will now attempt to detect the 4 tags on the axis calibration board"
    )
    print(
        "The four tags should have a red circle on their centers if detected properly."
    )
    print(
        "There will also be a blue circle in the middle of the 4 tags if 4 are detected."
    )
    print("Align the blue dot with the middle of the screen.")
    print("Then, press SPACE.")
    while True:
        frame = get_image(camera)  # take a new picture

        # For weird reasons, anti-distortion measures WORSENED the problem,
        # so they have been removed. If you need to put them back,
        # this is the place for it.

        # Convert undistorted image to grayscale
        gray = cvtColor(frame, COLOR_BGR2GRAY)

        # Use the detector and compute useful values from it
        detections, det_image = detector.detect(gray, return_image=True)

        x_offset = 0
        y_offset = 0

        for i in range(len(detections)):
            d = detections[i]
            id = int(d.tag_id)

            # Add to offsets
            (ctr_x, ctr_y) = d.center
            x_offset += ctr_x
            y_offset += ctr_y

            # Draw onto the frame
            cv2.circle(frame, (int(ctr_x), int(ctr_y)), 5, (0, 0, 255), 3)

        # Draw origin
        if len(detections) == 4:
            cv2.circle(frame, (int(x_offset / 4), int(y_offset / 4)), 5,
                       (255, 0, 0), 3)
        imshow("Calibration board", frame)
        if cv2.waitKey(1) & 0xFF == ord(" "):
            break
        else:
            continue
    cv2.destroyAllWindows()

    # Compute transformation via PnP
    # TODO What's the reasoning for this math?
    # This was from a tutorial somewhere and was directly
    # transcribed from the C++ system.
    for d in detections:
        id = int(d.tag_id)
        img_points[0 + 4 * id] = d.corners[0]
        img_points[1 + 4 * id] = d.corners[1]
        img_points[2 + 4 * id] = d.corners[2]
        img_points[3 + 4 * id] = d.corners[3]
        a = (id % 2) * 2 + 1
        b = -((id // 2) * 2 - 1)
        # 8.5 and 11 are letter paper dimensions!
        x1 = -0.5 * BOARD_TAG_SIZE + a * 8.5 * 0.5
        x2 = 0.5 * BOARD_TAG_SIZE + a * 8.5 * 0.5
        y1 = -0.5 * BOARD_TAG_SIZE + b * 11 * 0.5
        y2 = 0.5 * BOARD_TAG_SIZE + b * 11 * 0.5
        obj_points[0 + 4 * id] = (x1, y1, 0.0)
        obj_points[1 + 4 * id] = (x2, y1, 0.0)
        obj_points[2 + 4 * id] = (x2, y2, 0.0)
        obj_points[3 + 4 * id] = (x1, y2, 0.0)

    # Make transform matrices
    ret, rvec, tvec = solvePnP(obj_points, img_points, camera_matrix,
                               dist_coeffs)
    dst, jac = Rodrigues(rvec)

    # Make origin to camera matrix
    """
    The origin_to_camera matrix looks like this:

    dst[0,0] dst[0,1] dst[0,2] tvec[0,0]
    dst[1,0] dst[1,1] dst[1,2] tvec[1,0]
    dst[2,0] dst[2,1] dst[2,2] tvec[2,0]
    0           0       0       1
    """
    temp = np.append(dst, tvec, axis=1)
    temp = np.append(temp, np.array([[0, 0, 0, 1]]), axis=0)
    origin_to_camera = np.asmatrix(temp)
    camera_to_origin = np.linalg.inv(origin_to_camera)
    print("CAMERA TO ORIGIN: {}".format(camera_to_origin))

    # Generate the location of the camera
    # Seems to use a homogeneous coordinate system (x,y,z,k)
    gen_out = np.array([0, 0, 0, 1]).T
    camera_coordinates = np.matmul(camera_to_origin, gen_out)
    print("CAMERA COORDINATES: {}".format(camera_coordinates))

    # write matrix to file
    calib_file = open(calib_file_name, "a")
    rows, cols = np.shape(camera_to_origin)
    calib_file.write("transform_matrix =")
    write_matrix_to_file(camera_to_origin, calib_file)

    # Compute offsets via new calibration process
    print("Axis calibration was successful!")
    print("We will now center the camera. Place any apriltag where you would \
        like (0,0) to be.")
    print("A blue dot will appear in the center of the tag you placed to help \
        show where (0,0) will be set to.")
    print("When you have your tag in the right place, press SPACE.")

    while True:
        # Locate tag for use as origin
        frame = get_image(camera)
        gray = cvtColor(frame, COLOR_BGR2GRAY)
        detections, det_image = detector.detect(gray, return_image=True)

        if len(detections) == 0:
            continue
        (x_offset, y_offset, _,
         _) = compute_tag_undistorted_pose(camera_matrix, dist_coeffs,
                                           camera_to_origin, detections[0],
                                           ORIGIN_TAG_SIZE)
        cv2.circle(frame, (int(x_offset), int(y_offset)), 5, (255, 0, 0), 3)

        imshow("Origin tag", frame)
        if cv2.waitKey(1) & 0xFF == ord(" "):
            break
        else:
            continue

    # Write offsets
    calib_file.write("offsets = ")
    calib_file.write(str(-1 * x_offset))
    calib_file.write(" ")
    calib_file.write(str(-1 * y_offset))
    calib_file.write("\n")

    calib_file.close()
    print("Finished writing transformation matrix to calibration file")
Example #32
 def get_target_shape(self, target_names):
     target_name = target_names[0]
     target = util.get_image(self.target_dir,
                             target_name)
     return target.shape
Example #33
def main():
    # DEBUGGING AND TIMING VARIABLES
    past_time = -1  # time to start counting. Set just before first picture taken
    num_frames = 0  # number of frames processed

    args = get_args()
    url = args['url']
    SEND_DATA = (url is not None)
    calib_file_name = args['file']
    TAG_SIZE = args['size']
    calib_file = open(calib_file_name)

    camera = VideoCapture(DEVICE_ID)  # Open the camera & set camera params
    if (not VideoCapture.isOpened(camera)):
        print("Failed to open video capture device")
        exit(0)
    camera.set(CAP_PROP_FRAME_WIDTH, 1280)
    camera.set(CAP_PROP_FRAME_HEIGHT, 720)
    camera.set(CAP_PROP_FPS, 30)

    # Get matrices from calibration file
    print("Parsing calibration file " + calib_file_name + "...")
    calib_file, camera_matrix, dist_coeffs = get_matrices_from_file(
        calib_file_name)
    transform_matrix = get_transform_matrix(calib_file)
    x_offset, y_offset = get_offsets_from_file(calib_file)
    calib_file.close()

    assert (camera_matrix.shape == (3, 3))
    assert (dist_coeffs.shape == (1, 5))
    assert (transform_matrix.shape == (4, 4))

    # print("TRANSFORM MATRIX:\n {}\n".format(transform_matrix))
    print("Calibration file parsed successfully.")
    print("Initializing apriltag detector...")

    # make the detector
    detector = apriltag.Detector(searchpath=apriltag._get_demo_searchpath())
    frame = []
    gray = []

    print("Detector initialized")
    print("")
    print("The program will begin sending data in 3 seconds.")
    print("Press CTRL+C to stop this program.")
    time.sleep(3)
    print("Starting detection")

    img_points = np.ndarray((4, 2))  # 4 2D points
    obj_points = np.ndarray((4, 3))  # 4 3D points
    detections = []
    past_time = time.time()
    while True:
        if not camera.isOpened():
            print("Failed to open camera")
            exit(0)

        # take a picture and get detections
        frame = get_image(camera)
        gray = cvtColor(frame, COLOR_BGR2GRAY)

        # Un-distorting an image worsened distortion effects
        # Uncomment this if needed
        # dst = undistort_image(frame, camera_matrix, dist_coeffs)
        # gray = cvtColor(dst, COLOR_BGR2GRAY)
        detections, det_image = detector.detect(gray, return_image=True)
        if len(detections) == 0:
            continue  # Try again if we don't get anything

        print("Found " + str(len(detections)) + " apriltags")

        for d in detections:
            # TODO draw tag - might be better to generalize, because
            # locate_cameras does this too.

            (x, y, z, angle) = compute_tag_undistorted_pose(
                camera_matrix, dist_coeffs, transform_matrix, d, TAG_SIZE)

            # Scale the coordinates, and print for debugging
            # prints Device ID :: tag id :: x y z angle
            # TODO debug offset method - is better, but not perfect.
            x = MULT_FACTOR * (x + x_offset)
            y = MULT_FACTOR * (y + y_offset)
            # print(tag_xyz)
            print("{} :: {} :: {} {} {} {}".format(
                DEVICE_ID, d.tag_id, x, y, z, angle))

            # Send the data to the URL specified.
            # This is usually a URL to the base station.
            if SEND_DATA:
                payload = {
                    "id": d.tag_id,
                    "x": x,
                    "y": y,
                    "orientation": angle
                }
                r = requests.post(url, json=payload)
                status_code = r.status_code
                if status_code // 100 != 2:
                    # Status codes not starting in '2' are usually error codes.
                    print("WARNING: Basestation returned status code {}".format(
                        status_code))
                else:
                    num_frames += 1
                    print("Vision FPS (Vision System outflow): {}".format(
                        num_frames / (time.time() - past_time)))
Example #34
 def clear(self):
     self.background.fill( util.get_image(util.EMPTY).get_at( (0,0)))
Example #35
def main_with_video():
    """
    This is the main entry point of the camera calibration script.

    IMPORTANT!
    Make sure the rows entered is (# of boxes in a column) - 1,
    cols = (# of boxes in a row) - 1

    This has to do with OpenCV's computation of an
    "inner chessboard". This offset will be built in later, but
    that would require changing documentation before an implementation
    has been built.

    Once you've done that, the code will open a program that shows a
    picture of the checkerboard and any calibration lines the
    computer vision library made. Press any key other than
    ESCAPE to continue the calibration process. It is recommended
    to press ESCAPE if the camera does not see the entire board.

    The code will then compute many matrices to calibrate the camera
    and write an output in a file typically named 0.calib.

    """

    args = get_args()

    # Capture vars
    cameras = []  # Which webcam objects you are going to use
    camera_ids = []  # How to find each webcam
    NUM_CAMERAS = args["nums"]
    img_points = []  # Points that the camera perceives (we cannot set these)
    obj_points = []  # Points in physical space (we arbitrarily set these)

    # Constants
    # Prepare object points - taken from tutorials
    objp = np.zeros((args["rows"] * args["cols"], 3), np.float32)
    objp[:, :2] = np.mgrid[0:args["cols"], 0:args["rows"]].T.reshape(-1, 2)

    WIN_SIZE = (11, 11)
    ZERO_ZONE = (-1, -1)
    # TODO TERM_CRITERIA_ITER used in C++, using MAX_ITER OK?
    TERM_CRITERIA = (
        TERM_CRITERIA_EPS + TERM_CRITERIA_MAX_ITER,
        30,
        0.001,
    )  # 0.1 --> 0.001

    # Make sure each camera is accessible, and set it up if it is.
    for i in range(NUM_CAMERAS):
        camera = VideoCapture(i)  # OpenCV's way of knowing what a camera is
        img_points.insert(i, [])  # List of points that camera i sees
        obj_points.insert(i, [])  # List of points we define for camera i
        if VideoCapture.isOpened(camera):
            # Set up the camera
            camera.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
            camera.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)
            camera.set(cv2.CAP_PROP_FPS, 30)
            cameras.append(camera)
            camera_ids.append(i)
            pass
        else:
            print("Failed to open video capture device " + str(i))
            print("Did you check that all cameras are plugged in and ready?")
            quit()

    # Make checkerboard points "Calibration variables"
    # Official docs says checkerboard size should be (cols, rows)
    checkerboard_size = (args["cols"], args["rows"])

    frame = []  # The image (a 2D array of 3-number tuples)
    gray = None  # The same image as Frame, but in grayscale (so 1 number per point)
    corners = []  # Checkerboard corners (object points?)

    # find the checkerboard
    for i in range(len(cameras)):
        frame.append(None)  # placeholder for the camera's image to overwrite
        while True:  # Loop until checkerboard corners are found
            # Take a picture and convert it to grayscale
            frame[i] = get_image(cameras[i])
            gray = cv2.cvtColor(frame[i], COLOR_BGR2GRAY)

            # Find the checkerboard
            flags = (CALIB_CB_ADAPTIVE_THRESH + CALIB_CB_NORMALIZE_IMAGE +
                     CALIB_CB_FAST_CHECK)
            retval, corners = cv2.findChessboardCorners(
                gray, (args["cols"], args["rows"]), None, flags)
            if not retval:
                continue  # No checkerboard, so keep looking

            if len(corners) != 0:
                print("Found checkerboard on " + str(i))
                obj_points[i].append(objp)
                # Find and clean up the intersection points
                corners = cornerSubPix(gray, corners, WIN_SIZE, ZERO_ZONE,
                                       TERM_CRITERIA)
                # Store the points we found after cleanup
                img_points[i].append(corners)
                break
            assert frame[i] is not None
            assert corners is not None
        ret = True
        # Draw corners / lines on the image to show the user
        drawChessboardCorners(frame[i], checkerboard_size, corners, ret)

    # Show the image and prompts to user for confirmation
    print("Press any key other than ESCAPE to continue")
    print("Press ESCAPE to abort calibration. If the camera can't see " +
          "the entire board, you should press ESCAPE and try again.")
    for i in camera_ids:
        print("This is what camera " + str(i) + " sees.")
        imshow(str(i), frame[i])
        key = cv2.waitKey(0)
        if key == 27:
            print("Calibration process aborted.")
            exit(0)

    # Write the calibration file
    print("Beginning calibration file creation process")
    camera_matrix = []
    dist_coeffs = []
    rvecs = []
    tvecs = []
    for i in range(len(cameras)):
        if not cameras[i].isOpened():
            continue
        if len(obj_points[i]) == 0:
            print("No checkerboards detected on camera" + str(i))
            continue

        # TODO check if calib exists first
        # Might not do this and just overwrite each time.
        print("Calibrating camera " + str(camera_ids[i]))
        obj_points[i] = np.asarray(obj_points[i])

        # See OpenCV for matrix dimension docs
        ret_r, mtx_r, dist_r, rvecs_r, tvecs_r = cv2.calibrateCamera(
            obj_points[i],
            img_points[i],
            (len(obj_points), len(obj_points[0])),
            None,
            None,
        )
        print("Writing calibration file")
        calib_file = open(str(10 + camera_ids[i]) + ".calib", "w+")
        calib_file.write("camera_matrix =")
        write_matrix_to_file(mtx_r, calib_file)
        calib_file.write("dist_coeffs =")
        write_matrix_to_file(dist_r, calib_file)
        calib_file.close()

        print("Calibration file written to " + str(camera_ids[i]) + ".calib")
Example #36
    def __init__(self, main, my_id):
        tk.Toplevel.__init__(self, main)
        self.geometry('400x380' + main.get_child_window_position())
        self.iconbitmap(util.get_icon(r.ICO_SIDE))
        self.wm_protocol("WM_DELETE_WINDOW", lambda: self.quit())
        self.minsize(350, 300)
        self.maxsize(main.winfo_width() - 30, main.winfo_height() - 70)
        self.transient(main)
        self.rowconfigure(2, weight=1)
        self.columnconfigure(0, weight=1)
        self.columnconfigure(1, weight=1)
        self.columnconfigure(2, weight=1)
        self.columnconfigure(3, weight=1)
        #self.columnconfigure(4, weight=1)
        self.title('Side-by-side comparison')
        self.bind('<Configure>', main.state_child)

        self.saved = True
        self.main = main
        self.report_list = {}
        self.valid_file_list(self.get_file_list())
        self.output_name = tk.StringVar()

        # Loop of status bar messages
        msg = [
            'Select at least one HTML report to generate the side-by-side',
            'Write a name for the side-by-side file or select auto generate',
            'Only valid HTML reports are presented in the list'
        ]
        self.main.status_bar.set_list(msg)

        # LABEL
        tk.Label(self, text='Select the HTML reports for a side-by-side comparison')\
            .grid(row=1, column=0, columnspan=4, padx=10, pady=5, stick='w')
        tk.Label(self, text='Side-by-side file name')\
            .grid(row=0, column=0, columnspan=2, pady=5, padx=2, stick='e')

        # ENTRY
        tk.Entry(self, textvariable=self.output_name) \
            .grid(row=0, column=2, columnspan=2, pady=5, stick='we')

        # BUTTON
        self.img = util.get_image(
            r.IMG_SPARK)  #tk.PhotoImage(file=r.IMG_SPARK)
        tk.Button(self, imag=self.img,
                  command=lambda: self._get_title(main)) \
            .grid(row=0, column=4, padx=(0, 10), sticky='w')

        # LISTBOX
        self.list_values = tk.StringVar()
        self.list_box = tk.Listbox(self,
                                   listvariable=self.list_values,
                                   height=5,
                                   selectmode=tk.MULTIPLE)
        self.list_box.grid(row=2,
                           column=0,
                           columnspan=4,
                           padx=(10, 0),
                           stick='news')

        # SCROLLBAR
        s = tk.Scrollbar(self, orient=tk.VERTICAL, command=self.list_box.yview)
        s.grid(row=2, column=4, padx=(0, 5), stick='ns')
        self.list_box.configure(yscrollcommand=s.set)

        # BUTTONS
        tk.Button(self, text='Clear selection', command=self.button_clear_selection)\
            .grid(row=3, column=0, padx=(10,2), pady=10, stick='we')
        tk.Button(self, text='Select all', command=self.button_list_select_all)\
            .grid(row=3, column=1, padx=2, pady=10, stick='we')
        tk.Button(self, text='Side by side report', command=self.button_report)\
            .grid(row=3, column=2, padx=2, pady=10, stick='we')
        tk.Button(self,text='Close', command=self.quit)\
            .grid(row=3, column=3, padx=2, pady=10, stick='we')

        # SIDEGRIP
        ttk.Sizegrip(self).grid(row=4, column=4, stick='e')

        keys = sorted(self.report_list.keys())
        self.list_values.set(keys)