Example #1
def apply(image_path, model_name, model_path):
    transformer = ut.ComposeJoint([[transforms.ToTensor(), None],
                                   [transforms.Normalize(*ut.mean_std), None],
                                   [None, ut.ToLong()]])

    # Load best model
    model = model_dict[model_name](n_classes=2).cuda()
    model.load_state_dict(torch.load(model_path))

    # Read Image
    image_raw = imread(image_path)
    # Preview the raw input image (blocks until a key is pressed)
    cv2.imshow("img", image_raw)
    cv2.waitKeyEx()
    collection = list(map(FT.to_pil_image, [image_raw, image_raw]))
    image, _ = transformer(collection)

    batch = {"images": image[None]}

    # Make predictions
    pred_blobs = model.predict(batch, method="blobs").squeeze()
    pred_counts = int(model.predict(batch, method="counts").ravel()[0])

    # Save Output
    save_path = "figures/_blobs_count_{}.png".format(pred_counts)

    imsave(save_path, ut.combine_image_blobs(image_raw, pred_blobs))
    print("| Counts: {}\n| Output saved in: {}".format(pred_counts, save_path))
Example #2
    def __call__(self, pipe):
        self.pipe = pipe
        cv2.namedWindow(self.win_name)
        cv2.moveWindow(self.win_name, 0, 0)
        cv2.waitKeyEx(1)

        while True:
            if self.file_name is not None:
                while self.pipe.poll():  # If there are images available,
                    # read their filenames (in case each frame is saved with a unique name)
                    self.file_name = self.pipe.recv()
                img = cv2.imread(self.file_name)
            else:
                img = None
                while self.pipe.poll():  # If there are images available,
                    img = self.pipe.recv()  # read them all
            if img is not None:
                cv2.imshow(self.win_name, img)
            key = cv2.waitKeyEx(1)
            if key >= 0:
                self.pipe.send(key)

            if False:  # flip to True to log per-frame display latency
                print('\t\tProcessImshow: {:.2f}ms'.format(
                    (datetime.now() - self.lastT).total_seconds() * 1000))
            self.lastT = datetime.now()
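The snippet above is only the consumer side of the pipe. A sketch of a matching producer, assuming the surrounding class (here called ProcessImshow, the name suggested by its debug print) sets win_name, file_name (None to receive raw frames) and lastT in its constructor; all names are illustrative:

import multiprocessing as mp
import numpy as np

if __name__ == '__main__':
    parent_conn, child_conn = mp.Pipe()
    viewer = ProcessImshow()  # hypothetical constructor
    mp.Process(target=viewer, args=(child_conn,), daemon=True).start()
    for _ in range(100):
        # Send frames; the viewer drains the pipe and shows the newest one
        frame = (np.random.rand(240, 320, 3) * 255).astype(np.uint8)
        parent_conn.send(frame)
        if parent_conn.poll():  # key codes come back on the same pipe
            print('key pressed:', parent_conn.recv())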
Example #3
def plates_detection_test():
    import platesdetector
    plts_detector = platesdetector.PlatesDetector()

    IMAGE = 'data/images/my_extras/one_car_plates2.jpg'
    im_name = 'one_car_plates2'
    im_reader = video_reader.Reader(IMAGE)

    ret, frame = im_reader.read()

    image_h, image_w, _ = frame.shape

    if image_h > 1080:
        frame = cv2.resize(frame, None, fx=0.5, fy=0.5)

    plates_detection_successful, plates_box, confidence = \
        plts_detector.detect_plates(frame)

    print(plates_box)

    plates_crop = crop(frame, plates_box)

    cv2.imshow('[INFO, platesdetector]: plates_image', plates_crop)

    cv2.waitKeyEx(0)

    i = 0
    cv2.imwrite(im_name + str(i) + '.jpg', plates_crop)
Example #4
def verifyImage(imgPath, showBbox, labels, logs):
    if (not os.path.isfile(imgPath)):
        logs.append("Missing cv2Image {}".format(os.path.abspath(imgPath)))
        return

    parentDir = os.path.abspath(os.path.join(imgPath, os.pardir))
    imgId = os.path.splitext(os.path.basename(imgPath))[0]
    annotationFilePath = os.path.join(parentDir, imgId + ".txt")

    if (not os.path.isfile(annotationFilePath)):
        logs.append("Missing annotation file {}".format(
            os.path.abspath(annotationFilePath)))
        return

    with open(annotationFilePath) as annotationFile:
        lines = annotationFile.read().splitlines()

        # Regardless of whether bboxes will be shown, try to read the image to see if it's corrupt
        cv2Image = cv2.imread(imgPath)
        if (np.shape(cv2Image) == ()):
            logs.append("Can not read cv2Image {}".format(imgPath))
            return

        for line in lines:
            line = line.split(" ")
            labelIdx = int(line[0])
            labelText = labels[labelIdx]
            imHeight = np.shape(cv2Image)[0]
            imWidth = np.shape(cv2Image)[1]
            centerX = int(float(line[1]) * imWidth)
            centerY = int(float(line[2]) * imHeight)
            w = int(float(line[3]) * imWidth)
            h = int(float(line[4]) * imHeight)
            x = int(centerX - w / 2)
            y = int(centerY - h / 2)

            try:
                bboxImage = cv2Image[y:y + h, x:x + w]
                if (bboxImage.shape == (0, 0, 3)):
                    raise ValueError("Invalid bounding box")
                if showBbox:
                    cv2.putText(cv2Image,
                                labelText, (x, y),
                                cv2.FONT_HERSHEY_SIMPLEX,
                                1.0, (0, 0, 255),
                                lineType=cv2.LINE_AA)
                    cv2.rectangle(cv2Image, (x, y), (x + w, y + h),
                                  (0, 255, 0), 1)

            except Exception:
                logs.append(
                    "Invalid bounding box: {} {} {} {} in file {}".format(
                        x, y, w, h, annotationFilePath))

        if (showBbox):
            title = os.path.basename(imgPath) + "  from  " + os.path.basename(
                annotationFilePath)
            cv2.imshow(title, cv2Image)
            cv2.waitKeyEx()
            cv2.destroyWindow(title)
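The annotation files parsed above follow the YOLO text format: one object per line as "class_idx center_x center_y width height", with all coordinates normalized by the image size. A sample annotation file (values illustrative):

0 0.50 0.50 0.20 0.30
2 0.25 0.40 0.10 0.15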
Example #5
def main():
    aug = get_aug([
        A.HorizontalFlip(p=.5),
        A.RandomSizedBBoxSafeCrop(width=448,
                                  height=448,
                                  erosion_rate=0,
                                  interpolation=cv2.INTER_CUBIC),
        A.RGBShift(p=.5),
        A.Blur(blur_limit=5, p=0.5),
        A.RandomBrightnessContrast(p=0.5),
        A.CLAHE(p=0.5),
    ])

    voc = VOCDataset(cfg.DATASET_PATH,
                     classes_list=cfg.CLASSES,
                     image_set='train',
                     transforms=aug)

    for i in range(1000):
        image, out = voc[i]

        boxes = post_processing(out)
        im_size = image.shape[0]
        for det in boxes:
            pt1 = (det[0] - det[2] / 2, det[1] - det[3] / 2)
            pt2 = (det[0] + det[2] / 2, det[1] + det[3] / 2)
            pt1 = (int(pt1[0] * im_size), int(pt1[1] * im_size))
            pt2 = (int(pt2[0] * im_size), int(pt2[1] * im_size))

            cv2.rectangle(image, pt1, pt2, (255, 33, 44))

        image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
        cv2.imshow("test", image)
        cv2.waitKeyEx(-1)
Example #6
def test_lmdb(dataroot, index=1):
    env = lmdb.open(dataroot,
                    readonly=True,
                    lock=False,
                    readahead=False,
                    meminit=False)
    meta_info = pickle.load(open(os.path.join(dataroot, 'meta_info.pkl'),
                                 "rb"))
    print('Name: ', meta_info['name'])
    print('Resolution: ', meta_info['resolution'])
    print('# keys: ', len(meta_info['keys']))

    # read one image
    key = meta_info['keys'][index]
    print('Reading {} for test.'.format(key))
    with env.begin(write=False) as txn:
        buf = txn.get(key.encode('ascii'))
    img_flat = np.frombuffer(buf, dtype=np.uint8)

    C, H, W = [int(s) for s in meta_info['resolution'][index].split('_')]
    img = img_flat.reshape(H, W, C)

    cv2.namedWindow('Test')
    cv2.imshow('Test', img)
    cv2.waitKeyEx()
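For reference, a minimal sketch of writing an entry in the layout test_lmdb() expects: the image stored as raw uint8 bytes under an ASCII key, plus a meta_info.pkl whose resolution string is 'C_H_W'. The function name and map_size are illustrative:

import os
import pickle

import lmdb
import numpy as np


def write_lmdb_entry(dataroot, key, img):
    # img: H x W x C uint8 array
    env = lmdb.open(dataroot, map_size=10 * 1024 ** 3)
    with env.begin(write=True) as txn:
        txn.put(key.encode('ascii'), img.tobytes())
    H, W, C = img.shape
    meta_info = {'name': 'demo', 'keys': [key],
                 'resolution': ['{}_{}_{}'.format(C, H, W)]}
    with open(os.path.join(dataroot, 'meta_info.pkl'), 'wb') as f:
        pickle.dump(meta_info, f)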
Example #7
def main(f, canny):
    img = cv2.imread(f)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    blurred = cv2.GaussianBlur(gray, (5, 5), 0)
    blurred = cv2.GaussianBlur(blurred, (7, 7), 0)

    cv2.imshow("grey scale", gray)
    cv2.imwrite("gray.png", gray)
    cv2.imshow("blurred", blurred)
    cv2.imwrite("blur.png", blurred)
    coeff = int((blurred.max() - blurred.min()) / 100)
    coeff = 1 if coeff == 1 else 3
    outline = cv2.Canny(blurred, 0, int(canny) * coeff)
    outline = cv2.GaussianBlur(outline, (3, 3), 0)
    cv2.imshow("The edges", outline)
    cv2.imwrite("edges.png", outline)
    #(_, cnts, _) = cv2.findContours(outline, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    #version4
    (cnts, _) = cv2.findContours(outline, cv2.RETR_EXTERNAL,
                                 cv2.CHAIN_APPROX_SIMPLE)
    cv2.drawContours(img, cnts, -1, (0, 255, 0), 2)
    cv2.putText(img, str(len(cnts)), (30, 30), cv2.FONT_HERSHEY_SIMPLEX, 1,
                (255, 0, 0), 2)
    cv2.imwrite("r.png", img)
    cv2.imshow("Result", img)
    print("%i blobs" % len(cnts))
    sleep(2)
    #cv2.waitKey(0)
    cv2.waitKeyEx(4000)
Example #8
def showAnnotation(imagePath, annotationFile):
    p = PascalVocXmlParser(annotationFile)
    im = cv2.imread(imagePath)
    boxes = p.get_boxes()
    for x1, y1, x2, y2 in boxes:
        cv2.rectangle(im, (x1, y1), (x2, y2), (0, 200, 0))
    cv2.imshow("", im)
    cv2.waitKeyEx()
Example #9
def display(img, name='Image', delay=0):
    cv2.namedWindow(name, cv2.WINDOW_NORMAL)
    cv2.resizeWindow(name, int(img.shape[1] * 1.3), int(img.shape[0] * 1.3))
    cv2.imshow(name, img)
    code = cv2.waitKeyEx(delay)
    if code == 32:  # Space: pause until the next key press
        code = cv2.waitKeyEx(0)
    if code == 113:  # 'q': quit
        exit()
Example #10
    def imshow(self, im0s, ret):
        # print(im0s.shape, ret.shape)
        for *xyxy, conf, cls in ret:
            label = f'{self.names[int(cls)]} {conf:.2f}'
            plot_one_box(xyxy,
                         im0s,
                         label=label,
                         color=self.colors[int(cls)],
                         line_thickness=3)
        cv2.imshow('xx.png', im0s)
        cv2.waitKeyEx(0)
Example #11
def detect_image(img_name):
    image = cv2.imread(img_name)
    assert image is not None  # cv2.imread returns None on failure
    # Create a FaceDetector
    face_detector = pydetector.FaceDetector(model_path='./models', num_thread=1, scale=0.25)
    # Detect
    boxes = face_detector.detect(img_bgr=image)
    if len(boxes) > 0:
        for item in boxes:
            cv2.rectangle(image, (item.x, item.y), (item.x + item.width, item.y + item.height), (0, 255, 255), 2)
    cv2.imshow('face-detect', image)
    cv2.waitKeyEx(0)
Example #12
    def teaching_backward(self):
        ''' This function shows the images and demo videos that demonstrate a backward walker.
        It also tests whether the subject understands the key that needs to be pressed when seeing this stimulus.'''

        self.show_image('Backward_before.png')
        cv2.waitKeyEx(0)

        self.play_two_demovids('RevMirror60demo.mp4')
        self.play_two_demovids('RevOrig60demo.mp4')

        self.checks_which_key('Backward_after.png', [self.Backwardkey])
        self.show_image('well_done.png')
        cv2.waitKey(500)
Example #13
    def read_pressed_key(self):
        ''' Key listener. '''
        # To use arrow keys we have to call cv2.waitKeyEx() on Windows; otherwise the arrow keys are not recognizable.
        if self.platform == 'Windows':
            pressed_key = cv2.waitKeyEx(self.DELAY)
        elif self.platform == 'Linux':
            # Do not use cv2.waitKey(): waitKeyEx(self.DELAY) & 0xFF is the same as waitKey(self.DELAY),
            # and waitKey() exposes far fewer keys than waitKeyEx().
            pressed_key = cv2.waitKeyEx(self.DELAY)
        else:  # other systems
            pressed_key = cv2.waitKey(self.DELAY)
        # print('pressed_key=', pressed_key)  # -1 if no key is pressed
        return pressed_key
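The raw codes cv2.waitKeyEx() returns for arrow keys are platform-specific: Windows virtual-key codes shifted left 16 bits, X11 keysyms on Linux (the Windows values 2424832 and 2555904 also appear in Examples #22 and #31 below). A lookup-table sketch covering the common values:

ARROW_KEYS = {
    'left':  {2424832, 65361},   # 0x250000 on Windows, XK_Left on Linux
    'up':    {2490368, 65362},
    'right': {2555904, 65363},
    'down':  {2621440, 65364},
}

def arrow_of(key_code):
    # Returns 'left'/'up'/'right'/'down', or None for any other key
    for name, codes in ARROW_KEYS.items():
        if key_code in codes:
            return name
    return None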
Example #14
def get_custom_pic(font, text):
    print(font)
    font = ImageFont.truetype(font, text_size)
    img = np.zeros((img_height, img_width), np.uint8)
    p_image = Image.fromarray(img, mode='L')
    draw = ImageDraw.Draw(p_image)
    draw.text(xy=(5, 2), text=text, font=font, fill=255)

    cv2.imshow("img", np.array(p_image))
    cv2.waitKeyEx(0)

    image = np.array(p_image)
    return image
Example #15
def getImagemComid():
    caminhos = [os.path.join('fotos', f) for f in os.listdir('fotos')]
    #print(caminhos)
    faces = []
    ids = []
    for caminhoImagem in caminhos:
        imagemFace = cv2.cvtColor(cv2.imread(caminhoImagem),
                                  cv2.COLOR_BGR2GRAY)
        id = int(os.path.split(caminhoImagem)[-1].split('.')[1])
        ids.append(id)
        faces.append(imagemFace)
        cv2.imshow("face", imagemFace)
        cv2.waitKeyEx(10)

    return np.array(ids), faces
Example #16
def getImagesWithId(dirPath):
    imagePaths = [os.path.join(dirPath, f) for f in os.listdir(dirPath)]
    faces = []
    IDs = []

    for imagePath in imagePaths:
        faceImage = Image.open(imagePath).convert('L')
        faceNumpy = np.array(faceImage, 'uint8')
        ID = int(os.path.split(imagePath)[-1].split('.')[1])
        faces.append(faceNumpy)
        print(ID)
        IDs.append(ID)
        cv2.imshow("Training", faceNumpy)
        cv2.waitKeyEx(10)
    return np.array(IDs), faces
Example #17
    def run_test(self):
        ''' This function allows subjects to respond to stimuli with keys without saving any data,
        so that the subjects may get used to the experiment.

        It presents 10 different videos, shuffled each time.
        '''
        shuffled_test_samples = shuffle(self.videos_df.head(10))

        for i, sample in shuffled_test_samples.iterrows():

            shuffled_filename = sample['filename']

            cap = cv2.VideoCapture(self.video_path + shuffled_filename)

            if not cap.isOpened():
                print("Error opening video stream or file")
            else:
                framerate = cap.get(cv2.CAP_PROP_FPS)

            # Read until video is completed
            line2 = self.make_fixationpoint(np.zeros((600, 800, 3)))
            cv2.imshow('cap', line2)
            cv2.waitKeyEx(500)
            while (cap.isOpened()):
                # Capture frame-by-frame
                ret, frame = cap.read()
                if ret:
                    capname = "cap"

                    line2 = self.make_fixationpoint(frame)

                    #Display the resulting frame
                    cv2.namedWindow(capname, cv2.WND_PROP_FULLSCREEN)
                    cv2.setWindowProperty(capname, cv2.WND_PROP_FULLSCREEN,
                                          cv2.WINDOW_FULLSCREEN)
                    #cv2.imshow(capname, frame)
                    cv2.imshow(capname, line2)

                    if cv2.waitKeyEx(int(1000 / framerate)) != -1:
                        time.sleep(1 / framerate)
                else:
                    # Break the loop once the video is over
                    break
            cap.release()
            k = -1
            if k != self.Forwardkey and k != self.Backwardkey:
                k = self.checks_which_key('blank_image.png',
                                          [self.Forwardkey, self.Backwardkey])
Example #18
def play(key):
    global snakeList, img, treatX1, treatY1, score
    dir = ' '
    dead = 0
    # the while loop goes here
    img = np.zeros((300, 300, 3), np.uint8)
    cv2.rectangle(img, (treatX1, treatY1), (treatX1 + 15, treatY1 + 15),
                  (0, 255, 255), -1)
    for i in range(len(snakeList)):
        cv2.rectangle(img, (snakeList[i][0], snakeList[i][1]),
                      (snakeList[i][0] + 15, snakeList[i][1] + 15),
                      (255, 255, 255), -1)
    if (snakeList[0][0] < 0 or snakeList[0][0] + 15 > 300
            or snakeList[0][1] < 0 or snakeList[0][1] + 15 > 300):
        cv2.destroyAllWindows()
        print("game over")
        print("Final score = {}".format(score))
        dead = 1
    elif key == 0:
        print("up")
        dir = 'up'
    elif key == 1:
        print("down")
        dir = 'down'
    elif key == 2:
        print("left")
        dir = 'left'
    elif key == 3:
        print("right")
        dir = 'right'
    if snakeList[0][0] == treatX1 and snakeList[0][1] == treatY1:
        score += 1
        print("score = {}".format(score))
        snakeList.append([treatX1, treatY1])
        treatSpawn()
    if dir != ' ':
        move(dir)
    if len(snakeList) > 1:
        if snakeList[0] in snakeList[1:]:
            cv2.destroyAllWindows()
            print("game over")
            print("final score = {}".format(score))
            dead = 1
    cv2.imshow("image", img)
    cv2.waitKeyEx(1)
    time.sleep(0.5)
    if dead:
        return score
Example #19
def video():
    config.batch_size = 1
    ig = tf.placeholder(shape=(1, config.Config['min_dim'], config.Config['min_dim'], 3), dtype=tf.float32)
    pred_loc, pred_confs, vbs = inception_500_dsl.inception_v2_ssd(ig, config)
    box, score, pp = predict(ig, pred_loc, pred_confs, vbs, config.Config)
    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        saver.restore(sess, '/home/dsl/all_check/face_detect/voc-aug/model.ckpt-91518')
        cap = cv2.VideoCapture('/media/dsl/20d6b919-92e1-4489-b2be-a092290668e4/face_detect/jijing.mp4')
        fourcc = cv2.VideoWriter_fourcc(*'MJPG')

        out = cv2.VideoWriter('output.mpg', fourcc, 20.0, (1920, 1080))
        #cap = cv2.VideoCapture(0)
        cap.set(3, 320 * 3)  # CAP_PROP_FRAME_WIDTH
        cap.set(4, 320 * 3)  # CAP_PROP_FRAME_HEIGHT
        t1 = time.time()
        while True:
            ret, frame = cap.read()
            if not ret:
                break
            if time.time() - t1 > 240:
                break

            img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            org, window, scale, padding, crop = utils.resize_image(img, min_dim=config.Config['min_dim'], max_dim=config.Config['min_dim'])

            img = (org / 255.0 - 0.5) * 2
            img = np.expand_dims(img, axis=0)
            t = time.time()

            bx, sc, p = sess.run([box, score, pp], feed_dict={ig: img})

            fps = int(1/(time.time() - t)*10)/10.0

            cv2.putText(frame,  'fps:' + str(fps), (10, 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 255), lineType=cv2.LINE_AA)

            bxx = []
            cls = []
            scores = []
            for s in range(len(p)):
                if sc[s] > 0.4:
                    bxx.append(bx[s])
                    cls.append(p[s])
                    scores.append(sc[s])
            if len(bxx) > 0:
                finbox = utils.revert_image(scale, padding, config.Config['min_dim'], np.asarray(bxx))
                for ix, s in enumerate(finbox):
                    cv2.rectangle(frame, pt1=(s[0], s[1]), pt2=(s[2], s[3]), color=(0, 255, 0), thickness=2)
                    cv2.putText(frame, config.VOC_CLASSES[cls[ix]] + '_' + str(scores[ix])[0:4], (s[0], s[1]),
                                cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 255), lineType=cv2.LINE_AA)
            out.write(frame)
            cv2.imshow('fram',frame)

            if cv2.waitKeyEx(1) & 0xFF == ord('q'):
                break
        print('ss')
        out.release()
        cap.release()
        cv2.destroyAllWindows()
Example #20
    def determine_keys(self):
        ''' Function that allows the keys to be determined for the experiment.
        The key responses are the ones that will be used in the experiment to indicate forward or backward.
        This function returns a tuple of 3 keys: Forwardkey, Backwardkey, and Continuekey.
        When calling the function, the tuple needs to be unpacked.'''

        # Determining the 'forward' key
        self.show_image('indicate_forward.png')
        self.Forwardkey = cv2.waitKeyEx(0)

        #Determining the 'backward' Key
        self.show_image('indicate_backward.png')
        self.Backwardkey = cv2.waitKeyEx(0)

        #Determining the 'continue' Key
        self.show_image('indicate_continue.png')
        self.Continuekey = cv2.waitKeyEx(0)
        return self.Forwardkey, self.Backwardkey, self.Continuekey
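Per the docstring, the returned tuple is unpacked at the call site; exp below stands for a hypothetical instance of the surrounding class:

forward_key, backward_key, continue_key = exp.determine_keys()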
Example #21
    def show(self):
        # print(self._led_data)
        if len(self._led_data) != self.N_LEDS:
            print('ERROR length of leds has changed to', len(self._led_data))
        led_ix = 0
        for s in STRIPS_DEF:
            x0 = s[1] - XMIN
            y0 = s[2] - YMIN
            xm = s[3]
            ym = s[4]
            for i in range(s[0]):
                ix = led_ix + i
                if ix >= self.N_LEDS:
                    break
                x = x0 + xm * i
                y = y0 + ym * i
                # ~ print('x,y', x, y)
                adjusted_colour = _adjust_colour(self._led_data[ix], self.brightness)
                cv2.circle(self.IMAGE,
                           (self.LED_W * x + LED_R, self.LED_W * y + LED_R),
                           LED_R, adjusted_colour, -1)
            led_ix = ix
        cv2.imshow('neopixel', self.IMAGE)
        key = cv2.waitKeyEx(1)
        if key != -1:  # it seems to need about 40 ms before anything appears on the screen
            print('*** Interrupted by keyboard *** character code=', key)
            sys.exit(99)
Example #22
def draw_trajectory(lines):
    pad = np.zeros((70 * scale, 70 * scale), dtype=np.uint8)
    pad = cv2.cvtColor(pad, cv2.COLOR_GRAY2BGR)
    color_ls = []
    for index in range(256):
        color_ls.append(cm.jet(index)[:3])
    color_ls = np.flip(np.asarray(color_ls), axis=1) * 255

    # 'angle', 'length', 'speed', 'distance', 'time', 'start_time'

    data_to_plot = 'speed'
    inds = np.digitize(lines[data_to_plot].values / (lines[data_to_plot].max() / 255),
                       np.arange(256)) - 1

    for index, (_, row) in enumerate(lines.iterrows()):
        LinkPoints(pad, row['point1'], row['point2'], BGR=tuple(color_ls[inds[index]].tolist()))

    overlay = pad.copy()

    cv2.circle(overlay, (int(pad.shape[0]/2), int(pad.shape[1]/2)), 45, (255, 255, 255),
               -1, cv2.LINE_AA)

    alpha = 0.6

    pad = cv2.addWeighted(overlay,alpha,pad,1-alpha,0)

    pad_copy = pad.copy()
    frame_index = 0
    cv2.namedWindow('draw_pad')

    while True:
        pad = pad_copy.copy()
        cv2.polylines(pad,
                      [np.array([[int(pad.shape[0] / 2), int(pad.shape[1] / 2)],
                                 [lines.loc[frame_index, 'point1'][0], lines.loc[frame_index, 'point1'][1]]])],
                      False, (255, 255, 255), thickness=1, lineType=cv2.LINE_AA)
        cv2.circle(pad, (lines.loc[frame_index, 'point1'][0], lines.loc[frame_index, 'point1'][1]), 3, (255, 255, 255),
                   -1, cv2.LINE_AA)
        cv2.putText(pad, 'Deviation: {}'.format(np.round(lines.loc[frame_index, 'bearing'], 1)), (400, 50),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 200, 0))
        # cv2.putText(pad, 'TimeStamp: {}'.format(np.round(lines.loc[frame_index, 'start_time']), 1), (400, 680),
        #             cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 200, 0))
        cv2.putText(pad, 'TimeStamp: {}'.format(lines.loc[frame_index, 'start_time']), (400, 680),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 200, 0))
        cv2.putText(pad, 'Angle: {}'.format(np.round(lines.loc[frame_index, 'angle'], 1)),
                    (10, 680),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 200, 0))
        cv2.putText(pad, '{}: {}'.format(data_to_plot, np.round(lines.loc[frame_index, data_to_plot], 1)),
                    (10, 50),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 200, 0))
        cv2.imshow('draw_pad', pad)
        key = cv2.waitKeyEx(-1)

        if key == 2424832:  # Left Arrow Key
            frame_index -= 1
            if frame_index < 0:
                frame_index = 0
        elif key == 2555904:  # Right Arrow Key
            frame_index += 1
            if frame_index >= lines.shape[0]:
                frame_index = lines.shape[0] - 1
        elif key == 27:  # Esc Key
            cv2.destroyAllWindows()
            break
Example #23
def take_calibration_images(cam_id=0):
	"""
	Captures calibration images by showing a live feed of the camera and saving the current image when Space is pressed.
	:param cam_id: Index of the camera to capture calibration images from
	"""
	v = cv2.VideoCapture(cam_id)

	ensure_folder_exists(CALIB_FOLDER)  # Make sure folder exists to prevent errors on file saving
	for file_name in glob(generate_calibration_filename("{}*".format(CALIB_INPUT_PREFIX))):
		os.remove(file_name)  # Remove any file from a previous calibration so they don't get mixed

	cnt_img = 0
	win_title = "Capturing calibration images - {} captured".format(cnt_img)
	cv2.namedWindow(win_title)
	cv2.moveWindow(win_title, 100,100)
	while True:
		ret, cam_frame = v.read()
		assert ret, "Couldn't grab a frame from cam {}".format(cam_id)
		cv2.imshow(win_title, cv2.resize(cam_frame, dsize=None, fx=0.5, fy=0.5))

		key = cv2.waitKeyEx(1)
		if key == ord(' '):  # Save a new image when Space is pressed
			cnt_img += 1
			file_name = generate_calibration_filename("{}_{}.{}".format(CALIB_INPUT_PREFIX, cnt_img, CALIB_INPUT_FORMAT))
			cv2.setWindowTitle(win_title, "{} - {} captured".format(win_title.split(" - ")[0], cnt_img))
			cv2.imwrite(file_name, cam_frame)
			print("Captured new calibration image: {}".format(file_name))
		elif key >= 0:  # Exit if a key other than Space was pressed
			break

	cv2.destroyAllWindows()
Example #24
def get_fourkeypoint(dir_path, keypointfile, suffix):

    file_list = glob.glob(r"{}\*{}".format(dir_path, suffix))

    global key_points
    global show_img

    cv2.namedWindow("img", cv2.WINDOW_AUTOSIZE)
    cv2.setMouseCallback("img", mouse_key_point_event)  # 设置了监测鼠标活动情况的进程(线程)

    all_keypoint = dict()
    key_points = []
    for file_path in file_list:
        img = cv2.imread(file_path)

        show_img = img
        while len(key_points) < 4:  # keep the for loop from advancing until four points are picked
            cv2.imshow("img", show_img)
            key = cv2.waitKeyEx(10)
            if key == ord('q'):
                break
        if len(key_points) >= 4:
            all_keypoint['{}'.format(
                osp.basename(file_path))] = copy.deepcopy(key_points)
            key_points.clear()
    cv2.destroyAllWindows()
    with open(keypointfile, 'w') as f:
        json.dump(all_keypoint, f)
Example #25
def draw_three(sketch,
               window_name="google",
               padding=30,
               random_color=False,
               time=1,
               show=False,
               img_size=512):
    """
    此处主要包含画图部分,从canvas_size_google()获得画布的大小和起始点的位置,根据strokes来画
    :param sketches: google quickDraw, (n, 3)
    :param window_name: pass
    :param thickness: pass
    :return: None
    """
    # print("three ")
    # print(sketch)
    # print("-" * 70)
    thickness = int(img_size * 0.025)

    sketch = scale_sketch(sketch, (img_size, img_size))  # scale the sketch.
    [start_x, start_y, h, w] = canvas_size_google(sketch=sketch)
    start_x += thickness + 1
    start_y += thickness + 1
    canvas = np.ones(
        (max(h, w) + 3 * (thickness + 1), max(h, w) + 3 * (thickness + 1), 3),
        dtype='uint8') * 255
    if random_color:
        color = (random.randint(0, 255), random.randint(0, 255),
                 random.randint(0, 255))
    else:
        color = (0, 0, 0)
    pen_now = np.array([start_x, start_y])
    first_zero = False
    for stroke in sketch:
        delta_x_y = stroke[0:0 + 2]
        state = stroke[2:]
        if first_zero:  # the first zero is the pen-up offset; move without drawing
            pen_now += delta_x_y
            first_zero = False
            continue
        cv2.line(canvas,
                 tuple(pen_now),
                 tuple(pen_now + delta_x_y),
                 color,
                 thickness=thickness)
        if int(state) == 1:  # next stroke
            first_zero = True
            if random_color:
                color = (random.randint(0, 255), random.randint(0, 255),
                         random.randint(0, 255))
            else:
                color = (0, 0, 0)
        pen_now += delta_x_y
    if show:
        cv2.imshow(window_name, canvas)  # display the canvas before waiting for a key
        key = cv2.waitKeyEx()
        if key == 27:  # esc
            cv2.destroyAllWindows()
            exit(0)
    # cv2.imwrite(f"./{window_name}.png", canvas)
    return cv2.resize(canvas, (img_size, img_size))
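A usage sketch, assuming stroke-3 QuickDraw data (rows of [dx, dy, pen_state]); the square below is illustrative:

sketch = np.array([[40, 0, 0], [0, 40, 0], [-40, 0, 0], [0, -40, 1]],
                  dtype=np.int32)
canvas = draw_three(sketch, img_size=256)
cv2.imshow("google", canvas)
cv2.waitKeyEx(0)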
Example #26
def waitKeys():
	while True:
		key = cv2.waitKeyEx(1)
		
		if key == -1: break
		
		yield key
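Usage sketch: the generator yields every key pressed since the last call and stops as soon as the event queue is empty:

for key in waitKeys():
	print('pressed:', key)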
Example #27
def adjust_shutter(cam):
    step = 0.10
    cv2.namedWindow(DISP_WIN, cv2.WINDOW_NORMAL)
    cv2.resizeWindow(DISP_WIN, DISP_RES_FULL[0], DISP_RES_FULL[1])
    cam.start_capture()

    while True:
        auto, shutter = cam.shutter
        print('%s, %10.5f\r' % (str(auto), shutter), end='')
        # rotate the image 180 degrees
        rotated = imutils.rotate(cam.grab_frame(), 180)

        cv2.imshow(DISP_WIN, rotated)
        key = cv2.waitKeyEx(1)
        if key == 27: # escape
            break
        elif key == ARROW['right']:
            cam.shutter = (shutter + step)
        elif key == ARROW['left']:
            cam.shutter = (shutter - step)
        elif key == ARROW['up']:
            cam.shutter = (shutter + (step * 10))
        elif key == ARROW['down']:
            cam.shutter = (shutter - (step * 10))

    cam.stop_capture()
    cv2.destroyAllWindows()
Example #28
def loop(conn=state):
    global UP, DOWN
    cv.namedWindow('test')
    cv.setMouseCallback('test', mouseHandler)

    while True:
        im = sct.grab(mon)
        img = Image.frombytes('RGB', im.size, im.rgb).convert('L')

        # print(np.array(img)[40][242])
        if np.array(img)[5][199] > 100:
            # print("not gg")
            conn[1] = False
        else:
            # print("gg")
            conn[1] = True

        playGroundU = np.array(img)[64]
        playGroundB = np.array(img)[80]

        # cv.imshow('test', makeWider(playGroundU, second=playGroundB))
        cv.imshow('test', np.array(img))
        # print(conn[0])
        if not conn[1] and conn[0]:
            writeData(playGroundU, playGroundB, [UP, DOWN], time_inc=True)
        if conn[1] and conn[0]:
            saveData(fid='99')

        # keyboardCont.press('q')

        k = cv.waitKeyEx(25)
        if k & 0xFF == ord('q'):
            cv.destroyAllWindows()
            break
Example #29
    def handle_key(self):
        sleep_time = math.ceil(1000 / self.current_presentation.fps)
        key = cv2.waitKeyEx(fix_time(sleep_time - self.lag))

        if key == Config.QUIT_KEY:
            self.quit()
        elif self.state == State.PLAYING and key == Config.PLAYPAUSE_KEY:
            self.state = State.PAUSED
        elif self.state == State.PAUSED and key == Config.PLAYPAUSE_KEY:
            self.state = State.PLAYING
        elif self.state == State.WAIT and (key == Config.CONTINUE_KEY
                                           or key == Config.PLAYPAUSE_KEY):
            self.current_presentation.next()
            self.state = State.PLAYING
        elif self.state == State.PLAYING and key == Config.CONTINUE_KEY:
            self.current_presentation.next()
        elif key == Config.BACK_KEY:
            if self.current_presentation.current_slide_i == 0:
                self.current_presentation_i = max(
                    0, self.current_presentation_i - 1)
                self.current_presentation.reset()
                self.state = State.PLAYING
            else:
                self.current_presentation.prev()
                self.state = State.PLAYING
        elif key == Config.REWIND_KEY:
            self.current_presentation.rewind_slide()
            self.state = State.PLAYING
Example #30
    def make_boxes_correct(self):
        """
        对已经标注的图片,进行人工检查,矫正
        :param img, 需要矫正的图片
        :param boxes, 图片上初步打标数据
        :return: 校正后的boxes以及文本,此box返回后是相对于原始图像尺寸的坐标,
                  而在对象中的self.boxes依然保持相对于缩放后图像尺寸的坐标。
        """
        cv2.namedWindow(self.win_name,
                        cv2.WINDOW_GUI_NORMAL | cv2.WINDOW_AUTOSIZE)
        # cv2.resizeWindow(self.win_name, 800, 600)
        cv2.setMouseCallback(self.win_name, self.onMouse)

        while self.cur_key != 32 and cv2.getWindowProperty(
                self.win_name, cv2.WND_PROP_VISIBLE) != 0:
            # Never refresh (overwrite with the original image) unconditionally on every
            # iteration: at this refresh rate any drawing on the window (lines, rectangles)
            # would be overwritten immediately and the display would never change.
            # Do the refresh inside the specific event handlers instead.
            if self.cur_key == ord('d') or self.cur_key == ord('D'):
                break
            self.image = cv2.drawContours(self.image, np.array(self.boxes), -1,
                                          (0, 255, 0), 2)
            cv2.imshow(self.win_name, self.image)
            self.cur_key = cv2.waitKeyEx(10)
            self.setKeyboardMotion()
        boxes = [
            np.multiply(box, 1. / self.scaling).astype(np.int32)
            for box in self.boxes
        ]
        return boxes, self.texts
Example #31
    def faceStreaming(self):
        modelo = cv2.CascadeClassifier('models/frontal/haarcascade_frontalface_default.xml')
        
        video_captura = cv2.VideoCapture(0)
        
        while True:
            ret, frame = video_captura.read()
            cinza = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            faces = modelo.detectMultiScale(cinza, flags=cv2.CASCADE_SCALE_IMAGE)
            
            for face in faces:
                x_axis, y_axis, width, height = [vertice for vertice in face]
                
                cv2.rectangle(frame, (x_axis, y_axis), (x_axis+width, y_axis+height), (255,0,222), 2)
                
                sub_face = frame[y_axis: y_axis+height, x_axis:x_axis+width]
                
                if not os.path.exists(self.PASTA_FRAMES):
                    os.makedirs(self.PASTA_FRAMES)
                
                
                if self.CARTAZ_ORDEM > 0:
                    foto_nome = 'face_' + self.data('datetime') + '_' + self.CARTAZ_NOME + '_' + str(self.CARTAZ_ORDEM) + '_.jpg'
                    foto_arquivo = self.PASTA_FRAMES + '/' + foto_nome
                    # cv2.imwrite(foto_arquivo, sub_face)                    
                    
                    captura_imagem = [True, False]
                    vlr_captura = random.randint(0, 1)
                    captura = captura_imagem[vlr_captura]
                    
                    # check whether the folder has more than 15 images
                    # randomly choose which images will be saved
                    # this process was done to reduce the number of repeated images
                    
                    if captura and len(os.listdir(self.PASTA_FRAMES)) < 15:
                        cv2.imwrite(foto_arquivo, sub_face)
            
            video_captura_nome = 'Plum - Video Streaming'
            cv2.namedWindow(video_captura_nome)
            cv2.moveWindow(video_captura_nome, 520, 30)         
            cv2.imshow(video_captura_nome, frame)

            tecla_seta_esquerda = 2424832
            tecla_seta_direita = 2555904
            tecla_esc = 27
            
            tecla = cv2.waitKeyEx(30)
            if tecla == tecla_seta_direita and self.CARTAZ_ORDEM < len(os.listdir(self.PASTA_CARTAZES)):
                self.CARTAZ_ORDEM = self.CARTAZ_ORDEM + 1
                self.showNextPoster()
            elif tecla == tecla_seta_esquerda and self.CARTAZ_ORDEM > 1:
                self.CARTAZ_ORDEM = self.CARTAZ_ORDEM - 1
                self.showNextPoster()
            elif tecla == tecla_esc:
                break

        video_captura.release()
        cv2.destroyAllWindows()