Example #1
    def update(self, frames):
        """
        Given a image, find when to trigger and return bounding boxes of people in the trigger region

        Parameters:
        frames (dict): dictionary of frames from all the cameras

        Returns:
        bboxes (ndarry): bounding boxes of detected objects
        sampimg (ndarray): cropped image of the trigger region
        """
        img = frames[self._camera_id]
        chkimg = cv2.cvtColor(crop_image(img, self._check_coords),
                              cv2.COLOR_BGR2GRAY)
        score, diff = compare_ssim(self._ref_img, chkimg, full=True)

        if self._check == 1:
            if score > self._close_thresh:
                sampimg = crop_image(img, self._sample_coords)
                bboxes, scores = self._detector.get_bboxes(sampimg)
                self._check = 0
                return True, bboxes, sampimg
        else:
            if score < self._open_thresh:
                self._check = 1

        return False, None, None
Example #2
def run_mot_and_fill_gallery(video_loader, gallery, detector, sort_trackers,
                             output_files):

    # Iterate through frames of all cameras
    iterator = tqdm(video_loader)
    for findex, frames in iterator:
        # Only process the first 400 frames
        if findex >= 400:
            iterator.close()
            break
        # Iterate through each camera
        for vidname, frame in frames.items():
            # Get bounding boxes of all people
            boxes, scores = detector.get_bboxes(frame)

            # Send the people bounding boxes to the tracker and get three
            # things back: the normal Sort output (the tracked bounding boxes
            # it wants to report), the corresponding track objects, and the
            # objects of newly created tracks.
            tracker = sort_trackers[vidname]
            dets = np.column_stack((np.reshape(boxes, [-1, 4]), scores))
            matched_tracks, matched_kb_trackers, new_kb_trackers = tracker.update(
                dets)

            gallery.update(vidname, frame, matched_tracks)

            # Find indexes of returned bounding boxes that meet the ideal ratio.
            # Guard against an empty track list, which would break the slicing.
            if len(matched_tracks) > 0:
                trkbboxes = np.array(matched_tracks)
                widths = trkbboxes[:, 2] - trkbboxes[:, 0]
                heights = trkbboxes[:, 3] - trkbboxes[:, 1]
                aspectratio = heights / widths
                readybools = np.isclose(aspectratio, 2, rtol=0.25)
                indexes = np.arange(len(matched_tracks))[readybools]
            else:
                indexes = np.array([], dtype=int)

            # Iterate through returned bounding boxes
            for ind, trk in enumerate(matched_tracks):
                box = ((int(trk[0]), int(trk[1])), (int(trk[2]), int(trk[3])))

                # If bounding box meets ideal ratio, save image of person as reference
                if ind in indexes:
                    cropimg = crop_image(frame, box)
                    if cropimg.size > 5:
                        fd, temp_file_name = tempfile.mkstemp(suffix='.jpg')
                        cv2.imwrite(temp_file_name, cropimg)
                        os.close(fd)
                        matched_kb_trackers[ind].save_img(temp_file_name)

                # Write bounding box, frame number, and trackid to file
                output_files[vidname].write("%d,%d,%.2f,%.2f,%.2f,%.2f\n" %
                                            (findex, trk[4], box[0][0],
                                             box[0][1], box[1][0], box[1][1]))

            # Iterate through new tracks and add their current bounding box to list of track references
            for trk in new_kb_trackers:
                d = trk.get_state()[0]
                box = ((int(d[0]), int(d[1])), (int(d[2]), int(d[3])))
                cropimg = crop_image(frame, box)
                if cropimg.size > 5:
                    fd, temp_file_name = tempfile.mkstemp(suffix='.jpg')
                    cv2.imwrite(temp_file_name, cropimg)
                    os.close(fd)
                    trk.save_img(temp_file_name)
Example #3
    def get_eye_frame_cropped(self, frame, coords_data_crop_l,
                              coords_data_crop_r):
        img_left_eye = utils.crop_image(frame, coords_data_crop_l[0],
                                        coords_data_crop_l[1],
                                        coords_data_crop_l[2],
                                        coords_data_crop_l[3])
        img_right_eye = utils.crop_image(frame, coords_data_crop_r[0],
                                         coords_data_crop_r[1],
                                         coords_data_crop_r[2],
                                         coords_data_crop_r[3])

        return img_left_eye, img_right_eye
Example #4
def prepare_data(datasets, annotations, threshold_pos, threshold_neg,
                 save_path):
    global COUNT_FACE, COUNT_BACKGROUND
    for i in range(len(datasets)):
        image_dir, num_of_faces, gts = datasets[i]
        gts = convert_to_xywh(ellipse_to_rectangle(num_of_faces, gts))

        for gt in gts:
            img = crop_image(image_dir, gt)
            if len(img) == 0:
                continue
            a, b, c = img.shape
            if a == 0 or b == 0 or c == 0:
                continue
            COUNT_FACE += 1
            path = ''.join(
                [save_path, '1/',
                 str(i), '_',
                 str(COUNT_FACE), '.jpg'])
            cv2.imwrite(path, img)

        for candidate in generate_selective_search(image_dir):
            x, y, w, h = candidate
            ious = []
            img = crop_image(image_dir, candidate)
            if len(img) == 0:
                continue
            for gt in gts:
                ious.append(
                    IOU_calculator(x + w / 2, y + h / 2, w, h,
                                   gt[0] + gt[2] / 2, gt[1] + gt[3] / 2, gt[2],
                                   gt[3]))
            if not ious:
                continue
            if max(ious) > threshold_pos:
                COUNT_FACE += 1
                path = ''.join(
                    [save_path, '1/',
                     str(i), '_',
                     str(COUNT_FACE), '.jpg'])
                cv2.imwrite(path, img)
            elif max(ious) < threshold_neg:
                COUNT_BACKGROUND += 1
                path = ''.join([
                    save_path, '0/',
                    str(i), '_',
                    str(COUNT_BACKGROUND), '.jpg'
                ])
                cv2.imwrite(path, img)
        print(
            f"====>>> {i}/{len(datasets)}: Face: {COUNT_FACE}, Background: {COUNT_BACKGROUND}"
        )
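IOU_calculator is referenced above but not defined in the excerpt. From the call sites it takes two boxes as (center_x, center_y, width, height); a minimal sketch under that assumption:

def IOU_calculator(cx1, cy1, w1, h1, cx2, cy2, w2, h2):
    """Hypothetical IoU for boxes given as (center_x, center_y, w, h)."""
    # Convert centers and sizes to corner coordinates
    x1_min, y1_min = cx1 - w1 / 2, cy1 - h1 / 2
    x1_max, y1_max = cx1 + w1 / 2, cy1 + h1 / 2
    x2_min, y2_min = cx2 - w2 / 2, cy2 - h2 / 2
    x2_max, y2_max = cx2 + w2 / 2, cy2 + h2 / 2

    # Intersection rectangle (zero area if the boxes do not overlap)
    inter_w = max(0.0, min(x1_max, x2_max) - max(x1_min, x2_min))
    inter_h = max(0.0, min(y1_max, y2_max) - max(y1_min, y2_min))
    inter = inter_w * inter_h

    union = w1 * h1 + w2 * h2 - inter
    return inter / union if union > 0 else 0.0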
Example #5
def crop_video(subdir, sub_set, video):
    video_crop_base_path = join(save_base_path, sub_set, video)
    if not isdir(video_crop_base_path): makedirs(video_crop_base_path)

    xmls = sorted(glob.glob(join(ann_base_path, subdir, video, '*.xml')))

    for xml in xmls:

        xmltree = ET.parse(xml)
        objects = xmltree.findall('object')
        objs = []
        filename = xmltree.findall('filename')[0].text

        im = cv2.imread(
            xml.replace('xml', 'JPEG').replace('Annotations', 'Data'))
        avg_chans = np.mean(im, axis=(0, 1))
        for object_iter in objects:

            trackid = int(object_iter.find('trackid').text)
            bndbox = object_iter.find('bndbox')

            bbox = [
                int(bndbox.find('xmin').text),
                int(bndbox.find('ymin').text),
                int(bndbox.find('xmax').text),
                int(bndbox.find('ymax').text)
            ]

            z, x = utils.crop_image(im, bbox, padding=avg_chans)
            # cv2.imwrite(join(video_crop_base_path, '{:06d}.{:02d}.z.jpg'.format(int(filename), trackid)), z)
            cv2.imwrite(
                join(video_crop_base_path,
                     '{:06d}.{:02d}.x.jpg'.format(int(filename), trackid)), x)
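utils.crop_image(im, bbox, padding=avg_chans), used throughout these dataset-cropping scripts, appears to return an exemplar patch z and a larger search patch x in the style of SiamFC-family trackers. The helper itself is not in the excerpt, so this is only a rough sketch under that assumption (crop sizes and context factor are hypothetical):

import cv2
import numpy as np

def crop_image(im, bbox, padding=(0, 0, 0), exemplar_size=127,
               context_amount=0.5, search_size=511):
    """Hypothetical SiamFC-style crop returning (z, x) patches."""
    cx, cy = (bbox[0] + bbox[2]) / 2., (bbox[1] + bbox[3]) / 2.
    w, h = bbox[2] - bbox[0], bbox[3] - bbox[1]

    # Add context around the target and take the geometric-mean side
    wc, hc = w + context_amount * (w + h), h + context_amount * (w + h)
    s_z = (wc * hc) ** 0.5                       # exemplar crop side
    s_x = s_z * search_size / exemplar_size      # search crop side

    def crop(side, out_size):
        # Window around the center, padded with the average channel color
        x0, y0 = int(round(cx - side / 2)), int(round(cy - side / 2))
        x1, y1 = x0 + int(round(side)), y0 + int(round(side))
        pad_t, pad_l = max(0, -y0), max(0, -x0)
        pad_b, pad_r = max(0, y1 - im.shape[0]), max(0, x1 - im.shape[1])
        border_value = tuple(np.asarray(padding, dtype=float).ravel())
        padded = cv2.copyMakeBorder(im, pad_t, pad_b, pad_l, pad_r,
                                    cv2.BORDER_CONSTANT, value=border_value)
        patch = padded[y0 + pad_t:y1 + pad_t, x0 + pad_l:x1 + pad_l]
        return cv2.resize(patch, (out_size, out_size))

    return crop(s_z, exemplar_size), crop(s_x, search_size)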
Example #6
def crop_img(xml, sub_set_crop_path):
    xmltree = ET.parse(xml)
    objects = xmltree.findall('object')

    frame_crop_base_path = join(sub_set_crop_path,
                                xml.split('/')[-1].split('.')[0])
    if not isdir(frame_crop_base_path): makedirs(frame_crop_base_path)

    img_path = xml.replace('xml', 'JPEG').replace('Annotations', 'Data')

    im = cv2.imread(img_path)
    avg_chans = np.mean(im, axis=(0, 1))

    for id, object_iter in enumerate(objects):
        bndbox = object_iter.find('bndbox')
        bbox = [
            int(bndbox.find('xmin').text),
            int(bndbox.find('ymin').text),
            int(bndbox.find('xmax').text),
            int(bndbox.find('ymax').text)
        ]

        z, x = utils.crop_image(im, bbox, padding=avg_chans)
        cv2.imwrite(
            join(frame_crop_base_path, '{:06d}.{:02d}.x.jpg'.format(0, id)), x)
Example #7
    def __getitem__(self, i):
        sample = self.samples[i]
        full_path = sample['full_path']
        bbox = sample['bboxes'][0]
        img = cv.imread(full_path)
        img = crop_image(img, bbox)
        img = cv.resize(img, (im_size, im_size))

        # img aug
        img = img[..., ::-1]  # RGB
        img = transforms.ToPILImage()(img)
        img = self.transformer(img)

        age = sample['attr']['age'] / 100.
        pitch = (sample['attr']['angle']['pitch'] + 180) / 360
        roll = (sample['attr']['angle']['roll'] + 180) / 360
        yaw = (sample['attr']['angle']['yaw'] + 180) / 360
        beauty = sample['attr']['beauty'] / 100.

        expression = name2idx(sample['attr']['expression']['type'])
        gender = name2idx(sample['attr']['gender']['type'])
        glasses = name2idx(sample['attr']['glasses']['type'])
        race = name2idx(sample['attr']['race']['type'])
        return img, np.array([age, pitch, roll, yaw,
                              beauty]), expression, gender, glasses, race
Example #8
def compute_disparity(left_image, right_image, maximum_disparity, noise_filter,
                      width):
    """
    Input:
    -Left & Rectified Right Images, Maximum Disparity Value
    -Noise filter: increase to be more aggressive
    Output:
    -Disparity between images, scaled appropriately
    """
    # convert to grayscale (as the disparity matching works on grayscale)
    grayL, grayR = convert_to_grayscale([left_image, right_image])

    # perform preprocessing - raise to the power, as this subjectively appears
    # to improve subsequent disparity calculation
    grayL = np.power(grayL, 0.75).astype('uint8')
    grayR = np.power(grayR, 0.75).astype('uint8')

    # compute disparity image from undistorted and rectified stereo images
    # (which for reasons best known to the OpenCV developers is returned scaled by 16)
    disparity = stereoProcessor.compute(grayL, grayR)

    # filter out noise and speckles (adjust parameters as needed)
    cv2.filterSpeckles(disparity, 0, 4000, maximum_disparity - noise_filter)

    # threshold the disparity so that it goes from 0 to max disparity
    _, disparity = cv2.threshold(disparity, 0, maximum_disparity * 16,
                                 cv2.THRESH_TOZERO)

    # scale the disparity to 8-bit for viewing
    disparity_scaled = (disparity / 16.).astype(np.uint8)

    # crop the area not seen by *both* cameras and the area with the car bonnet
    disparity_scaled = utils.crop_image(disparity_scaled, 0, 390, 135, width)

    return disparity_scaled
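The module-level stereoProcessor and the positional crop_image are defined outside this excerpt. A plausible setup, assuming an OpenCV SGBM matcher (whose compute() does return disparity scaled by 16) and a slice-based crop_image(img, start_y, end_y, start_x, end_x):

import cv2

# Hypothetical module-level matcher; numDisparities must be divisible by 16
max_disparity = 128
stereoProcessor = cv2.StereoSGBM_create(minDisparity=0,
                                        numDisparities=max_disparity,
                                        blockSize=11)

def crop_image(img, start_y, end_y, start_x, end_x):
    # Assumed row/column slice matching the positional call above
    return img[start_y:end_y, start_x:end_x]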
Example #9
def crop_video(subdir, subset, video):
    video_dir = join(subdir, video)

    video_crop_base_path = join(save_base_path, subset, video)
    if not isdir(video_crop_base_path): makedirs(video_crop_base_path)

    if debug:
        print("crop video: {}".format(video))
    frames = sorted(glob.glob(join(video_dir, '*.jpg')))

    with open(join(video_dir, 'groundtruth.txt')) as f:
        ann = f.readlines()

    with open(join(video_dir, 'absence.label')) as f:
        absence = f.readlines()

    for id, frame in enumerate(frames):
        im = cv2.imread(frame)
        avg_chans = np.mean(im, axis=(0, 1))

        bbox = [float(s) for s in ann[id].split(',')]

        if (bbox[2] == 0 and bbox[3] == 0) or int(absence[id]) == 1:
            continue

        trackid = 0
        bbox = [bbox[0], bbox[1], bbox[0] + bbox[2], bbox[1] + bbox[3]]

        filename = frame.split('/')[-1].split('.')[0]
        z, x = utils.crop_image(im, bbox, padding=avg_chans)
        cv2.imwrite(
            join(video_crop_base_path,
                 '{:06d}.{:02d}.x.jpg'.format(int(filename), trackid)), x)
Example #10
def split_AFLW(test_path, dataset_path):
    """
    Split the images into folders by the specified angle bins
    """
    # Create the folders, one per 10-degree bin
    os.makedirs(test_path, exist_ok=True)
    degree = -90
    folder_name = ''
    test_dict = {}
    while degree < 90:
        folder_name = degree
        test_dir = os.path.join(test_path, str(folder_name))
        test_dict[degree] = test_dir
        os.makedirs(test_dir, exist_ok=True)

        degree += 10

    degree_th = 10
    jpg_images = glob.glob(dataset_path + '/*.jpg')

    for jpg_image in jpg_images:
        mat_file = utils.get_matpath(jpg_image)
        pitch, yaw, roll = utils.get_degree_from_mat(mat_file)

        if abs(pitch) <= 90:
            # Get the file name
            file_name = os.path.basename(jpg_image)
            save_path = os.path.join(
                test_dict[int(pitch - pitch % 10)],
                file_name
            )
            img = utils.crop_image(mat_file, jpg_image)
            img.save(save_path)
Example #11
def align_faces(faces, image, predictor, required_size=(160, 160)):
    # Get the size of the image
    (s_height, s_width) = image.shape[:2]

    aligned_faces = []

    # Loop over the detected faces
    for i, det in enumerate(faces):

        # Get the facial landmarks
        shape = predictor(image, det)

        # Get the eye coordinates from the landmarks
        left_eye = extract_left_eye_center(shape)
        right_eye = extract_right_eye_center(shape)

        # Get the face rotation matrix from the eye positions
        M = get_rotation_matrix(left_eye, right_eye)

        # Rotate the image so that the i-th face is aligned
        rotated = cv2.warpAffine(
            image, M, (s_width, s_height), flags=cv2.INTER_CUBIC)

        # Crop the image to keep only the i-th face
        cropped = crop_image(rotated, det)

        cropped = cv2.resize(cropped, required_size)

        # Add the cropped face to the list
        aligned_faces.append(cropped)

    return asarray(aligned_faces)
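get_rotation_matrix is not shown. A common implementation, assumed here, rotates about the midpoint between the eyes by the tilt of the line joining them:

import cv2
import numpy as np

def get_rotation_matrix(left_eye, right_eye):
    """Hypothetical helper: rotation that levels the line between the eyes."""
    dx = right_eye[0] - left_eye[0]
    dy = right_eye[1] - left_eye[1]
    angle = np.degrees(np.arctan2(dy, dx))  # tilt of the eye line in degrees
    center = ((left_eye[0] + right_eye[0]) / 2.0,
              (left_eye[1] + right_eye[1]) / 2.0)
    return cv2.getRotationMatrix2D(center, angle, 1.0)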
Example #12
def telemetry(sid, data):
    if data:
        # The current steering angle of the car
        steering_angle = data["steering_angle"]
        # The current throttle of the car
        throttle = data["throttle"]
        # The current speed of the car
        speed = data["speed"]
        # The current image from the center camera of the car
        imgString = data["image"]
        image = Image.open(BytesIO(base64.b64decode(imgString)))
        image_array = crop_image(np.asarray(image))
        steering_angle = float(
            model.predict(image_array[None, :, :, :], batch_size=1))
        min_speed = 8
        max_speed = 10
        if float(speed) < min_speed:
            throttle = 1.0
        elif float(speed) > max_speed:
            throttle = -1.0
        else:
            throttle = 0.1

        print(steering_angle, throttle)
        send_control(steering_angle, throttle)

        # save frame
        if args.image_folder != '':
            timestamp = datetime.utcnow().strftime('%Y_%m_%d_%H_%M_%S_%f')[:-3]
            image_filename = os.path.join(args.image_folder, timestamp)
            image.save('{}.jpg'.format(image_filename))
    else:
        # NOTE: DON'T EDIT THIS.
        sio.emit('manual', data={}, skip_sid=True)
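crop_image in this behavioral-cloning loop is assumed to trim the sky and hood rows from the simulator frame before prediction; a minimal sketch with hypothetical row offsets:

def crop_image(image_array, top=60, bottom=25):
    """Hypothetical crop: drop `top` rows of sky and `bottom` rows of hood."""
    return image_array[top:-bottom, :, :]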
Example #13
def of_dataset(folder="testset", model=None, view=False):
    '''Measure the error across the given dataset.
    It compares the measured points with the annotated ground truth;
    optionally you can [view] the results.'''
    assert model is not None

    # load face and landmark detectors
    utils.load_shape_predictor(model)
    # utils.init_face_detector(True, 150)

    # init average-error
    err = 0
    num = 0

    for img, lmarks, path in utils.ibug_dataset(folder):
        # detections
        face = utils.prominent_face(utils.detect_faces(img, detector="dlib"))
        measured = utils.detect_landmarks(img, face)

        # get error
        num += 1
        err += normalized_root_mean_square(lmarks, measured)

        # results:
        if view is True:
            utils.draw_rect(img, face, color=Colors.yellow)
            utils.draw_points(img, lmarks, color=Colors.green)
            utils.draw_points(img, measured, color=Colors.red)
            utils.show_image(utils.show_properly(utils.crop_image(img, face)))

    print(err, num, err / num)
    print("average NRMS Error for {} is {}".format(folder, err / num))
Example #14
    def __init__(self, camera_id, ref_img, open_thresh, close_thresh,
                 check_coords, sample_coords, detector):
        """
        the constructor for BboxTrigger class

        Parameters:
        _camera_id (str): name of the camera location
        _ref_img (cv2 image): reference image (ex. Closed door)
        _open_thresh (int): threshold value for opening a door
        _close_thresh (int): threshold value for closing a door
        _check_coords (list): 2D list of coordinates to check
        _sample_coords (list): 2D list of trigger coordinates
        _detector (detector.py): object detector (default FasterRCNN)

        """
        self._camera_id = camera_id
        self._open_thresh = open_thresh
        self._close_thresh = close_thresh
        self._check_coords = check_coords
        self._sample_coords = sample_coords
        self._detector = detector  # ideally this is not here in the future either
        self._ref_img = cv2.cvtColor(crop_image(ref_img, check_coords),
                                     cv2.COLOR_BGR2GRAY)

        self._check = 0
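A minimal usage sketch for this class (the camera name, thresholds, and coordinates are hypothetical; detector is any object exposing get_bboxes, and update is the method shown in Example #1):

ref = cv2.imread('closed_door.jpg')
trigger = BboxTrigger('cam0', ref,
                      open_thresh=0.8, close_thresh=0.9,
                      check_coords=[[0, 0], [200, 200]],
                      sample_coords=[[0, 0], [640, 480]],
                      detector=detector)

frames = {'cam0': cv2.imread('current_frame.jpg')}
fired, bboxes, sampimg = trigger.update(frames)
if fired:
    print('trigger fired with %d detections' % len(bboxes))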
Example #15
def crop_video(subdir, subset, video):
    video_crop_base_path = join(save_base_path, subset, video)
    if not isdir(video_crop_base_path): makedirs(video_crop_base_path)

    #print("crop video: {}".format(video))
    frames = glob.glob(join(subdir, 'videos', video, '*.jpg'))

    def index_keys(text):
        return int(text.split('/')[-1].split('.')[0])

    frames = sorted(frames, key=index_keys)

    with open(join(subdir, 'anno', video + '.txt')) as f:
        ann = f.readlines()

    for id, frame in enumerate(frames):

        im = cv2.imread(frame)
        avg_chans = np.mean(im, axis=(0, 1))
        bbox_str = [float(s) for s in ann[id].split(',')]

        trackid = 0
        bbox = [
            bbox_str[0], bbox_str[1], bbox_str[0] + bbox_str[2],
            bbox_str[1] + bbox_str[3]
        ]

        filename = frame.split('/')[-1].split('.')[0]
        z, x = utils.crop_image(im, bbox, padding=avg_chans)
        cv2.imwrite(
            join(video_crop_base_path,
                 '{:06d}.{:02d}.x.jpg'.format(int(filename), trackid)), x)
Example #16
def test_shot():
    size = utils.check_os()
    pixel_json = utils.get_pixel_config(size)
    blank_area = pixel_json['blank_area']
    question_area = pixel_json['question_area']
    blank_area_point = (blank_area['x1'], blank_area['y1'],
                        blank_area['x2'], blank_area['y2'])
    question_area_point = (question_area['x1'], question_area['y1'],
                           question_area['x2'], question_area['y2'])
    backup_img = None
    if os.path.exists('image/backup.png'):
        backup_img = Image.open('image/backup.png')
    else:
        print('the image at image/backup.png does not exist')
        exit(-1)
    utils.crop_image(backup_img, question_area_point, 'image/crop_test.png')
    utils.crop_image(backup_img, blank_area_point, 'image/blank_test.png')
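utils.crop_image here takes a PIL image, a coordinate tuple, and an output path; a minimal sketch assuming it wraps PIL's Image.crop:

def crop_image(img, area_point, save_path):
    """Hypothetical wrapper: crop a PIL image to (x1, y1, x2, y2) and save it."""
    region = img.crop(area_point)  # PIL expects a (left, upper, right, lower) box
    region.save(save_path)
    return region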
Example #17
def test_shot(is_ios):
    size = utils.check_os(is_ios)
    pixel_json = utils.get_pixel_config(size)
    blank_area = pixel_json['blank_area']
    question_area = pixel_json['question_area']
    blank_area_point = (blank_area['x1'], blank_area['y1'],
                        blank_area['x2'], blank_area['y2'])
    question_area_point = (question_area['x1'], question_area['y1'],
                           question_area['x2'], question_area['y2'])
    backup_img = None
    if os.path.exists('image/backup.png'):
        backup_img = Image.open('image/backup.png')
    else:
        utils.pull_from_screen_ios()
        backup_img = Image.open('image/backup.png')
    utils.crop_image(backup_img, question_area_point, 'image/crop_test.png')
    utils.crop_image(backup_img, blank_area_point, 'image/blank_test.png')
Example #18
def detect_phase(image):
    img = np.copy(image)
    box = utils.find_circles(img, mg_ratio=0.4, n_circles=1)
    resized_im = None
    for b in box:
        crop = utils.crop_image(image, b)
        resized_im = utils.resize_image(crop)
    #cv.imwrite("Detector_samples/detection_phase_steps/final1.png", resized_im*255)
    return resized_im
Example #19
class Life(pygame.sprite.Sprite):
    life_sprites = utils.load_image(config.images['lives'])
    image_blue = utils.crop_image(life_sprites, pygame.Rect(6, 2, 88, 84))
    image_red = utils.crop_image(life_sprites, pygame.Rect(6, 90, 88, 84))

    def __init__(self, pos, scale, *groups):
        super().__init__(singletons.LivesGroup.get(), *groups)
        self.scale = scale
        self.image = pygame.transform.scale(
            Life.image_blue, (int(Life.image_blue.get_rect().w * scale),
                              int(Life.image_blue.get_rect().h * scale)))
        self.rect = self.image.get_rect()
        self.rect.x, self.rect.y = pos

    def set_red(self):
        self.image = pygame.transform.scale(
            Life.image_red, (int(Life.image_blue.get_rect().w * self.scale),
                             int(Life.image_blue.get_rect().h * self.scale)))
Example #20
def split_300w_pitch():
    # Create the output folders
    # pitch folders
    train_path = '../dataset/pitch/train/'
    os.makedirs(train_path, exist_ok=True)
    valid_path = '../dataset/pitch/valid/'
    os.makedirs(valid_path, exist_ok=True)

    # Split into 10-degree bins
    degree = -90
    folder_name = ''
    train_dict = {}
    valid_dict = {}
    while degree < 90:
        folder_name = degree
        # train folder
        train_dir = os.path.join(train_path, str(folder_name))
        train_dict[degree] = train_dir
        os.makedirs(train_dir, exist_ok=True)

        # validation folder
        valid_dir = os.path.join(valid_path, str(folder_name))
        valid_dict[degree] = valid_dir
        os.makedirs(valid_dir, exist_ok=True)

        degree += 10

    dir_path_ls = ['AFW', 'AFW_Flip',
                   'HELEN', 'HELEN_Flip',
                   'IBUG', 'IBUG_Flip',
                   'LFPW', 'LFPW_Flip']
    degree_th = 10
    #dir_path_ls = ['AFW']
    dataset_path = '../../dataset/300W_LP'

    for each_dir in dir_path_ls:
        dir_path = os.path.join(dataset_path, each_dir)
        jpg_images = glob.glob(dir_path+'/*.jpg')

        for jpg_image in jpg_images:
            mat_file = utils.get_matpath(jpg_image)
            pitch, yaw, roll = utils.get_degree_from_mat(mat_file)

            print(each_dir)
            if abs(pitch) <= 90:
                # Get the file name
                file_name = os.path.basename(jpg_image)
                random_dir = np.random.choice(
                    # 80/20 train/validation split
                    [train_dict[int(pitch - pitch % 10)],
                     valid_dict[int(pitch - pitch % 10)]],
                    p=[0.8, 0.2]
                )
                save_path = os.path.join(random_dir, file_name)
                img = utils.crop_image(mat_file, jpg_image)
                img.save(save_path)
Example #21
def predict_single_image(model, x):
    """
    Get the model's prediction for a single image.
    Pad the image if its size is not a multiple of 32 and
    apply the needed augmentations.
    """
    x_img, padding = pad_image(x)
    x_img = augment_test(x_img.copy())[0]
    prediction = model.predict(np.expand_dims(x_img, 0))
    return crop_image(prediction[0], padding)
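pad_image and this crop_image variant are defined elsewhere; a self-contained sketch of the usual pad-to-a-multiple-of-32 round trip, assuming crop_image simply undoes the padding:

import numpy as np

def pad_image(img, multiple=32):
    """Hypothetical: zero-pad height and width up to the next multiple."""
    h, w = img.shape[:2]
    pad_h = (multiple - h % multiple) % multiple
    pad_w = (multiple - w % multiple) % multiple
    pad_spec = ((0, pad_h), (0, pad_w)) + ((0, 0),) * (img.ndim - 2)
    return np.pad(img, pad_spec, mode='constant'), (pad_h, pad_w)

def crop_image(img, padding):
    """Hypothetical inverse of pad_image: strip the padded rows/columns."""
    pad_h, pad_w = padding
    h, w = img.shape[:2]
    return img[:h - pad_h, :w - pad_w]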
Example #22
def save_images(full_path, i, bbox):
    raw = cv.imread(full_path)
    resized = cv.resize(raw, (im_size, im_size))
    filename = 'images/{}_raw.jpg'.format(i)
    cv.imwrite(filename, resized)

    img = crop_image(raw, bbox)
    img = cv.resize(img, (im_size, im_size))
    filename = 'images/{}_img.jpg'.format(i)
    cv.imwrite(filename, img)
Example #23
    def update(self, frames):
        """Adds people and features to the gallery."""
        for trig in self._triggers:
            add, boxes, img = trig.update(frames)
            if add:
                for box in boxes:
                    cropimg = crop_image(img, box)
                    vect = self._attribute_extractor(cropimg)
                    self._people.append(cropimg)
                    self._feats.append(vect)
Example #24
def image_preprocess(db, cfg_file, db_inds, scales, result_dir, debug, no_flip, im_queue):
    num_images = db_inds.size
    
    for ind in range(0, num_images):
        db_ind = db_inds[ind]

        image_id   = db.image_ids(db_ind)
        image_file = db.image_file(db_ind)
        image      = cv2.imread(image_file)

        height, width = image.shape[0:2]

        for scale in scales:
            new_height = int(height * scale)
            new_width  = int(width * scale)
            new_center = np.array([new_height // 2, new_width // 2])
            
            if 'DLA' in cfg_file:
                # OR with 31 then +1 rounds up to the next multiple of 32
                inp_height = (new_height | 31) + 1
                inp_width  = (new_width | 31) + 1
            else:
                # OR with 127 rounds up to the next number of the form 128k - 1
                inp_height = new_height | 127
                inp_width  = new_width | 127

            images  = np.zeros((1, 3, inp_height, inp_width), dtype=np.float32)
            ratios  = np.zeros((1, 2), dtype=np.float32)
            borders = np.zeros((1, 4), dtype=np.float32)
            sizes   = np.zeros((1, 2), dtype=np.float32)
            
            if 'DLA' in cfg_file:
                out_height, out_width = inp_height // 4, inp_width // 4
            else:
                out_height, out_width = (inp_height + 1) // 4, (inp_width + 1) // 4
            
            height_ratio = out_height / inp_height
            width_ratio  = out_width  / inp_width

            resized_image = cv2.resize(image, (new_width, new_height))
            resized_image, border, offset = crop_image(resized_image, new_center, [inp_height, inp_width])

            resized_image = resized_image / 255.
            normalize_(resized_image, db.mean, db.std)

            images[0]  = resized_image.transpose((2, 0, 1))
            borders[0] = border
            sizes[0]   = [int(height * scale), int(width * scale)]
            ratios[0]  = [height_ratio, width_ratio]       

            if not no_flip:
                images = np.concatenate((images, images[:, :, :, ::-1]), axis=0)
            images = torch.from_numpy(images)
            
            im_queue.put([images, ratios, borders, sizes, out_width, image_id])
            
    time.sleep(num_images*10)
Example #25
    def setDartBoardImage(self, dartBoard):
        self.boardImage = transform.rescale(dartBoard, 0.8, anti_aliasing=True, multichannel=True)
        self.boardImage = skimage.img_as_ubyte(self.boardImage)
        self.boardImage = utils.crop_image(self.boardImage)

        if self.boardImage is not None:
            self.outputBoardImage = self.boardImage.copy()

            #### Create a point map that contains all regions of the dart board
            self.findRegionMasks()

            ### Get the center of the inner bull of the dart board
            label_img = label(self.myMask.inner_bull)
            region = regionprops(label_img)
            if len(region) == 0:
                return False, False
            max_index = utils.get_max_index(region)
            self.center = region[max_index].centroid

            ### Edge image for straight-line detection
            grayBackgroundImage = rgb2gray(self.boardImage)
            temp_image = grayBackgroundImage * self.myMask.board
            edgeImage = feature.canny(temp_image, high_threshold=0.4, low_threshold=0.2)
            tested_angles = np.linspace(-np.pi / 2, np.pi / 2, 3600)
            [H, theta, rho] = transform.hough_line(edgeImage, theta=tested_angles)
            thresh = np.amax(H)
            thresh = np.ceil(thresh * 0.05)

            ### Detect peaks
            hspace, angle, dists = transform.hough_line_peaks(H, theta, rho, num_peaks=10, threshold=thresh)

            ##### Convert the theta angles to degrees
            theta_degree = np.degrees(angle)
            angles = []
            angles_180 = []
            for a in theta_degree:
                angles.append(a - 90 + 360)
                angles_180.append(a + 90 + 360)

            ### Angles: zero degrees = up
            Angles = np.concatenate((angles, angles_180), axis=0)
            Angles = np.mod(Angles, 360)
            Angles = np.sort(Angles)

            ### Construct the 20 regions of the dart board
            regions_values = [10, 15, 2, 17, 3, 19, 7, 16, 8, 11, 14, 9, 12, 5, 20, 1, 18, 4, 13, 6]
            self.regions_details = []
            for i in range(len(regions_values)):
                region = Region()
                region.minAngle = Angles[i]
                region.maxAngle = Angles[np.mod(i + 1, np.size(Angles))]
                region.value = regions_values[i]
                self.regions_details.append(region)
Example #26
def save_images(full_path, i, bbox):
    img = cv.imread(full_path)
    img = crop_image(img, bbox)
    img = cv.resize(img, (im_size, im_size))
    img_path1 = 'images/img_norm_{}.jpg'.format(i)
    cv.imwrite(img_path1, img)

    img = cv.flip(img, 1)
    img_path2 = 'images/img_flip_{}.jpg'.format(i)
    cv.imwrite(img_path2, img)
    return img_path1, img_path2
Example #27
    def update(self, video_name, frame, bboxes):
        """Adds people and features to the gallery."""
        for trig in self.triggers:
            if trig.video_oi != video_name:
                continue
            indsToSave = trig.update(bboxes)
            for index in indsToSave:
                cropimg = crop_image(frame,
                                     np.reshape(bboxes[index, :-1], (2, 2)))
                vect = self._attribute_extractor(cropimg)
                self._people.append(cropimg)
                self._feats.append(vect)
Example #28
class Particle(object):
    image = pygame.transform.scale(
        utils.crop_image(utils.load_image('particles.png'),
                         pygame.Rect(49, 49, 32, 32)), (10, 10)
    )

    def __init__(self, particle_system, position, velocity, life, colors):
        self.particle_system = particle_system
        
        self.position = list(position)
        self.velocity = list(velocity)

        self.time = 0.0
        self.life = life

        self.colors = list(colors)
        self._padlib_num_colors = len(self.colors)
        self._padlib_color_needs_update = True
        
    def get_color(self):
        if self._padlib_color_needs_update:
            part = self.time / self.life
            part *= self._padlib_num_colors
            index = int(part)
            if index >= self._padlib_num_colors:
                index = self._padlib_num_colors - 1

            color1 = self.colors[index]
            if index + 1 >= self._padlib_num_colors:
                color2 = color1
            else:
                color2 = self.colors[index+1]

            delta = [color2[i]-color1[i] for i in [0,1,2]]
            
            part = part - int(part)
            self.color = [rndint(color1[i]+part*delta[i]) for i in [0,1,2]]
            
            self._padlib_color_needs_update = False

        return self.color
    
    def update(self, dt, accel):
        self.velocity[0] += accel[0]*dt
        self.velocity[1] += accel[1]*dt
        
        self.position[0] += self.velocity[0]*dt
        self.position[1] += self.velocity[1]*dt
        
        self.time += dt
        self._padlib_color_needs_update = True
        
    def draw(self, surface):
        surface.blit(self.image, (rndint(self.position[0]), rndint(self.position[1])))
Example #29
def _full_image_crop(image, detections):
    detections = detections.copy()
    height, width = image.shape[0:2]

    max_hw = max(height, width)
    center = [height // 2, width // 2]
    size = [max_hw, max_hw]

    image, border, offset = crop_image(image, center, size)
    detections[:, 0:4:2] += border[2]
    detections[:, 1:4:2] += border[0]
    return image, detections
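The center-and-size variant of crop_image used in Examples #24 and #29 is not included. Here is a sketch consistent with how border and offset are consumed above, modeled on the CornerNet-style utility, so treat it as an assumption:

import numpy as np

def crop_image(image, center, size):
    """Hypothetical center/size crop returning (patch, border, offset)."""
    cty, ctx = center
    height, width = size
    im_height, im_width = image.shape[0:2]
    cropped_image = np.zeros((height, width, 3), dtype=image.dtype)

    # Clip the crop window to the image bounds
    x0, x1 = max(0, ctx - width // 2), min(ctx + width // 2, im_width)
    y0, y1 = max(0, cty - height // 2), min(cty + height // 2, im_height)

    left, right = ctx - x0, x1 - ctx
    top, bottom = cty - y0, y1 - cty

    # Paste the valid region into the center of the output canvas
    cropped_cty, cropped_ctx = height // 2, width // 2
    y_slice = slice(cropped_cty - top, cropped_cty + bottom)
    x_slice = slice(cropped_ctx - left, cropped_ctx + right)
    cropped_image[y_slice, x_slice, :] = image[y0:y1, x0:x1, :]

    # border: where the valid pixels sit inside the canvas (t, b, l, r)
    border = np.array([cropped_cty - top, cropped_cty + bottom,
                       cropped_ctx - left, cropped_ctx + right],
                      dtype=np.float32)
    # offset: top-left of the crop window in original image coordinates
    offset = np.array([cty - height // 2, ctx - width // 2])
    return cropped_image, border, offset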
Example #30
        def crop_from_detected(det):
            shape = self.predictor(img, det)
            left_eye = extract_left_eye_center(shape)
            right_eye = extract_right_eye_center(shape)

            M = get_rotation_matrix(left_eye, right_eye)
            rotated = cv2.warpAffine(img,
                                     M, (s_width, s_height),
                                     flags=cv2.INTER_CUBIC)

            cropped = crop_image(rotated, det)
            return cropped
Example #31
    def get(self, photo_id):
        photo = Photo.get(photo_id)
        if photo:

            if self.request.get('type'):
                force_crop = bool(self.request.get('crop', False))
                cropped, img = crop_image(photo.file, self.request.get('type'),
                                          strict=force_crop)
                if img:
                    self.response.headers['Content-Type'] = 'image/jpg'
                    self.response.out.write(img)
                else:
                    self.response.out.write('No image')
                # Return so the original photo is not also written below
                return

            self.response.headers['Content-Type'] = str('image/%s' % photo.extension)
            self.response.out.write(photo.file)

        else:
            self.response.out.write('No image')
Example #32
    def post(self):
        user = User.get_by_key_name(self.current_user['id'])

        # Action: upload a photo
        if self.request.get('action') == 'upload':

            fd = self.request.get('photo')
            # ext = fd.file.name.split('.')[-1].lower()

            cropped, img = crop_image(fd, 'post', False)
            photo_blob = Photo.create_blob(img)

            extension = 'jpg' # if cropped==True else ext
            photo = Photo(user=user, file=photo_blob, extension=extension, is_used=False)
            photo.put()

            self.response.headers['Content-Type'] = 'application/json'
            self.response.out.write(json.dumps({'success': True, 'photo': str(photo.key())}))

        # Action create Photo post
        if self.request.get('action') == 'boker_photo':

            ispop = self.request.get('pop') == '1'
            if ispop:
                self.template = 'boker_pop.html'

            explicitly_shared = self.request.get('explicit_share', False)
            if explicitly_shared == 'on': explicitly_shared = True

            photokey = self.request.get('photokey')
            desc = self.request.get('desc')

            photo = Photo.get(photokey)
            if photo and desc:
                # Save boker
                boker = Boker(user=user, photo=photo, description=desc)
                boker.put()
                # Set photo used
                photo.is_used = True
                photo.put()

                # Run task: Posting to page wall
                boker_url = "%s/boker/%s" % (settings.APP_DOMAIN, boker.key().id())
                user_access_token = self.current_user['access_token']

                deferred.defer(post_page_photo, boker_url, photokey, desc)
                deferred.defer(publish_upload_action, user_access_token, boker_url, explicitly_shared)

                boker_url = self.uri_for('boker_view', boker_id=boker.key().id())
                if ispop:
                    self.response.out.write(u'<script type="text/javascript">window.parent.window.location.href="%s";</script>' % boker_url)
                else:
                    self.redirect(boker_url)
            else:
                return self.render_response(self.template, {
                            'errors': 'The photo and its story are required.',
                            'photokey': photokey,
                            'desc': desc,
                            })

        # Action create Video Post
        if self.request.get('action') == 'boker_video':
            ispop = self.request.get('pop') == '1'
            if ispop:
                self.template = 'boker_video.html'

            explicitly_shared = self.request.get('explicit_share', False)
            if explicitly_shared == 'on': explicitly_shared = True

            video_url = self.request.get('video_url')
            video_id = self.request.get('video_id')
            video_source = self.request.get('video_source')
            desc = self.request.get('desc')

            if video_id and video_source and desc:
                boker = Boker(user=user, video_id=video_id, video_source=video_source, description=desc)
                boker.put()

                boker_url = "%s/boker/%s" % (settings.APP_DOMAIN, boker.key().id())
                user_access_token = self.current_user['access_token']

                # deferred.defer(post_page_video, boker_url, boker.description)
                deferred.defer(publish_posts_action, user_access_token, boker_url, False)

                boker_url = self.uri_for('boker_view', boker_id=boker.key().id())
                if ispop:
                    self.response.out.write(u'<script type="text/javascript">window.parent.window.location.href="%s";</script>' % boker_url)
                else:
                    self.redirect(boker_url)
            else:
                return self.render_response(self.template, {
                            'errors': 'The video and its story are required.',
                            'video_url': video_url,
                            'desc': desc,
                            })