def createImage(self, gabor_response):
        large_image_list = []
        for image_list in gabor_response:
            large_image_list.append(cv2.hconcat(image_list))
            # print(cv2.hconcat(image_list).shape)
        combined_image = cv2.vconcat(large_image_list)
        return combined_image
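For reference, this grid pattern only works when every tile in a row shares a height and every row shares a width, with one common dtype; a minimal self-contained sketch with hypothetical dummy tiles:

import cv2
import numpy as np

# hypothetical 2x3 grid of equal-size tiles: cv2.hconcat needs equal heights,
# cv2.vconcat equal widths, and all tiles must share dtype and channel count
tiles = [[np.full((60, 80, 3), v, dtype=np.uint8) for v in (0, 85, 170)],
         [np.full((60, 80, 3), v, dtype=np.uint8) for v in (255, 170, 85)]]
grid = cv2.vconcat([cv2.hconcat(row) for row in tiles])
assert grid.shape == (120, 240, 3)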
Example #2
def get_concat_img(src):
    output = None
    for v in src:
        tmp = cv2.vconcat(v)
        if output is None:
            output = tmp
        else:
            output = cv2.hconcat([output, tmp])
    return output
Example #3
def get_tile_image(imgs, tile_shape=None):
    # import should be here to avoid import error on server
    # caused by matplotlib's backend
    import matplotlib.pyplot as plt  # noqa

    def get_tile_shape(img_num):
        x_num = 0
        y_num = int(math.sqrt(img_num))
        while x_num * y_num < img_num:
            x_num += 1
        return x_num, y_num

    if tile_shape is None:
        tile_shape = get_tile_shape(len(imgs))

    img_rgb_list = []
    for img in imgs:
        img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        img_rgb_list.append(img_rgb)
    # check whether all the images have the same resolution
    x_num, y_num = tile_shape
    if all(img.shape == imgs[0].shape for img in imgs[1:]):
        # rospy.loginfo("all the size same images")
        concatenated_image = None
        for y in range(y_num):
            row_image = None
            for x in range(x_num):
                i = x + y * x_num
                if i >= len(imgs):
                    img = np.zeros(imgs[0].shape, dtype=np.uint8)
                else:
                    img = imgs[i]
                if row_image is None:
                    row_image = img
                else:
                    row_image = cv2.hconcat([row_image, img])
            if concatenated_image is None:
                concatenated_image = row_image
            else:
                concatenated_image = cv2.vconcat([concatenated_image, row_image])
        return concatenated_image
    else:
        for i, img_rgb in enumerate(img_rgb_list):
            plt.subplot(y_num, x_num, i + 1)
            plt.axis("off")
            plt.imshow(img_rgb)
        canvas = plt.get_current_fig_manager().canvas
        canvas.draw()
        pil_img = PIL.Image.frombytes("RGB", canvas.get_width_height(), canvas.tostring_rgb())
        out_rgb = np.array(pil_img)
        out_bgr = cv2.cvtColor(out_rgb, cv2.COLOR_RGB2BGR)
        plt.close()
        return out_bgr
Example #4
    def inspect_kernel(self, time):
        gabor_large_list = []
        for scale, kernel_list in self.gabor_jet:
            analyzed_images = []
            for kernel in kernel_list:
                gabor_image = kernel.get_kernel()
                analyzed_images.append(gabor_image)
            gabor_images = cv2.hconcat(analyzed_images)
            gabor_large_list.append(gabor_images)
        image_to_show = cv2.vconcat(gabor_large_list)
        cv2.imshow("Gabor Kernels", image_to_show)
        cv2.waitKey(time)
        return image_to_show
Example #5
def page_handler(hrefs, url=f'https://www.{SITE}.com/img/contact/'):

    if not hrefs: return
    progress = IncrementalBar(SITE, max=MYSQL.rowcount)
    add = ' reference turnaround'

    for href, in hrefs:

        progress.next()
        image_a = np.asarray(
            bytearray(requests.get(f'{url}{href}contacta.jpg').content))
        image_b = np.asarray(
            bytearray(requests.get(f'{url}{href}contactb.jpg').content))
        image = cv2.vconcat(
            [cv2.imdecode(image_a, -1),
             cv2.imdecode(image_b, -1)])

        try:

            for image in make_gif(image):

                tags, rating = generate_tags(general=get_tags(
                    DRIVER, image, True),
                                             custom=True,
                                             rating=True,
                                             exif=False)
                name = get_name(image)
                hash_ = get_hash(image)
                image.replace(name)

                MYSQL.execute(INSERT[3], (name.name, href[:-3], ' '.join(
                    (tags, add)), rating, 1, hash_, None, SITE, None))

            else:
                MYSQL.execute(DELETE[0], (href, ), commit=1)

        except Exception:
            continue
    print()
Example #6
def maps_show2(weight, n):
    # visualize an h x w grid of attention maps
    if 1:  # self.in_channels == 512:
        batch_index = 0

        i = 0
        j = 0
        s = 1

        h = 8
        w = 8

        while i < h:
            while j < w:
                index = i * w + j
                if index >= n:
                    break
                print(i, j)
                context_mask = weight[index][0]
                attn_map = map_visualization(context_mask)
                if j == 0:
                    hmap = attn_map
                else:
                    hmap = cv.hconcat([hmap, attn_map])
                j += s
            if index >= n:
                break
            j = 0
            if i == 0:
                wholemap = hmap
            else:
                wholemap = cv.vconcat([wholemap, hmap])
            i += s

        # a = np.max(wholemap)
        cv.imshow("batch_attention", wholemap)
        cv.waitKey(0)
Example #7
def decode_image(in_file, out_file):
    # Load the trained model
    decoder = tf.keras.models.load_model('ResNetIdentitySeperateDecoder.h5')
    print(decoder.summary())
    # Load the compressed file
    compressed_file = np.load(in_file + ".npz")
    #print("------------------")
    #print(compressed_file.shape)
    # Load the Input to Decoder from the compressed file
    prediction = decoder.predict(compressed_file['Pred'])

    # Load the Original Image Size from the compressed file
    size_decoded = compressed_file['Size']

    # Assign the image size parameters to width and height
    width_fragments_count = math.ceil(size_decoded[0]/32)  # Width
    height_fragments_count = math.ceil(size_decoded[1]/32)  # Height

    # Initialize arrays to load the Decoded Image
    vertical_concatenated_image = []
    horizontal_concatenated_image = []
    # Initialize counter to load the Decoded Image
    fragments_count = 0

    # Loop to load and concatenate the image from the Decoder
    for i in range(0, (width_fragments_count * height_fragments_count)):
        image_fragment_array = prediction[i].reshape(32, 32, 3)  # Load 32x32 block from the decoder
        horizontal_concatenated_image.append(image_fragment_array)  # Push 32x32 block into Horizontal Concatenate Array
        fragments_count = fragments_count + 1  # Increment the Counter
        if fragments_count == width_fragments_count:  # Check if Image Width is reached by the Counter
            fragments_count = 0  # Initialize Counter
            im_h = cv2.hconcat(horizontal_concatenated_image)  # Concatenate Horizontally
            vertical_concatenated_image.append(im_h)  # Push Widthx32 blocks into Vertical Concatenate Array
            horizontal_concatenated_image.clear()  # Clear the Horizontal Concatenate Array

    reconstructed_image = cv2.vconcat(vertical_concatenated_image)  # Concatenate Vertically
    reconstructed_image = reconstructed_image[:size_decoded[1], :size_decoded[0], :]  # Trim padding back to the original resolution
    plt.imsave(out_file, reconstructed_image)  # Save the Output Image
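For context, the reassembly loop above implies the encoder split a zero-padded image into 32x32 blocks in row-major order; a hypothetical sketch of that splitting step (function name and padding scheme are assumptions, not taken from this codebase):

import cv2

def split_into_blocks(img, block=32):
    # pad to a multiple of `block`, then emit block x block tiles row by row,
    # mirroring the row-major order the decoder loop expects
    h, w = img.shape[:2]
    padded = cv2.copyMakeBorder(img, 0, -h % block, 0, -w % block,
                                cv2.BORDER_CONSTANT, value=0)
    return [padded[y:y + block, x:x + block]
            for y in range(0, padded.shape[0], block)
            for x in range(0, padded.shape[1], block)]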
Example #8
def callback_depth(data):
    global bridge
    global depth1
    global frame

    try:
        # Incoming data is 16-bit; scale it down to the 0..255 range
        depth_image = bridge.imgmsg_to_cv2(data, "16UC1")
        depth_image = (depth_image / 16).astype(np.uint8)

        # Reshape or unsqueeze to add a channel
        shape = np.array(depth_image).shape
        depth_image = np.reshape(depth_image, (shape[0], shape[1], 1))

        # apply RAINBOW, can add JET, WINTER, BONE, OCEAN, SUMMER, SPRING, COOL etc.
        depth_image = cv2.applyColorMap(depth_image, cv2.COLORMAP_RAINBOW)

        # Store in shared variable
        depth1 = depth_image

        path_save = os.path.join(cwd, "depth_images", "{}".format(frame))
        np.save(path_save, depth1)  # np.save expects (file, arr)

        # Concatenate with other frames
        concat_full1 = cv2.hconcat([depth1, depth2])
        concat_full1_1 = cv2.hconcat([concat_full1, depth3])
        dummy = np.zeros((480, 640, 3), dtype=np.uint8)
        concat_full2 = cv2.hconcat([color1, color2])
        concat_full2_1 = cv2.hconcat([concat_full2, dummy])
        concat_full_all = cv2.vconcat([concat_full1_1, concat_full2_1])

        # Display
        cv2.imshow("Image window", concat_full_all)
        if cv2.waitKey(3) & 0xFF == ord('q'):
            exit()

    except CvBridgeError as e:
        print(e)
Example #9
    def read_roi(cls, roi):
        from .screen import screen_grab
        screen = screen_grab()
        roi_im = screen[roi[0]:roi[1], roi[2]:roi[3]]

        # add a white part above and below the name. This makes it easier to find contours.
        h1, w1 = roi_im.shape
        white = 248 * np.ones((5, w1), dtype=np.uint8)
        roi_im = cv2.vconcat([white, roi_im, white])
        h1, w1 = roi_im.shape
        white = 248 * np.ones((h1, 5), dtype=np.uint8)
        roi_im = cv2.hconcat([white, roi_im, white])

        # # for testing
        # cv2.imshow('a', roi_im)
        # cv2.waitKey()

        contours = cls._preprocess_for_contours(
            roi_im)  # find the rectangles around contours
        bboxes = cls._get_bbox(
            contours)  # get a list of bounding boxes around character

        # testing
        # roi_im = cv2.cvtColor(roi_im, cv2.COLOR_GRAY2RGB)
        # for box in bboxes:
        #     cv2.rectangle(roi_im, (box[0], box[1]), (box[2], box[3]), (255,0,0))
        # cv2.imshow('a', roi_im)
        # cv2.waitKey()
        # roi_im = cv2.cvtColor(roi_im, cv2.COLOR_RGB2GRAY)
        # split the roi into images defined by the bounding boxes
        images_for_nn = cls._char_images(roi_im, bboxes)

        try:
            characters = cls._read_characters(images_for_nn)
        except ValueError:
            characters = "OCR ERROR unable to read characters"

        return characters
Example #10
def constructMap(top=101239,
                 left=232243,
                 bottom=101240,
                 right=232244,
                 zoomlevel=18,
                 filename=None,
                 url_=None):
    lengthX = right - left + 1
    lengthY = bottom - top + 1
    rows = []
    for y in range(lengthY):
        row = []
        for x in range(lengthX):
            try:
                if url_ is not None:
                    result = getTilefromGSI(left + x,
                                            top + y,
                                            zoomlevel,
                                            url_=url_)
                else:
                    result = getTilefromGSI(left + x, top + y, zoomlevel)
                print("\r[{}]".format(
                    ('progress:	' + str(y * lengthX + x + 1) + '/' +
                     str(lengthX * lengthY)),
                    end=""))
                #print('progress:	'+str(y*lengthX + x+1)+'/'+str(lengthX*lengthY))
            except Exception as e:
                raise e
            row.append(result['img'])
        rows.append(row)
    map_ = cv2.vconcat([cv2.hconcat(im_list_h) for im_list_h in rows])

    if filename is not None:
        cv2.imwrite(filename, map_)
    else:
        cv2.imshow('img', map_)

    return map_
Example #11
def _detect_locale(item_rows: List[numpy.ndarray], locale: str) -> str:
    """Detects the right locale for the given items if required."""
    if locale != 'auto':
        # If locale is already specified, return as is.
        return locale

    # Sample a subset of the rows and convert to Pillow image.
    if len(item_rows) > 300:
        item_rows = random.sample(item_rows, 300)
    image = Image.fromarray(cv2.vconcat(item_rows))

    try:
        osd_data = typing.cast(Dict[str, str], pytesseract.image_to_osd(
            image, output_type=pytesseract.Output.DICT))
    except pytesseract.TesseractError:
        return 'en-us'

    possible_locales = SCRIPT_MAP.get(osd_data['script'])
    assert possible_locales, 'Failed to automatically detect language.'

    # If we can uniquely guess the language from the script, use that.
    if len(possible_locales) == 1:
        logging.info('Detected locale: %s', possible_locales[0])
        return possible_locales[0]

    # Otherwise, run OCR on the first few items and try to find the best matching locale.
    if len(item_rows) > 30:
        item_rows = random.sample(item_rows, 30)
    item_names = run_ocr(item_rows, lang='script/Latin')

    def match_score_func(locale):
        """Computes how many items match for a given locale."""
        item_db = _get_item_db(locale)
        return sum(name in item_db for name in item_names)

    best_locale = max(possible_locales, key=match_score_func)
    logging.info('Detected locale: %s', best_locale)
    return best_locale
Example #12
def randomScale(img, dist, d):
    # original img, distance between two lines, direction for scaling

    h, w, _ = img.shape
    x = w if d == 0 else h

    n = int(x / dist) - 1  # number of lines
    lines = [i * dist for i in range(n + 2)]  # initial idx of each line

    for i in range(1, len(lines) - 1):
        s, m, e = lines[i - 1], lines[i], lines[i + 1]  # start, middle, end lines
        plusminus = random.randrange(0, 2)  # 0 = plus, 1 = minus
        scale_range = random.randrange(int(dist / 4), int(dist / 2) + 1)  # number of pixels for scaling
        scale_range *= 1 if plusminus == 0 else -1

        # scaling
        if d == 0:
            firstB = cv2.resize(img[0:h, s:m], (m - s + scale_range, h))
            secondB = cv2.resize(img[0:h, m:e], (e - m - scale_range, h))
        else:
            firstB = cv2.resize(img[s:m, 0:w], (w, m - s + scale_range))
            secondB = cv2.resize(img[m:e, 0:w], (w, e - m - scale_range))

        # applying scaled part to original img
        if d == 0:
            temp = cv2.hconcat([firstB, secondB])
            img[0:h, s:e] = temp
        else:
            temp = cv2.vconcat([firstB, secondB])
            img[s:e, 0:w] = temp

    return img
Example #13
def stitch_image(img_to_write, columns):
    """Stitch list of images into a cohesive image

    Stitches across rows first.

    Args:
        img_to_write: The list of images to write
        columns: The number of columns
    """
    LOGGER.debug('Stitching images with sizes: %s', str(
        [np.shape(img) for img in img_to_write]))
    num_rows = int(math.ceil(len(img_to_write)/columns))
    rows = [None]*num_rows
    for r_idx in range(num_rows):
        if columns > 1:
            # TODO: make sure that images that are being horizontally
            #       concatenated have the same height (see the sketch after this example)
            rows[r_idx] = cv2.hconcat(img_to_write[
                r_idx*columns:
                min(r_idx*columns+columns, len(img_to_write))
            ])
        else:
            rows[r_idx] = img_to_write[r_idx]
    # Every row must have the same width before vconcat:
    # find the max width and pad narrower rows symmetrically.
    max_w = np.max([row.shape[1] for row in rows])
    for row_idx, _ in enumerate(rows):
        left_padding = int(np.floor((max_w - np.shape(rows[row_idx])[1])/2))
        right_padding = int(np.ceil((max_w - np.shape(rows[row_idx])[1])/2))
        if left_padding > 0 or right_padding > 0:
            rows[row_idx] = cv2.copyMakeBorder(
                rows[row_idx],
                0, 0,
                left_padding, right_padding,
                cv2.BORDER_CONSTANT
            )
    out_img = cv2.vconcat(rows)
    return out_img
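One way to satisfy the TODO above is to scale every image in a row to the row's smallest height before cv2.hconcat; a sketch under that assumption (helper name hypothetical):

import cv2

def hconcat_resize_min(img_list, interpolation=cv2.INTER_AREA):
    # scale each image to the smallest height in the row, preserving
    # aspect ratio, so cv2.hconcat accepts the whole list
    h_min = min(img.shape[0] for img in img_list)
    resized = [cv2.resize(img,
                          (int(img.shape[1] * h_min / img.shape[0]), h_min),
                          interpolation=interpolation)
               for img in img_list]
    return cv2.hconcat(resized)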
Example #14
    def create_combined_image(self, video):
        # get 9 images
        frames = []
        total_frames = video.get(cv2.CAP_PROP_FRAME_COUNT)
        for i in range(10, 100, 10):
            video.set(cv2.CAP_PROP_POS_FRAMES, floor(i / 100 * total_frames))
            ret, frame = video.read()
            # each frame is scaled to 640 px wide, so a 3x3 grid gives a 1920 px collage;
            # anything wider than 16:9 would need scaling by width (narrower aspect
            # ratios are unlikely, so there is no scaling by height); Qt has a hard
            # time with unusual aspect ratios
            resized_frame = self.image_resize(frame, width=640)
            frames.append(resized_frame)

        # horizontal concatenation
        h_img = []
        for i in range(3):
            h_img.append(
                cv2.hconcat(
                    [frames[i * 3 + 0], frames[i * 3 + 1], frames[i * 3 + 2]]))

        # vertical concatenation
        v_img = cv2.vconcat([h_img[0], h_img[1], h_img[2]])
        return v_img
Example #15
def initialize_board(initiative, start, end):
    board = shogi.Board()
    if initiative:
        sign_horizontal = cv2.imread('./images/sign_horizontal.png')
        sign_vertical = cv2.imread('./images/sign_vertical.png')
    else:
        sign_horizontal = cv2.imread('./images/sign_horizontal2.png')
        sign_vertical = cv2.imread('./images/sign_vertical2.png')

    # 1st image
    if start == 0:
        board0 = cv2.hconcat([
            cv2.imread('./images/komadai.png'),
            cv2.vconcat([sign_horizontal,
                         cv2.imread('./images/0.png')]), sign_vertical,
            cv2.imread('./images/komadai.png')
        ])
        cv2.imwrite('./img/0.png', board0)
        images = [board0]
    else:
        images = []

    return board, sign_horizontal, sign_vertical, images
Example #16
async def hehe(event):
    if not event.reply_to_msg_id:
        await event.edit("Reply to media")
        return
    await event.edit("```Processing...```")
    reply = await event.get_reply_message()
    pathh = await bot.download_media(reply.media, path)
    img = cv2.VideoCapture(pathh)
    ret, frame = img.read()
    flip = cv2.flip(frame, 1)
    up = cv2.rotate(flip, cv2.ROTATE_180)
    cv2.imwrite("cobra.jpg", frame)
    cv2.imwrite("danish.jpg", up)
    dark = cv2.imread("cobra.jpg")
    cobra = cv2.imread("danish.jpg")
    merge = cv2.vconcat([dark, cobra])
    cv2.imwrite('dark.jpg', merge)
    await event.client.send_file(event.chat_id, "dark.jpg" , reply_to=event.reply_to_msg_id) 
    await event.delete()
    shutil.rmtree(path)
    os.remove("danish.jpg")
    os.remove("dark.jpg")
    os.remove("cobra.jpg")
Example #17
    def on_btn_concat(self, event=None):
        current_filelist_relative = self.currentFilelistRelative()
        images = [cv2.imread(path) for path in current_filelist_relative]
        height, width, __ = images[0].shape

        concat = (cv2.vconcat(images) if width > height else cv2.hconcat(images))

        current_file_dir, current_file = os.path.split(self.current_file.get())
        simple_name, __ = os.path.splitext(current_file)

        newFileName = os.path.normpath(filedialog.asksaveasfilename(
            initialdir=current_file_dir,
            initialfile=f"{simple_name}_concat.jpg"
        ))

        if newFileName == ".":
            return

        logger.debug("concatinating '%s' to '%s' with method '%s'", current_filelist_relative, newFileName, concat)

        cv2.imwrite(newFileName, concat)
        self.duplicates[self.current_hash].append(newFileName)
        self.onHashSelect()
Example #18
def mosaic(image):
    divisions = 4
    order = np.arange(divisions**2)
    height, width, c = image.shape

    random.shuffle(order)

    tiles_list = []
    tile_height = int(height / divisions)
    tile_width = int(width / divisions)

    for tile in order:
        line = tile % divisions
        column = int(tile / divisions)

        tiles_list.append(
            image[(line * tile_height):((line * tile_height) + tile_height),
                  (column * tile_width):((column * tile_width) + tile_width)])

    imgs_lines_gen = chunks(tiles_list, divisions)

    return cv2.vconcat(
        [cv2.hconcat(imgs_line) for imgs_line in imgs_lines_gen])
Example #19
    def render(self):
        """
        Render images in an OpenCV window (has to be called on a regular basis)
        """
        if self._cv_image_actor is not None and self._cv_image_bird is not None:
            im_v = cv2.vconcat([self._cv_image_actor, self._cv_image_bird])
            cv2.circle(im_v, (900, 300), 80, (170, 170, 170), -1)
            speed = np.sqrt(self._actor.get_velocity().x**2 +
                            self._actor.get_velocity().y**2)

            text = str(int(round(speed * 3.6))) + " kph"
            text = ' ' * (7 - len(text)) + text
            im_v = cv2.putText(im_v, text, (830, 310),
                               cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 2,
                               cv2.LINE_AA)
            cv2.imshow("", im_v)
            cv2.waitKey(1)

            if self._video_writer:
                self._video.write(im_v)
Example #20
def save_output(image_name, pred, d_dir):

    predict = pred
    predict = predict.squeeze()
    predict_np = predict.cpu().data.numpy()
    predict_np = predict_np*255

    mask = cv2.cvtColor(predict_np, cv2.COLOR_GRAY2BGR).astype(np.uint8)
    orig_image = cv2.imread(image_name)
    mask = cv2.resize(
        mask, (orig_image.shape[1], orig_image.shape[0]), interpolation=cv2.INTER_LINEAR)

    masked_image = cv2.bitwise_and(orig_image, mask)
    masked_white_bg = cv2.bitwise_or(orig_image, 255-mask)

    img_tile = [[orig_image, mask],
                [masked_image, masked_white_bg]]

    img_tile = cv2.vconcat([cv2.hconcat(im_list_h) for im_list_h in img_tile])

    img_name = image_name.split("/")[-1].rsplit(".", 1)[0]
    cv2.imwrite(d_dir+img_name+'.png', img_tile)
Example #21
def getPathWay():
    try:
        shutil.rmtree('path')
        os.remove('velody.png')
    except OSError:
        pass
    try:
        os.mkdir('path')
    except OSError:
        pass

    log = request.json['log']
    rv.draw_reaction2(log)

    img1 = cv.imread('path/'+os.listdir('path')[0])
    for filename in os.listdir('path')[1:]:
        img2 = cv.imread('path/'+filename)
        img1 = cv.vconcat([img1, img2])
    cv.imwrite('tmp/velody.png', img1)
    return {'log':log}
Example #22
    def concat_VH(imgs: list, ver_num: int, hor_num: int) -> np.ndarray:
        """画像を縦横指定結合する

        Args:
            imgs (list[numpy.ndarray]): 分割画像リスト
            ver_num (int): 縦分割数
            hor_num (int): 横分割数

        Returns:
            numpy.ndarray: 結合画像
        """
        assert ver_num >= 1
        assert hor_num >= 1
        if hor_num == 1 and ver_num == 1:
            return imgs[0]

        rows = []
        for idx in range(ver_num):
            rows.append(
                cv2.hconcat(imgs[hor_num * idx:hor_num * idx + hor_num]))
        concat_img = cv2.vconcat(rows)

        return concat_img
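A usage sketch, assuming concat_VH is reachable as a plain function or staticmethod (dummy tiles, hypothetical sizes):

import numpy as np

# six 50x50 tiles listed row-major: 2 rows x 3 columns
tiles = [np.full((50, 50, 3), 40 * i, dtype=np.uint8) for i in range(6)]
joined = concat_VH(tiles, ver_num=2, hor_num=3)
assert joined.shape == (100, 150, 3)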
Example #23
def getSquareMatrixOfImages(imgList, path2pic=None):
    # path2pic for the placeholder images can be specified here too; default is None
    # the grid defaults to the nearest square larger than len(imgList)
    import math
    numberOfRows = math.ceil(math.sqrt(len(imgList)))
    numImagesToAppend = numberOfRows**2 - len(
        imgList)  #ie square matrix needs to be size of next perfect square
    for i in range(numImagesToAppend):
        imgList.append(makePlaceholderImage(path2pic))

    rowsOfImages = [None] * numberOfRows
    for k in range(numberOfRows):  #iter over list [0,1,2,3,..., numberOfRows]
        rowsOfImages[k] = cv2.hconcat(imgList[k * numberOfRows:(k + 1) *
                                              numberOfRows])

    #Now have a list of lists that contains all rows of images. So rowsOfImages[k] should all be horizontal row of 5 images
    #Recall we resize them all to same size, so in the 300x300 resizing we now have a width=1500, height=300 set of 5 images
    #Now we just vertically concatenate this to get a square image:

    finalSquareMatrixOfImages = cv2.vconcat(rowsOfImages)

    return finalSquareMatrixOfImages
Example #24
def put_lane_information(img, polynomial_fit_img, curvatures, position):
    info_img = np.copy(img)
    org_b_img = birds_eye_view(img)
    destination_point = np.float32([[MARGIN, 0], [img.shape[1] - MARGIN, 0],
                                    [MARGIN, img.shape[0]],
                                    [img.shape[1] - MARGIN, img.shape[0]]])
    inverse_t_mtx = cv2.getPerspectiveTransform(destination_point,
                                                SOURCE_POINT)
    original_view_line = cv2.warpPerspective(polynomial_fit_img, inverse_t_mtx,
                                             (img.shape[1], img.shape[0]))
    info_img = cv2.addWeighted(info_img, 1, original_view_line, 0.5, 0)
    left_c = round(curvatures[0], 2)
    cv2.putText(info_img, 'Left Lane Curve', (200, 50),
                cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255, 255, 0), 4)
    cv2.putText(info_img,
                str(left_c) + ' m', (200, 100), cv2.FONT_HERSHEY_SIMPLEX, 1.0,
                (255, 255, 0), 4)
    right_c = round(curvatures[1], 2)
    cv2.putText(info_img, 'Right Lane Curve', (880, 50),
                cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255, 255, 0), 4)
    cv2.putText(info_img,
                str(right_c) + ' m', (880, 100), cv2.FONT_HERSHEY_SIMPLEX, 1.0,
                (255, 255, 0), 4)
    position = round(position, 2)
    cv2.putText(info_img, 'Center Offset m', (550, 50),
                cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255, 255, 0), 4)
    cv2.putText(info_img,
                str(position) + ' m', (550, 100), cv2.FONT_HERSHEY_SIMPLEX,
                1.0, (255, 255, 0), 4)
    org_b_img = cv2.resize(org_b_img,
                           (org_b_img.shape[1] // 2, org_b_img.shape[0] // 2))
    polynomial_fit_img = cv2.resize(
        polynomial_fit_img,
        (polynomial_fit_img.shape[1] // 2, polynomial_fit_img.shape[0] // 2))
    debug_info_img = cv2.hconcat([org_b_img, polynomial_fit_img])
    info_img = cv2.vconcat([debug_info_img, info_img])
    return info_img
Example #25
    def concatinate_emojis_and_save_image(self, nearest_emoji_name_list,
                                          similarity, save_dir,
                                          target_file_name):
        '''
        Concatenate emojis into a single image.
        '''
        print(len(nearest_emoji_name_list), self.raws)
        print(len(nearest_emoji_name_list[0]), self.column)

        vertical_imgs = []

        for h in range(self.raws):
            horison_imgs = []

            for w in range(self.column):
                img = cv2.imread('{}/{}'.format(self.whiten_emoji_path,
                                                nearest_emoji_name_list[h][w]))
                horison_imgs.append(img)

            im_v = cv2.hconcat(horison_imgs)

            vertical_imgs.append(im_v)

        converted_img = cv2.vconcat(vertical_imgs)
        target_name, target_ext = os.path.splitext(target_file_name)

        x = self.convolution_resolution * self.column
        y = self.convolution_resolution * self.raws

        resized_converted_img = cv2.resize(converted_img,
                                           (1080, int(1080 * y / x)))
        _conversion = "{0:04d}".format(int(self.conversion * 10000))
        cv2.imwrite(
            '{}/{}_step_{}_sim_{}_c_{}{}'.format(save_dir, target_name,
                                                 self.hash_step, similarity,
                                                 _conversion, target_ext),
            resized_converted_img)
Example #26
def MakeCollage(framechange_array, frames_jpg_path, collage_path):
    # creates a collage of the shots in a video; the collage shows shot # and frame #
    # important - top.jpg must be in the folder path, and it has to be exactly 2240px wide
    # take the frame one forward of the shot change
    offset = 1
    # start with a blank image that is the same width (2240px) as 7 frames
    im_v = cv2.imread('top.jpg')
    # make a collage that is 7 frames wide
    for x in range(0, len(framechange_array) - 7, 7):
        row = []
        for k in range(7):
            im = cv2.imread(frames_jpg_path + 'frame' +
                            str(framechange_array[x + k] + offset) + '.jpg')
            # add the shot number (red) and frame number (green) to each image
            cv2.putText(im, str(x + k), (10, 50), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 0, 255), 4)
            cv2.putText(im, str(framechange_array[x + k]), (120, 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 4)
            row.append(im)
        # build the collage
        im_h = cv2.hconcat(row)
        im_v = cv2.vconcat([im_v, im_h])
    cv2.imwrite(collage_path, im_v)
Example #27
    def _Imagesave(self, output, saveQueue):
        if self.video:
            videoName = '{}/{}.avi'.format(output, self.videoName)
            fourcc = cv2.VideoWriter_fourcc(*'MJPG')
            vout = cv2.VideoWriter(videoName, fourcc, self.fps, self.videoSize)
        time.sleep(5)
        while True:
            if saveQueue.empty() and self.finish_signal:
                break
            if not saveQueue.empty():
                try:
                    image_name, image_source, image_pred = saveQueue.get()
                    pred_lbl = np.array(_mask[image_pred[0]], np.uint8)
                    blend = np.bitwise_or(image_source, pred_lbl)
                    show_image = cv2.vconcat([image_source, pred_lbl, blend])
                    if self.video:
                        video_image = cv2.resize(show_image, self.videoSize)

                    if self.video and self.image:
                        vout.write(video_image)
                        cv2.imwrite("{}/{}".format(output, image_name),
                                    show_image)
                    elif self.video:
                        vout.write(video_image)
                    elif self.image:
                        cv2.imwrite("{}/{}".format(output, image_name),
                                    show_image)
                    else:
                        pass
                except Exception as e:
                    print("save failed:", e)
                    break
        if self.video:
            vout.release()
        self.finish_signal = 1
        print("save done")
Example #28
async def ultd(event):
    ureply = await event.get_reply_message()
    xx = await eor(event, "`...`")
    if not (ureply and (ureply.media)):
        await xx.edit("`Reply to any media`")
        return
    ultt = await ureply.download_media()
    if ultt.endswith(".tgs"):
        await xx.edit("`Ooo Animated Sticker 👀...`")
        cmd = ["lottie_convert.py", ultt, "ult.png"]
        file = "ult.png"
        process = await asyncio.create_subprocess_exec(
            *cmd,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE)
        stdout, stderr = await process.communicate()
        stderr.decode().strip()
        stdout.decode().strip()
    else:
        await xx.edit("`Processing...`")
        img = cv2.VideoCapture(ultt)
        heh, lol = img.read()
        cv2.imwrite("ult.png", lol)
        file = "ult.png"
    ult = cv2.imread(file)
    trn = cv2.flip(ult, 1)
    ish = cv2.rotate(trn, cv2.ROTATE_180)
    ultroid = cv2.vconcat([ult, ish])
    cv2.imwrite("ult.jpg", ultroid)
    await event.client.send_file(event.chat_id,
                                 "ult.jpg",
                                 force_document=False,
                                 reply_to=event.reply_to_msg_id)
    await xx.delete()
    os.remove("ult.png")
    os.remove("ult.jpg")
    os.remove(ultt)
Example #29
    def concat_first_four_slices_and_resize(self, width, height):
        logging.debug('start concatenating image, joint type: %s',
                      self.defect_name)
        if not self.is_square:
            logging.debug('joint roi is rectangular, concatenation canceled')
            return None, None

        #  check whether the first 4 slices are available
        if all(k in self.slice_dict for k in (0, 1, 2, 3)):
            slice_0 = cv2.imread(self.slice_dict[0])
            slice_1 = cv2.imread(self.slice_dict[1])
            slice_2 = cv2.imread(self.slice_dict[2])
            slice_3 = cv2.imread(self.slice_dict[3])

            slice_0_roi = slice_0[self.y_min:self.y_max, self.x_min:self.x_max]
            slice_1_roi = slice_1[self.y_min:self.y_max, self.x_min:self.x_max]
            slice_2_roi = slice_2[self.y_min:self.y_max, self.x_min:self.x_max]
            slice_3_roi = slice_3[self.y_min:self.y_max, self.x_min:self.x_max]

            im_h1 = cv2.hconcat([slice_0_roi, slice_1_roi])
            im_h2 = cv2.hconcat([slice_2_roi, slice_3_roi])

            im_concat = cv2.vconcat([im_h1, im_h2])
            if im_concat is None:
                logging.error('im_concat is none, skipping concatenation')
                return None, None

            resized_image = cv2.resize(im_concat, (width, height),
                                       interpolation=cv2.INTER_AREA)
            logging.debug('First 4 slices available, concatenation done')
            return resized_image, self.defect_name

        else:
            logging.error(
                'First 4 slices not available, canceling concatenation')
            return None, None
Example #30
def join_vids(path_in1, path_in2, path_out, horiz=True, padding=False):

    dir_out = os.path.dirname(path_out)

    print(path_out)
    if os.path.exists(path_out):
        os.remove(path_out)

    video_clip1 = cv2.VideoCapture(path_in1)
    video_clip2 = cv2.VideoCapture(path_in2)

    fps = int(video_clip1.get(cv2.CAP_PROP_FPS))

    i = 0
    while video_clip1.isOpened():
        #print(i)
        ret1, img1 = video_clip1.read()
        ret2, img2 = video_clip2.read()
        if not ret1 or not ret2:
            break

        if padding:
            # to avoid a thicker line in the middle
            img2[:, 0:-1] = img2[:, 1:]

        output = cv2.hconcat([img1, img2]) if horiz else cv2.vconcat(
            [img1, img2])
        cv2.imwrite(os.path.join(dir_out, "im%04d.png" % (i)), output)
        i += 1


    video_clip1.release()
    video_clip2.release()
    # frames were written to dir_out above, so point ffmpeg there
    os.system("ffmpeg -r %d -i %s %s -r %d %s" %
              (fps, os.path.join(dir_out, "im%04d.png"), FFMPEGOPTIONS, fps, path_out))
    files = glob.glob(os.path.join(dir_out, "*.png"))
    for f in files:
        os.remove(f)
Example #31
def video_frame(ncams):
    """
    Concat different frames to make a displaying image
    :return: image to be displayed on UI
    """
    try:
        global Frames_list
        if len(Frames_list) % 2 == 1:
            img = np.zeros((720, 1280, 3))
            img[img == 0] = 255
            img = img.astype('uint8')
            Frames_list.append(img)
        final_img = cv2.hconcat([Frames_list[0], Frames_list[1]])
        i = 2
        while i < len(Frames_list):
            image = cv2.hconcat([Frames_list[i], Frames_list[i + 1]])
            final_img = cv2.vconcat([final_img, image])
            i += 2
        final_img = cv2.cvtColor(final_img, cv2.COLOR_BGR2RGB)
        screensize = int((ncams + 1) / 2)
        #final_img = cv2.resize(final_img, (screensize * 640, screensize * 360))
        return final_img
    except Exception:
        pass
Example #32
def Build_from_images(image, scale, dataset):
    x, y = math.floor(image.shape[1] / scale), math.floor(image.shape[0] / scale)

    result = None
    row = None
    for i in range(y):
        for j in range(x):
            img = image[i * scale:(i + 1) * scale, j * scale:(j + 1) * scale]
            tile = cv.resize(cv.imread(dataset + find_color(assign_color(img), dataset)),
                             (scale, scale))
            if row is None:
                row = tile
            else:
                row = cv.hconcat([row, tile])
        if row is not None:
            if result is None:
                result = row
            else:
                result = cv.vconcat([result, row])
            row = None

    return result
Example #33
def main(video_path, tau=15, threshold=50, auto_play=False):
    categories = ['running', 'jogging', 'walking',
                'handclapping', 'handwaving', 'boxing']

    cap = cv2.VideoCapture(video_path)
    ret, frame = cap.read()
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    tracked_frame = frame.copy()
    mhi = np.zeros(frame.shape)
    mhi_norm = mhi.copy().astype(np.uint8)

    # Start go through the video
    while ret:
        cv2.namedWindow('demo', cv2.WINDOW_NORMAL)
        cv2.resizeWindow('demo', 321, 481)
        enlarge_f = cv2.resize(tracked_frame, (320, 240), interpolation=cv2.INTER_NEAREST)
        # enlarge_f = cv2.resize(frame, (320, 240), interpolation=cv2.INTER_NEAREST)
        enlarge_m = cv2.resize(mhi_norm, (320, 240), interpolation=cv2.INTER_NEAREST)
        combine = cv2.vconcat((enlarge_f, enlarge_m))
        cv2.imshow('demo', combine)
        cv2.waitKey(auto_play)
        ret, frame_next = cap.read()
        if ret:
            frame_next = cv2.cvtColor(frame_next, cv2.COLOR_BGR2GRAY)
            diff = compute_diff(frame, frame_next)
            mhi[diff > threshold] = tau
            mhi[(diff <= threshold)*(mhi > 0)] -= 1
            mhi_norm = (mhi.copy() * 255 / tau).astype(np.uint8)
            frame = frame_next.copy()

            category_ind, prob = mhi_matching(classifier, mhi_norm)
            # if prob > 0.1:
            action = categories[category_ind]
            tracked_frame = draw_contour(mhi, frame.copy(), action, prob)
        else:
            cv2.destroyAllWindows()
Example #34
    def concat_first_four_slices_2d(self):
        logging.debug('start concatenating image, joint type: %s', self.defect_name)

        if not self.is_square():
            logging.debug('joint roi is rectangular, canceling concatenation')
            return None, None

        if len(self.slice_dict.keys()) < 4:
            logging.error('Number of slices < 4, canceling concatenation')
            return None, None

        if len(self.slice_dict.keys()) > 4:
            logging.error('Number of slices > 4, canceling concatenation')
            return None, None

        slices_list = [None, None, None, None]
        for slice_id in range(4):
            #  check whether 1st 4 slices are available
            if slice_id not in self.slice_dict.keys():
                logging.error('First 4 slices not available, canceling concatenation')
                return None, None

            img = cv2.imread(self.slice_dict[slice_id])
            img_roi = img[self.y_min:self.y_max, self.x_min:self.x_max]
            slices_list[slice_id] = img_roi

        im_h1 = cv2.hconcat(slices_list[0:2])
        im_h2 = cv2.hconcat(slices_list[2:4])
        im_concat = cv2.vconcat([im_h1, im_h2])

        if im_concat is None:
            logging.error('Error occurred in OpenCV ROI concat, result is None, skipping concatenation')
            return None, None
        resized_image = cv2.resize(im_concat, (128, 128), interpolation=cv2.INTER_AREA)
        logging.debug('First 4 slices available, concatenation done')
        return resized_image, self.defect_name
Example #35
    def _get_multi_frame_image(self, video_file, frame_pos):
        # early return: the 2x2 multi-frame grid below is currently unreachable
        return video_file.get_frame_by_frame_pos(frame_pos)

        himg = None
        vimg = None
        for i in range(4):
            frame = video_file.get_frame_by_frame_pos(frame_pos-3+i)
            if frame is None:
                resized_img = np.tile(np.uint8([127]), (227, 227, 1))
            else:
                resized_img = cv2.resize(frame, (227, 227))

            if himg is None:
                himg = resized_img
            else:
                himg = cv2.hconcat([himg, resized_img])
                if vimg is None:
                    vimg = himg.copy()
                else:
                    himg_copy = himg.copy()
                    vimg = cv2.vconcat([vimg, himg_copy])
                himg = None

        return vimg
Example #36
    # normalize to the 0..1 range
    normalized_val = 2**(4 * img.dtype.num) - 1
    img = img / normalized_val

    # resize the image to half size
    image_width  = img.shape[1]//2
    image_height = img.shape[0]//2
    img_resize_half = cv2.resize(img, (image_width, image_height))

    # extract the half-level, below-64 (limited black) and above-940 (superwhite) views
    img_half_level  = gamma_func(img_resize_half)
    img_black_area  = view_limited_black(img_resize_half)
    img_super_white = view_superwhite(img_resize_half)

    # concatenate the views into a single image
    img_vcat1 = cv2.vconcat([img_resize_half, img_half_level])
    img_vcat2 = cv2.vconcat([img_black_area, img_super_white])
    img_hcat  = cv2.hconcat([img_vcat1, img_vcat2])

    # preview the image
    cv2.imshow('bbb.tif', img_hcat)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

    # convert 0..1 -> 0..65535 for output
    out_img = img_hcat * normalized_val_uint16
    out_img = np.uint16(out_img)

    # save
    cv2.imwrite('out.tiff', out_img)

Example #37
def filter_image(frame):
    blurred = cv2.GaussianBlur(frame,(9,9),0)

    hsv= cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)

    lower_orange=np.array([0,170,0])
    upper_orange=np.array([15,255,255])
    mask = cv2.inRange(hsv,lower_orange,upper_orange)

    lower_white = np.array([0, 0, 210])
    upper_white = np.array([255, 210, 255])
    white_mask = cv2.inRange(hsv, lower_white, upper_white)
    white_edges = cv2.Canny(white_mask, 10, 30, 3)

    hsvMask = mask.copy()
 
    # white_points = cv2.findNonZero(mask)
    # edges = cv2.Canny(mask, 100, 200, 3)
    # contours,hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    rectangle_list = []
    rectangle_map = {}

    # for cnt in contours:
    #     x,y,w,h = cv2.boundingRect(cnt)
    #     cv2.rectangle(res,(x,y),(x+w,y+h),(0,0,0),-1)

    # mask = cv2.cvtColor(res, cv2.COLOR_RGB2GRAY)

    # res[mask = 255] = [0, 255, 0]
    
    structuringElement = cv2.getStructuringElement(cv2.MORPH_RECT, (70, 70))
    structuringElement2 = cv2.getStructuringElement(cv2.MORPH_RECT, (50, 50))

    lineElement = cv2.getStructuringElement(cv2.MORPH_RECT, (1, 90))
    mask = cv2.morphologyEx( mask, cv2.MORPH_CLOSE, lineElement)
    white_edges = cv2.bitwise_and(white_edges, white_edges, mask=~mask)
    
    laneLines= cv2.HoughLinesP(white_edges, rho=1, theta=np.pi/360, threshold=100, minLineLength=60, maxLineGap=25)

    # lines = cv2.HoughLinesP(mask, rho=1, theta=np.pi / 180, threshold=100, minLineLength=150, maxLineGap=100)

    # mask = cv2.morphologyEx( mask, cv2.MORPH_CLOSE, structuringElement )

    res = cv2.bitwise_and(frame, frame, mask=~mask)
    
    if laneLines is not None:
        for line in laneLines:
            x1, y1, x2, y2 = line[0]
            cv2.line(mask, (x1, y1), (x2, y2), (255, 255, 255), 4)

    
    mask = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR)
    hsvMask = cv2.cvtColor(white_edges, cv2.COLOR_GRAY2BGR)

    side_comp = cv2.vconcat([res, mask, hsvMask, frame])
    height, width, _ = side_comp.shape
    side_comp_res = cv2.resize(side_comp, (width // 2, height // 2))  # cv2.resize expects (width, height)


    return side_comp_res
Example #38
    def draw_field(self):
        # draw the field
        img_src = self.field.draw()
        # draw the player
        self.player.draw(img_src)

        # movement amounts
        # distance to translate along the x axis
        #mx = self.player.x - SCR_RECT.centerx
        dist = math.sqrt((float(self.startx) - self.player.x)**2 + (float(self.starty) - self.player.y)**2)
        #print("sx:%f" %self.startx + " sy:%f" %self.starty + " x:%f" %self.player.x + " y:%f" %self.player.y + " dist:%f" %dist)
        # distance to translate along the y axis
        #my = self.player.y - SCR_RECT.centery

        mx = float(self.startx) + dist - float(SCR_RECT.centerx)
        my = float(self.starty) - float(SCR_RECT.centery)
        #print("mx:%f" %mx + " my:%f" %my)

        x = self.player.x - float(self.startx)
        y = float(self.starty) - self.player.y
        rad = math.atan2(y, x)
        #print("radian:%f" %rad)
        angle = rad * 180.0 / math.pi
        #print("angle:%f" %angle)
        
        # compute the rotation matrix
        rotation_matrix = cv2.getRotationMatrix2D((self.startx,self.starty) , -angle, 1.0)
        # affine transform
        img_rot = cv2.warpAffine(img_src, rotation_matrix, SCR_RECT.size, flags=cv2.INTER_CUBIC)
        scipy.misc.imsave('rotation.png', img_rot)
        
        #print("player:x:%03d" %self.player.x + ":y:%03d" %self.player.y)
        #print("start:x:%03d" %self.startx + ":y:%03d" %self.starty)
        #print("moved:x:%03d" %mx + ":y:%03d" %my)
        #rad = math.atan((self.player.y - SCR_RECT.centery) / (self.player.x - SCR_RECT.centerx))
        #print("angle:%05f" %angle)
                        
        # define the translation matrix (rotation angle is 0)
        matrix = [
                [np.cos(0),  -1 * np.sin(0), -mx],
                [np.sin(0),   np.cos(0), -my]
            ]
        # affine transform
        affine_matrix = np.float32(matrix)
        img_afn = cv2.warpAffine(img_rot, affine_matrix, SCR_RECT.size, flags=cv2.INTER_LINEAR)
                
        #print(img_src.shape)
        #print(img_afn.shape)
        
        # rotate 90 degrees
        img_src = np.rot90(img_src)
        img_afn = np.rot90(img_afn)
        # flip upside down
        img_src = np.flipud(img_src)
        img_afn = np.flipud(img_afn)
        # convert BGR to RGB
        img_src = cv2.cvtColor(img_src,cv2.COLOR_BGR2RGB)
        img_afn = cv2.cvtColor(img_afn,cv2.COLOR_BGR2RGB)
                
        self.img_right[55:55+ST_RECT.width, 42:42+ST_RECT.height] = img_afn[55:55+ST_RECT.width, 42:42+ST_RECT.height]
        self.img_state = img_afn[55:55+ST_RECT.width, 42:42+ST_RECT.height]

        # stack the two views vertically
        img_screen = cv2.vconcat([img_src, self.img_right])
        #print(img_screen.shape)
        # dividing line in the middle
        cv2.line(img_screen,(0,SCR_RECT.width),(SCR_RECT.height,SCR_RECT.width),(255,0,0),2)

        #scipy.misc.imsave('screen.png', img_screen)
        pygame.surfarray.blit_array(self.screen,img_screen)
        pygame.display.update()

        self.img_state = cv2.cvtColor(self.img_state, cv2.COLOR_RGB2GRAY)
Example #39
#                     print("shape", gabor_image.shape)
                     gabor_image = cv2.resize(gabor_image, dsize=(100, 100))
                     print(gabor_image.max())
                     analyzed_images.append(gabor_image)
                     kernels.append(kernel.get_kernel())

                 former_image = cv2.hconcat(analyzed_images)
                 print(former_image.shape)

                 large_image_list.append(former_image)

                 former_kernel = cv2.hconcat(kernels)
                 print(former_kernel.shape)
                 kernel_large_list.append(former_kernel)


             combined_image = cv2.vconcat(large_image_list)
             combined_kernels = cv2.vconcat(kernel_large_list)

#            combined_image = np.array(combined_image, np.uint8)
#            cv2.normalize(combined_image, combined_image, 0, 255, cv2.NORM_MINMAX)
             gabor_image = np.array(gabor_image, dtype=np.uint8)
             cv2.imshow('gabor_image', combined_image*2. + 0.5)

             cv2.imshow('gabor_kernels', combined_kernels)
             cv2.imwrite(input_image[:-4] + "_gabor.png", (combined_image*2. + 0.5) * 100)
             cv2.imwrite("gabor_kernels.png", np.array(combined_kernels*255, np.uint8))

             k = cv2.waitKey(0)
             break
        cv2.destroyAllWindows()
Example #40
# -*- coding: utf-8 -*-
import cv2

img1 = cv2.imread('image-1.jpg')
img2 = cv2.imread('image-2.jpg')
img3 = cv2.imread('image-3.jpg')
img4 = cv2.imread('image-4.jpg')

img5 = cv2.vconcat([img1, img2])
img6 = cv2.vconcat([img3, img4])
img7 = cv2.hconcat([img5, img6])
cv2.imwrite('output.jpg', img7)
Example #41
def concat_tile(im_list_2d):
    return cv2.vconcat([cv2.hconcat(im_list_h) for im_list_h in im_list_2d])
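A usage sketch for concat_tile with hypothetical placeholder images (the output file name is illustrative):

import cv2
import numpy as np

im = np.zeros((120, 160, 3), dtype=np.uint8)  # hypothetical placeholder image
tile = concat_tile([[im, im, im],
                    [im, im, im]])  # 2 rows x 3 columns, row-major
assert tile.shape == (240, 480, 3)
cv2.imwrite('opencv_concat_tile.jpg', tile)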