Example #1
 def determineIslandShape(self, subIslandImg):
     ml = ANN()
     sampleVec = []
     sample = subIslandImg.copy()
     sample *= 255
     sample = ml.prepareImage(sample,ml.getSize())
     sampleVec.append(sample)
 
     results = ml.runANN2(sampleVec)
     results2 = ml.runANN2b(sampleVec,3) #Fused-Donut NN3
     results = cv2.hconcat([results, results2])  # the Python binding takes a list of arrays
     self.NN_Results = results
     max_elem = float(results.max())
     labelNum = int(results.argmax())
     thresh = 0.0
     if(max_elem<thresh):
         labelNum = ml.getShapeIndex2("Default")
     shapeName = ml.getShapeName2(labelNum)
     if(labelNum==0 or labelNum==1):
         results = ml.runANN2b(sampleVec,labelNum)
         if(results[0,0]>0.0):
             shapeName = "Comp-" + shapeName;
         else:
             shapeName = "Incomp-" + shapeName
     labelNum = ml.getShapeIndex(shapeName)
     self.NN_Score = max_elem
     self.islShape = labelNum
     self.islShapeName = shapeName
     self.nn_prepared_img = sample
    def createImage(self, gabor_response):
        large_image_list = []
        for image_list in gabor_response:
            large_image_list.append(cv2.hconcat( image_list ))
#            print(cv2.hconcat(image_list).shape)
        combined_image = cv2.vconcat(large_image_list)
        return combined_image
Example #3
def main():
    names = ['tap', 'right', 'left', 'long_start', 'long_end']
    types = ['co', 'cu', 'pa', 'all']
    originals = [[cv2.imread('./templates/' + name + '_' + type + '.png') for name in names] for type in types]
    gray_scaled_imgs = [[convert_color_to_gray(img) for img in original] for original in originals]
    binary_imgs = [[convert_gray_to_binary(img) for img in grays] for grays in gray_scaled_imgs]

    def get_concat_img(src):
        output = None
        for v in src:
            tmp = cv2.vconcat(v)
            if output is None:
                output = tmp
            else:
                output = cv2.hconcat([output, tmp])
        return output

    # output_img = cv2.hconcat([get_concat_img(original), get_concat_img(filterd)])
    output_img = cv2.hconcat([get_concat_img(gray_scaled_imgs), get_concat_img(binary_imgs)])
    # output_img = get_concat_img(binary_imgs)
    # output_image = cv2.vconcat([convert_color_to_binary(image) for image in template_images_data.values()])

    # note_scraper = NoteScraper()
    cv2.imshow('capture', output_img)
    cv2.waitKey(0)
Example #4
 def get_concat_img(src):
     output = None
     for v in src:
         tmp = cv2.vconcat(v)
         if output is None:
             output = tmp
         else:
             output = cv2.hconcat([output, tmp])
     return output
Example #5
from math import ceil, cos, pi, sin

import cv2
import numpy as np


def KBT_func(image: np.ndarray) -> np.ndarray:
    w, h, _ = image.shape
    if w != h:
        v = max((w - h) / 2, 0)
        h = max((h - w) / 2, 0)
        padding = (
            (int(round(h)), int(ceil(h))),
            (int(round(v)), int(ceil(v))),
            (0, 0)
        )
        #print(padding)
        image = np.pad(image, padding, mode='constant')
    #print(image.shape)
    r2, _, p = image.shape
    w = int(pi * r2 / 10)
    h = int(r2 / 2)
    ANGLE = 36

    # coordinates used to cut the image out as a circular sector
    coordinates = []
    for r in range(h):
        length = int(2 * pi * r * ANGLE / 360)
        if length == 0:
            continue
        angle = 0
        row = []
        while angle < ANGLE:
            angle += ANGLE / length
            row.append(
                (
                    round(cos((252.0 + angle) / 360 * pi * 2) * r) + int(w / 2),
                    -round(sin((252.0 + angle) / 360 * pi * 2) * r)
                )
            )
        coordinates.append(row)

    # take the median row width
    width = int(np.median([len(m) for m in coordinates]))

    # loop: crop a sector, warp it into a rectangle and append it, then rotate the image
    output = None
    for angle in range(0, 360, ANGLE):
        M = cv2.getRotationMatrix2D((r2 / 2, r2 / 2), angle, 1)
        rotate = cv2.warpAffine(image, M, (r2, r2))
        crop = rotate[h:, int(r2 / 2 - w / 2): int(r2 / 2 - w / 2 + w)]
        piece = []
        for m in coordinates:
            row = np.array([crop[y, x] for x, y in m])
            row = cv2.resize(row, (3, width))
            piece.append(row)
        piece = np.array(piece)
        if output is None:
            output = piece
        else:
            output = cv2.hconcat([piece, output])

    return output
def gammaCorrection():
    ## [changing-contrast-brightness-gamma-correction]
    lookUpTable = np.empty((1,256), np.uint8)
    for i in range(256):
        lookUpTable[0,i] = np.clip(pow(i / 255.0, gamma) * 255.0, 0, 255)

    res = cv.LUT(img_original, lookUpTable)
    ## [changing-contrast-brightness-gamma-correction]

    img_gamma_corrected = cv.hconcat([img_original, res])
    cv.imshow("Gamma correction", img_gamma_corrected)
Example #7
def get_tile_image(imgs, tile_shape=None):
    # import should be here to avoid import error on server
    # caused by matplotlib's backend
    import matplotlib.pyplot as plt  # noqa

    def get_tile_shape(img_num):
        x_num = 0
        y_num = int(math.sqrt(img_num))
        while x_num * y_num < img_num:
            x_num += 1
        return x_num, y_num

    if tile_shape is None:
        tile_shape = get_tile_shape(len(imgs))

    img_rgb_list = []
    for img in imgs:
        img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        img_rgb_list.append(img_rgb)
    # check if all the resolution is same
    x_num, y_num = tile_shape
    if all(img.shape == imgs[0].shape for img in imgs[1:]):
        # rospy.loginfo("all the size same images")
        concatenated_image = None
        for y in range(y_num):
            row_image = None
            for x in range(x_num):
                i = x + y * x_num
                if i >= len(imgs):
                    img = np.zeros(imgs[0].shape, dtype=np.uint8)
                else:
                    img = imgs[i]
                if row_image is None:
                    row_image = img
                else:
                    row_image = cv2.hconcat([row_image, img])
            if concatenated_image is None:
                concatenated_image = row_image
            else:
                concatenated_image = cv2.vconcat([concatenated_image, row_image])
        return concatenated_image
    else:
        for i, img_rgb in enumerate(img_rgb_list):
            plt.subplot(y_num, x_num, i + 1)
            plt.axis("off")
            plt.imshow(img_rgb)
        canvas = plt.get_current_fig_manager().canvas
        canvas.draw()
        pil_img = PIL.Image.frombytes("RGB", canvas.get_width_height(), canvas.tostring_rgb())
        out_rgb = np.array(pil_img)
        out_bgr = cv2.cvtColor(out_rgb, cv2.COLOR_RGB2BGR)
        plt.close()
        return out_bgr
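
A quick usage sketch for get_tile_image (the file names are placeholders; all frames are assumed to be same-sized BGR images, so the fast hconcat/vconcat path is taken):

import cv2

frames = [cv2.imread(p) for p in ('a.png', 'b.png', 'c.png', 'd.png')]  # placeholder files
tile = get_tile_image(frames)  # picks a near-square tile_shape automatically
cv2.imwrite('tile.png', tile)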
 def inspect_kernel(self, time):
     gabor_large_list = []
     for scale, kernel_listn in self.gabor_jet:                
         analyzed_images = []
         for kernel in kernel_listn:
             gabor_image = kernel.get_kernel()
             analyzed_images.append(gabor_image)
         gabor_images = cv2.hconcat(analyzed_images)
         gabor_large_list.append(gabor_images)
     image_to_show = cv2.vconcat(gabor_large_list)
     cv2.imshow("Gabor Kernels", image_to_show)
     cv2.waitKey(time)
     return image_to_show
Example #9
def plot_average_color(paths):

    for path in paths:
        # pixelate
        img = cv2.imread(path, cv2.IMREAD_UNCHANGED)
        img = pixelate(img, 10)

        aveColor = average_color(img)
        aveColor = cv2.resize(aveColor, (img.shape[1], img.shape[0]))
        img = cv2.hconcat([img, aveColor])

        exportPath = os.path.join('average', os.path.basename(path))
        cv2.imwrite(exportPath, img)

    cmd = 'montage average/*.png -tile 8x -geometry 50x25+0+0 average/export.jpg'
    result = subprocess.call(cmd, shell=True)
Example #10
def detect2():
    row_image = cv2.imread("miku.jpg", 1)
    gray_image = cv2.imread("miku.jpg", 1)

    print(row_image.shape)
    print(gray_image.shape)
    image = cv2.hconcat([row_image, gray_image])

    cascade = cv2.CascadeClassifier("lbpcascade_animeface.xml")

    detected_face = cascade.detectMultiScale(image, 1.1, 3)

    for (x, y, w, h) in detected_face:
        cv2.rectangle(image, (x, y), (x + w, y + h), (0, 50, 255), 3)

    cv2.imshow("Show Image", image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
def get_head_pose(shape):
    image_pts = np.float32([shape[17], shape[21], shape[22], shape[26], shape[36],
                            shape[39], shape[42], shape[45], shape[31], shape[35],
                            shape[48], shape[54], shape[57], shape[8]])

    _, rotation_vec, translation_vec = cv2.solvePnP(object_pts, image_pts, cam_matrix, dist_coeffs)

    reprojectdst, _ = cv2.projectPoints(reprojectsrc, rotation_vec, translation_vec, cam_matrix,
                                        dist_coeffs)

    reprojectdst = tuple(map(tuple, reprojectdst.reshape(8, 2)))

    # calc euler angle
    rotation_mat, _ = cv2.Rodrigues(rotation_vec)
    pose_mat = cv2.hconcat((rotation_mat, translation_vec))
    _, _, _, _, _, _, euler_angle = cv2.decomposeProjectionMatrix(pose_mat)

    return reprojectdst, euler_angle
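
The last few lines above are a general recipe for turning a solvePnP result into Euler angles; a minimal standalone sketch of just that conversion (the rotation and translation values are arbitrary placeholders):

import cv2
import numpy as np

rvec = np.array([[0.1], [0.2], [0.0]])        # arbitrary rotation vector
tvec = np.array([[0.0], [0.0], [10.0]])       # arbitrary translation
rotation_mat, _ = cv2.Rodrigues(rvec)         # 3x3 rotation matrix
pose_mat = cv2.hconcat((rotation_mat, tvec))  # 3x4 [R | t] matrix
euler_angle = cv2.decomposeProjectionMatrix(pose_mat)[-1]
print(euler_angle.flatten())                  # pitch, yaw, roll in degrees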
Example #12
def plot_dictance_color(paths):
    templates = images_to_average_colors(paths)
    templates = templates[:, 0:3]  # BGRA to BGR
    useHsv = False

    for path in paths:
        img = cv2.imread(path, cv2.IMREAD_UNCHANGED)
        imgArr = get_validity_pixels(pixelate(img, 10))
        # Export plot to tmp image
        plot_image_distance(imgArr, templates, 'tmp.png', useHsv)
        figImg = cv2.imread('tmp.png', cv2.IMREAD_UNCHANGED)
        # Resize and export
        img = cv2.resize(img, (600, 600))
        exportPath = os.path.join('plot', os.path.basename(path))
        cv2.imwrite(exportPath,
                    cv2.hconcat([img, figImg]))

    cmd = 'montage plot/*.png -tile 8x -geometry 800x300+0+0 plot/export.jpg'
    result = subprocess.call(cmd, shell=True)
Example #13
    def _get_multi_frame_image(self, video_file, frame_pos):
        # Early return: the 2x2 frame tiling below is currently disabled.
        return video_file.get_frame_by_frame_pos(frame_pos)

        himg = None
        vimg = None
        for i in range(4):
            frame = video_file.get_frame_by_frame_pos(frame_pos-3+i)
            if frame is None:
                resized_img = np.tile(np.uint8([127]), (227, 227, 1))
            else:
                resized_img = cv2.resize(frame, (227, 227))

            if himg is None:
                himg = resized_img
            else:
                himg = cv2.hconcat([himg, resized_img])
                if vimg is None:
                    vimg = himg.copy()
                else:
                    himg_copy = himg.copy()
                    vimg = cv2.vconcat([vimg, himg_copy])
                himg = None

        return vimg
Example #14
def gen_spritesheet(col, directory, image_list, write_to, css_filename, 
                    class_prefix, spritesheet_filepath, image_size, 
                    size_percentage=1, responsive_768=1, write_spritesheet_map=False):
  css_rules = []
  css_responsive_768_rules = []
  champion_id_map = []

  col_index, row_index = 0, 0
  image_row, image_col = [], []
  first_image_channel = 0
  champion_id_map_row = []
  for key, image in image_list:
    img = cv2.imread(directory + image, cv2.IMREAD_UNCHANGED)
    if first_image_channel == 0:
      first_image_channel = img.shape[2]
    if img.shape[0] != image_size[0] or img.shape[1] != image_size[1]:
      img = cv2.resize(img, image_size)
    image_col.append(img)
    champion_id_map_row.append(key)
    css_rules.append(
      gen_css_rule(
        class_prefix + str(key),
        col_index * img.shape[1],
        row_index * img.shape[0],
        img.shape[1],
        img.shape[0],
        size_percentage
      )
    )

    css_responsive_768_rules.append(
      gen_css_rule(
        class_prefix + str(key),
        col_index * img.shape[1],
        row_index * img.shape[0],
        img.shape[1],
        img.shape[0],
        responsive_768
      )
    )

    col_index += 1
    if col_index >= col:
      image_row.append(image_col)
      image_col = []
      col_index = 0
      row_index += 1
      champion_id_map.append(champion_id_map_row)
      champion_id_map_row = []
  image_row.append(image_col)
  champion_id_map.append(champion_id_map_row)
 
  width, last_row_width = 0, 0
  last_row_height = 0
  for img in image_row[-1]:
    last_row_width += img.shape[1]
    last_row_height = img.shape[0]
  for img in image_row[0]:
    width += img.shape[1]
  col_padding = width - last_row_width
  row_padding = last_row_height
  if col_padding > 0:
    padding = np.zeros([row_padding, col_padding, first_image_channel], np.uint8)
    image_row[-1].append(padding)

  width, height = 0, 0
  for imgs in image_row:
    height += imgs[0].shape[0]
  for img in image_row[0]:
    width += img.shape[1]

  gen_css_file(
    css_filename, spritesheet_filepath, width, 
    height, size_percentage, css_rules, responsive_768, css_responsive_768_rules
  )

  spritesheet = cv2.vconcat([cv2.hconcat(col) for col in image_row])
  cv2.imwrite(write_to, spritesheet)

  if write_spritesheet_map:
    with open("spritesheet_champion_id_mapping.json", 'w') as f:
      json.dump(champion_id_map, f)  # the with-block closes the file automatically
def concat_tile(im_list_2d):
    return cv2.vconcat([cv2.hconcat(im_list_h) for im_list_h in im_list_2d])
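
A usage sketch for concat_tile (the input file is a placeholder; every image in a row must share the same height, and every row the same total width):

import cv2

img = cv2.imread('sample.png')  # placeholder input
if img is not None:
    grid = concat_tile([[img, img, img],
                        [img, img, img]])  # 2 rows x 3 columns
    cv2.imwrite('grid.png', grid)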
Example #16
 def show(self):
     preview_result = cv.hconcat(
         [self.original_image, self.processed_image])
     cv.imshow(self.ROOT_WINDOWS, preview_result)
Example #17
 _, contours, _ = cv2.findContours(mask2, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
 for i in range(len(contours)):
     area = cv2.contourArea(contours[i])
     areaImage = wid*hei
     if area > (areaImage*0.001):
         cv2.drawContours(mask2, contours, i, (255,255,255), cv2.FILLED)
 _, contours, _ = cv2.findContours(mask2, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
 for contour in contours:
     (x, y, w, h) = cv2.boundingRect(contour)
     if cv2.contourArea(contour) < 700:
         continue
     cv2.rectangle(hv, (x,y), (x+w, y+h), (0,0,0), 3)
 print(hei, wid)
 output = np.ones((hei, wid*2), dtype="uint8")
 #v = cv2.hconcat([hv, mask])
 v = cv2.hconcat([hv, mask])
 output[0:hei, 0:wid*2] = v
 #v2 = cv2.hconcat([mask1, mask2])
 #output[hei:hei*2, 0:wid*2] = v2
 #v3 = cv2.vconcat([v, v2])
 '''
 perc = 50
 nw = int(v3.shape[1]*perc/100)
 nh = int(v3.shape[0]*perc/100)
 dim = (nw, nh)
 v_resized = cv2.resize(v3, dim, interpolation=cv2.INTER_AREA)
 # Write the frame into the file 'output.avi'
 print('frame size', frame.shape)
 print('v3 size', v3.shape)
 print('v3 resize', v_resized.shape)
 '''
dataPath = './data'  # Change this to the path where you stored the data
imagePaths = os.listdir(dataPath)
print('imagePaths=',imagePaths)

cap = cv2.VideoCapture(0)

faceClassif = cv2.CascadeClassifier(cv2.data.haarcascades+'haarcascade_frontalface_default.xml')

while True:
    ret,frame = cap.read()
    if not ret: break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    auxFrame = gray.copy()

    nFrame = cv2.hconcat([frame, np.zeros((480,300,3),dtype=np.uint8)])

    faces = faceClassif.detectMultiScale(gray,1.3,5)

    for (x,y,w,h) in faces:
        rostro = auxFrame[y:y+h,x:x+w]
        rostro = cv2.resize(rostro,(150,150),interpolation= cv2.INTER_CUBIC)
        result = emotion_recognizer.predict(rostro)

        cv2.putText(frame,'{}'.format(result),(x,y-5),1,1.3,(255,255,0),1,cv2.LINE_AA)

        # EigenFaces
        if method == 'EigenFaces':
            if result[1] < 5700:
                cv2.putText(frame,'{}'.format(imagePaths[result[0]]),(x,y-25),2,1.1,(0,255,0),1,cv2.LINE_AA)
                cv2.rectangle(frame, (x,y),(x+w,y+h),(0,255,0),2)
Example #19
def concat_merge_Image(img1, img2, point1, point2, bgr=False):
    """
    concat and merge images,
    :param bgr: (boolean) if True,concat brg
    :return:stitched image,img1ori,img2ori,imgoverlap
    """
    img1h, img1w = img1.shape[0], img1.shape[1]
    img2h, img2w = img2.shape[0], img2.shape[1]
    p1x, p1y = int(point1[0]), int(point1[1])
    p2x, p2y = int(point2[0]), int(point2[1])

    img1overlap = img1[:, p1x - p2x:]
    img1ori = img1[:, :p1x - p2x]

    img2_file = np.zeros(img2.shape, np.uint8)
    # img2_file.fill(255)
    shifty = p2y - p1y  # if the right image's keypoint sits above the left one's, crop the top of the right image; otherwise crop the bottom, zero-filling the gap
    if shifty <= 0:
        img2crop = img2[:img2h + shifty, :]  # crop the right image so it lines up with the left image
        img2_file[0 - shifty:, :] = img2crop
    else:
        img2crop = img2[shifty:, :]
        img2_file[:img2h - shifty, :] = img2crop

    img2overlap = img2_file[:, :p2x + img1w - p1x]
    img2ori = img2_file[:, p2x + img1w - p1x:]

    imgoh = img1overlap.shape[0]
    imgow = img1overlap.shape[1]
    imgoverlap = np.zeros(img1overlap.shape, np.uint8)
    # imgoverlap.fill(255)
    # BGR image blending
    if bgr:
        # optimization version B
        ind = np.arange(imgow)
        w = np.empty(imgow)
        w.fill(imgow)
        alpha = (w - ind) / w
        beta = np.ones(imgow) - alpha
        for i in range(imgoverlap.shape[2]):
            imgoverlap[:, :,
                       i] = img1overlap[:, :,
                                        i] * alpha + img2overlap[:, :,
                                                                 i] * beta

        # optimization version A
        # imgoverlap[:,:,:]=img1overlap*alpha+img2overlap*beta
        # for j in range(imgow):
        #     alpha = float(imgow - j) / imgow
        #     imgoverlap[:, j, :] = img1overlap[:, j, :] * alpha + img2overlap[:, j, :] * (1.0 - alpha)

        # original version
        # for i in range(imgoh):
        #     for j in range(imgow):
        #         if img2overlap[i, j, 0] == 0 and img2overlap[i, j, 1] == 0 and img2overlap[i, j, 2] == 0:
        #             alpha = 1.0
        #         else:
        #             alpha = float(imgow - j) / imgow
        #         imgoverlap[i, j, :] = img1overlap[i, j, :] * alpha + img2overlap[i, j, :] * (1.0 - alpha)

    else:  # grayscale blending
        for j in range(imgow):
            alpha = float(imgow - j) / imgow
            imgoverlap[:, j] = (img1overlap[:, j] * alpha +
                                img2overlap[:, j] * (1.0 - alpha)).astype(np.uint8)

        # for i in range(imgoh):
        #     for j in range(imgow):
        #         if img2overlap[i, j] == 0:
        #             alpha = 1.0
        #         else:
        #             alpha = float(imgow - j) / imgow
        #         imgoverlap[i, j] = int(img1overlap[i, j] * alpha + img2overlap[i, j] * (1.0 - alpha))
    final = cv2.hconcat([img1ori, imgoverlap, img2ori])
    return final, img1ori, img2ori, imgoverlap, shifty
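
The heart of the overlap handling above is a linear alpha ramp across the overlap width; a vectorised sketch of that blend on its own (array shapes are assumptions) might be:

import numpy as np

def blend_overlap(left, right):
    # left, right: H x W x 3 overlap strips cut from the two images
    w = left.shape[1]
    alpha = (w - np.arange(w, dtype=np.float64)) / w  # 1.0 at the left edge, ~0 at the right
    alpha = alpha[np.newaxis, :, np.newaxis]          # broadcast over rows and channels
    return (left * alpha + right * (1.0 - alpha)).astype(np.uint8)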
Example #20
                thickness=2,
                lineType=cv2.LINE_AA)
    return images_resize


#filenames0 = file_name(img_path0)
filenames1 = file_name(img_path1)
filenames2 = file_name(img_path2)

font = cv2.FONT_HERSHEY_SIMPLEX

if len(filenames1) != len(filenames2):
    print("Number of files in the two directories doesn't match")

elif len(filenames1) == 0 or len(filenames2) == 0:
    print("Folder is EMPTY!!!")
else:
    print(len(filenames1))
    print(len(filenames2))
    for i in range(len(filenames1)):
        #images0_resize = img_load(i,filenames0,'5/21')
        images1_resize = img_load(i, filenames1, '5/21')
        images2_resize = img_load(i, filenames2, '5/22')

        print("Complete ", i)
        c_img = cv2.hconcat([images1_resize, images2_resize])
        cv2.imwrite(os.path.join(save_img_path, str(i) + '.jpg'), c_img)
        cv2.imshow("Window", c_img)
        cv2.waitKey(1000)

cv2.destroyAllWindows()
Example #21
    color_face = color[y:(y+h), x:(x+w)]

    # Image.fromarray(gray_face).save("{}/{}".format("small-gray",image.rsplit("/",1)[1]))
    # Image.fromarray(color_face).save("{}/{}".format("small-color",image.rsplit("/",1)[1]))

    face_mean = opencv.mean(gray_face)[0]
    gray_mean += face_mean

    ## organize the data for kmeans
    hsv = opencv.split(color_face)
    
    hsv[0] = hsv[0].reshape(hsv[0].shape[0]* hsv[0].shape[1],1)
    hsv[1] = hsv[1].reshape(hsv[1].shape[0]* hsv[1].shape[1],1)
    hsv[2] = hsv[2].reshape(hsv[2].shape[0]* hsv[2].shape[1],1)

    data = opencv.hconcat(hsv)
    ## run kmeans
    criteria = (opencv.TERM_CRITERIA_EPS, 1000, 0)
    compactness,labels,centers =  opencv.kmeans(np.float32(data), 10, criteria, 10, opencv.KMEANS_RANDOM_CENTERS)

    colors = []

    ## sort colors based on size of cluster
    for (i, center) in enumerate(centers):
      labelMask = opencv.inRange(labels,i,i)
      n = opencv.countNonZero(labelMask)
      colors.append({"count" : n, "center": center})

    sortedColors = sorted(colors, key=lambda k: k['count']) 
    sortedColors.reverse() # descending order
Example #22
    def return_pitch_yaw_roll(self, image, radians=False):
        """ Return the roll, pitch and yaw angles associated with the input image.

         @param image It is a colour image. It must be >= 64 pixels.
         @param radians When True it returns the angles in radians, otherwise in degrees.
         """

        #The dlib shape predictor returns 68 points, we are interested only in a few of those
        # TRACKED_POINTS = (0, 4, 8, 12, 16, 17, 26, 27, 30, 33, 36, 39, 42, 45, 62)
        TRACKED_POINTS = [
            17, 21, 22, 26, 36, 39, 42, 45, 31, 35, 48, 54, 57, 8
        ]

        #Anthropometric constant values of the human head.
        #Check the wikipedia EN page and:
        #"Head-and-Face Anthropometric Survey of U.S. Respirator Users"
        #
        #X-Y-Z with X pointing forward and Y on the left and Z up.
        #The X-Y-Z coordinates used are like the standard
        # coordinates of ROS (robotic operative system)
        #OpenCV uses the reference usually used in computer vision:
        #X points to the right, Y down, Z to the front
        #
        #The Male mean interpupillary distance is 64.7 mm (https://en.wikipedia.org/wiki/Interpupillary_distance)
        #
        # P3D_RIGHT_SIDE = np.float32([-100.0, -77.5, -5.0]) #0
        # P3D_GONION_RIGHT = np.float32([-110.0, -77.5, -85.0]) #4
        # P3D_MENTON = np.float32([0.0, 0.0, -122.7]) #8
        # P3D_GONION_LEFT = np.float32([-110.0, 77.5, -85.0]) #12
        # P3D_LEFT_SIDE = np.float32([-100.0, 77.5, -5.0]) #16
        # P3D_FRONTAL_BREADTH_RIGHT = np.float32([-20.0, -56.1, 10.0]) #17
        # P3D_FRONTAL_BREADTH_LEFT = np.float32([-20.0, 56.1, 10.0]) #26
        # P3D_SELLION = np.float32([0.0, 0.0, 0.0]) #27 This is the world origin
        # P3D_NOSE = np.float32([21.1, 0.0, -48.0]) #30
        # P3D_SUB_NOSE = np.float32([5.0, 0.0, -52.0]) #33
        # P3D_RIGHT_EYE = np.float32([-20.0, -32.35,-5.0]) #36
        # P3D_RIGHT_TEAR = np.float32([-10.0, -20.25,-5.0]) #39
        # P3D_LEFT_TEAR = np.float32([-10.0, 20.25,-5.0]) #42
        # P3D_LEFT_EYE = np.float32([-20.0, 32.35,-5.0]) #45
        # #P3D_LIP_RIGHT = np.float32([-20.0, 65.5,-5.0]) #48
        # #P3D_LIP_LEFT = np.float32([-20.0, 65.5,-5.0]) #54
        # P3D_STOMION = np.float32([10.0, 0.0, -75.0]) #62
        #
        # #This matrix contains the 3D points of the
        # # 11 landmarks we want to find. It has been
        # # obtained from antrophometric measurement
        # # of the human head.
        # landmarks_3D = np.float32([P3D_RIGHT_SIDE,
        #                          P3D_GONION_RIGHT,
        #                          P3D_MENTON,
        #                          P3D_GONION_LEFT,
        #                          P3D_LEFT_SIDE,
        #                          P3D_FRONTAL_BREADTH_RIGHT,
        #                          P3D_FRONTAL_BREADTH_LEFT,
        #                          P3D_SELLION,
        #                          P3D_NOSE,
        #                          P3D_SUB_NOSE,
        #                          P3D_RIGHT_EYE,
        #                          P3D_RIGHT_TEAR,
        #                          P3D_LEFT_TEAR,
        #                          P3D_LEFT_EYE,
        #                          P3D_STOMION])
        LEFT_EYEBROW_LEFT = [6.825897, 6.760612, 4.402142]
        LEFT_EYEBROW_RIGHT = [1.330353, 7.122144, 6.903745]
        RIGHT_EYEBROW_LEFT = [-1.330353, 7.122144, 6.903745]
        RIGHT_EYEBROW_RIGHT = [-6.825897, 6.760612, 4.402142]
        LEFT_EYE_LEFT = [5.311432, 5.485328, 3.987654]
        LEFT_EYE_RIGHT = [1.789930, 5.393625, 4.413414]
        RIGHT_EYE_LEFT = [-1.789930, 5.393625, 4.413414]
        RIGHT_EYE_RIGHT = [-5.311432, 5.485328, 3.987654]
        NOSE_LEFT = [2.005628, 1.409845, 6.165652]
        NOSE_RIGHT = [-2.005628, 1.409845, 6.165652]
        MOUTH_LEFT = [2.774015, -2.080775, 5.048531]
        MOUTH_RIGHT = [-2.774015, -2.080775, 5.048531]
        LOWER_LIP = [0.000000, -3.116408, 6.097667]
        CHIN = [0.000000, -7.415691, 4.070434]
        landmarks_3D = np.float32([
            LEFT_EYEBROW_LEFT, LEFT_EYEBROW_RIGHT, RIGHT_EYEBROW_LEFT,
            RIGHT_EYEBROW_RIGHT, LEFT_EYE_LEFT, LEFT_EYE_RIGHT,
            RIGHT_EYE_LEFT, RIGHT_EYE_RIGHT, NOSE_LEFT, NOSE_RIGHT,
            MOUTH_LEFT, MOUTH_RIGHT, LOWER_LIP, CHIN
        ])
        #Return the 2D position of our landmarks
        landmarks_2D = self._return_landmarks(inputImg=image,
                                              points_to_return=TRACKED_POINTS)
        if landmarks_2D is not None:
            #Print some red dots on the image
            #for point in landmarks_2D:
            #cv2.circle(frame,( point[0], point[1] ), 2, (0,0,255), -1)

            #Applying the PnP solver to find the 3D pose
            #of the head from the 2D position of the
            #landmarks.
            #retval - bool
            #rvec - Output rotation vector that, together with tvec, brings
            #points from the world coordinate system to the camera coordinate system.
            #tvec - Output translation vector. It is the position of the world origin (SELLION) in camera co-ords
            retval, rvec, tvec = cv2.solvePnP(landmarks_3D, landmarks_2D,
                                              self.camera_matrix,
                                              self.camera_distortion)
            #Get as input the rotational vector
            #Return a rotational matrix
            rmat, _ = cv2.Rodrigues(rvec)
            pose_mat = cv2.hconcat((rmat, tvec))
            #euler_angles contain (pitch, yaw, roll)
            # euler_angles = cv2.DecomposeProjectionMatrix(projMatrix=rmat, cameraMatrix=self.camera_matrix, rotMatrix, transVect, rotMatrX=None, rotMatrY=None, rotMatrZ=None)
            _, _, _, _, _, _, euler_angles = cv2.decomposeProjectionMatrix(
                pose_mat)
            return list(euler_angles)  # NOTE: the alternative return path below is unreachable

            head_pose = [
                rmat[0, 0], rmat[0, 1], rmat[0, 2], tvec[0], rmat[1, 0],
                rmat[1, 1], rmat[1, 2], tvec[1], rmat[2, 0], rmat[2, 1],
                rmat[2, 2], tvec[2], 0.0, 0.0, 0.0, 1.0
            ]
            #print(head_pose) #TODO remove this line
            return self.rotationMatrixToEulerAngles(rmat)
        else:
            return None
Example #23
        bbox[3] = min(bbox[3] + bbox_width/4,img.shape[0])
        bbox = [int(bbox[1]),int(bbox[0]),int(bbox[3]),int(bbox[2])]
    else:
        bbox = [0,0,img.shape[0],img.shape[1]]
    face_img = img[bbox[0]:bbox[2],bbox[1]:bbox[3]]    
    lmks = get_lmks_by_img(model, face_img) 
    face_kps = []
    for kps in lmks:
        face_kps.append([int(bbox[1]+kps[0]),int(bbox[0]+kps[1])])
    points = np.array(face_kps,dtype=np.float32)

    pick_dlib = [19,24,39,36,42,45,33,48,51,54,57,8]
    # estimate head orientation
    H,W = img.shape[0],img.shape[1]
    matrix = np.array([[W,0,W/2.0],[0,W,H/2.0],[0,0,1]])
    _,rot_vec,trans_vec = cv2.solvePnP(obj[pick_model,...].astype("float32"),points[pick_dlib,...].astype("float32"),matrix,None)
    # rot_vec,trans_vec = headpose_estimator.solve_pose_by_68_points(points)
    rot_mat = cv2.Rodrigues(rot_vec)[0]
    pose_mat = cv2.hconcat((rot_mat, trans_vec))
    euler_angle = cv2.decomposeProjectionMatrix(pose_mat)[-1]
    euler_angle = euler_angle.flatten()

    show_img = draw_pick_kps(img.copy(),points,pick_dlib,5)
    show_img = draw_axis(show_img,euler_angle,np.mean(points,axis=0))
    cv2.imshow("img",show_img)
    key = cv2.waitKey(25)
    if key == 27:  # Esc key pressed
        break

cap.release()
cv2.destroyAllWindows()
Example #24
            x, y, w, h = cv2.boundingRect(approx)

            if objCor == 3:
                objectType = "Triangle"
            elif objCor == 4:
                aspRatio = w / float(h)
                if aspRatio > 0.95 and aspRatio < 1.05:
                    objectType = "Sqaure"
                else:
                    objectType = "Rectangle"
            else:
                objectType = "None"
            cv2.rectangle(imgContour, (x, y), (x + w, y + h), (0, 255, 0), 5)
            cv2.putText(imgContour, objectType,
                        (x + (w // 2) - 10, y + (h // 2) - 10),
                        cv2.FONT_HERSHEY_DUPLEX, 1, (0, 0, 0), 2)


path = "Resources/Shapes.jpg"
img = cv2.resize(cv2.imread(path), (720, 720))

imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
imgBlur = cv2.GaussianBlur(imgGray, (7, 7), 1)
imgCanny = cv2.Canny(imgBlur, 50, 50)
imgContour = img.copy()
getContour(imgCanny)

img_h = cv2.hconcat([imgGray, imgBlur, imgCanny])
# cv2.imshow("Stacked Output",img_h)
cv2.imshow("Output", imgContour)
cv2.waitKey(0)
Example #25
        max_NCC_coordinator = []
        for c2_coordinator, c2 in corners2:
            c1 = np.reshape(c1, c1.size)
            c2 = np.reshape(c2, c2.size)

            c1 = np.array(c1 / np.linalg.norm(c1))
            c2 = np.array(c2 / np.linalg.norm(c2))
            NCC = np.dot(c1, c2)
            if max_NCC < NCC:
                max_NCC = NCC
                max_NCC_coordinator = c2_coordinator
        if max_NCC > 0.99:
            match_coordinate.append([c1_coordinator, max_NCC_coordinator])

    print("Number of matched points'", len(match_coordinate))
    concatenated_img_raw = cv2.hconcat([img1.resize_img, img2.resize_img])
    for c1, c2 in match_coordinate:
        c2 = [c2[0] + img2.resize_img.shape[1], c2[1]]
        concatenated_img_raw[c1[1], c1[0]] = [0, 255, 0]
        concatenated_img_raw[c2[1], c2[0]] = [0, 255, 0]
        cv2.line(concatenated_img_raw, (c1[0], c1[1]), (c2[0], c2[1]),
                 (255, 0, 0),
                 thickness=1,
                 lineType=16)

    cv2.imwrite('raw_feature_matching.jpg', concatenated_img_raw)

    # Find Homograph via RANSAC

    pts_src = np.array([c1 for [c1, c2] in match_coordinate])
    pts_dst = np.array([c2 for [c1, c2] in match_coordinate])
Example #26
def transform(path, sr):
    x, sr = readAudioFile(path)

    complex_spec, complex_spec_time_scaled = get_complex_spec(
        path, 0.079, 0.025, with_time_scaled=True)
    modgdf = get_modgdf(complex_spec, complex_spec_time_scaled)
    modgdf = np.absolute(modgdf)
    # print(modgdf.shape)
    # plot_data(modgdf, "modgdf.png", "modgdf")
    # plot_data(np.absolute(modgdf), "abs_modgdf.png", "abs_modgdf")

    hop_length = 1875  # This gives us 256 time buckets: 1875 = 10 * 48000 / 256
    n_fft = 8192  # This sets the lower frequency cut off to 48000 Hz / 8192 * 2 = 12 Hz
    S = librosa.feature.melspectrogram(x,
                                       sr=sr,
                                       n_fft=n_fft,
                                       hop_length=hop_length)
    logS = librosa.power_to_db(abs(S))
    # return logS
    # print(modgdf)
    # print(logS.shape,modgdf.shape)

    img1 = Image.fromarray(logS)
    img1 = img1.transpose(Image.FLIP_TOP_BOTTOM)

    img2 = Image.fromarray(modgdf)
    img2 = img2.transpose(Image.FLIP_TOP_BOTTOM)
    basepath = os.path.dirname(__file__)
    file_path1 = os.path.join(basepath, 'images', secure_filename("im1.png"))
    plt.imsave(file_path1, img1, cmap=plt.cm.gray)
    file_path2 = os.path.join(basepath, 'images', secure_filename("im2.png"))
    plt.imsave(file_path2, img2, cmap=plt.cm.gray)
    img1 = cv2.imread(file_path1)
    img2 = cv2.imread(file_path2)
    im_h = cv2.hconcat([img1, img2])
    transform_norm = transforms.Normalize([0.485, 0.456, 0.406],
                                          [0.229, 0.224, 0.225])
    loader = transforms.Compose(
        [transforms.ToTensor(), transform_norm,
         transforms.Resize([128, 256])])
    image = loader(im_h).float()
    image = image.unsqueeze(0)  # this is for VGG, may not be needed for ResNet
    model = load_model(10, 512)
    file_pathm = os.path.join(basepath, secure_filename("mix-checkpoint-7.pt"))
    model.load_state_dict(
        torch.load(file_pathm, map_location=torch.device('cpu')))
    model.eval()
    out = {}
    with torch.no_grad():
        out_data = model(image)
        g = out_data.cpu().numpy().flatten()
        s = g.argsort()[-3:][::-1]
        v = 1
        for i in s:
            for d, k in class_to_idx.items():
                if k == i:
                    out[v] = d
                    v += 1

        put = '1st location is ' + out[3] + ' \n ' + '2nd location is ' + out[
            2] + '  \n  3rd location is ' + out[1]

    return put
#print image_list
transformed_images = image_list[:len(image_list) // 2]
untransformed_images = image_list[len(image_list) // 2:]

#print len(transformed_images)
#print len(untransformed_images)

height , width , layers =  (480, 1280, 3)

#video = cv2.VideoWriter('compensation.avi',-1,1,)
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('output.avi',fourcc, 20.0, (width,height))

list_of_angles = [0,5,10,15,20,25,30,35,40,45,50,55,60,65,70,75,80]

for enumaration, image_name in enumerate(untransformed_images):
    untransformed_image = cv2.imread(image_name)
    transformed_image = cv2.imread(transformed_images[enumaration])
#    cv2.imshow("untransformed_image", untransformed_image)
#    cv2.imshow("transformed_image", transformed_image)
    combined_image = cv2.hconcat( [untransformed_image, transformed_image] )
    cv2.putText(combined_image, "Rotation " + str(list_of_angles[enumaration]) + "", ( 30, 480-20  ), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,255,255), 2)
    cv2.imshow("compensation", combined_image)
    for i in range(15):
        out.write(combined_image)
    cv2.waitKey(5)

cv2.destroyAllWindows()
out.release()
print "Finished"
    if not (capl.grab() and capr.grab()):
        print("No more frames")
        break

    ret, framel = capl.read()
    ret, framer = capr.read()

    if is_recording:
        if recl is not None:
            recl.write(framel)
        if recr is not None:
            recr.write(framer)

    # show on screen
    screen = cv2.hconcat([framel, framer])
    if is_recording and math.sin(2.0 * math.pi * time.time() / 2.0) > -0.75:
        screen = cv2.circle(screen, (10, 10), 8, (0, 0, 255), -1)
        cv2.putText(screen,
                    'REC', (20, 15),
                    cv2.FONT_HERSHEY_SIMPLEX,
                    0.5, (0, 0, 255),
                    thickness=2)

    cv2.imshow('screen', screen)

    frame_rate = min(
        int((1.0 / (time.time() - last_update_time)) * 2.0 + frame_rate) / 3.0,
        PREFERED_FRAME_RATE)

    if frame_rate < PREFERED_FRAME_RATE:
#                         if original_scale > 0:                          
#                             gabor_image = cv2.pyrUp(gabor_image)
#                             original_scale -= 1
#                         else:
#                             gabor_image = cv2.pyrDown(gabor_image)
#                             original_scale += 1     
                     
#                     gabor_image = cv2.pyrUp( gabor_image, dstsize = (100,100 ) )
#                     gabor_image = gabor_image[gabor_image.shape[0]/2:gabor_image.shape[0]/2 + 1,gabor_image.shape[1]/2:gabor_image.shape[1]/2 + 1]
#                     print "shape", gabor_image.shape                     
                     gabor_image = cv2.resize(gabor_image, dsize = (100,100) )       
                     print(gabor_image.max())
                     analyzed_images.append( gabor_image )
                     kernels.append( kernel.get_kernel() )
                     
                 former_image = cv2.hconcat(  analyzed_images )
                 print(former_image.shape)

                 large_image_list.append( former_image )
                 
                 former_kernel = cv2.hconcat( kernels )
                 print(former_kernel.shape)
                 kernel_large_list.append( former_kernel )


             combinened_image = cv2.vconcat( large_image_list )
             combinened_kernels = cv2.vconcat( kernel_large_list )
             
#             combinened_image = np.array( combinened_image, np.uint8)
#             cv2.normalize( combinened_image, combinened_image,0,255, cv2.NORM_MINMAX) 
             gabor_image = np.array(gabor_image, dtype=np.uint8)
Example #30
def loading(request, input_id):
    if request.method == 'POST':  # when the Start button is pressed
        font = get_object_or_404(Font, pk=input_id)  # current object
        ## run the deep learning job ##

        ##### 1. Copy the images #####
        phrase_kor = ['문', '입', '초', '기', '웹', '대', '여', '명', '숙']
        phrase_eng = [
            'moon', 'ip', 'cho', 'gi', 'web', 'dae', 'yeo', 'myung', 'sook'
        ]  # dummy entries keep the indices aligned; fix later

        # build the file name
        day = str(font.date)[:10]
        time = str(font.date)[11:13] + "-" + str(font.date)[14:16]
        day_time = day + "_" + time
        userTime = str(request.user) + "_" + day_time + "_"

        # copy files into the directory
        mkdir_command = " mkdir ./media/result/" + str(
            request.user) + "_" + day_time  # create the directory used for merging the images
        os.system(mkdir_command)

        for i in range(8, -1, -1):
            # create a user folder for each character
            char = phrase_kor[i]  # cycle through the characters 숙, 명, 여, 대, ...
            mkdir_command = " mkdir ~/ganjyfont/test2/" + str(
                char) + "/" + str(
                    request.user) + "_" + day_time  # directory that collects the images to run through the model
            os.system(mkdir_command)

            picname = userTime + phrase_eng[i] + ".png"  # 숙
            cp_command = "cp ~/WebServer/Graduate/media/crop/" + picname + " ~/ganjyfont/test2/" + str(
                char) + "/" + str(request.user) + "_" + day_time + "/"
            os.system(cp_command)  # copy the file

        ##### 2. Run the deep learning model #####

        # build the command
        input_str = str(font.final_phrase)  # phrase made up only of characters with checkpoints, plus *

        filename = {
            '숙': 'sook',
            '명': 'myung',
            '여': 'yeo',
            '대': 'dae',
            '웹': 'web',
            '기': 'gi',
            '초': 'cho',
            '입': 'ip',
            '문': 'moon'
        }

        for char, i in zip(input_str, range(len(input_str))):  # char = the character to generate
            if char == " ":
                pass
            else:
                # generate the image
                letter = dictionary[char]  # letter = the checkpoint character
                dl_command = "cd ~/ganjyfont && python3 test.py --dataroot ~/ganjyfont/test2/" + letter + "/" + str(
                    request.user
                ) + "_" + day_time + " --name " + letter + "_" + char + "_pix2pix --model test --which_model_netG unet_256 --which_direction AtoB --dataset_mode single --norm batch --gpu_ids=0 --how_many=100"
                print("=================deep learning=================")
                os.system(dl_command)

                # copy the image --> needed for merging the images later
                beforecopy = "~/ganjyfont/results_ver2_font/" + letter + "_" + char + "_pix2pix/test_latest/images/" + str(
                    request.user
                ) + "_" + day_time + "_" + filename[letter] + "_fake_B.png"
                aftercopy = "./media/result/" + str(
                    request.user) + "_" + day_time + "/" + str(i) + ".png"
                cp_command = "cp " + beforecopy + " " + aftercopy
                os.system(cp_command)

        ##### 3. Concatenate the images #####
        print("=============image merge===============")
        string = input_str

        directory = './media/result/' + str(request.user) + "_" + day_time
        blank = "./media/blank/blank.png"

        for s, i in zip(string, range(len(string))):
            if i == 0:
                result = directory + "/" + str(i) + '.png'
                result = cv2.imread(result, cv2.IMREAD_GRAYSCALE)
                result = cleanside(result)
                result = morph(result)

            elif s == " ":
                blank = "./media/blank/blank.png"
                blank = cv2.imread(blank, cv2.IMREAD_GRAYSCALE)
                result = cv2.hconcat([result, blank])

            else:
                temp = directory + "/" + str(i) + '.png'
                print(temp)
                temp = cv2.imread(temp, cv2.IMREAD_GRAYSCALE)
                temp = cleanside(temp)
                temp = morph(temp)
                result = cv2.hconcat([result, temp])
                print(str(i))

        # build the result image path
        # save the result image under webserver/Graduate/media/output
        imgName = "./media/output/" + userTime + "result.png"
        cv2.imwrite(imgName, result)

        ##### 4. Save the image to the DB #####
        output_photo = "./output/" + userTime + "result.png"
        font.output_photo1 = output_photo
        font.save(update_fields=['output_photo1'])  # save to the database

        return redirect('fontsapp:result', input_id=font.pk)

    else:  # when the page is first opened (GET)
        font = get_object_or_404(Font, pk=input_id)
        input_str = str(font.final_phrase)  # phrase made up only of characters with checkpoints, plus *

        return render(request, 'loading.html', {'font': font})
Example #31
def video(args):
    classes = np.genfromtxt(os.path.join(args.dataset, "meta", "classes.txt"),
                            str,
                            delimiter="\n")
    model, preprocess, xp, _ = prepare_setting(args)

    cap = cv2.VideoCapture(0)
    if cap.isOpened() is False:
        print("Error opening video stream or file")
    fps_time = 0
    with chainer.using_config('train', False):
        while cap.isOpened():
            ret_val, img = cap.read()
            img = cv2.resize(img, (224, 224))
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            input_image = cv2.resize(img, (224, 224))
            input_image = input_image.transpose(2, 0, 1)
            input_image = preprocess(input_image.astype(np.float32))
            start = time.time()
            h = model.predictor(xp.expand_dims(xp.array(input_image), axis=0))
            prediction = F.softmax(h)
            if args.device >= 0:
                prediction = xp.asnumpy(prediction[0].data)
            else:
                prediction = prediction[0].data
            top_ten = np.argsort(-prediction)[:1]
            end = time.time()
            # print("Elapsed", end - start)
            img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
            blank = np.zeros((224, 448, 3)).astype(img.dtype)
            for rank, label_idx in enumerate(top_ten):
                score = prediction[label_idx]
                label = classes[label_idx]
                # print('{:>3d} {:>6.2f}% {}'.format(
                #     rank + 1, score * 100, label))
                if (score > 0.15):
                    cv2.putText(blank,
                                '{:>6.2f}% {}'.format(score * 100, label),
                                (10, 20 * (rank + 2)),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
                    cv2.putText(blank,
                                'Name: {}'.format(full_label[label_idx]),
                                (10, 20 * (rank + 1 + 2)),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
                    cv2.putText(
                        blank,
                        'Calories: {} kcal'.format(data[label]['calories']),
                        (10, 20 * (rank + 2 + 2)), cv2.FONT_HERSHEY_SIMPLEX,
                        0.5, (0, 255, 0), 2)
                    cv2.putText(blank,
                                'Sodium: {}g'.format(data[label]['sodium']),
                                (10, 20 * (rank + 3 + 2)),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
                    cv2.putText(
                        blank, 'Carbohydrate: {}g'.format(
                            data[label]['carbohydrate']),
                        (10, 20 * (rank + 4 + 2)), cv2.FONT_HERSHEY_SIMPLEX,
                        0.5, (0, 255, 0), 2)
                    cv2.putText(blank,
                                'Protein {}g'.format(data[label]['protein']),
                                (10, 20 * (rank + 5 + 2)),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
                    cv2.putText(blank, 'Fat {}g'.format(data[label]['fat']),
                                (10, 20 * (rank + 6 + 2)),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
                else:
                    cv2.putText(
                        blank, '{:>6.2f}% {}'.format(score * 100,
                                                     "Try Closer"),
                        (10, 20 * (rank + 2)), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                        (0, 255, 0), 2)
            cv2.putText(blank, "FPS: %f" % (1.0 / (time.time() - fps_time)),
                        (10, 12), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0),
                        2)
            title = "Make Food Photo Great Again"
            cv2.imshow(title, cv2.hconcat([img, blank]))
            fps_time = time.time()
            """Hit esc key"""
            if cv2.waitKey(1) == 27:
                break
Example #32
import os
import cv2
import numpy as np

# Specify images
images = os.listdir('img')
imagesconverted = [cv2.imread('img/' + i) for i in images]

# Concatenate read images
img_h = cv2.hconcat(imagesconverted)
# Make new dir
if not os.path.exists('out'):
    os.makedirs('out')
# Write concatenated images
cv2.imwrite('out/out.png', img_h)
Example #33
                        batch["POINT_3D"])):
                # de-normalize and ready image for opencv display to show the result of transforms
                frame = (((frame.squeeze(0).numpy().transpose(
                    (1, 2, 0)) * norm_std) + norm_mean) * 255).astype(np.uint8)
                for pt_idx, pt in enumerate(pts.view(-1, 2)):
                    pt = (int(round(pt[0].item())), int(round(pt[1].item())))
                    color = (32, 224, 32) if pt_idx == 0 else (32, 32, 224)
                    frame = cv.circle(frame.copy(),
                                      pt,
                                      radius=3,
                                      color=color,
                                      thickness=-1)
                    cv.putText(frame, f"{pt_idx}", pt, cv.FONT_HERSHEY_SIMPLEX,
                               0.5, color)
                display_frames.append(frame)
            #print(f"sorting_good = {batch['sorting_good']}")
            cv.imshow(
                f"frames",
                cv.resize(
                    cv.hconcat(display_frames),
                    dsize=(-1, -1),
                    fx=4,
                    fy=4,
                    interpolation=cv.INTER_NEAREST,
                ))
            cv.waitKey(0)
        iter += 1
        if max_iters is not None and iter > max_iters:
            break
    print(f"all done in {time.time() - init_time} seconds")
Example #34
def concatenateImages(models, i):
    if (i == 0):
        return cv.resize(models[i], (50, 50))
    else:
        return cv.hconcat(
            (concatenateImages(models, i - 1), cv.resize(models[i], (50, 50))))
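
A usage sketch (the file names are placeholders; models is assumed to be a non-empty list of images):

import cv2 as cv

models = [cv.imread(p) for p in ('a.png', 'b.png', 'c.png')]        # placeholder files
strip = concatenateImages(models, len(models) - 1)                  # 50x50 thumbnails, left to right
cv.imwrite('strip.png', strip)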
Example #35
        if w is None:
            h,w,c=frame.shape
            background=np.zeros((h,w,3),dtype=np.uint8)
            mask=np.zeros((h,w,3),dtype=np.uint8)
            for c,col in enumerate(BGRcol):
                background[:,:,c]=col
                mask[:,:,c]=threash
        #print(background.shape)
        #print(mask.shape)
        mask=GrayTo3DGray(threash)
        frame_masked = cv2.bitwise_and(background,mask)

        gray=GrayTo3DGray(gray)
        frameDelta=GrayTo3DGray(frameDelta)

        frametop=cv2.hconcat([gray,frameDelta])
        framebot=cv2.hconcat([mask,frame_masked])
        outframe=cv2.vconcat([frametop,framebot])
        
        cv2.imshow("Dance",outframe)

        if out is None:
            h,w,c=gray.shape
            out = cv2.VideoWriter(outname,fourcc, 20.0, (2*w, 2*h))  # frame size is (width, height)

        out.write(outframe)
        if cv2.waitKey(1)  & 0xFF == ord('q'):
            break
    except:
        break
cap.release()
    gamma = val / 100
    gammaCorrection()

parser = argparse.ArgumentParser(description='Code for Changing the contrast and brightness of an image! tutorial.')
parser.add_argument('--input', help='Path to input image.', default='lena.jpg')
args = parser.parse_args()

img_original = cv.imread(cv.samples.findFile(args.input))
if img_original is None:
    print('Could not open or find the image: ', args.input)
    exit(0)

img_corrected = np.empty((img_original.shape[0], img_original.shape[1]*2, img_original.shape[2]), img_original.dtype)
img_gamma_corrected = np.empty((img_original.shape[0], img_original.shape[1]*2, img_original.shape[2]), img_original.dtype)

img_corrected = cv.hconcat([img_original, img_original])
img_gamma_corrected = cv.hconcat([img_original, img_original])

cv.namedWindow('Brightness and contrast adjustments')
cv.namedWindow('Gamma correction')

alpha_init = int(alpha *100)
cv.createTrackbar('Alpha gain (contrast)', 'Brightness and contrast adjustments', alpha_init, alpha_max, on_linear_transform_alpha_trackbar)
beta_init = beta + 100
cv.createTrackbar('Beta bias (brightness)', 'Brightness and contrast adjustments', beta_init, beta_max, on_linear_transform_beta_trackbar)
gamma_init = int(gamma * 100)
cv.createTrackbar('Gamma correction', 'Gamma correction', gamma_init, gamma_max, on_gamma_correction_trackbar)

on_linear_transform_alpha_trackbar(alpha_init)
on_gamma_correction_trackbar(gamma_init)
Example #37
            histogram,
            "(S)hutter (A): " + str(shutterSpeedNames[currentShutterSpeed]),
            (10, 120), cv2.FONT_HERSHEY_PLAIN, 1, (255, 255, 255), 1)
        cv2.putText(histogram,
                    "(E)xposure (W): " + str(camera.exposure_compensation),
                    (10, 150), cv2.FONT_HERSHEY_PLAIN, 1, (255, 255, 255), 1)
        cv2.putText(histogram, "(I)SO (U): " + str(camera.iso), (10, 180),
                    cv2.FONT_HERSHEY_PLAIN, 1, (255, 255, 255), 1)
        cv2.putText(histogram, "W(B) (V): " + str(camera.awb_mode), (10, 210),
                    cv2.FONT_HERSHEY_PLAIN, 1, (255, 255, 255), 1)

        if pauseRecording:
            cv2.putText(histogram, "PAUSED", (90, 80), cv2.FONT_HERSHEY_PLAIN,
                        2, (0, 0, 255), 2)

        rtop = cv2.hconcat([now, histogram])
        rbottom = cv2.hconcat([frameDelta, thresh])
        quad = cv2.vconcat([rtop, rbottom])
        #quad = cv2.resize(quad, (800, 600))
        cv2.imshow('processors', quad)

        if didTakeFullPicture:
            stamp = current_filestamp()
            cv2.imwrite(save_location() + "/debug/" + stamp + ".jpg", quad)

    key = cv2.waitKey(1) & 0xFF

    if key == ord("s"):
        next_shutter_speed(camera, 1)

    if key == ord("a"):
Example #38
def depth_flow2pose(depth,
                    flow,
                    K,
                    K_inv,
                    gs=16,
                    th=1.,
                    method='AP3P',
                    depth2=None):
    """

    :param depth:       H x W
    :param flow:        h x w x2
    :param K:           3 x 3
    :param K_inv:       3 x 3
    :param gs:          grad size for sampling
    :param th:          threshold for RANSAC
    :param method:      PnP method
    :return:
    """
    if method == 'PnP':
        PnP_method = cv2.SOLVEPNP_ITERATIVE
    elif method == 'AP3P':
        PnP_method = cv2.SOLVEPNP_AP3P
    elif method == 'EPnP':
        PnP_method = cv2.SOLVEPNP_EPNP
    else:
        raise ValueError('PnP method ' + method)

    H, W = depth.shape[:2]
    valid_mask = get_valid_depth(depth)
    sample_mask = np.zeros_like(valid_mask)
    sample_mask[::gs, ::gs] = 1
    valid_mask &= sample_mask == 1

    h, w = flow.shape[:2]
    flow[:, :, 0] = flow[:, :, 0] / w * W
    flow[:, :, 1] = flow[:, :, 1] / h * H
    flow = cv2.resize(flow, (W, H), interpolation=cv2.INTER_LINEAR)

    grid = np.stack(np.meshgrid(range(W), range(H)),
                    2).astype(np.float32)  # HxWx2
    one = np.expand_dims(np.ones_like(grid[:, :, 0]), 2)
    homogeneous_2d = np.concatenate([grid, one], 2)
    d = np.expand_dims(depth, 2)
    points_3d = d * (K_inv @ homogeneous_2d.reshape(-1, 3).T).T.reshape(
        H, W, 3)

    points_2d = grid + flow
    valid_mask &= (points_2d[:, :, 0] < W) & (points_2d[:, :, 0] >= 0) & \
                  (points_2d[:, :, 1] < H) & (points_2d[:, :, 1] >= 0)

    ret, rvec, tvec, inliers = cv2.solvePnPRansac(points_3d[valid_mask],
                                                  points_2d[valid_mask],
                                                  K,
                                                  np.zeros([4, 1]),
                                                  reprojectionError=th,
                                                  flags=PnP_method)
    if not ret:
        inlier_ratio = 0.
    else:
        inlier_ratio = len(inliers) / np.sum(valid_mask)
    pose_mat = np.eye(4, dtype=np.float32)
    pose_mat[:3, :] = cv2.hconcat([cv2.Rodrigues(rvec)[0], tvec])

    return pose_mat, np.concatenate([rvec, tvec]), inlier_ratio
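
A hedged usage sketch with synthetic inputs (the depth ramp, zero flow and intrinsics below are placeholders, not real data; get_valid_depth is assumed to come from the same module):

import numpy as np

H, W = 480, 640
K = np.array([[500.0, 0.0, W / 2.0],
              [0.0, 500.0, H / 2.0],
              [0.0, 0.0, 1.0]], dtype=np.float32)
depth = np.linspace(1.0, 3.0, H * W).reshape(H, W).astype(np.float32)  # synthetic depth ramp
flow = np.zeros((H, W, 2), dtype=np.float32)                           # no apparent motion
pose_mat, rt_vec, inlier_ratio = depth_flow2pose(depth, flow, K, np.linalg.inv(K), gs=16, th=1.0)
print(pose_mat, inlier_ratio)  # should be close to the identity pose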
Example #39
def StackVideos(path1,
                path2,
                use_flag,
                output_fname=None,
                extension='jpg',
                frame_rate=25,
                width=None,
                height=None,
                add_audio=False):
    '''
    Function to stack and write frames for quick comparison

    Arguments:
        path1: path for videos or frames of 1st video
        path2: path for videos or frames of 2nd video
        use_flag: 'videos' to read two video files, 'frames' to read two image folders
        output_fname: output filename (optional)
    '''

    if output_fname is None:
        output_fname = 'stacked_video_1_2.mp4'

    print('Output video path is {}'.format(output_fname))

    if use_flag == 'frames':
        file_list_1 = glob.glob(os.path.join(path1, '*.' + extension))
        file_list_2 = glob.glob(os.path.join(path2, '*.' + extension))
        file_list_1 = natural_sort(file_list_1)
        file_list_2 = natural_sort(file_list_2)

        frame = cv2.imread(file_list_1[0])
        h, w, c = frame.shape

        if height is None:
            height = h
        if width is None:
            width = w

        out = cv2.VideoWriter(str(output_fname),
                              cv2.VideoWriter_fourcc(*"mp4v"), frame_rate,
                              (width * 2, height))

        for i, (file1, file2) in enumerate(zip(file_list_1, file_list_2)):
            img1 = cv2.imread(file1)
            img2 = cv2.imread(file2)
            img1 = cv2.resize(img1, (width, height))
            img2 = cv2.resize(img2, (width, height))
            img_out = cv2.hconcat([img1, img2])
            out.write(img_out)

    if use_flag == 'videos':
        cap1 = cv2.VideoCapture(str(path1))
        cap2 = cv2.VideoCapture(str(path2))

        w = int(cap1.get(cv2.CAP_PROP_FRAME_WIDTH))
        h = int(cap1.get(cv2.CAP_PROP_FRAME_HEIGHT))

        if height is None:
            height = h
        if width is None:
            width = w

        out = cv2.VideoWriter(str(output_fname),
                              cv2.VideoWriter_fourcc(*"mp4v"), (frame_rate),
                              (width * 2, height))
        while True:
            ret1, img1 = cap1.read()
            ret2, img2 = cap2.read()

            if not (ret1 and ret2):
                break
            img1 = cv2.resize(img1, (width, height))
            img2 = cv2.resize(img2, (width, height))

            img_out = cv2.hconcat([img1, img2])
            out.write(img_out)

    out.release()

    if with_audio:
        # Note: the flag is named with_audio so it does not shadow the
        # add_audio() helper called below.
        final_vid_fname = str(output_fname).replace('.mp4', '_wAudio.mp4')
        extract_audio(vid_path=str(path1))
        add_audio(input_video_without_audio=output_fname,
                  source_audio='extracted_audio.aac',
                  output_vid_name=final_vid_fname)
        os.remove('extracted_audio.aac')
        os.remove(output_fname)
        os.rename(final_vid_fname, str(output_fname))

    return
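
# A minimal usage sketch (hypothetical paths; assumes the module-level helpers
# used above, e.g. natural_sort / extract_audio / add_audio, are importable):
StackVideos('runs/baseline.mp4', 'runs/improved.mp4', use_flag='videos',
            output_fname='baseline_vs_improved.mp4', frame_rate=25)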
Exemplo n.º 40
0
def color_pick(img, color):
    global g_hsv

    if color == 1:
        # Convert to the HSV color space
        hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        # Detect blue
        hsv_min = np.array([80, 20, 180])
        hsv_max = np.array([150, 255, 255])
        mask = cv2.inRange(hsv, hsv_min, hsv_max)
    elif color == 2:
        # Convert to the HSV color space
        hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        # Detect green
        hsv_min = np.array([30, 50, 50])
        hsv_max = np.array([90, 255, 255])
        mask = cv2.inRange(hsv, hsv_min, hsv_max)
    elif color == 3:
        # Convert to the HSV color space
        hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        # Detect red (hue wraps around, so two ranges are combined)
        hsv_min = np.array([0, 65, 65])
        hsv_max = np.array([40, 255, 255])
        mask = cv2.inRange(hsv, hsv_min, hsv_max)
        hsv_min = np.array([160, 65, 65])
        hsv_max = np.array([180, 255, 255])
        mask += cv2.inRange(hsv, hsv_min, hsv_max)
    else:
        # Convert to grayscale
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # Detect black
        ret, mask = cv2.threshold(gray, 10, 255, cv2.THRESH_BINARY_INV)
        hsv = None
    # Extract contours
    image, contours, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL,
                                                  cv2.CHAIN_APPROX_NONE)
    if hsv is not None:
        g_hsv = cv2.hconcat(cv2.split(hsv))
    # Select the largest region
    max_area = 0
    best_cnt = None
    for cnt in contours:
        epsilon = 0.01 * cv2.arcLength(cnt, True)
        tmp = cv2.approxPolyDP(cnt, epsilon, True)
        area = cv2.contourArea(tmp)
        if max_area < area:
            best_cnt = cnt
            max_area = area
    # Check whether a target was found
    if best_cnt is None:
        # print("color pick failed.")
        return None, None, None
    # Compute the centroid of the region
    try:
        M = cv2.moments(best_cnt)
        cx = int(M['m10'] / M['m00'])
        cy = int(M['m01'] / M['m00'])
    except ZeroDivisionError:
        # Guard against the occasional division by zero
        print("ZeroDivisionError!!")
        return None, None, None
    # Draw the detected region
    cv2.drawContours(img, [best_cnt], -1, (0, 255, 0), 3)
    return mask, cx, cy
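
# A minimal usage sketch (hypothetical frame source; color=1/2/3 selects
# blue/green/red as in the branches above, anything else picks black):
frame = cv2.imread('frame.png')        # hypothetical input frame
mask, cx, cy = color_pick(frame, 1)    # largest blue blob
if mask is not None:
    print('blue blob centroid:', cx, cy)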
Exemplo n.º 41
0
File_name='bbb.dpx' 
normalized_val_uint16 = 65535


if __name__ == '__main__':

#    capture = cv2.VideoCapture(0)
    capture = cv2.VideoCapture("nichijo_op.mp4")
    wait_time = int(1/29.97 * 1000)

    ret, img_pre = capture.read()
    if(ret != True):
        print("source open error!")
        sys.exit(0)

    while True:
        ret, img_now = capture.read()
        if(ret != True):
            break
        img_edit = (img_pre * 0.4) + (img_now * 0.6)
        img_edit = np.uint8(img_edit)
        img_view = cv2.hconcat([img_now, img_edit])
        cv2.imshow("cam view", img_view)

        img_pre = img_edit.copy()
        if cv2.waitKey(1) >= 0:
            break

    cv2.destroyAllWindows()
    
Exemplo n.º 42
0
                        ocv.FONT_HERSHEY_SIMPLEX,
                        1, (0, 0, 255),
                        thickness=2,
                        lineType=ocv.LINE_AA)
            print(ori_frame.shape)
            print(cur_frame.shape)
        except StopIteration:
            break
        # ocv.imshow(WINDOW_NAME['ori_vid'], ori_frame)
        # ocv.imshow(WINDOW_NAME['cur_vid'], cur_frame)
        # alpha = 0.5
        # blend_frame = ocv.addWeighted(ori_frame, alpha, cur_frame, 1. - alpha, 0.)
        # ocv.imshow(WINDOW_NAME['blend_vid'], blend_frame)
        # diff_frame = ori_frame - cur_frame
        # ocv.imshow(WINDOW_NAME['diff_vid'], diff_frame)

        combine = ocv.hconcat([ori_frame, cur_frame])
        videoWriter.write(combine)  # write frame to video
        ocv.imshow('videoConcat', combine)
        key = ocv.waitKey(1)
        if key < 0:
            continue
        else:
            if key == 27:  # ESC
                break
            if key == ord('p'):
                key = ocv.waitKey(0)
                continue

    ocv.destroyAllWindows()
    videoWriter.release()
Exemplo n.º 43
0
        angle += ANGLE / length
        row.append(
            (
                round(cos((252.0 + angle) / 360 * pi * 2) * r) + int(w / 2),
                -round(sin((252.0 + angle) / 360 * pi * 2) * r)
            )
        )
    coordinates.append(row)

# Get the median row width
width = int(np.median([len(m) for m in coordinates]))

# Loop: crop the sector, warp it into a rectangle and stitch it on, then rotate the image
output = None
for angle in range(0, 360, ANGLE):
    M = cv2.getRotationMatrix2D((r2 / 2, r2 / 2), angle, 1)
    rotate = cv2.warpAffine(image, M, (r2, r2))
    crop = rotate[h:, int(r2 / 2 - w / 2): int(r2 / 2 - w / 2 + w)]
    piece = []
    for m in coordinates:
        row = np.array([crop[y, x] for x, y in m])
        row = cv2.resize(row, (3, width))
        piece.append(row)
    piece = np.array(piece)
    if output is None:
        output = piece
    else:
        output = cv2.hconcat([piece, output])

cv2.imwrite('{}.png'.format(dist), output)
print('>>{}.png'.format(dist))
def stream(run_time = c.CALIB_T, calibrate = False, display = False, timeout = False):
    message_list = []
    left_stream_imgs = []
    right_stream_imgs = []
    left_chessboards = []
    right_chessboards = []
    chessboards = mp.Queue()
    chessboard_searchers = []
    pos = 0
    left_done = False
    right_done = False
    disp_n_frame = 0
    cal_n_frame = 0
    img_size = None
    done = False
    stopframe = int(run_time*c.FRAMERATE)
    chessboards_found = mp.Value('i',0)

    rec_obj = sf.MyMessage(c.TYPE_STREAM, c.CALIB_IMG_DELAY)
    send_to_client(c.LEFT_CLIENT, rec_obj)
    send_to_client(c.RIGHT_CLIENT, rec_obj)

    if calibrate:
        # load camera intrinsic calibration data
        left_cal, right_cal = s_cal.load_calibs()
        
    while True:
        message_list.extend(read_all_client_messages())
        if len(message_list) > 0:
            while pos < len(message_list):
                # when the clients send an image during calibration
                if (message_list[pos]['data'].type == c.TYPE_IMG) and not done:
                    n_frame = message_list[pos]['data'].message[0]
                    print(n_frame)
                    y_data = message_list[pos]['data'].message[1]
                    if img_size is None:
                        (h,w) = y_data.shape[:2]
                        img_size = (w,h)
                    # add the img to the corresponding calibration img list
                    if message_list[pos]['client'] == c.LEFT_CLIENT:
                        left_stream_imgs.append((n_frame, y_data))
                    elif message_list[pos]['client'] == c.RIGHT_CLIENT:                    
                        right_stream_imgs.append((n_frame, y_data))
                    # cv2.imwrite(f"{message_list[pos]['client']}{n_frame}.png",y_data)

                    if display:
                        if (len(left_stream_imgs) > disp_n_frame) and (len(right_stream_imgs) > disp_n_frame): 
                            
                            ########## FOR TESTING ##############
                            os.chdir(c.IMG_P)
                            cv2.imwrite(f"l_{(disp_n_frame):04d}.png",left_stream_imgs[disp_n_frame][1])
                            cv2.imwrite(f"r_{(disp_n_frame):04d}.png",right_stream_imgs[disp_n_frame][1])
                            os.chdir(c.ROOT_P)
                            ########## FOR TESTING ##############

                            disp_frame = cv2.hconcat([left_stream_imgs[disp_n_frame][1],right_stream_imgs[disp_n_frame][1]])
                            cv2.imshow(f"stream", disp_frame)
                            print(disp_n_frame)
                            cv2.waitKey(100)
                            if left_stream_imgs[disp_n_frame][0] >=stopframe and timeout:
                                done_obj = sf.MyMessage(c.TYPE_DONE, 1)
                                send_to_client(c.LEFT_CLIENT, done_obj)
                                send_to_client(c.RIGHT_CLIENT, done_obj)
                                done = True
                                cv2.destroyAllWindows()
                            
                            disp_n_frame += 1

                    # look for chessboards
                    if calibrate:
                        if (len(left_stream_imgs) > cal_n_frame) and (len(right_stream_imgs) > cal_n_frame):
                            chessboard_search = mp.Process(target = search_for_chessboards, args=(chessboards_found, chessboards, [left_stream_imgs[cal_n_frame]], [right_stream_imgs[cal_n_frame]]))
                            chessboard_search.start()
                            chessboard_searchers.append(chessboard_search)
                            cal_n_frame += 1

                    if chessboards_found.value >= c.MIN_PATTERNS:
                        done_obj = sf.MyMessage(c.TYPE_DONE, 1)
                        send_to_client(c.LEFT_CLIENT, done_obj)
                        send_to_client(c.RIGHT_CLIENT, done_obj)
                        if display: 
                            done = True
                            cv2.destroyAllWindows()

                # when both clients send the done message, they are finished collecting frames
                elif (message_list[pos]['data'].type == c.TYPE_DONE):
                    if message_list[pos]['client'] == c.LEFT_CLIENT:
                        left_done = True
                    elif message_list[pos]['client'] == c.RIGHT_CLIENT:
                        right_done = True
                    if left_done and right_done:
                        if calibrate and chessboards_found.value >= c.MIN_PATTERNS:
                            # for searcher in chessboard_searchers:
                            #     searcher.join()
                            while True:
                                try:
                                    left_chessboard, right_chessboard = chessboards.get_nowait()
                                    left_chessboards.append(left_chessboard)
                                    right_chessboards.append(right_chessboard)
                                except queue.Empty:
                                    if chessboards.qsize() == 0:
                                        break

                            # check all chessboards are valid in both images
                            s_cal.validate_chessboards(left_chessboards, right_chessboards)
                            # calibrate the stereo cameras
                            RMS, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, R, T, E, F = s_cal.calibrate_stereo(
                                left_chessboards, right_chessboards, left_cal, right_cal, img_size)
    
                            # obtain stereo rectification projection matrices
                            R1, R2, P1, P2, Q, validPixROI1, validPixROI2 = cv2.stereoRectify(cameraMatrix1, distCoeffs1,
                                                        cameraMatrix2, distCoeffs2, img_size, R, T)

                            # save all calibration params to object
                            stereo_calib =  s_cal.StereoCal(RMS, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, R, T, E, F,
                                                        R1, R2, P1, P2, Q, validPixROI1, validPixROI2)

                            s_cal.save_stereo_calib(stereo_calib)
                        
                            print(f'calibration complete, rms: {RMS}')
                            return stereo_calib
                        else:
                            return None
                pos += 1
# -*- coding: utf-8 -*-
import cv2

img1 = cv2.imread('image-1.jpg')
img2 = cv2.imread('image-2.jpg')
img3 = cv2.imread('image-3.jpg')
img4 = cv2.imread('image-4.jpg')

img5 = cv2.vconcat([img1, img2])
img6 = cv2.vconcat([img3, img4])
img7 = cv2.hconcat([img5, img6])
cv2.imwrite('output.jpg', img7)
Exemplo n.º 46
0
background = cv2.resize(background, (int(background_width * ratio), 360))

#load cat images from the folder
cat_images = load_images_from_folder("./cat")

# create new image of desired size and color (green) for padding
ww = background.shape[1] - exampleCat.shape[
    1]  #padding must be the size of empty parts
hh = exampleCat.shape[0]
color = (15, 235, 16)  #exact green color matches with the cat
result = np.full((hh, ww, 3), color, dtype=np.uint8)

flipped_cats = []  #cat images as their mirrored selves
for cat in cat_images:
    flipped = cv2.flip(cat, 1)
    padded_img = cv2.hconcat([result, flipped])
    flipped_cats.append(padded_img)
# flipping the cat images and padding them to meet the left border of the background image

images_list = []  #holds images left cat + background
loop_in_images(cat_images, background, images_list)

double_images_list = []  #holds images of images_list+right cat
loop_in_images_video_background(flipped_cats, images_list, double_images_list,
                                0)

clip = mpy.ImageSequenceClip(double_images_list, fps=10)
audio = mpy.AudioFileClip('selfcontrol_part.wav').set_duration(clip.duration)
clip = clip.set_audio(audioclip=audio)
clip.write_videofile('second.mp4', codec='libx264')
Exemplo n.º 47
0
    RGB_to_RGB_mat = normalize_RGB_to_RGB_mat(RGB_to_RGB_mat)
    print(XYZ_to_XYZ_mat)
    print(RGB_to_RGB_mat)

#    capture = cv2.VideoCapture(0)
    capture = cv2.VideoCapture("nichijo_op.mp4")

    while True:
        # Grab one frame
        ret, img_src = capture.read()
        if(ret != True):
            break

        # Normalize to 1.0 and split into RGB (note the channel order is BGR)
        b_array, g_array, r_array = np.dsplit(img_src, 3)

        # Color temperature conversion
        img_dst = multiply_3x3_mat(img_src, RGB_to_RGB_mat)

        # Combine src and dst into a single image
        img_view = cv2.hconcat([img_src, img_dst])

        # Display
        cv2.imshow("cam view", img_view)

        if cv2.waitKey(1) >= 0:
            break

    cv2.destroyAllWindows()
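
# multiply_3x3_mat is not shown in this fragment; a plausible sketch of such a
# per-pixel 3x3 colour-matrix multiply (an assumption, not the original code):
import numpy as np

def multiply_3x3_mat(img, mat):
    # Apply a 3x3 colour matrix to every pixel of an H x W x 3 uint8 image and
    # clip back to uint8 so the result can be concatenated with the source.
    # Assumption: mat is expressed in the same channel order as img (BGR).
    h, w, _ = img.shape
    out = img.reshape(-1, 3).astype(np.float32) @ np.asarray(mat, dtype=np.float32).T
    return np.clip(out, 0, 255).astype(np.uint8).reshape(h, w, 3)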
    
Exemplo n.º 48
0
# cv2.imshow("Modified", imageB)
# cv2.imshow("Diff", diff)
# cv2.imshow("Thresh", thresh)

# cv2.imwrite('original.png', imageA)
# cv2.imwrite('modified.png', imageB)
# cv2.imwrite('diff.png', diff)
# cv2.imwrite('thresh.png', thresh)

# def concat_vh(list_2d):
#     return cv2.vconcat([cv2.hconcat(list_h)
#                         for list_h in list_2d])

# img_tile = concat_vh([[imageA, imageB], [diff, thresh]])

h_img_0 = cv2.hconcat([imageA, imageB])  #, diff, thresh])
# h_img_1 = cv2.hconcat([diff, thresh])

# Write the side-by-side comparison image to the output directory
rootFolder = pathlib.Path(__file__).parent.parent.absolute()

outputArg = args['output']
if outputArg is None:
    outputArg = '/test_driver/images/diff'

outputDir = str(rootFolder) + str(outputArg)
pathlib.Path(str(outputDir)).mkdir(parents=True, exist_ok=True)

diffImageName = args['name']
cv2.imwrite(str(outputDir) + '/' + str(diffImageName), h_img_0)
Exemplo n.º 49
0
def concat_tile(im_list_2d):
    return cv2.vconcat([cv2.hconcat(im_list_h) for im_list_h in im_list_2d])
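
# A minimal usage sketch with synthetic tiles (hconcat/vconcat require all
# images in a row/column to share the same size and dtype):
import cv2
import numpy as np

tiles = [[np.full((120, 160, 3), c, dtype=np.uint8) for c in (64, 128)],
         [np.full((120, 160, 3), c, dtype=np.uint8) for c in (192, 255)]]
grid = concat_tile(tiles)            # 2x2 grid, 240 x 320 pixels
cv2.imwrite('grid_2x2.png', grid)    # hypothetical output name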
Exemplo n.º 50
0
                width=800,
                height=800)

plotly.offline.plot(fig,
                    filename=PATH +
                    'images/html_exports/france/{}.html'.format(name_fig),
                    auto_open=False)

# In[32]:

im1 = cv2.imread(PATH +
                 'images/charts/france/line_metropole_avec_couvre_feu.jpeg')
im2 = cv2.imread(PATH +
                 'images/charts/france/line_metropoles_sans_couvre_feu.jpeg')

im3 = cv2.hconcat([im1, im2])

cv2.imwrite(PATH + 'images/charts/france/line_metropoles_comp_couvre_feu.jpeg',
            im3)

# In[33]:

nb_last_days = 25
for (title, df_temp, name) in [("Tous âges", df_metro_0, "0"),
                               ("> 65 ans", df_metro_65, "65")]:
    metros = list(dict.fromkeys(list(df_temp['Metropole'].values)))
    metros_ordered = df_temp[df_temp['semaine_glissante'] ==
                             df_temp['semaine_glissante'].max()].sort_values(
                                 by=["ti"], ascending=True)["Metropole"].values
    dates_heatmap = list(
        dict.fromkeys(list(df_temp['semaine_glissante'].values)))
def basicLinearTransform():
    res = cv.convertScaleAbs(img_original, alpha=alpha, beta=beta)
    img_corrected = cv.hconcat([img_original, res])
    cv.imshow("Brightness and contrast adjustments", img_corrected)
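
# A minimal driver sketch (hypothetical file name and values; the original
# defines img_original, alpha and beta elsewhere, with cv2 imported as cv):
import cv2 as cv

img_original = cv.imread('input.jpg')   # hypothetical input image
alpha, beta = 1.3, 40                   # contrast gain and brightness offset
basicLinearTransform()
cv.waitKey(0)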
Exemplo n.º 52
0
Created on Mon Mar  9 22:51:10 2020

@author: lizeth
"""

import numpy as np
import cv2

# Load a color image in grayscale
img= cv2.imread('cameraman.jpg',0)
#Obtain image dimensions
rows,cols=img.shape
#Initialize variables
imgResult=np.zeros((rows,cols),dtype="uint8")
th = 120

#Thresholding
for i in range(rows):
    for j in range(cols):
        if img[i,j]>th:
            imgResult[i,j]=255

#show images
#cv2.imshow("Image Result",imgResult)
#cv2.imshow("Original image",img)
image = cv2.hconcat([img,imgResult])
cv2.imshow('Original / Thresholding',image)

cv2.waitKey(0)
cv2.destroyAllWindows()
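
# For reference (not part of the original): the same binary image without the
# explicit loops, using the img and th defined above.
imgVectorized = np.where(img > th, 255, 0).astype(np.uint8)
_, imgOpenCV = cv2.threshold(img, th, 255, cv2.THRESH_BINARY)
assert np.array_equal(imgVectorized, imgResult) and np.array_equal(imgOpenCV, imgResult)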
Exemplo n.º 53
0
    normalized_val = 2**(4 * img.dtype.num) - 1
    img             = img/normalized_val

    # Resize the image to half size
    image_width  = img.shape[1]//2
    image_height = img.shape[0]//2
    img_resize_half = cv2.resize(img, (image_width, image_height))

    # Extract the half-level, below-64 (limited black) and 940-and-above (super-white) images
    img_half_level  = gamma_func(img_resize_half)
    img_black_area  = view_limited_black(img_resize_half)
    img_super_white = view_superwhite(img_resize_half)
    
    # Concatenate the images into a single image
    img_vcat1 = cv2.vconcat([img_resize_half, img_half_level])
    img_vcat2 = cv2.vconcat([img_black_area, img_super_white])
    img_hcat  = cv2.hconcat([img_vcat1, img_vcat2])

    # Preview the image
    cv2.imshow('bbb.tif', img_hcat)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

    # Convert from 0..1 to 0..65535 for output
    out_img = img_hcat * normalized_val_uint16
    out_img = np.uint16(out_img)

    # Save
    cv2.imwrite('out.tiff', out_img)
    
Exemplo n.º 54
0
lst = []
for i in range(img.shape[0]):
    for j in range(img.shape[1]):
        lst.append(np.binary_repr(img[i][j], width=8))

eight_bit_img = (np.array([int(i[0]) for i in lst], dtype=np.uint8) *
                 128).reshape(img.shape[0], img.shape[1])
seven_bit_img = (np.array([int(i[1]) for i in lst], dtype=np.uint8) *
                 64).reshape(img.shape[0], img.shape[1])
six_bit_img = (np.array([int(i[2]) for i in lst], dtype=np.uint8) *
               32).reshape(img.shape[0], img.shape[1])
five_bit_img = (np.array([int(i[3]) for i in lst], dtype=np.uint8) *
                16).reshape(img.shape[0], img.shape[1])
four_bit_img = (np.array([int(i[4]) for i in lst], dtype=np.uint8) *
                8).reshape(img.shape[0], img.shape[1])
three_bit_img = (np.array([int(i[5]) for i in lst], dtype=np.uint8) *
                 4).reshape(img.shape[0], img.shape[1])
two_bit_img = (np.array([int(i[6]) for i in lst], dtype=np.uint8) * 2).reshape(
    img.shape[0], img.shape[1])
one_bit_img = (np.array([int(i[7]) for i in lst], dtype=np.uint8) * 1).reshape(
    img.shape[0], img.shape[1])
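# For reference (not part of the original): each plane can also be obtained
# without string conversion, e.g. eight_bit_img == ((img >> 7) & 1) * 128 and,
# in general, plane k (k = 0..7) is ((img >> k) & 1) * (1 << k).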

finalr = cv2.hconcat([eight_bit_img, seven_bit_img, six_bit_img, five_bit_img])
finalv = cv2.hconcat([four_bit_img, three_bit_img, two_bit_img, one_bit_img])

final = cv2.vconcat([finalr, finalv])

cv2.imshow('bit plane', final)
cv2.waitKey(0)