Example #1
0
    def run_button(self):
        """Apply the UI settings to the config and run the render pipeline.

        Reads width/height/fps/length from the line-edit widgets and stores
        them (as ints) on the config module ``c``, then — depending on the
        config flags — renders the sheet text to a PNG image, assembles the
        frames into a video, and optionally deletes the intermediate images.
        """
        # Resolution: parse whatever was typed into the res boxes.
        c.width = int(self.ui.width_line.text())
        c.height = int(self.ui.height_line.text())

        # Frames per second for the output video.
        c.fps = int(self.ui.fps_line.text())

        # Number of frames to render.
        c.length_frames = int(self.ui.length_line.text())

        # Fetch the sheet body and its title (title is unused here).
        main_text, title_sheet = fs.get_sheet()
        if c.flag_create_image:
            # Creates a PNG image from the sheet string.
            create_image.create_image(main_text)
        if c.flag_create_video:
            # Stitch the PNGs into a movie sequence (could also be done via FFmpeg).
            make_video.make_video()
        if c.flag_delete_images_when_done:
            # Clean up the intermediate frame images.
            # (Fixed: a stray extra os.listdir('images') call whose result was
            # discarded has been removed.)
            for i in os.listdir('images'):
                os.remove(f"images/{i}")
def main():
    """Build a motion heatmap video from input.mp4.

    For every frame after the first, background-subtract, threshold, and
    accumulate the motion mask; write a heatmap-overlaid copy of each frame
    to ./frames/, stitch them into ./output.avi, and save a final heatmap
    overlay of the accumulated motion on the first frame (diff-overlay.jpg).

    Fixes over the original: the return flag of ``capture.read()`` is now
    checked (a short or failed read previously crashed on ``frame.shape``),
    and the foreground-mask local no longer shadows the ``filter`` builtin.
    """
    capture = cv2.VideoCapture('input.mp4')
    background_subtractor = cv2.bgsegm.createBackgroundSubtractorMOG()
    length = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))

    bar = Bar('Processing Frames', max=length)

    first_iteration_indicator = 1
    for i in range(0, length):

        ret, frame = capture.read()
        if not ret:
            # CAP_PROP_FRAME_COUNT can overestimate; stop when the stream ends.
            break

        # If first frame: keep it for the final overlay and size the accumulator.
        if first_iteration_indicator == 1:

            first_frame = copy.deepcopy(frame)
            height, width = frame.shape[:2]
            accum_image = np.zeros((height, width), np.uint8)
            first_iteration_indicator = 0
        else:

            fg_mask = background_subtractor.apply(frame)  # remove the background
            cv2.imwrite('./frame.jpg', frame)
            cv2.imwrite('./diff-bkgnd-frame.jpg', fg_mask)

            # Binarize: each moving pixel contributes a small constant (2) so
            # the uint8 accumulator saturates slowly over many frames.
            threshold = 2
            maxValue = 2
            ret, th1 = cv2.threshold(fg_mask, threshold, maxValue, cv2.THRESH_BINARY)

            # add to the accumulated image
            accum_image = cv2.add(accum_image, th1)
            cv2.imwrite('./mask.jpg', accum_image)

            color_image_video = cv2.applyColorMap(accum_image, cv2.COLORMAP_SUMMER)

            # Blend the heatmap onto the live frame for the output video.
            video_frame = cv2.addWeighted(frame, 0.7, color_image_video, 0.7, 0)

            name = "./frames/frame%d.jpg" % i
            cv2.imwrite(name, video_frame)

            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        bar.next()

    bar.finish()

    make_video('./frames/', './output.avi')

    color_image = cv2.applyColorMap(accum_image, cv2.COLORMAP_HOT)
    result_overlay = cv2.addWeighted(first_frame, 0.7, color_image, 0.7, 0)

    # save the final heatmap
    cv2.imwrite('diff-overlay.jpg', result_overlay)

    # cleanup
    capture.release()
    cv2.destroyAllWindows()
Example #3
0
def project_structure(text_music_name, text_artist_name, image_type, op_lyric, op_deepDream, deepDream_format):
    """Build a music video for a song and return the generated video name.

    Pipeline: wipe leftovers from a previous job, look up the lyric/video
    link, download the song, fetch and retime subtitle images, optionally
    run a DeepDream transform over the images, and render the final clip
    (with or without on-screen lyrics).

    Fix over the original: the ``deepDream_format == '5'`` and ``else``
    branches were byte-identical copy-paste except for the dream function;
    they are now deduplicated.
    """
    global current_job
    # Start from a clean slate: drop artifacts from any previous job.
    shutil.rmtree('/code/flask/music', ignore_errors=True)
    shutil.rmtree('/code/flask/imagens', ignore_errors=True)

    json_code = get_lyric_videoLink(text_music_name, text_artist_name)

    json_code['MusicPath'] = download_song(json_code['VideoID'])

    print('\n\nmusica baixada\n\n')

    json_code['Subtitle'] = get_images(json_code['Subtitle'], image_type)
    print('\n\nimagens pegadas\n\n')

    json_code['Subtitle'] = improve_subtitle(json_code['Subtitle'])
    print('\n\ntimestamps modificado\n\n')
    if op_deepDream:
        # Pick the dream transform for the requested format; only the '1'
        # format passes True as the third argument downstream (its exact
        # meaning is defined by make_videoDeep*).
        if deepDream_format == '1':
            json_code['Subtitle'] = dreamImage(json_code['Subtitle'])
            single_flag = True
        elif deepDream_format == '5':
            json_code['Subtitle'] = dreamImage_5(json_code['Subtitle'])
            single_flag = False
        else:
            json_code['Subtitle'] = dreamImage_10(json_code['Subtitle'])
            single_flag = False
        if op_lyric:
            video_name = make_videoDeep_lyric(json_code, text_music_name, single_flag)
        else:
            video_name = make_videoDeep(json_code, text_music_name, single_flag)
    else:
        if op_lyric:
            video_name = make_video_lyric(json_code, text_music_name)
        else:
            video_name = make_video(json_code, text_music_name)

    print('\n\nclipe feito\n\n')

    return video_name
Example #4
0
if __name__ == "__main__":

    # Orca
    # base = '/usr0/home/dlwong/Projects'
    base = '/users/ahjiang'
    # base2 = '/usr0/home/dlwong/Dropbox/CMU/Projects/mainstream'
    base2 = '/users/ahjiang/src/'
    imagelabels_file = base + '/image-data/video/oxford-flowers/imagelabels.mat'
    images_dir = base + '/image-data/video/oxford-flowers/images/'
    dst_dir = base + '/image-data/video/flowers_video'
    metafile_dir = base2 + '/mainstream/log/videos/flowers/'

    label_numbers = {"daisy": 49}
    '''
    possibilities = get_label(49, imagelabels_file, images_dir)
    for p in possibilities:
        positives = get_positive_ids(imagelabels_file, p)
        print p, positives[0]
        '''

    positives = get_positive_ids(imagelabels_file, label_numbers["daisy"])
    negatives = get_negative_ids(imagelabels_file, label_numbers["daisy"])
    human_label = "daisy-p3-n7-buffer2500"
    human_label = "test"
    video_name = make_video.make_video(positives, negatives, 1, 1, 10,
                                       human_label, images_dir, dst_dir,
                                       metafile_dir, False)
    perturbed_video_name = make_perturbed_video(dst_dir, video_name)
    print video_name, perturbed_video_name
def main():
    """Build a motion heatmap / traffic-warning video from <videoname>.mp4.

    Accumulates thresholded background-subtraction masks over all frames,
    overlays a pseudo-colored heatmap plus a cumulative-traffic caption on
    each output frame (highlighting the densest cell of a 10x10 grid when
    it exceeds ``warning_threshold``), writes frames to
    ./frames_test_dining/, stitches them into <filename>-output.avi, and
    saves the final heatmap overlaid on the first frame.

    NOTE(review): relies on module-level names ``videoname``, ``filename``,
    ``dense_max``, ``dense_every``, and ``make_video`` defined elsewhere in
    this file — confirm they are set before calling.
    """
    capture = cv2.VideoCapture(videoname + '.mp4')
    #background_subtractor = cv2.createBackgroundSubtractorKNN(detectShadows=False)
    background_subtractor = cv2.bgsegm.createBackgroundSubtractorMOG()
    length = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))  # total frame count of the video

    warning_threshold = 15  # cumulative-traffic level that triggers the danger warning
    print("Start processing...")
    first_iteration_indicator = 1
    threshold = 2
    maxValue = 2
    for i in tqdm(range(0, length), ncols=100):  # progress bar fixed at 100 columns

        # NOTE(review): `ret` is never checked — a failed read leaves
        # `frame` as None and cv2.flip below would raise.
        ret, frame = capture.read()
        frame = cv2.flip(frame, -1)

        # If first frame: remember it for the final overlay and set up the
        # accumulator plus the 10x10 analysis grid (shared via globals).
        if first_iteration_indicator == 1:

            first_frame = copy.deepcopy(frame)
            global height, width  # frame height and width
            height, width = frame.shape[:2]
            global height_size, width_size
            height_size = height // 10  # height/width of each small grid cell
            width_size = width // 10
            accum_image = np.zeros((height, width), np.uint8)
            global size2
            size2 = 2 * height_size * width_size
            first_iteration_indicator = 0

        else:
            # NOTE(review): `filter` shadows the builtin of the same name.
            filter = background_subtractor.apply(
                frame)  # remove the background
            cv2.imwrite(filename + '-frame_test_dining.jpg', frame)
            cv2.imwrite(filename + '-diff-bkgnd-frame_test_dining.jpg', filter)
            ret, th1 = cv2.threshold(filter, threshold, maxValue,
                                     cv2.THRESH_BINARY)  # binarize the foreground mask

            # add to the accumulated image
            accum_image = cv2.add(accum_image, th1)
            cv2.imwrite(filename + '-mask_test_dining.jpg', accum_image)
            color_image_video = cv2.applyColorMap(accum_image,
                                                  cv2.COLORMAP_SUMMER)  # pseudo-color the heatmap
            video_frame = cv2.addWeighted(frame, 0.7, color_image_video, 0.5,
                                          0)  # weighted blend of frame and heatmap

            #cv2.imwrite(name, video_frame)
            # Re-evaluate the densest grid cell every 10th frame only.
            # NOTE(review): `text`, `text_color`, `max_i`, `max_j` are only
            # assigned when i % 10 == 1; that holds on the first else-pass
            # (i == 1), but this is fragile if the loop structure changes.
            if i % 10 == 1:
                number, max_i, max_j = dense_max(accum_image)
                text = "Cumulative traffic:" + str(number)  # text to overlay on the frame
                text_color = (0, 255, 0)

                if number > warning_threshold:
                    text = text + '  danger!'
                    text_color = (0, 0, 255)
                    # Paint the offending grid cell blue, scaled by how much
                    # traffic each pixel accumulated.
                    warning_area = np.zeros((height, width, 3), np.uint8)
                    for x in range(max_i * width_size,
                                   (max_i + 1) * width_size):
                        for y in range(max_j * height_size,
                                       (max_j + 1) * height_size):
                            pixel_value = accum_image[y, x]
                            warning_area[y, x] = (pixel_value, 0, 0)

                    video_frame = cv2.add(video_frame, warning_area)

            # Caption is anchored at the center of the densest cell.
            text_position = (int(
                (max_i + 0.5) * width_size), int((max_j + 0.5) * height_size))
            cv2.putText(video_frame, text, text_position,
                        cv2.FONT_HERSHEY_TRIPLEX, 0.8, text_color, 2,
                        cv2.LINE_AA)
            name = "./frames_test_dining/frame%d.jpg" % i
            cv2.imwrite(name, video_frame)

            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

    make_video('./frames_test_dining/', filename + '-output.avi')
    np.savetxt(filename + '.txt', accum_image, fmt='%d')  # dump the accumulator for later debugging
    color_image = cv2.applyColorMap(accum_image, cv2.COLORMAP_HOT)
    result_overlay = cv2.addWeighted(first_frame, 0.7, color_image, 0.7, 0)

    # save the final heatmap
    cv2.imwrite(filename + '-diff-overlay_test.jpg', result_overlay)
    dense_every(accum_image)
    # cleanup
    capture.release()
    cv2.destroyAllWindows()
Example #6
0
                                 cv2.THRESH_BINARY)

        # add to the accumulated image
        accum_image = cv2.add(accum_image, th1)
        cv2.imwrite('./mask.jpg', accum_image)

        color_image_video = cv2.applyColorMap(accum_image, cv2.COLORMAP_HOT)

        video_frame = cv2.addWeighted(frame, 0.7, color_image_video, 0.7, 0)

        name = "./frames/frame%d.jpg" % i
        cv2.imwrite(name, video_frame)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    bar.next()

bar.finish()

make_video('./frames/', './output.avi')

color_image = cv2.applyColorMap(accum_image, cv2.COLORMAP_HOT)
result_overlay = cv2.addWeighted(first_frame, 0.7, color_image, 0.7, 0)

# save the final heatmap
cv2.imwrite('diff-overlay.jpg', result_overlay)

# cleanup
capture.release()
cv2.destroyAllWindows()