def save_depthmap_video():
    """Interactively gather conversion settings and save a depth-map video.

    Prompts for the input/output file names, the frame step, and the
    depth-map mode (neural-network, fast, low-quality), then runs the
    conversion.  A ValueError from the converter is reported to stdout
    rather than propagated.
    """
    input_video = get_input_video_name()
    print_formatted_text("Video to process: %s" % input_video)

    output_video = get_output_video_name()
    print_formatted_text("Output will be saved: %s" % output_video)

    step = get_step_value()
    print_formatted_text("Step is: %s" % step)

    low, fast = False, False
    nn = get_nn_depthmap_option()
    if nn:
        print_formatted_text("Neural network depth map enabled")
    else:
        # fast/low-quality options only apply to the non-NN pipeline
        fast = get_fast_depthmap_option()
        if fast:
            print_formatted_text("Fast depth map enabled")

        low = get_low_quality_option()
        if low:
            print_formatted_text("Low quality enabled")

    try:
        VideoConverter(input_video, low, step, fast, nn).convert_video(output_video)
    except ValueError as err:
        print("===============> Error: ", err)
Ejemplo n.º 2
0
def test_file_not_found(mocked_depth_map_func):
    '''
    Verify that a ValueError is raised when the input file does not exist
    '''
    with pytest.raises(ValueError):
        VideoConverter("any_input", False, 1, True, False).convert_video("any_output")
Ejemplo n.º 3
0
def test_performance():
    """Profile a full conversion run: timing, object/class memory, and
    output-file count, using the Tester helper."""
    profiler = Tester()
    converter = VideoConverter("../Video to image/test.mp4", True, 1,
                               False, False)
    print("Creating an instance...")
    print("setting up class Video_To_Depthmap...")
    profiler.init_class_tracker(VideoConverter)
    profiler.init_output_file_check()
    print("creating a an instance of test class")
    print("__________")
    print("Timing test:")
    # time the conversion itself
    profiler.startTimer()
    converter.convert_video("out.mp4")
    profiler.stopTimer()
    print("checking object size, memory and profile:")
    profiler.check_object_size(converter)
    profiler.print_object_profile(converter)
    profiler.track_object()
    print("__________")
    print("checking Class size, memory and profile:")
    profiler.snapshot_class()
    profiler.get_class_summary()
    print("__________")
    print("checking for output files added:")
    print(f"so far there have been {profiler.output_file_count()} files added.")
Ejemplo n.º 4
0
def test_depthmaps_step_0(mocked_file_check):
    '''
    Verify that VideoConverter raises ValueError for any step <= 0
    '''
    # mock check_file_exists so the input file appears to exist
    mocked_file_check.return_value = True

    # both a zero and a negative step must be rejected by the constructor
    for invalid_step in (0, -1):
        with pytest.raises(ValueError):
            VideoConverter("any_input", False, invalid_step, True, False)
Ejemplo n.º 5
0
def test_one_frame(mocked_video_reader, mocked_file_check, mocked_cv2, mocked_depth_map_func):
    '''
    Verify that a one-frame video triggers neither depth-map computation
    nor any frame written to the output.
    '''
    # reader will yield exactly one frame
    set_video_reader_side_effect(mocked_video_reader, 1)
    # mock of VideoWriter object
    mocked_writer = mocked_cv2.VideoWriter.return_value
    # mock check_file_exists to make input file 'available'
    mocked_file_check.return_value = True

    # call tested method
    test_converter = VideoConverter("any_input", False, 1, True)
    test_converter.convert_video("any_output")

    # PEP 8 / flake8 E712: assert truthiness directly instead of '== False'
    assert not mocked_depth_map_func.called
    assert not mocked_writer.write.called
Ejemplo n.º 6
0
def test_nn_and_low_quality_endabled(mocked_depthmap):
    '''
    If low quality and neural network options are enabled, constructor should raise an
    exception
    '''
    # NOTE(review): given the signature VideoConverter(input, low, step, fast, nn),
    # this call passes low=False, fast=True, nn=True — which tests fast+nn, not
    # low+nn as the docstring/name suggest. Confirm the intended arguments.
    # The unused 'vc' binding (flake8 F841) was removed; pytest.raises only
    # needs the constructor call itself.
    with pytest.raises(ValueError):
        VideoConverter("input_file", False, 1, True, True)
Ejemplo n.º 7
0
def test_depthmaps_large_step(mocked_video_reader, mocked_file_check, mocked_cv2, mocked_depth_map_func):
    '''
    Verify that VideoConverter doesn't process any frames when step > number of frames
    '''
    frame_count = 15

    # mock check_file_exists to make input file 'available'
    mocked_file_check.return_value = True
    # reader will yield frame_count frames
    set_video_reader_side_effect(mocked_video_reader, frame_count)

    # depth-map function returns a small numpy array
    mocked_depth_map_func.return_value = np.array([[0], [1]])

    # handle to the mocked cv2.VideoWriter instance
    writer_mock = mocked_cv2.VideoWriter.return_value

    # with step larger than the frame count, nothing should be written out
    converter = VideoConverter("any_input", False, frame_count + 1, True)
    converter.convert_video("any_output")

    assert writer_mock.write.call_count == 0
Ejemplo n.º 8
0
def test_constructor(mocked_video_reader, mocked_file_check):
    '''
    Verify that VideoConverter object is properly constructed
    '''
    # mock check_file_exists to make input file 'available'
    mocked_file_check.return_value = True
    set_video_reader_side_effect(mocked_video_reader, 1)

    # removed unused 'output_file' local (flake8 F841): the constructor
    # never receives an output name — only convert_video() does.
    input_file = "any_input"
    step = 10
    low_quality = False
    fast = True
    test_converter = VideoConverter(input_file, low_quality, step, fast, False)
    assert test_converter.input_file == input_file
    assert test_converter.low_quality == low_quality
    assert test_converter.step == step
    assert test_converter.fast == fast
Ejemplo n.º 9
0
def main():
    """Command-line entry point: parse arguments and run the conversion."""
    options = get_arguments()
    video_converter = VideoConverter(options.input, options.low, options.step,
                                     options.fast)
    video_converter.convert_video(options.output)
Ejemplo n.º 10
0
def run_opengl(input_video, low, step, fast, nn):
    """Create a GLUT window, build the GLSL displacement/lighting shaders,
    set up the VideoConverter, and enter the GLUT main loop.

    Parameters mirror VideoConverter(input_video, low, step, fast, nn):
    input_video -- path of the video to render
    low         -- low-quality depth-map flag
    fast        -- fast depth-map flag
    nn          -- neural-network depth-map flag
    step        -- frame step between processed frames

    NOTE(review): this function relies on several module-level names not
    visible here (name, keyboard, display, makeList, CameraPosx/y/z, rgb,
    depth, rgbData, depthData) and publishes state through globals for the
    GLUT callbacks. glutMainLoop() does not return until the window closes.
    """
    # --- window / basic GL state -------------------------------------
    glutInit(sys.argv)
    glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB | GLUT_DEPTH)
    glutInitWindowSize(640, 480)
    glutCreateWindow(name)  # 'name' is a module-level global
    glutKeyboardFunc(keyboard)
    glClearColor(0., 0., 0., 1.)
    glShadeModel(GL_SMOOTH)
    glEnable(GL_CULL_FACE)
    glEnable(GL_DEPTH_TEST)

    # Vertex shader: displaces each vertex along z by the depth texture
    # sample scaled by uMultiplier, and prepares lighting vectors.
    vertex_shader = createAndCompileShader(GL_VERTEX_SHADER, """
	#version 130
	uniform sampler2D depth;
	uniform float uMultiplier;
	out vec2 vST; // texture coords
	out vec3 vN; // normal vector
	out vec3 vL; // vector from point to light
	out vec3 vE; // vector from point to eye
	const vec3 LIGHTPOSITION = vec3( 5., 5., 0. );
	const float PI = 3.14159265;
	const float AMP = 0.2;
	const float W = 2.;

	void
	main( )
	{
		vST = gl_MultiTexCoord0.st;
		vec3 vert = gl_Vertex.xyz;
		vert.z = texture2D(depth, vST).r * uMultiplier;
		vec4 ECposition = gl_ModelViewMatrix * gl_Vertex;
		vec3 aLocalNormal = gl_Normal;
		vN = normalize( gl_NormalMatrix * aLocalNormal ); // normal vector
		vN.z = vN.z + vert.z;
		vL = LIGHTPOSITION - ECposition.xyz; // vector from the point
		// to the light position
		vE = vec3( 0., 0., 0. ) - ECposition.xyz; // vector from the point
		// to the eye position
		gl_Position = gl_ModelViewProjectionMatrix * vec4(vert,1);
	}
	""")

    # Fragment shader: per-pixel ambient + diffuse + specular lighting
    # over the RGB video texture.
    fragment_shader = createAndCompileShader(GL_FRAGMENT_SHADER, """
	#version 130
	uniform sampler2D RGB;
	uniform float uKa, uKd, uKs; // coefficients of each type of lighting
	uniform float uShininess; // specular exponent
	in vec2 vST; // texture cords
	in vec3 vN; // normal vector
	in vec3 vL; // vector from point to light
	in vec3 vE; // vector from point to eye
	void
	main( )
	{
		vec3 myColor = texture2D(RGB, vST).rgb;
		vec3 Normal = normalize(vN);
		vec3 Light = normalize(vL);
		vec3 Eye = normalize(vE);
		vec3 ambient = uKa * myColor;
		float d = max( dot(Normal,Light), 0. ); // only do diffuse if the light can see the point
		vec3 diffuse = uKd * d * myColor;
		float s = 0.;
		if( dot(Normal,Light) > 0. ) // only do specular if the light can see the point
		{
			vec3 ref = normalize( reflect( -Light, Normal ) );
			s = pow( max( dot(Eye,ref),0. ), uShininess );
		}
		vec3 specular = uKs * s * vec3(1,1,1);
		gl_FragColor = vec4( ambient + diffuse + specular, 1. );
	}
	""")
    # --- link the shader program -------------------------------------
    program = glCreateProgram()
    glAttachShader(program, vertex_shader)
    glAttachShader(program, fragment_shader)
    glLinkProgram(program)
    # glDeleteShader(vertex_shader)
    # glDeleteShader(fragment_shader)
    try:
        glUseProgram(program)
    except OpenGL.error.GLError:
        # dump the link/validation log before re-raising
        print(glGetProgramInfoLog(program))
        raise
    # Uniform locations; texture samplers and the depth multiplier are
    # published as globals for the display/keyboard callbacks.
    global uRGB
    global uDepth
    uDepth = glGetUniformLocation(program, "depth")
    uRGB = glGetUniformLocation(program, "RGB")
    global uMultiplier
    uMultiplier = glGetUniformLocation(program, "uMultiplier")
    uKa = glGetUniformLocation(program, "uKa")
    uKd = glGetUniformLocation(program, "uKd")
    uKs = glGetUniformLocation(program, "uKs")
    uShininess = glGetUniformLocation(program, "uShininess")

    # glGetUniformLocation returns -1 when a uniform was optimized out or
    # misnamed; warn (the hard exits are left disabled).
    if uKa < 0 or uKd < 0 or uKs < 0 or uShininess < 0:
        print("Error finding float lighting values.")
    #	exit(1)
    if uDepth < 0 or uRGB < 0:
        print("Error finding sampler texture declearqations.")
        print("Values -> uRGB: " + str(uRGB) + ", uDepth:" + str(uDepth))
    #	exit(1)

    # initial lighting coefficients; multiplier 0 == flat (no displacement)
    glUniform1f(uMultiplier, 0)
    glUniform1f(uKa, 0.25)
    glUniform1f(uKd, 0.5)
    glUniform1f(uKs, 0.25)
    glUniform1f(uShininess, 1)

    # set background texture
    global rgb
    global depth
    global rgbData
    global depthData
    global converter
    global frame_and_depth_map_gen

    # the frame/depth-map generator is consumed by the display callback
    converter = VideoConverter(input_video, low, step, fast, nn)
    frame_and_depth_map_gen = converter.get_frame_and_depth_map()

    # --- fixed-function light (in addition to the shader lighting) ----
    glEnable(GL_LIGHTING)
    lightZeroPosition = [0., 0., 20., 1.]
    lightZeroColor = [1.8, 1.0, 0.8, 1.0]  # green tinged
    glLightfv(GL_LIGHT0, GL_POSITION, lightZeroPosition)
    glLightfv(GL_LIGHT0, GL_DIFFUSE, lightZeroColor)
    glLightf(GL_LIGHT0, GL_CONSTANT_ATTENUATION, 0.1)
    glLightf(GL_LIGHT0, GL_LINEAR_ATTENUATION, 0.05)
    glEnable(GL_LIGHT0)
    glUseProgram(program)
    makeList()
    # --- camera and main loop ----------------------------------------
    glutDisplayFunc(display)
    glMatrixMode(GL_PROJECTION)
    gluPerspective(40., 1., 0.1, 80.)
    glMatrixMode(GL_MODELVIEW)
    gluLookAt(CameraPosx, CameraPosy, CameraPosz, 0, 0, 0, 0, 1, 0)
    glPushMatrix()
    glutMainLoop()  # blocks until the GLUT window is closed
    return