def main(video, saveLocation):
    # start_time = time.time()

    # Create the review-settings CSV file to be written to.
    directory_1 = saveLocation + '/review_settings.csv'
    review_file = open(directory_1, 'w', newline='')
    writer = csv.writer(review_file)
    header = ['Effect', 'Contact', 'Intensity', 'PulseWidth', 'Frequency',
              'Impedance', 'TargetLocation', 'StimulationONOFF']
    writer.writerow(header)
    review_file.close()

    # Create a new patient-limits CSV file to be written to.
    directory_2 = saveLocation + '/patient_limits.csv'
    limits_file = open(directory_2, 'w', newline='')
    writer = csv.writer(limits_file)
    header = ['StepSize', 'Minimum', 'Maximum']
    writer.writerow(header)
    limits_file.close()

    ReadVideo.main(video, directory_1, directory_2)
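# Sketch (not from the project): the same two headers written with context
# managers, so the CSV files are closed even if a later step raises.
# The helper name create_output_csvs is hypothetical.
import csv
import os


def create_output_csvs(save_location):
    """Create review_settings.csv and patient_limits.csv; return their paths."""
    review_path = os.path.join(save_location, 'review_settings.csv')
    limits_path = os.path.join(save_location, 'patient_limits.csv')

    with open(review_path, 'w', newline='') as review_file:
        csv.writer(review_file).writerow(
            ['Effect', 'Contact', 'Intensity', 'PulseWidth', 'Frequency',
             'Impedance', 'TargetLocation', 'StimulationONOFF'])

    with open(limits_path, 'w', newline='') as limits_file:
        csv.writer(limits_file).writerow(['StepSize', 'Minimum', 'Maximum'])

    return review_path, limits_path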
def testSpatialAlignmentEvangelidis(self):
    """
    We test Evangelidis Spatial Alignment, given the precomputed temporal
    alignment, the cross array.
    """
    videoPathFileNameQ = "Videos/input.avi"
    videoPathFileNameR = "Videos/reference.avi"

    """
    We want to use the same JPEGs Matlab is using - since the algorithm is
    somewhat sensitive to quantisation errors.
    """
    assert config.TESTING_IDENTICAL_MATLAB == True

    import ReadVideo

    captureQ, frameCountQ, resVidQ = ReadVideo.OpenVideoCapture(
        videoPathFileNameQ, 0)
    common.DebugPrint("Alex: frameCountQ = %d" % frameCountQ)

    captureR, frameCountR, resVidR = ReadVideo.OpenVideoCapture(
        videoPathFileNameR, 1)
    common.DebugPrint("Alex: frameCountR = %d" % frameCountR)

    """
    The cross result we obtain for the videos from Evangelidis, with step=25
    (1fps).
    """
    crossref = [[0, 48], [1, 48], [2, 48], [3, 48], [4, 48], [5, 48],
                [6, 0], [7, 0], [8, 60], [9, 60], [10, 60], [11, 60],
                [12, 67], [13, 67], [14, 67], [15, 67], [16, 67], [17, 67],
                [18, 72], [19, 72], [20, 72], [21, 78], [22, 78], [23, 78],
                [24, 78], [25, 78], [26, 82], [27, 82], [28, 82], [29, 54],
                [30, 54]]
    crossref = np.array(crossref)

    #SpatialAlignmentEvangelidis(cross, captureQ=None, captureR=None);
    res = SpatialAlignmentEvangelidis(crossref, captureQ, captureR)

    common.DebugPrint("Corrected cross from SpatialAlignmentEvangelidis() = %s" % \
                      str(res))


"""
common.DebugPrint("cv2.getNumThreads() (#logical CPUs) is %s" % str(cv2.getNumThreads()));
"""

videoPathFileNameQ = sys.argv[1];  # input/current video
videoPathFileNameR = sys.argv[2];  # reference video

#!!!!TODO: use getopt() to run Evangelidis' or "Alex's" algorithm, etc

#if True:
if False:
    import hotshot

    prof = hotshot.Profile("hotshot_edi_stats_Main");
    #prof.runcall(findquads, Points, threshold, reflect_flag);
    prof.runcall(ReadVideo.Main, videoPathFileNameQ, videoPathFileNameR);
    print;
    prof.close();

    """
    from hotshot import stats
    s = stats.load("hotshot_edi_stats_findquads");
    s.sort_stats("time").print_stats();
    #s.print_stats()
    """
else:
    ReadVideo.Main(videoPathFileNameQ, videoPathFileNameR);
def main(argv):
    assert len(sys.argv) >= 3

    if FLAGS.preprocess_ref:
        config.PREPROCESS_REFERENCE_VIDEO_ONLY = True
    elif FLAGS.process_query_and_align_videos:
        config.PREPROCESS_REFERENCE_VIDEO_ONLY = False
    else:
        config.PREPROCESS_REFERENCE_VIDEO_ONLY = False
    print("config.PREPROCESS_REFERENCE_VIDEO_ONLY = %s" % str(
        config.PREPROCESS_REFERENCE_VIDEO_ONLY))

    ask_first()

    # Inspired from https://stackoverflow.com/questions/1520234/how-to-check-which-version-of-numpy-im-using
    print("numpy.version.version = %s" % str(np.version.version))
    print("scipy.version.version = %s" % str(scipy.version.version))
    np.show_config()
    scipy.show_config()

    # See http://docs.scipy.org/doc/numpy/reference/generated/numpy.set_printoptions.html
    # We use 7 digits of precision and suppress scientific notation.
    np.set_printoptions(precision=7, suppress=True, threshold=70000,
                        linewidth=4000)

    # Inspired from \OpenCV2-Python-Tutorials-master\source\py_tutorials\py_core\py_optimization
    # Normally returns True - relates to using the SIMD extensions of x86
    # (SSE2, AVX).
    common.DebugPrint("cv2.useOptimized() is %s" % str(cv2.useOptimized()))

    """
    From http://docs.opencv.org/modules/core/doc/utility_and_system_functions_and_macros.html#checkhardwaresupport
        CV_CPU_MMX - MMX
        CV_CPU_SSE - SSE
        CV_CPU_SSE2 - SSE 2
        CV_CPU_SSE3 - SSE 3
        CV_CPU_SSSE3 - SSSE 3
        CV_CPU_SSE4_1 - SSE 4.1
        CV_CPU_SSE4_2 - SSE 4.2
        CV_CPU_POPCNT - POPCOUNT
        CV_CPU_AVX - AVX
    """
    # TODO: Figure out the correct way to reference these in OpenCV 3.x
    """
    # Need to call setUseOptimized before calling checkHardwareSupport.
    cv2.setUseOptimized(True)

    if config.OCV_OLD_PY_BINDINGS == False:
        featDict = {cv2.CpuFeatures.CV_CPU_AVX: "AVX",
                    cv2.CPU_MMX: "MMX",
                    cv2.CPU_NEON: "NEON",
                    cv2.CPU_POPCNT: "POPCNT",
                    cv2.CPU_SSE: "SSE",
                    cv2.CPU_SSE2: "SSE2",
                    cv2.CPU_SSE3: "SSE3",
                    cv2.CPU_SSE4_1: "SSE4.1",
                    cv2.CPU_SSE4_2: "SSE4.2",
                    cv2.CPU_SSSE3: "SSSE3"}
        for feat in featDict:
            res = cv2.checkHardwareSupport(feat)
            print("%s = %d" % (featDict[feat], res))
    """

    # "Returns the number of logical CPUs available for the process."
    common.DebugPrint("cv2.getNumberOfCPUs() (#logical CPUs) is %s" % str(
        cv2.getNumberOfCPUs()))
    common.DebugPrint(
        "cv2.getTickFrequency() is %s" % str(cv2.getTickFrequency()))

    video_file_q = sys.argv[1]  # input/current video
    video_file_r = sys.argv[2]  # reference video

    # TODO: use getopt() to run Evangelidis' or "Alex's" algorithm, etc.
    ReadVideo.main(video_file_q, video_file_r)
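# Sketch (assumption, not confirmed by this excerpt): main(argv) reads
# FLAGS.preprocess_ref and FLAGS.process_query_and_align_videos, which is the
# pattern used by absl-py. One way the two flags could be declared:
from absl import app, flags

FLAGS = flags.FLAGS
flags.DEFINE_boolean('preprocess_ref', False,
                     'Only preprocess the reference video.')
flags.DEFINE_boolean('process_query_and_align_videos', False,
                     'Process the query video and align it to the reference.')

if __name__ == '__main__':
    app.run(main)  # absl strips its own flags and passes the rest to main(argv)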
        name_of_video = os.path.splitext(args.video)[0]  # video name without file extension
        path_video = PATH_TO_VIDEO + args.video
        if not os.path.exists(path_video):
            log.error(" %s not found" % path_video)
            exit(1)
    # Otherwise the source is an online camera rather than a file on disk.
    else:
        name_of_video = 'online_camera'
        path_video = args.video

    if args.filename == 'no':
        # If no .txt file name was given, derive it from the video name.
        filename = name_of_video + '.txt'
    else:
        filename = args.filename

    if args.sec == '0':
        sec = 0.1
    else:
        sec = float(args.sec)

    return path_video, filename, name_of_video, sec


if __name__ == '__main__':
    parser = create_parser()
    args = parser.parse_args()
    path_video, filename, name_of_video, sec = parse_args(args)

    log.info(' Run video %s' % args.video)
    ReadVideo.read_video(path_video, filename, args.type, name_of_video, sec,
                         args.frame)
    log.info(' Close video %s \n\n' % args.video)
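# Sketch (assumption): create_parser() is not shown in this excerpt; the
# options below are inferred from the attributes used above (args.video,
# args.filename, args.sec, args.type, args.frame). Names, defaults and help
# strings are guesses, not the project's own.
import argparse


def create_parser():
    parser = argparse.ArgumentParser(
        description='Read a video and write results to a text file.')
    parser.add_argument('--video', default='0',
                        help='video file inside PATH_TO_VIDEO, or a camera source')
    parser.add_argument('--filename', default='no',
                        help="output .txt name; 'no' derives it from the video name")
    parser.add_argument('--sec', default='0',
                        help='seconds between processed frames; 0 falls back to 0.1')
    parser.add_argument('--type', default='frame',
                        help='processing mode passed to ReadVideo.read_video')
    parser.add_argument('--frame', type=int, default=0,
                        help='frame index passed to ReadVideo.read_video')
    return parser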
def TestGoProCameraVideos():
    """
    Running Spatial Alignment, given the precomputed temporal alignment, the
    cross array.
    """
    path = "/home/asusu/drone-diff_Videos/GoPro_clips/2HD_cuts_from_Lucian/"
    videoPathFileNameQ = path + "GOPR7269_50-55.MP4"
    videoPathFileNameR = path + "GOPR7344_90-95.MP4"

    import ReadVideo

    captureQ, frameCountQ, resVidQ = ReadVideo.OpenVideoCapture(
        videoPathFileNameQ, 0)
    common.DebugPrint("Alex: frameCountQ = %d" % frameCountQ)

    captureR, frameCountR, resVidR = ReadVideo.OpenVideoCapture(
        videoPathFileNameR, 1)
    common.DebugPrint("Alex: frameCountR = %d" % frameCountR)

    crossref = np.array([
        [  7, 120], [  8, 120], [  9, 120], [ 10, 120], [ 11, 120], [ 12, 120],
        [ 13, 120], [ 14, 120], [ 15, 120], [ 16, 120], [ 17, 120], [ 18, 120],
        [ 19, 114], [ 20,   4], [ 21,   4], [ 22,   5], [ 23,   5], [ 24,   5],
        [ 25,   4], [ 26,   4], [ 27,   4], [ 28,   4], [ 29,   4], [ 30,   4],
        [ 31,   4], [ 32,   4], [ 33,   4], [ 34,   4], [ 35,   4], [ 36,   4],
        [ 37,   4], [ 38,   0], [ 39,   0], [ 40,   0], [ 41,   5], [ 42,   5],
        [ 43,   5], [ 44,   5], [ 45,   5], [ 46,   5], [ 47,   5], [ 48,   5],
        [ 49,   5], [ 50,   5], [ 51,   5], [ 52,   5], [ 53,   5], [ 54,   5],
        [ 55,   6], [ 56,   6], [ 57,   6], [ 58,   6], [ 59,   6], [ 60,   6],
        [ 61,   6], [ 62,   6], [ 63,   6], [ 64,   6], [ 65,  78], [ 66,   6],
        [ 67,   6], [ 68,   6], [ 69,   6], [ 70,   6], [ 71,   6], [ 72,   6],
        [ 73,   6], [ 74,   7], [ 75,   7], [ 76,   7], [ 77,   7], [ 78,   7],
        [ 79,   7], [ 80,  13], [ 81,  12], [ 82,  12], [ 83,  12], [ 84,  12],
        [ 85,  16], [ 86,  16], [ 87,  16], [ 88,  15], [ 89,  22], [ 90,  22],
        [ 91,  22], [ 92,  22], [ 93,  22], [ 94,  22], [ 95,  22], [ 96,  22],
        [ 97,  22], [ 98,  22], [ 99,  32], [100,  32], [101,  32], [102,  32],
        [103,  32], [104,  33], [105,  34], [106,  34], [107,  34], [108,  38],
        [109,  38], [110,  38], [111,  38], [112,  43], [113,  43], [114,  43],
        [115,  43], [116,  43], [117,  45], [118,  41], [119,  60], [120,  60],
        [121,  60], [122,  60], [123,  60], [124,  60], [125,  60], [126,  60],
        [127,  60], [128,  60], [129,  60], [130,  60], [131,  60], [132,  60],
        [133,  60], [  0,   4], [  1,   4], [  2,   4], [  3,   4], [  4,   4],
        [  5,   4], [  6,   4]])

    res = SpatialAlignmentEvangelidis(crossref, captureQ, captureR)

    print("Corrected cross from SpatialAlignmentEvangelidis() = %s" % \
          str(res))