Example #1
def main():
    cv2.getBuildInformation()
    # lamonaca = lamonaca_2015.Lamonaca2015()

    # lamonaca.lacomana("Video/S98T89.avi")

    nemcova = nemcova_2020.Nemcova2020()
    o2, hr = nemcova.spo2_estimation("Video/S87T78.mp4", optimize=True)
    print("Done:", o2, " and hr ", hr)
Example #2
def main():
    cv.getBuildInformation()
    # cap = cv.VideoCapture('Bridge.mp4')
    # cap = cv.VideoCapture('Night_Scene.mp4')
    cap = cv.VideoCapture('Highway.mp4')

    if not cap.isOpened():
        print('Failed to open the video.')
        exit(-1)

    cv.namedWindow('Filtro')

    cv.createTrackbar('log', 'Filtro', 1, 1, faz_nada)
    cv.createTrackbar('c', 'Filtro', 10, 100, faz_nada)
    cv.createTrackbar('raio', 'Filtro', 20, 1000, faz_nada)
    cv.createTrackbar('min', 'Filtro', 0, 100, faz_nada)
    cv.createTrackbar('max', 'Filtro', 100, 100, faz_nada)

    speed = 5
    descarte_frame = 0
    while True:
        ret, frame = cap.read()
        if ret:
            if descarte_frame == 0:
                logs = cv.getTrackbarPos('log', 'Filtro')
                c = cv.getTrackbarPos('c', 'Filtro')
                r = cv.getTrackbarPos('raio', 'Filtro')
                v_min = cv.getTrackbarPos('min', 'Filtro')
                v_max = cv.getTrackbarPos('max', 'Filtro')

                v_min = v_min / 100
                v_max = v_max / 100

                frame = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
                cv.imshow('Frame', frame)
                img, filtro = filtro_homomorfico(frame, r, v_min, v_max, c,
                                                 logs == 1)
                cv.imshow('Homomorfico', img)
                cv.imshow('Filtro', filtro)

            descarte_frame = (descarte_frame + 1) % speed

            key = cv.waitKey(15)
            if key == 27:
                break
        else:
            # restart the clip to loop playback
            cap = cv.VideoCapture('Highway.mp4')

    cap.release()
    cv.destroyAllWindows()
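
Example #2 calls filtro_homomorfico and faz_nada, which are defined elsewhere in its project. As a point of reference only, here is a minimal sketch with the same signatures, assuming the standard Gaussian high-emphasis formulation of the homomorphic filter; the original implementation may differ.

import numpy as np
import cv2 as cv

def faz_nada(x):
    # no-op trackbar callback
    pass

def filtro_homomorfico(img, raio, g_low, g_high, c, usar_log=True):
    # Optionally move to the log domain so illumination (low frequencies)
    # and reflectance (high frequencies) become additive.
    f = img.astype(np.float64) / 255.0
    if usar_log:
        f = np.log1p(f)

    F = np.fft.fftshift(np.fft.fft2(f))

    # Gaussian high-emphasis transfer function:
    # H(u, v) = (g_high - g_low) * (1 - exp(-c * D^2 / raio^2)) + g_low
    rows, cols = img.shape
    u = np.arange(rows) - rows / 2
    v = np.arange(cols) - cols / 2
    D2 = u[:, None] ** 2 + v[None, :] ** 2
    H = (g_high - g_low) * (1 - np.exp(-c * D2 / (raio ** 2 + 1e-9))) + g_low

    # Apply the filter, invert the FFT and leave the log domain.
    g = np.real(np.fft.ifft2(np.fft.ifftshift(F * H)))
    if usar_log:
        g = np.expm1(g)

    g = cv.normalize(g, None, 0, 255, cv.NORM_MINMAX).astype(np.uint8)
    filtro = cv.normalize(H, None, 0, 255, cv.NORM_MINMAX).astype(np.uint8)
    return g, filtro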
Example #3
def main():
    cv2.getBuildInformation()
    # lamonaca = lamonaca_2015.Lamonaca2015()

    # lamonaca.lacomana("Video/S98T89.avi")

    wang = wang_2017.Wang2017()

    ppg_green_interpolated, ppg_red_interpolated, video_length, timestamps_spline, fps_spline = wang.ppg_from_video_file(
        "../Video/S87T78.mp4")

    hr = wang.heart_rate(ppg_red_interpolated, fps_spline, timestamps_spline)

    print(hr)
Example #4
def check_gstreamer_support(logging=False):
    """
    ## check_gstreamer_support

    Checks whether OpenCV is compiled with GStreamer (`>=1.0.0`) support.

    Parameters:
        logging (bool): enables logging for its operations

    **Returns:** A Boolean value
    """
    raw = cv2.getBuildInformation()
    gst = [
        x.strip()
        for x in raw.split("\n")
        if x and re.search(r"GStreamer[,-:]+\s*(?:YES|NO)", x)
    ]
    if gst and "YES" in gst[0]:
        version = re.search(r"(\d+\.)?(\d+\.)?(\*|\d+)", gst[0])
        if logging:
            logger.debug("Found GStreamer version:{}".format(version[0]))
        return version[0] >= "1.0.0"
    else:
        logger.warning("GStreamer not found!")
        return False
Example #5
def built_with_cuda():
    b = cv2.getBuildInformation()
    lines = b.split('\n')

    for l in lines:
        if ' NVIDIA CUDA' in l:
            return l.split(':')[-1].strip().startswith('YES')

    return False
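
Both helpers above re-scan the raw multi-line report for a single flag. A hedged alternative, not taken from any of the quoted projects, is to split the report into key/value pairs once; the exact key names vary between OpenCV builds, so a missing key should be treated as "feature absent".

import cv2

def build_info_flags():
    # Parse cv2.getBuildInformation() into a {key: value} dict (a sketch).
    flags = {}
    for line in cv2.getBuildInformation().splitlines():
        key, sep, value = line.partition(':')
        if sep and value.strip():
            flags[key.strip()] = value.strip()
    return flags

flags = build_info_flags()
print(flags.get('NVIDIA CUDA', 'NO'))
print(flags.get('GStreamer', 'NO'))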
Example #6
def parse_params():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-target",
        default=None,
        help=
        'Target to process, "all" processes all folders, "test" runs test cases, "clean" to remove banned clips from db, or a "cptv" file to run a single source.',
    )

    parser.add_argument(
        "-p",
        "--create-previews",
        action="count",
        help="Create MP4 previews for tracks (can be slow)",
    )
    parser.add_argument(
        "-t",
        "--test-file",
        default="tests.txt",
        help="File containing test cases to run",
    )
    parser.add_argument("-v",
                        "--verbose",
                        action="count",
                        help="Display additional information.")

    parser.add_argument(
        "-r",
        "--reprocess",
        action="count",
        help="Re process clips that already exist in the database",
    )

    parser.add_argument(
        "-i",
        "--show-build-information",
        action="count",
        help="Show openCV build information and exit.",
    )
    parser.add_argument("-c",
                        "--config-file",
                        help="Path to config file to use")
    args = parser.parse_args()

    if args.show_build_information:
        print(cv2.getBuildInformation())
        return None, None

    config = Config.load_from_file(args.config_file)
    if args.create_previews:
        config.loader.preview = "tracking"
    if args.verbose:
        config.tracking.verbose = True

    return config, args
Example #7
def extraOpenCVModulesPresent():

    # we only need to check this once and remember the result
    # so we can do this via a stored function attribute (static variable)
    # which is preserved across calls

    if not hasattr(extraOpenCVModulesPresent, "already_checked"):
        (is_built, not_built) = cv2.getBuildInformation().split("Disabled:")
        extraOpenCVModulesPresent.already_checked = ('xfeatures2d' in is_built)

    return extraOpenCVModulesPresent.already_checked
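
The stored function attribute above is a hand-rolled check-once cache. A sketch of the same behavior using functools.lru_cache from the standard library (an alternative, not the project's code):

import functools
import cv2

@functools.lru_cache(maxsize=1)
def extra_opencv_modules_present():
    # Evaluated once; subsequent calls return the cached result.
    is_built, _, _ = cv2.getBuildInformation().partition("Disabled:")
    return 'xfeatures2d' in is_built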
Example #8
    def _check_opencv_config(self):
        build_info = cv2.getBuildInformation()
        ffmpeg_line = re.search(r'FFMPEG\:\s+(.*)', build_info)

        if not (ffmpeg_line and ffmpeg_line.group(1) == 'YES'):
            IkaUtils.dprint('%s: OpenCV misconfiguration detected.\n'
                '  - IkaLog may experience serious performance degradation.\n'
                '  - IkaLog may not be able to read several video formats.\n'
                '  Please review your OpenCV Configuration.\n'
                '  %s' % (self, ffmpeg_line.group(0))
            )
            time.sleep(5)
Example #9
def nonFreeAlgorithmsPresent():

    # we only need to check this once and remember the result
    # so we can do this via a stored function attribute (static variable)
    # which is preserved across calls

    if not hasattr(nonFreeAlgorithmsPresent, "already_checked"):
        (before, after) = cv2.getBuildInformation().split("Non-free algorithms:")
        output_list = after.split("\n")
        nonFreeAlgorithmsPresent.already_checked = ('YES' in output_list[0])

    return nonFreeAlgorithmsPresent.already_checked
Example #10
    def __init__(self, config, settings, eventmanager, imageprocessor,
                 hardwarecontroller, calibration):
        super(FSScanProcessorInterface,
              self).__init__(config, settings, eventmanager,
                             imageprocessor, hardwarecontroller, calibration)

        #asyncio.set_event_loop(asyncio.new_event_loop())
        self.settings = settings
        self.config = config
        self._logger = logging.getLogger(__name__)

        self.eventmanager = eventmanager.instance
        self.calibration = calibration
        self._worker_pool = None
        self.hardwareController = hardwarecontroller
        self.image_processor = imageprocessor

        self._prefix = None
        self._resolution = 16
        self._number_of_pictures = 0
        self._total = 1
        self._progress = 1
        self._is_color_scan = True
        self.point_clouds = []
        self.both_cloud = []

        self.current_position = 0
        self._stop_scan = False
        self._current_laser_position = 1
        self._starttime = 0
        self._additional_worker_number = 1

        self.texture_lock_event = threading.Event()
        self.texture_lock_event.set()

        self.utils = FSSystem()

        self._scan_brightness = self.settings.file.camera.brightness
        self._scan_contrast = self.settings.file.camera.contrast
        self._scan_saturation = self.settings.file.camera.saturation
        self._logger.info("Laser Scan Processor initialized.")

        # prevent deadlocks when opencv tbb is not available

        cv_build_info = cv2.getBuildInformation()

        # fallback to one worker.
        if not "TBB" in cv_build_info:
            self._logger.warning(
                "OpenCV does not support TBB. Falling back to single processing."
            )
            self.config.file.process_numbers = 1
Example #11
def OpenCVUsingCuda():
    buildinfo = cv2.getBuildInformation().split("\n")
    foundSomething = 0
    for item in buildinfo:
        if "Use Cuda" in item:
            print(item.strip())
            foundSomething += 1
        if "NVIDIA" in item:
            print(item.strip())
            foundSomething += 1

    if foundSomething == 0:
        print("Could Not Find CUDA")
Example #12
def main():
    cap=cv2.VideoCapture(0)
    full_img_to_gray=0
    full_detection=0
    ImageConvPlot=[]
    ObjDetectPlot=[]
    Cadrs=100
    
    print(f'========\tSTARTING SCORE TEST OF VIOLA-JONES ALGORITHM OF OBJECT DETECTION\t========\n')
    print(cv2.getBuildInformation())

    clf = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
    #clf = cv2.CascadeClassifier('/usr/local/share/opencv4/haarcascades/' + 'haarcascade_frontalface_default.xml')
    #f1 = open('Image converting.txt', 'w')
    #f2 = open('Object detection.txt', 'w')

    for i in range(Cadrs):
        ret, frame = cap.read()
        if not ret:
            break
        start_t = time.perf_counter()
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        end_t = time.perf_counter()
        img_to_gray = end_t - start_t
        full_img_to_gray+=img_to_gray

        start_t = time.perf_counter()
        clf.detectMultiScale(gray, 1.3, 125)
        end_t = time.perf_counter()
        detection = end_t - start_t
        full_detection+=detection
        
        print(f'Time has passed:\t{img_to_gray + detection}\t\n')
        print(f'Image converting:\t{img_to_gray}')
        ImageConvPlot.append(img_to_gray)
        #f1.writelines(str(img_to_gray)+"\n")
        print(f'Object detection:\t{detection}')
        ObjDetectPlot.append(detection)
        #f2.writelines(str(detection)+"\n")
        print ("Frame default resolution: " + str(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) + "; " + str(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
        

    #f1.close()
    #f2.close()
    print(f'\nFull time has passed:\t{full_img_to_gray + full_detection}\t')
    print(f'Full image converting:\t{full_img_to_gray}')
    print(f'Full object detection:\t{full_detection}')
    time.sleep(3)
    cap.release()
    lineplot(range(1, Cadrs + 1), ImageConvPlot, "Image conversion")
    lineplot(range(1, Cadrs + 1), ObjDetectPlot, "Object detection")
    
    return 0
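
lineplot is called above but not defined in the snippet. A minimal matplotlib sketch with the assumed signature lineplot(x, y, title) could look like this:

import matplotlib.pyplot as plt

def lineplot(x, y, title):
    # Hypothetical helper: draw one timing curve per metric.
    plt.figure()
    plt.plot(list(x), y)
    plt.title(title)
    plt.xlabel("Frame")
    plt.ylabel("Seconds")
    plt.show()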
Example #13
def main():
    import sys
    try:
        param = sys.argv[1]
    except IndexError:
        param = ""

    if "--build" == param:
        print(cv.getBuildInformation())
    elif "--help" == param:
        print("\t--build\n\t\tprint complete build info")
        print("\t--help\n\t\tprint this help")
    else:
        print("Welcome to OpenCV")
    print("DONE")
Example #14
    def _check_opencv_config(self):
        build_info = cv2.getBuildInformation()
        ffmpeg_line = re.search(r'FFMPEG\:\s+(.*)', build_info)

        is_osx = IkaUtils.isOSX()
        is_ffmpeg_enabled = (ffmpeg_line and ffmpeg_line.group(1) == 'YES')

        if (is_osx and not is_ffmpeg_enabled):
            IkaUtils.dprint(
                '%s: OpenCV misconfiguration detected.\n'
                '  - IkaLog may experience serious performance degradation.\n'
                '  - IkaLog may not be able to read several video formats.\n'
                '  Please review your OpenCV Configuration.\n'
                '  %s' % (self, ffmpeg_line.group(0)))
            time.sleep(5)
Example #15
def Check_with_CUDA():
    print("\nCOMPLIE GPU INFORMATION:")
    import cv2
    cv_info = [
        re.sub(r'\s+', ' ', ci.strip())
        for ci in cv2.getBuildInformation().strip().split('\n')
        if len(ci) > 0 and re.search(r'(nvidia*:?)|(cuda*:)|(cudnn*:)',
                                     ci.lower()) is not None
    ]
    cv_info = "\tOPENCV USING CUDA: " + str(cv_info)
    print(cv_info)
    try:
        import dlib
        dlib_info = "\tDLIB USING CUDA: " + str(dlib.DLIB_USE_CUDA)
    except Exception:
        dlib_info = "\tDLIB USING CUDA: NONE"
    print(dlib_info)
Example #16
def main():
    import sys

    try:
        param = sys.argv[1]
    except IndexError:
        param = ""

    if "--build" == param:
        print(cv.getBuildInformation())
    elif "--help" == param:
        print("\t--build\n\t\tprint complete build info")
        print("\t--help\n\t\tprint this help")
    else:
        print("Welcome to OpenCV")

    print('Done')
Example #17
    def run(self):
        if self.debug:
            print("debug:", cv2.getBuildInformation())

        # prep video capture  TODO: try..except..
        self.vcap = VideoCapture(self.url)
        _, frame = self.vcap.read()
        print("vid2scene: video capture started")

        # prep motion detector
        self.md = MotionDetect(ref_frame=frame)

        # model load and prep
        self._load()

        # capture loop
        self._loop()
Example #18
    def major_video(self):

        print(cv2.getBuildInformation())

        self.file_name = QFileDialog.getOpenFileName(
            self, 'Open file', '', "Video files (*.flv *.mp4)")[0]

        self.vidcap = cv2.VideoCapture(self.file_name)

        success = True

        while success:
            success, im = self.vidcap.read()
            if not success:
                break

            self.detect2(im)

            if cv2.waitKey(1) >= 0:  # break on any key press
                break
Example #19
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--key-api-url",
                        default="http://localhost:5000",
                        help="URL of key verification service")
    parser.add_argument("-v",
                        "--video-src",
                        type=int,
                        default=0,
                        help="Video source index")
    args = parser.parse_args()

    print(cv2.getBuildInformation())

    print("Starting video stream...")
    vs = VideoStream(src=args.video_src, resolution=(320, 240),
                     framerate=10).start()

    # Warm up camera sensor
    time.sleep(2.0)

    t0 = time.time()
    frame_count = 0

    while True:
        frame = vs.read()
        # cv2.imwrite("frame-%05d.jpg" % frame_count, frame)

        barcodes = pyzbar.decode(frame)

        for barcode in barcodes:
            # The barcode data is a bytes object
            barcode_data = barcode.data.decode("utf-8")
            barcode_type = barcode.type
            print("Got barcode '%s' of type '%s'" %
                  (barcode_data, barcode_type))

            if _verify_key(args.key_api_url, barcode_data):
                _open_door()

        frame_count += 1
        print("\rFPS: %.02f" % (frame_count / (time.time() - t0)))
Example #20
def arReader():
    print(cv2.getBuildInformation())  # inspect the build details
    cap = cv2.VideoCapture(0)  # start video capture
    cnt = 0

    while True:

        ret, frame = cap.read()  # grab a frame from the capture
        if frame is None: break

        corners, ids, rejectedImgPoints = aruco.detectMarkers(
            frame, dictionary)  # detect markers
        print(corners, ids)
        aruco.drawDetectedMarkers(frame, corners, ids,
                                  (0, 255, 0))  # draw the detected markers

        cv2.imwrite('result' + str(cnt) + '.png', frame)
        cnt += 1

    cap.release()  # release the video capture
    cv2.destroyAllWindows()  # close all windows
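
dictionary is referenced above but never defined in the snippet. A typical choice, assumed here for illustration only, is one of the predefined ArUco marker sets:

import cv2
from cv2 import aruco

# Assumed marker dictionary; the original may use a different set.
dictionary = aruco.getPredefinedDictionary(aruco.DICT_4X4_50)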
Example #21
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("path", help="Path or file to evaluate")
    parser.add_argument(
        "-o", dest="save_video", help="save output video with bounding boxes"
    )
    parser.add_argument(
        "-s",
        "--save",
        dest="save_detections",
        action="store_true",
        default=False,
        help="save detections as images",
    )
    parser.add_argument(
        "-t",
        "--threshold",
        dest="threshold",
        default=3,
        type=int,
        help="set threshold (default: 3)",
    )
    parser.add_argument("-f", dest="log_file", default=None, help="save logs to file")
    parser.add_argument(
        "-p",
        dest="save_detections_path",
        default="output",
        help="save detection images to path (default: output)",
    )
    parser.add_argument(
        "-v",
        "--verbose",
        action="count",
        dest="verbosity",
        default=0,
        help="verbose output (repeat for increased verbosity)",
    )
    parser.add_argument(
        "-d",
        "--debug",
        dest="debug",
        action="store_true",
        default=False,
        help="run in debug mode",
    )
    parser.add_argument(
        "-q",
        "--quiet",
        action="store_const",
        const=-1,
        default=0,
        dest="verbosity",
        help="quiet output (show errors only)",
    )
    parser.add_argument(
        "--test",
        dest="test",
        action="store_true",
        default=False,
        help="run tests against test folder",
    )
    args = parser.parse_args()

    if args.debug:
        args.verbosity = 5

    _setup_logger(args.verbosity, args.log_file)

    print("bluestoned v{}".format(__version__))

    LOG.debug("Running with %s \n %r", sys.argv, args)
    LOG.debug("OpenCV version %s", cv2.__version__)
    LOG.debug(cv2.getBuildInformation())

    _path = os.path.realpath(args.path)

    if args.test:
        return run_tests(_path)

    if os.path.isdir(_path):
        return analyze_dir(
            _path,
            threshold=args.threshold,
            output_video=args.save_video,
            save_detections=args.save_detections,
            save_detections_path=args.save_detections_path,
        )
    else:
        return analyze_file(
            _path,
            threshold=args.threshold,
            save_detections=bool(args.save_detections),
            save_detections_path=args.save_detections_path,
        )
Example #22
import numpy as np
import os

from distutils.core import setup
from distutils.extension import Extension

## Figure out OpenCV paths
try:
    import cv2
except ImportError:
    raise Exception(
        'OpenCV does not appear to be installed. Install before proceeding ... '
    )

## Figure out paths for headers and libraries
path = None
bldInfo = cv2.getBuildInformation().splitlines()
for line in bldInfo:
    if 'Install to:' in line:
        path = line.split()[-1]
        break

print('Open CV path: ', path)

extensions = [
    Extension(
        name="autoRIFT/autoriftcore",
        sources=['geo_autoRIFT/autoRIFT/bindings/autoriftcoremodule.cpp'],
        include_dirs=[np.get_include()] +
        ['geo_autoRIFT/autoRIFT/include',
         os.path.join(path, 'include')],
        library_dirs=[os.path.join(path, 'lib')],
Example #23
def system_info(cv2_info):
    print("The Python version is %s.%s.%s" % sys.version_info[:3])
    print("The OpenCV version is", cv2.__version__)
    print("Cuda in Torch is available: ", torch.cuda.is_available())
    if cv2_info:
        print("Cv2 build information", cv2.getBuildInformation())
Example #24
#    * ShareAlike - If you remix, transform, or build upon the material, you
#                   must distribute your contributions under the same license
#                   as the original.
#    * No additional restrictions - You may not apply legal terms or technological
#                                   measures that legally restrict others from
#                                   doing anything the license permits.
# ==================================================================================

from setuptools import find_packages
from distutils.core import Extension, setup
import os
import cv2


this_dir = os.path.dirname(os.path.realpath(__file__))
cv_folder = [l[l.find('/'):] for l in cv2.getBuildInformation().splitlines() if 'Install path' in l][0]
opentld_python_root = this_dir
opentld_cpp_root = this_dir + "/original_opentld/src"
opentld_source_folders = [opentld_cpp_root + '/libopentld/tld', opentld_cpp_root + '/libopentld/mftracker', opentld_cpp_root + '/3rdparty/cvblobs']

opentld_sources = []
for sf in opentld_source_folders:
    opentld_sources += [os.path.join(sf, i) for i in os.listdir(sf) if i.endswith('cpp')]

opentld_module = Extension('tld',
                           include_dirs=["/usr/include",
                                         "/usr/local/include",
                                         "/usr/include/boost",
                                         "/usr/include/opencv",
                                         "/usr/local/include/opencv",
                                         cv_folder + "/include", cv_folder + "/include/opencv",
Example #25
def parse_params():

    parser = argparse.ArgumentParser()

    parser.add_argument('target', default='all', help='Target to process, "all" processes all folders, "test" runs test cases, "clean" to remove banned clips from db, or a "cptv" file to run a single source.')

    parser.add_argument('-o', '--output-folder', default=os.path.join(DEFAULT_BASE_PATH,"tracks"), help='Folder to output tracks to')
    parser.add_argument('-s', '--source-folder', default=os.path.join(DEFAULT_BASE_PATH,"clips"), help='Source folder root with class folders containing CPTV files')
    parser.add_argument('-c', '--color-map', default="custom_colormap.dat", help='Colormap to use when exporting MPEG files')
    parser.add_argument('-p', '--enable-previews', action='count', help='Enables preview MPEG files (can be slow)')
    parser.add_argument('-t', '--test-file', default='tests.txt', help='File containing test cases to run')
    parser.add_argument('--high-quality-optical-flow', default=False, action='store_true', help='Enables high quality optical flow (much slower).')
    parser.add_argument('-v', '--verbose', action='count', help='Display additional information.')
    parser.add_argument('-w', '--workers', default='0', help='Number of worker threads to use.  0 disables worker pool and forces a single thread.')
    parser.add_argument('-f', '--force-overwrite', default='old', help='Overwrite mode.  Options are all, old, or none.')
    parser.add_argument('-i', '--show-build-information', action='count', help='Show openCV build information and exit.')
    parser.add_argument('-d','--disable-track-filters', default=False, action='store_true', help='Disables filtering of poor quality tracks.')

    args = parser.parse_args()

    if args.show_build_information:
        print(cv2.getBuildInformation())
        return

    # setup extractor
    extractor = CPTVTrackExtractor(args.output_folder)

    extractor.workers_threads = int(args.workers)
    if extractor.workers_threads >= 1:
        print("Using {0} worker threads".format(extractor.workers_threads))

    # set overwrite mode
    if args.force_overwrite.lower() not in ['all','old','none']:
        raise Exception("Valid overwrite modes are all, old, or none.")
    extractor.overwrite_mode = args.force_overwrite.lower()

    # set optical flow
    extractor.high_quality_optical_flow = args.high_quality_optical_flow

    # set verbose
    extractor.verbose = args.verbose

    # this colormap is specially designed for heat maps
    extractor.colormap = tools.load_colormap(args.color_map)

    # load hints.  Hints are a way to give extra information to the tracker when necessary.
    extractor.load_hints("hints.txt")

    extractor.enable_previews = args.enable_previews

    extractor.source_folder = args.source_folder
    extractor.output_folder = args.output_folder

    # allow everything through
    extractor.disable_track_filters = args.disable_track_filters

    if extractor.enable_previews:
        print("Previews enabled.")

    if os.path.splitext(args.target)[1].lower() == '.cptv':
        # run single source
        source_file = tools.find_file(args.source_folder, args.target)
        tag = os.path.basename(os.path.dirname(source_file))
        extractor.overwrite_mode = CPTVTrackExtractor.OM_ALL
        extractor.process_file(source_file, tag=tag)
        return

    if args.target.lower() == 'test':
        print("Running test suite")
        extractor.run_tests(args.source_folder, args.test_file)
        return

    print('Processing tag "{0}"'.format(args.target))

    if args.target.lower() == 'all':
        extractor.clean_all()
        extractor.process_all(args.source_folder)
        return
    if args.target.lower() == 'clean':
        extractor.clean_all()
        return
    else:
        extractor.process_folder(os.path.join(args.source_folder, args.target), tag=args.target, worker_pool_args=(trackdatabase.hdf5_lock,))
        return
Example #26
import torch
import torch.nn as nn

device = torch.device('cpu')

class GRUNet(nn.Module):
    # The opening of this class is missing from the snippet; the dimensions
    # and layer count below are assumptions, not values from the original.
    def __init__(self, input_dim, hidden_dim=256, output_dim=500, n_layers=2):
        super(GRUNet, self).__init__()
        self.hidden_dim = hidden_dim
        self.n_layers = n_layers
        self.gru = nn.GRU(input_dim, hidden_dim, n_layers, batch_first=True)
        self.fc = nn.Linear(hidden_dim, output_dim)
        self.relu = nn.ReLU()
        
    def forward(self, x, h):
        out, h = self.gru(x, h)
        out = self.fc(self.relu(out[:,-1]))
        return out, h
    
    def init_hidden(self, batch_size):
        weight = next(self.parameters()).data
        hidden = weight.new(self.n_layers, batch_size, self.hidden_dim).zero_().to(device)
        return hidden
mdl = GRUNet(3*112*112)
dummy_input = torch.randn(1, 3, 112, 112, device='cpu')
torch.onnx.export(mdl, dummy_input, "gru.onnx", verbose=True, input_names=["input"], output_names=["output"])

import cv2
#print(cv2.__version__)

net = cv2.dnn.readNet("lipreading.onnx")

!ls -l models/__pycache__
!rm -f models/__pycache__/*

import cv2
print(cv2.getBuildInformation())

cap = cv2.VideoCapture("/content/00ee40cd.png")
print(cap.isOpened())

!ls -l *.onnx
Example #27
'''
prints OpenCV version

Usage:
    opencv_version.py [<params>]
    params:
        --build: print complete build info
        --help:  print this help
'''

# Python 2/3 compatibility
from __future__ import print_function

import cv2

if __name__ == '__main__':
    import sys
    print(__doc__)

    try:
        param = sys.argv[1]
    except IndexError:
        param = ""

    if "--build" == param:
        print(cv2.getBuildInformation())
    elif "--help" == param:
        print("\t--build\n\t\tprint complete build info")
        print("\t--help\n\t\tprint this help")
    else:
        print("Welcome to OpenCV")
Example #28
#!/usr/bin/env python

import cv2

if __name__ == '__main__':
    import sys
    try:
        param = sys.argv[1]
    except IndexError:
        param = ""

    if "--build" == param:
        print(cv2.getBuildInformation())
    elif "--help" == param:
        print("\t--build\n\t\tprint complete build info")
        print("\t--help\n\t\tprint this help")
    else:
        print("Welcome to OpenCV")
Example #29
HOST = '127.0.0.1'
RTP_PORT = 5000  # assumed value; the port constant is missing from the snippet
FRAMERATE = 10.0
RESOLUTION = (800, 600)

running = True
        
def showFrame(frame):

    global running
    
    cv2.imshow("Frame", frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        running = False
        
print(cv2.__version__)  # OpenCV version
print(cv2.getBuildInformation())  # useful details; check the Video I/O section for GStreamer

cap = cv2.VideoCapture(0)
# set the video parameters
cap.set(cv2.CAP_PROP_FRAME_WIDTH, RESOLUTION[0])
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, RESOLUTION[1])
cap.set(cv2.CAP_PROP_FPS, FRAMERATE)

# send the stream
streamer = cv_stream.OpenCVRTPStreamer(resolution=RESOLUTION, framerate=FRAMERATE, host=(HOST, RTP_PORT))
streamer.start()

# receive the stream
receiver = cv_stream.OpenCVRTPReciver(host=(HOST, RTP_PORT), onFrameCallback=showFrame)
receiver.start()
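
The cv_stream helpers above are project-specific. If the build report printed earlier shows GStreamer under Video I/O, a stream can also be received directly through cv2.VideoCapture with the GStreamer backend. A hedged sketch, assuming JPEG over RTP on the same assumed port (the snippet's actual encoding is unknown):

pipeline = ('udpsrc port=5000 caps="application/x-rtp, media=video, '
            'encoding-name=JPEG, payload=26" ! rtpjpegdepay ! jpegdec ! '
            'videoconvert ! appsink')
cap_gst = cv2.VideoCapture(pipeline, cv2.CAP_GSTREAMER)
while running and cap_gst.isOpened():
    ok, frame = cap_gst.read()
    if ok:
        showFrame(frame)
cap_gst.release()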
Example #30
GNU Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public License
along with the Matrix library. If not, see <http://www.gnu.org/licenses/>.
"""
import os

# issue#7378 may happen with custom opencv. It doesn't hurt to disable opencl
os.environ['OPENCV_OPENCL_RUNTIME'] = 'disabled'     # https://github.com/opencv/opencv/pull/10155
try:
    # issue#1924 may happen on old systems
    import cv2  # noqa
    if int(cv2.__version__.split('.')[0]) == 3:
        cv2.ocl.setUseOpenCL(False)
    # check if cv is built with cuda
    info = cv2.getBuildInformation().split('\n')
    for line in info:
        if 'use cuda' in line.lower():
            answer = line.split()[-1].lower()
            if answer == 'yes':
                # issue#1197
                print("OpenCV is built with CUDA support. "
                      "This may cause slow initialization or sometimes segfault with TensorFlow.")
            break
except (ImportError, TypeError):
    pass

os.environ['TF_ENABLE_WINOGRAD_NONFUSED'] = '1'  # issue#9339
os.environ['TF_AUTOTUNE_THRESHOLD'] = '2'   # use more warm-up

# Since 1.3, this is not needed
Example #31
#!/usr/bin/env python

# validate all required modules installed

import rospy
from geometry_msgs.msg import PoseStamped

import cv2
import cv2.aruco

import numpy
import mavros
from mavros_msgs.msg import State, StatusText, ExtendedState
from mavros_msgs.srv import CommandBool, CommandLong, SetMode

from std_srvs.srv import Trigger
from clover.srv import GetTelemetry, Navigate, NavigateGlobal, SetPosition, SetVelocity, \
    SetAttitude, SetRates, SetLEDEffect

import tf2_ros
import tf2_geometry_msgs

import VL53L1X
import pymavlink
from pymavlink import mavutil
import rpi_ws281x
import pigpio
from espeak import espeak

print(cv2.getBuildInformation())
Example #32
import numpy as np
import cv2 as cv
import sys
import ctypes


def Mbox(title, text, style):
    return ctypes.windll.user32.MessageBoxW(0, text, title, style)


Mbox('OpenCV Version: ', cv.getBuildInformation(), 1)
Example #33
def scan():
	frames = [None] * 4

	cv2.namedWindow("preview")
	cv2.namedWindow("preview2")
	cv2.namedWindow("preview3")
	cv2.namedWindow("preview4")

	vc = cv2.VideoCapture(0)
	vc2 = cv2.VideoCapture(1)
	vc3 = cv2.VideoCapture(2)
	vc4 = cv2.VideoCapture(3)

	print("Connection established")

	time.sleep(2)

	print("starting!")
	print(cv2.getBuildInformation())

	cv2.setMouseCallback("preview", functools.partial(clickHandler, index=0, f=frames))
	cv2.setMouseCallback("preview2", functools.partial(clickHandler, index=1, f=frames))
	cv2.setMouseCallback("preview3", functools.partial(clickHandler, index=2, f=frames))
	cv2.setMouseCallback("preview4", functools.partial(clickHandler, index=3, f=frames))

	while True:
		frames[0] = process(vc2.read()[1])  # BDL (lower proximal)
		frames[1] = process(vc4.read()[1])  # LUF (upper proximal)
		frames[2] = process(vc.read()[1])  # BRU (upper distal)
		frames[3] = process(vc3.read()[1])  # FRD (lower distal)

		out = {}

		for i, points in COORDS.items():
			color_list = []

			unreliable = all(point[0] in (2, 3) for point in points)

			for point in points:
				if not point[0] in (2, 3) or unreliable:
					color_list.append(
						get_color(cv2.cvtColor(frames[point[0] - 1], cv2.COLOR_BGR2HSV)[point[2], point[1]]))
			color = resolver(i, color_list)

			out[i] = color
			for point in points:
				if point[0] not in (2, 3) or unreliable:
					# thickness -1 draws a filled circle marker
					cv2.circle(frames[point[0] - 1], (point[1], point[2]), 5, COLORS[color], -1)

		cv2.imshow("preview", frames[0])
		cv2.imshow("preview2", frames[1])
		cv2.imshow("preview3", frames[2])
		cv2.imshow("preview4", frames[3])

		key = cv2.waitKey(20)

		if key == 13:  # exit on ENTER
			print(COORDS)
			cv2.waitKey(0)
			cv2.destroyWindow("preview")
			cv2.destroyWindow("preview2")
			cv2.destroyWindow("preview3")
			cv2.destroyWindow("preview4")
			return out
Example #34
    def cv_buildInformation(self):
        """
        Get OpenCV build information
        """
        return cv2.getBuildInformation()
Example #35
import os

# issue#7378 may happen with custom opencv. It doesn't hurt to disable opencl
os.environ[
    'OPENCV_OPENCL_RUNTIME'] = 'disabled'  # https://github.com/opencv/opencv/pull/10155
try:
    # issue#1924 may happen on old systems
    import cv2  # noqa
    # cv2.setNumThreads(0)
    if int(cv2.__version__.split('.')[0]) == 3:
        cv2.ocl.setUseOpenCL(False)
    # check if cv is built with cuda or openmp
    info = cv2.getBuildInformation().split('\n')
    for line in info:
        splits = line.split()
        if not len(splits):
            continue
        answer = splits[-1].lower()
        if answer in ['yes', 'no']:
            if 'cuda' in line.lower() and answer == 'yes':
                # issue#1197
                print(
                    "OpenCV is built with CUDA support. "
                    "This may cause slow initialization or sometimes segfault with TensorFlow."
                )
        if answer == 'openmp':
            print(
                "OpenCV is built with OpenMP support. This usually results in poor performance. For details, see "
                "https://github.com/tensorpack/benchmarks/blob/master/ImageNet/benchmark-opencv-resize.py"
            )
except (ImportError, TypeError):
    pass
Example #36
def start(ENABLE_AIMBOT):

    # Config
    YOLO_DIRECTORY = "models"
    CONFIDENCE = 0.36
    THRESHOLD = 0.22

    #
    #   Size (in pixels) of the screen capture box to feed the neural net.
    #   This box is in the center of your screen. Lower value makes the network faster.
    #
    #   Example: "ACTIVATION_RANGE = 400" means a 400x400 pixel box.
    #
    ACTIVATION_RANGE = 335

    # load the COCO class labels our YOLO model was trained on
    labelsPath = os.path.sep.join([YOLO_DIRECTORY, "coco-dataset.labels"])
    LABELS = open(labelsPath).read().strip().split("\n")

    # initialize a list of colors to represent each possible class label
    np.random.seed(42)
    COLORS = np.random.randint(0, 255, size=(len(LABELS), 3), dtype="uint8")

    # derive the paths to the YOLO weights and model configuration
    weightsPath = os.path.sep.join([YOLO_DIRECTORY, "yolov3-tiny.weights"])
    configPath = os.path.sep.join([YOLO_DIRECTORY, "yolov3-tiny.cfg"])

    # Wait for buffering
    time.sleep(0.4)

    # load our YOLO object detector trained on COCO dataset (80 classes)
    # and determine only the *output* layer names that we need from YOLO
    print("[INFO] loading neural-network from disk...")
    net = cv2.dnn.readNetFromDarknet(configPath, weightsPath)
    net.setPreferableBackend(cv2.dnn.DNN_BACKEND_DEFAULT)
    net.setPreferableTarget(cv2.dnn.DNN_TARGET_OPENCL)
    ln = net.getLayerNames()
    # note: indexing with i[0] assumes an OpenCV version where
    # getUnconnectedOutLayers() returns Nx1 arrays (newer builds return a flat array)
    ln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]

    # Define screen capture area
    print("[INFO] loading screencapture device...")
    sct = mss.mss()
    W, H = None, None
    Wd, Hd = sct.monitors[1]["width"], sct.monitors[1]["height"]
    origbox = (int(Wd / 2 - ACTIVATION_RANGE / 2),
               int(Hd / 2 - ACTIVATION_RANGE / 2),
               int(Wd / 2 + ACTIVATION_RANGE / 2),
               int(Hd / 2 + ACTIVATION_RANGE / 2))

    # Log whether aimbot is enabled
    if not ENABLE_AIMBOT:
        print("[INFO] aimbot disabled, using visualizer only...")
    else:
        print(colored("[OKAY] Aimbot enabled!", "green"))

    # Handle Ctrl+C in terminal, release pointers
    def signal_handler(sig, frame):
        # release the file pointers
        print("\n[INFO] cleaning up...")
        sct.close()
        cv2.destroyAllWindows()
        sys.exit(0)

    signal.signal(signal.SIGINT, signal_handler)

    # Test for GPU support
    build_info = str("".join(cv2.getBuildInformation().split()))
    if cv2.ocl.haveOpenCL():
        cv2.ocl.setUseOpenCL(True)
        cv2.ocl.useOpenCL()
        print(colored("[OKAY] OpenCL is working!", "green"))
    else:
        print(colored("[WARNING] OpenCL acceleration is disabled!", "yellow"))
    if "CUDA:YES" in build_info:
        print(colored("[OKAY] CUDA is working!", "green"))
    else:
        print(colored("[WARNING] CUDA acceleration is disabled!", "yellow"))

    print()
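    # Hedged addition (not in the original): CUDA support can also be probed
    # at runtime via the device count instead of string-matching build info.
    if hasattr(cv2, "cuda") and cv2.cuda.getCudaEnabledDeviceCount() > 0:
        print(colored("[OKAY] CUDA device detected at runtime!", "green"))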

    # loop over frames from the video file stream
    while True:
        start_time = timeit.default_timer()

        frame = np.array(sct.grab(origbox))
        frame = cv2.resize(frame, (ACTIVATION_RANGE, ACTIVATION_RANGE))
        frame = cv2.cvtColor(frame, cv2.COLOR_RGBA2BGR)

        # if the frame dimensions are empty, grab them
        if W is None or H is None:
            (H, W) = frame.shape[:2]

        frame = cv2.UMat(frame)

        # construct a blob from the input frame and then perform a forward
        # pass of the YOLO object detector, giving us our bounding boxes
        # and associated probabilities
        blob = cv2.dnn.blobFromImage(frame,
                                     1 / 260.0, (416, 416),
                                     swapRB=False,
                                     crop=False)
        net.setInput(blob)
        layerOutputs = net.forward(ln)

        # initialize our lists of detected bounding boxes, confidences,
        # and class IDs, respectively
        boxes = []
        confidences = []
        classIDs = []

        # loop over each of the layer outputs
        for output in layerOutputs:
            # loop over each of the detections
            for detection in output:
                # extract the class ID and confidence (i.e., probability)
                # of the current object detection
                scores = detection[5:]

                # classID = np.argmax(scores)
                # confidence = scores[classID]
                classID = 0  # person = 0
                confidence = scores[classID]

                # filter out weak predictions by ensuring the detected
                # probability is greater than the minimum probability
                if confidence > CONFIDENCE:
                    # scale the bounding box coordinates back relative to
                    # the size of the image, keeping in mind that YOLO
                    # actually returns the center (x, y)-coordinates of
                    # the bounding box followed by the boxes' width and
                    # height
                    box = detection[0:4] * np.array([W, H, W, H])
                    (centerX, centerY, width, height) = box.astype("int")

                    # use the center (x, y)-coordinates to derive the top
                    # and left corner of the bounding box
                    x = int(centerX - (width / 2))
                    y = int(centerY - (height / 2))

                    # update our list of bounding box coordinates,
                    # confidences, and class IDs
                    boxes.append([x, y, int(width), int(height)])
                    confidences.append(float(confidence))
                    classIDs.append(classID)

        # apply non-maxima suppression to suppress weak, overlapping
        # bounding boxes
        idxs = cv2.dnn.NMSBoxes(boxes, confidences, CONFIDENCE, THRESHOLD)

        # ensure at least one detection exists
        if len(idxs) > 0:

            # Find best player match
            bestMatch = confidences[np.argmax(confidences)]
            skipRound = False

            # Check if the mouse is already on a target
            for i in idxs.flatten():
                # extract the bounding box coordinates
                (x, y) = (boxes[i][0], boxes[i][1])
                (w, h) = (boxes[i][2], boxes[i][3])

                mouseX, mouseY = (origbox[0] + x + w / 2,
                                  origbox[1] + y + h / 8)
                currentMouseX, currentMouseY = pyautogui.position()

                # Detect closeness to target based on W and H of target
                if abs(mouseX - currentMouseX) < w * 2 and abs(
                        mouseY - currentMouseY) < h * 2:
                    skipRound = True

                    cv2.circle(frame, (int(x + w / 2), int(y + h / 8)), 5,
                               (0, 0, 255), -1)

                    if abs(mouseX - currentMouseX) > w * 0.5 or abs(
                            mouseY - currentMouseY) > h * 0.5:
                        moveMouse(mouseX, mouseY)

                        cv2.rectangle(frame, (x, y), (x + w, y + h),
                                      (0, 255, 0), 2)
                        text = "TARGET ADJUST {}%".format(
                            int(confidences[i] * 100))
                        cv2.putText(frame, text, (x, y - 5),
                                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0),
                                    2)
                    else:
                        cv2.rectangle(frame, (x, y), (x + w, y + h),
                                      (255, 0, 0), 2)
                        text = "TARGET LOCK {}%".format(
                            int(confidences[i] * 100))
                        cv2.putText(frame, text, (x, y - 5),
                                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0),
                                    2)

            # loop over the indexes we are keeping
            if not skipRound:
                for i in idxs.flatten():
                    # extract the bounding box coordinates
                    (x, y) = (boxes[i][0], boxes[i][1])
                    (w, h) = (boxes[i][2], boxes[i][3])

                    # draw a bounding box rectangle and label on the frame
                    color = [int(c) for c in COLORS[classIDs[i]]]
                    cv2.rectangle(
                        frame, (x, y), (x + w, y + h),
                        (0, 0, 255) if bestMatch == confidences[i] else color,
                        2)

                    text = "TARGET? {}%".format(int(confidences[i] * 100))
                    cv2.putText(frame, text, (x, y - 5),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)

                    if ENABLE_AIMBOT and bestMatch == confidences[i]:
                        mouseX = origbox[0] + x + w / 2
                        mouseY = origbox[1] + y + h / 8
                        moveMouse(mouseX, mouseY)

        cv2.imshow("Neural Net Vision (Pine)", frame)

        elapsed = timeit.default_timer() - start_time
        sys.stdout.write("\r{1} FPS with {0} MS interpolation delay \t".format(
            int(elapsed * 1000), int(1 / elapsed)))
        sys.stdout.flush()

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # Clean up on exit
    signal_handler(0, 0)