示例#1
0
def Service(device, config):
    cap = capture.Capture(device, config)
    run = True
    img1, gray1 = cap.getImage()

    time.sleep(0.1)
    try:
        sensor = config['sensetive']
    except:
        sensor = 0.997

    writeCount = 0
    slp = 0.33
    print('\033[92m' + 'webcam is running' + '\033[0m')
    while run:
        try:
            img, gray2 = cap.getImage()
            imgDiff = cap.compareImage(gray1, gray2)
            frmNum = cap.getFrame()
            print "\033[K", 'cmp:', imgDiff, '\ttm:', slp, '\tsens:', sensor, '\tfrm:', frmNum, "\r",
            sys.stdout.flush()

            if imgDiff < sensor or writeCount > 0:
                slp = 0.07
                if writeCount == 0:
                    writeCount = 50

                cap.writeVideo(img)
                writeCount -= 1
            else:
                slp = 0.33

            gray1 = gray2
            time.sleep(slp)
        except KeyboardInterrupt:
            print('\033[93m' + '\nwebcam is stoped\n' + '\033[0m')
            run = False
            break

    del cap
示例#2
0
def main():
    """Open camera 0, start the processor thread, and display frames forever."""
    cap = capture.Capture(0)
    print('before run')
    cap.start()

    started = time.time()
    # Block until the capture thread has delivered its first frame.
    while cap.img is None:
        print('waiting for camera to connect', time.time() - started)
        time.sleep(.25)

    proc = Processor()
    proc.start(cap)

    # Likewise wait for the first processed frame.
    while proc.img is None:
        print('waiting for processed image', time.time() - started)
        time.sleep(.25)

    frame_idx = 0
    while True:
        processed = proc.img
        print(processed.shape, frame_idx)
        frame_idx += 1
        cv2.imshow('this', processed)
        cv2.waitKey(1)
示例#3
0
    def __init__(self):
        """Driver that glues together the screen capturer, the image
        processor and the Selenium-controlled 2048 game.

        It captures the board region of the screen, processes the capture
        into a list of tile values, and caches the page <body> element so
        the caller can send keystrokes.  (Score querying is planned.)
        """
        # object for processing the images
        self.image_processer = ImProc.ImageProcessing()
        # Screen region (top-left / bottom-right corners) holding the board;
        # passed to the capturer.
        x1, y1 = 580, 360
        x2, y2 = 1080, 860
        self.capturer = capture.Capture(x1, y1, x2, y2)

        self.url = "https://gabrielecirulli.github.io/2048/"
        # the web driver (hard-coded chromedriver path)
        self.driver = webdriver.Chrome('/home/andresh/Descargas/chrome_driver/chromedriver')

        self.driver.get(self.url)

        self.driver.maximize_window()
        time.sleep(3)  # give the page time to finish loading

        # Close the ad/notice banner.  FIX: use the modern
        # driver.find_element(By.CLASS_NAME, ...) form — the
        # find_element_by_* helpers were removed in Selenium 4, and the
        # By-based form is already used for the <body> lookup below.
        close = self.driver.find_element(By.CLASS_NAME, "notice-close-button")
        close.click()

        # body element to send key strokes
        self.body = self.driver.find_elements(By.XPATH, '//body')[0]

        # restart button, cached for starting new games
        self.restart = self.driver.find_element(By.CLASS_NAME, "restart-button")
示例#4
0
def startGUI():
    """Spin up the capture -> preprocess -> OCR pipeline, then run the GUI."""
    cap = capture.Capture(0)
    print('before run')
    cap.start()

    start_time = time.time()
    # Wait for the camera thread to produce its first frame.
    while cap.img is None:
        print('waiting for camera to connect', time.time() - start_time)
        time.sleep(.25)

    proc = preprocess.Processor()
    proc.start(cap)
    # Wait for the first preprocessed frame.
    while proc.img is None:
        print('waiting for processed image', time.time() - start_time)
        time.sleep(.25)

    ocr = OCR.OCR()
    ocr.start(proc)
    # Wait for the first OCR result.
    while ocr.txt is None:
        print('waiting for OCR', time.time() - start_time)
        time.sleep(.25)

    # Expose the pipeline objects on the Tk root, then enter the event loop.
    root.cap = cap
    root.proc = proc
    root.mainloop()
    def __init__(self):
        """Set up simulation constants, empty state histories, and the camera."""
        # Simulation step size and attraction gain
        self.deltaTime = 0.05
        self.attractFactor = 8.5

        # Target state
        self.targetMaxVelocity = 1.2
        self.targetPositions, self.targetHeadings = [], []

        # Robot state
        self.robotMaxVelocity = 50
        self.robotPositions, self.robotVelocities = [], []
        self.robotHeadings = []

        # Robot-to-target relative state
        self.relativePositions, self.relativePosAbsolute = [], []
        self.relativeVelocities, self.relativeHeadings = [], []

        # Camera input and the shape the tracker looks for
        self.capture = capture.Capture()
        self.targetShape = "faces"
示例#6
0
 def startCapture(self):
     """Open camera device 1, keep it on self, and run it."""
     camera = capture.Capture(1)
     self.cap = camera
     self.cap.run()
示例#7
0
import pygame
import pygame.camera
import capture
import numpy
import sys
from pygame.locals import *

# Initialize screen:
pygame.init()
screen = pygame.display.set_mode((640, 480))
pygame.display.set_caption('MC920')

# Initialize camera:
pygame.camera.init()
webcam = capture.Capture()

# Initialize settings:
frame_count = 0      # NOTE(review): set but never read in the visible code
calibration = True   # NOTE(review): set but never read in the visible code
save_mode = "-s" in sys.argv   # enabled by passing -s on the command line
clock = pygame.time.Clock()

# Event loop:
while True:
    clock.tick(60)   # cap the loop at 60 iterations per second

    # Handle events: exit on Escape key or a window-close (QUIT) event.
    pygame.event.pump()
    keystate = pygame.key.get_pressed()
    if keystate[K_ESCAPE] or pygame.event.peek(QUIT):
        break
示例#8
0
import capture
import argparse

# Command-line options for the capture/communication client.
parser = argparse.ArgumentParser()

parser.add_argument('--host', help='hostname', default='localhost')
parser.add_argument('--port', help='port', default=9090, type=int)
parser.add_argument('--device', help='camera device num', default=0, type=int)

args = parser.parse_args()

if __name__ == "__main__":
    # FIX: the original did `capture = capture.Capture()`, rebinding the
    # module name and shadowing the import; use a distinct local name.
    cap = capture.Capture()
    cap.communicate(args.host, args.port, args.device)
    # detector = texture_detector.TextureDetector()
    # image = cap.capture()
    # res = detector.detectAndCut(image)
    # if res is not None:
    #     plt.imshow(res)
    #     plt.show()
示例#9
0
    def __init__(self, parameters, parent=None):
        """Build the main window: UI widgets, mosaic view, positions list,
        sections display, and the communication link to the acquisition
        software, then wire up all signal/slot connections.

        Args:
            parameters: parsed settings object (reads .debug, .directory).
            parent: optional Qt parent widget.
        """
        QtGui.QMainWindow.__init__(self, parent)

        # Coordinate system setup, the internal scale is 1 pixel is 100nm.
        coord.Point.pixels_to_um = 0.1

        # variables
        self.current_center = coord.Point(0.0, 0.0, "um")
        self.current_offset = coord.Point(0.0, 0.0, "um")
        self.debug = parameters.debug
        self.file_filter = "\S+.dax"
        self.parameters = parameters
        self.picture_queue = []
        self.regexp_str = ""
        self.requested_stage_pos = False
        self.stage_tracking_timer = QtCore.QTimer(self)
        self.taking_pictures = False
        self.snapshot_directory = self.parameters.directory
        self.spin_boxes = []
        # Poll the stage position every 500 ms while tracking is enabled.
        self.stage_tracking_timer.setInterval(500)

        # ui setup
        self.ui = steveUi.Ui_MainWindow()
        self.ui.setupUi(self)

        # hide some things that we don't currently use & resize group-box.
        self.ui.backgroundComboBox.hide()
        self.ui.backgroundLabel.hide()
        self.ui.moveAllSectionsCheckBox.hide()
        self.ui.showFeaturesCheckBox.hide()
        self.ui.thresholdLabel.hide()
        self.ui.thresholdSlider.hide()
        self.ui.sectionViewSettingsGroupBox.setMaximumHeight(50)

        self.setWindowIcon(QtGui.QIcon("steve.ico"))

        # handling file drops
        # NOTE(review): patching the widget *class* affects every instance
        # of that widget class, not just this one — confirm intended.
        self.ui.centralwidget.__class__.dragEnterEvent = self.dragEnterEvent
        self.ui.centralwidget.__class__.dropEvent = self.dropEvent
        self.ui.centralwidget.setAcceptDrops(True)

        # Create a validator for scaleLineEdit.
        self.scale_validator = QtGui.QDoubleValidator(1.0e-6, 1.0e+6, 6,
                                                      self.ui.scaleLineEdit)
        self.ui.scaleLineEdit.setValidator(self.scale_validator)

        # Initialize view.
        self.view = mosaicView.MosaicView(parameters, self.ui.mosaicFrame)
        layout = QtGui.QGridLayout(self.ui.mosaicFrame)
        layout.addWidget(self.view)
        self.ui.mosaicFrame.setLayout(layout)
        self.view.show()

        # Initialize positions list.
        self.positions = positions.Positions(parameters, self.view.getScene(),
                                             self.ui.positionsFrame)
        layout = QtGui.QGridLayout(self.ui.positionsFrame)
        layout.addWidget(self.positions)
        self.ui.positionsFrame.setLayout(layout)
        self.positions.show()

        # Initialize sections.
        self.sections = sections.Sections(parameters, self.view.getScene(),
                                          self.ui.sectionsDisplayFrame,
                                          self.ui.sectionsScrollArea,
                                          self.ui.sectionsTab)

        # Initialize communications.
        self.comm = capture.Capture(parameters)

        # signals
        self.ui.actionQuit.triggered.connect(self.quit)
        self.ui.actionAdjust_Contrast.triggered.connect(
            self.handleAdjustContrast)
        self.ui.actionDelete_Images.triggered.connect(self.handleDeleteImages)
        self.ui.actionLoad_Movie.triggered.connect(self.handleLoadMovie)
        self.ui.actionLoad_Mosaic.triggered.connect(self.handleLoadMosaic)
        self.ui.actionLoad_Positions.triggered.connect(
            self.handleLoadPositions)
        self.ui.actionSave_Mosaic.triggered.connect(self.handleSaveMosaic)
        self.ui.actionSave_Positions.triggered.connect(
            self.handleSavePositions)
        self.ui.actionSave_Snapshot.triggered.connect(self.handleSnapshot)
        self.ui.actionSet_Working_Directory.triggered.connect(
            self.handleSetWorkingDirectory)
        self.ui.foregroundOpacitySlider.valueChanged.connect(
            self.handleOpacityChange)
        self.ui.getStagePosButton.clicked.connect(self.handleGetStagePosButton)
        self.ui.imageGridButton.clicked.connect(self.handleImageGrid)
        self.ui.scaleLineEdit.textEdited.connect(self.handleScaleChange)
        self.ui.tabWidget.currentChanged.connect(self.handleTabChange)
        self.ui.trackStageCheckBox.stateChanged.connect(self.handleTrackStage)
        self.ui.xSpinBox.valueChanged.connect(self.handleGridChange)
        self.ui.ySpinBox.valueChanged.connect(self.handleGridChange)

        self.stage_tracking_timer.timeout.connect(
            self.handleStageTrackingTimer)

        # Mosaic-view signals.
        self.view.addPosition.connect(self.addPositions)
        self.view.addSection.connect(self.addSection)
        self.view.getObjective.connect(self.handleGetObjective)
        self.view.gotoPosition.connect(self.gotoPosition)
        self.view.mouseMove.connect(self.updateMosaicLabel)
        self.view.scaleChange.connect(self.updateScaleLineEdit)
        self.view.takePictures.connect(self.takePictures)

        self.sections.addPositions.connect(self.addPositions)
        self.sections.takePictures.connect(self.takePictures)

        # Communication-object signals.
        self.comm.captureComplete.connect(self.addImage)
        self.comm.changeObjective.connect(self.handleChangeObjective)
        self.comm.disconnected.connect(self.handleDisconnected)
        self.comm.getPositionComplete.connect(self.handleGetPositionComplete)
        self.comm.newObjectiveData.connect(self.handleNewObjectiveData)
        self.comm.otherComplete.connect(self.handleOtherComplete)

        self.ui.objectivesGroupBox.valueChanged.connect(
            self.handleMOValueChange)

        # Try and get settings from HAL.
        self.comm.commConnect()
        self.comm.getSettings()
示例#10
0
    def __init__(self, parameters, parent=None):
        """Build the main window: objective table, mosaic view, positions
        list, sections display, and the communication object, then wire up
        all signal/slot connections.

        Args:
            parameters: parsed settings object (reads .pixels_to_um, .debug,
                and optional .mag0 .. .mag9 comma-separated objective specs).
            parent: optional Qt parent widget.
        """
        QtGui.QMainWindow.__init__(self, parent)

        # coordinate system setup
        coord.Point.pixels_to_um = parameters.pixels_to_um

        # variables
        self.current_center = coord.Point(0.0, 0.0, "um")
        self.current_magnification = 1.0
        self.current_objective = False
        self.current_offset = coord.Point(0.0, 0.0, "um")
        self.debug = parameters.debug
        self.parameters = parameters
        self.picture_queue = []
        self.taking_pictures = False

        # ui setup
        self.ui = steveUi.Ui_MainWindow()
        self.ui.setupUi(self)

        # hide some things that we don't currently use & resize group-box.
        self.ui.backgroundComboBox.hide()
        self.ui.backgroundLabel.hide()
        self.ui.moveAllSectionsCheckBox.hide()
        self.ui.showFeaturesCheckBox.hide()
        self.ui.thresholdLabel.hide()
        self.ui.thresholdSlider.hide()
        self.ui.sectionViewSettingsGroupBox.setMaximumHeight(50)

        self.setWindowIcon(QtGui.QIcon("steve.ico"))

        # Initialize objectives.
        # Collect "magN" parameter strings ("name,magnification,xoff,yoff")
        # and populate the objective combo box.
        objectives = []
        for i in range(10):
            mag = "mag" + str(i)
            if hasattr(self.parameters, mag):
                data = getattr(self.parameters, mag)
                obj_name = data.split(",")[0]
                objectives.append(data)
                self.ui.magComboBox.addItem(obj_name, data)

        # Create labels and spin boxes for objective settings.
        self.spin_boxes = []
        layout = QtGui.QGridLayout(self.ui.objectivesFrame)

        for i, label_text in enumerate(
            ["Objective", "Magnification", "X Offset", "Y Offset"]):
            text_item = QtGui.QLabel(label_text, self.ui.objectivesFrame)
            layout.addWidget(text_item, 0, i)

        # The first objective is assumed to be the 100x & is not adjustable.
        data = objectives[0].split(",")
        self.current_objective = data[0]
        for j, datum in enumerate(data):
            text_item = QtGui.QLabel(datum, self.ui.objectivesFrame)
            layout.addWidget(text_item, 1, j)

        # The other objectives are adjustable.
        for i, obj in enumerate(objectives[1:]):
            data = obj.split(",")
            text_item = QtGui.QLabel(data[0], self.ui.objectivesFrame)
            layout.addWidget(text_item, i + 2, 0)

            for j, btype in enumerate(["magnification", "xoffset", "yoffset"]):
                sbox = MagOffsetSpinBox(data[0], btype, float(data[j + 1]))
                layout.addWidget(sbox, i + 2, j + 1)
                sbox.moValueChange.connect(self.handleMOValueChange)
                self.spin_boxes.append(sbox)

        # Create a validator for scaleLineEdit.
        self.sce_validator = QtGui.QDoubleValidator(1.0e-6, 1.0e+6, 6,
                                                    self.ui.scaleLineEdit)
        self.ui.scaleLineEdit.setValidator(self.sce_validator)

        # Initialize view.
        self.view = mosaicView.MosaicView(parameters, self.ui.mosaicFrame)
        layout = QtGui.QGridLayout(self.ui.mosaicFrame)
        layout.addWidget(self.view)
        self.ui.mosaicFrame.setLayout(layout)
        self.view.show()

        # Initialize positions list.
        self.positions = positions.Positions(parameters, self.view.getScene(),
                                             self.ui.positionsFrame)
        layout = QtGui.QGridLayout(self.ui.positionsFrame)
        layout.addWidget(self.positions)
        self.ui.positionsFrame.setLayout(layout)
        self.positions.show()

        # Initialize sections.
        self.sections = sections.Sections(parameters, self.view.getScene(),
                                          self.ui.sectionsDisplayFrame,
                                          self.ui.sectionsScrollArea,
                                          self.ui.sectionsTab)

        # Initialize communications.
        self.comm = capture.Capture(parameters)

        # signals
        self.ui.abortButton.clicked.connect(self.handleAbort)
        self.ui.actionQuit.triggered.connect(self.quit)
        self.ui.actionDelete_Images.triggered.connect(self.handleDeleteImages)
        self.ui.actionLoad_Dax.triggered.connect(self.handleLoadDax)
        self.ui.actionLoad_Mosaic.triggered.connect(self.handleLoadMosaic)
        self.ui.actionLoad_Positions.triggered.connect(
            self.handleLoadPositions)
        self.ui.actionSave_Mosaic.triggered.connect(self.handleSaveMosaic)
        self.ui.actionSave_Positions.triggered.connect(
            self.handleSavePositions)
        self.ui.actionSave_Snapshot.triggered.connect(self.handleSnapshot)
        self.ui.actionSet_Working_Directory.triggered.connect(
            self.handleSetWorkingDirectory)
        self.ui.foregroundOpacitySlider.valueChanged.connect(
            self.handleOpacityChange)
        self.ui.magComboBox.currentIndexChanged.connect(
            self.handleObjectiveChange)
        self.ui.scaleLineEdit.textEdited.connect(self.handleScaleChange)
        self.ui.tabWidget.currentChanged.connect(self.handleTabChange)
        self.ui.xSpinBox.valueChanged.connect(self.handleGridChange)
        self.ui.ySpinBox.valueChanged.connect(self.handleGridChange)

        # Mosaic-view signals.
        self.view.addPosition.connect(self.addPositions)
        self.view.addSection.connect(self.addSection)
        self.view.gotoPosition.connect(self.gotoPosition)
        self.view.mouseMove.connect(self.updateMosaicLabel)
        self.view.scaleChange.connect(self.updateScaleLineEdit)
        self.view.takePictures.connect(self.takePictures)

        self.sections.addPositions.connect(self.addPositions)
        self.sections.takePictures.connect(self.takePictures)

        # Communication-object signals.
        self.comm.captureComplete.connect(self.addImage)
        self.comm.disconnected.connect(self.handleDisconnected)
        self.comm.gotoComplete.connect(self.handleGotoComplete)

        # Select the first objective so dependent state is consistent.
        self.handleObjectiveChange(0)
示例#11
0
文件: main.py 项目: agupta/Pilhouette
 def retake(self):
     """Tear down the confirm frame and show a brand-new capture frame."""
     self.frame_confirm.destroy()
     # A fresh capture frame becomes both the current frame and the
     # capture frame reference.
     fresh = capture.Capture(self.master, self)
     self.current_frame = fresh
     self.frame_capture = fresh
     self.frame_capture.grid(row=0, column=0, sticky="news")
示例#12
0
#!/usr/bin/python2
import cv2
import time
import capture

NUM_NEEDED = 20                  # calibration images we actually need
NUM_CAPTURE = 2 * NUM_NEEDED     # attempts allowed (some frames will fail)
images = []
# Warm up the HighGUI event loop, then give the user time to get ready.
cv2.waitKey(1)
cv2.waitKey(1)
cv2.waitKey(5000)
print("press a key for each picture (or wait 3 seconds)")
with capture.Capture() as c:
    for i in range(0, NUM_CAPTURE):
        cv2.waitKey(3000)
        print("capture")
        gray, color = c.capture()
        # Look for an 8x6 inner-corner chessboard in the grayscale frame.
        ret, corners = cv2.findChessboardCorners(gray, (8, 6), None)
        if ret:
            # FIX: the original appended the undefined name `im0`, which
            # raised NameError on the first successful detection.  Save the
            # color frame (assumed intent — gray would also work; confirm).
            images.append(color)
        else:
            print("oops, no checkerboard detected")
        if len(images) >= NUM_NEEDED:
            break

# Write the accepted frames out as numbered calibration images.
n = 1
for im in images:
    name = 'cal-image-%03d.png' % (n, )
    print(("saving " + name))
    cv2.imwrite(name, im)
    n += 1
示例#13
0
def startCapture():
    """Create a Capture on device 1 and run it."""
    capture.Capture(1).run()
示例#14
0
 def __init__(self, path):
     """Load the script at *path* and attach a capturer to the event bus."""
     bus = events.Events()
     # Constructed for its side effects on the bus, presumably — the local
     # reference is dropped when __init__ returns, as in the original.
     script = Script(bus, path)
     self._capture = capture.Capture(bus)
示例#15
0
import capture
import bounding_boxes
import predict
import tts
import pytesseract
import cv2
import sys

# Ask the user which OCR mode to run.
print("Enter 1 for book reading \n Enter 2 for scene images")
a = int(input())

#orig_img, gray_img = cv2.imread(sys.argv[1]), cv2.imread(sys.argv[1], 0)
orig_img, gray_img = capture.Capture()
custom_config = r'--oem 3 --psm 6'

if a == 1:
    # Whole-page OCR straight from the original image.
    text = pytesseract.image_to_string(orig_img, config=custom_config)
elif a == 2:
    # Detect letter/word bounding boxes, then classify letter by letter.
    letters, words = bounding_boxes.get_bboxes(orig_img, gray_img)
    text = ' '.join(
        [''.join([predict.Predict(letter) for letter in _]) for _ in letters])
else:
    # FIX: without this branch, any other input left `text` undefined and
    # the script crashed later with NameError instead of a clear message.
    sys.exit('invalid choice: enter 1 or 2')

print(text)
tts.texttospeech(text)
示例#16
0
from io import RawIOBase
import capture, preprocess, OCR
import time, cv2


# Start the capture thread on camera device 1.
cap = capture.Capture(1)
print('before run')
cap.start()
t = time.time()
# Wait for the capture thread to deliver its first frame.
while cap.img is None:
    print('waiting for camera to connect',time.time()-t)
    time.sleep(.25)

proc = preprocess.Processor()
proc.start(cap)
# Wait for the first preprocessed frame.
while proc.img is None:
    print('waiting for processed image',time.time()-t)
    time.sleep(.25)


ocr = OCR.OCR()
ocr.start(proc)

# Wait for the first OCR result.
while ocr.txt is None:
    print('waiting for OCR',time.time()-t)
    time.sleep(.25)

t = time.time()

# Main loop: grab the latest raw frame each iteration.
# NOTE(review): the visible body only rebinds `raw`; the rest of the loop
# appears to be truncated in this excerpt.
while True:
    raw = cap.img
示例#17
0
import sys, capture, time
import lib.hexdump as hexdump

# Example usage of the Capture class (Python 2 script).
test = capture.Capture()

# Start the capture (runs in its own thread).
test.start()

time.sleep(10)

# Stop it.
test.stopCapture()
# Wait for the capture thread to finish.
test.join()

# Loop over the captured packets.
for packet in test.result:
    print '\n' + '[PACKET TIMESTAMP] : ', packet.created

    # Loop over the packet's layers.
    for layer in packet.layers:
        print
        print 'COUCHE : ', layer['LayerType']
        # Loop over the layer's fields, skipping the raw payload.
        for key, value in layer.items():
            if key != 'Data':
                print key, value

    print '\n'
    hexdump.hexdump(packet.packet)