Example #1
def _write_results_on_machine(image, eggs, clusters, imname):

	rpath = get_root(__file__)[:get_root(__file__).index('/')]

	for egg in eggs:
		egg = detect.get_egg_border(egg, image.shape[:2])
		image[egg[:, 0], egg[:, 1]] = (0, 0, 255)

	for cluster in clusters:#range(len(clusters)):
		cluster = detect.get_egg_border(cluster, image.shape[:2])
		image[cluster[:, 0], cluster[:, 1]] = (0, 255, 0)

	cv2.imwrite(rpath + imname[:-4] + "_TEST.jpg", image)
Example #2
def runDet(diaPath):
    import os
    import sys
    import optparse

    import lsst.daf.data as dafBase
    import lsst.pex.policy
    import lsst.pex.logging
    import lsst.afw.image as afwImage
    import lsst.afw.detection as afwDet
    import lsst.detection as det
    import Detection

    defInDir = os.environ.get("FWDATA_DIR", "")
    moduleDir = os.path.split(__file__)[0]
    appDir = os.path.normpath(os.path.join("../../../",moduleDir))

    defDiaPath = os.path.join(defInDir, "871034p_1_MI")
    defPolicyPath = os.path.join(appDir, "pipeline/DetectionStagePolicy.paf")
    defVerbosity = 5 # change to 0 once this all works to hide all messages

    policyPath = defPolicyPath

    diaExposure = afwImage.ExposureF()
    diaExposure.readFits(diaPath)
    diaMaskedImage = diaExposure.getMaskedImage()
    diaWCS = diaExposure.getWcs()

    policy = lsst.pex.policy.Policy.createPolicy(policyPath)

    diaSources = Detection.detection(diaExposure, policy)

    for i in range(len(diaSources)):
        diaSource_i = diaSources[i]
        print diaSource_i.toString()
Example #3
def main():
    classification = Classification.Classifier()  #Classification part
    classification.do_knn()
    classification.do_svm()
    classification.do_random_forest()
    classification.do_neural_network()
    detection = Detection.Detector()  #sliding window and classification
    detection.run()
Example #4
    def process(self):

        logging.Trace_setVerbosity("lsst.detection", 5)
        logging.Trace(
            "lsst.detection.DetectionStage", 3,
            'Python DetectionStage process : _rank %i stageId %d' %
            (self._rank, self.stageId))
        activeClipboard = self.inputQueue.getNextDataset()

        ###########
        #
        # Get objects from clipboard
        #
        triggerEvent = activeClipboard.get('triggerVisitEvent')
        filterNameItem = triggerEvent.findUnique('filterName')
        filterName = filterNameItem.getValueString()
        exposureIdItem = triggerEvent.findUnique('exposureId')
        exposureId = exposureIdItem.getValueInt()
        visitTimeItem = triggerEvent.findUnique('visitTime')
        visitTime = visitTimeItem.getValueDouble()
        ###########
        #
        # Log the beginning of Detection stage for this slice
        #
        LogRec(self.detectionLog, Log.INFO) \
                                  <<  "Began detection stage" \
                                  << DataProperty("exposureId", exposureId) \
                                  << DataProperty("visitTime", visitTime) \
                                  << DataProperty("filterName", filterName) \
                                  << LogRec.endr
        #
        # Instantiate a Filter object to get the id of filterName
        #
        dbLocation = dafper.LogicalLocation(
            'mysql://lsst10.ncsa.uiuc.edu:3306/test')
        filterDB = lsst.afw.image.Filter(dbLocation, filterName)
        filterId = filterDB.getId()
        logging.Trace("lsst.detection.DetectionStage", 3,
                      'FilterName %s FilterId %d' % (filterName, filterId))

        differenceImageExposure = activeClipboard.get('DifferenceExposure')

        diaSourceCollection = Detection.detection(
            differenceImageExposure=differenceImageExposure,
            policy=self._policy,
            filterId=filterId,
            useLog=self.detectionLog,
            footprintList=None,
        )

        ###########
        #
        # Post results to clipboard
        #
        activeClipboard.put('DiaSources', diaSourceCollection)

        self.outputQueue.addDataset(activeClipboard)
Example #5
def main(argv):

    SHOW_FRAME = True
    WRITE_FRAME = True

    start_webserver("0.0.0.0", 5000)

    dt = Detection.Recog()
    sync = SimpleSynchronise(10, 3)

    cam = Cam.URLCam("http://192.168.188.200:200/cam1/cam_pic.php")
    #cam = Cam.WebCam(0)
    if SHOW_FRAME:
        cv.namedWindow("canvas")

    display = [Display((900, 1440))] * 9

    renderer = MultiScreenFaceDisplay(display)
    overlay = OverlayScreenFaceDisplay()
    while True:
        frame = cam.read_image()
        if frame is None:
            continue

        frame = cv.resize(frame, (1440, 900))

        faces = dt.detectFaces(frame)
        faces = list(faces)
        print(faces)
        loaded = list(load_faces(frame, faces))

        overlay.clear()
        renderer.clear()

        overlay.add_face(loaded)
        renderer.add_face(loaded)

        img = overlay.render(frame)
        images = renderer.render(frame)

        if SHOW_FRAME:
            #img2 = cv.resize(img, (1440, 900))
            cv.imshow("canvas", img)
            if cv.waitKey(1) == ord('q'):
                break

        if sync.check_sync():
            if WRITE_FRAME:
                cv.imwrite("img/over.jpg",
                           img)  #write overlay to special filename

                i = 1
                for image in images:
                    cv.imwrite("img/{:>05}.jpg".format(i),
                               image)  #write images to file
                    i += 1
Example #6
def encryptProfile(Str):
    Encoded = profile_for(Str)
    Blocks = Padding.pad(Encoded, 16)
    plainText = ""
    for Block in Blocks:
        plainText += Block
    Key = Detection.randKey()
    cipher = AES.AESCipher(Key, AES.MODE_ECB)
    CipherText = cipher.encrypt(plainText)
    return CipherText, Key
Example #8
def main():
    cprint('+-------------------------------+', 'yellow')
    cprint('|                               |', 'yellow')
    cprint('|        Welcome, ZJUer!        |', 'yellow')
    cprint('|                               |', 'yellow')
    cprint('|    Auto Login ZJUWLAN (v1)    |', 'yellow')
    cprint('|       create by ChanLo        |', 'yellow')
    cprint('|                               |', 'yellow')
    cprint('+-------------------------------+\n', 'yellow')
    UPSave = DataSave.UPSave()
    if not if_use_existing_account(use_account):
        username, password = get_user_UP()
        UPSave.delete_DB(name_DB)
        UPSave.create_DB(name_DB)
        UPSave.insert_to_DB(name_DB, username, password)
    else:
        username, password = UPSave.select_DB(name_DB, aim_id)
    data = {
        'action': 'login',
        'username': username,
        'password': password,
        'ac_id': '3',
        'user_ip': '',
        'nas_ip': '',
        'user_mac': '',
        'save_me': '1',
        'ajax': '1'
    }
    data = urllib.parse.urlencode(data)
    try:
        req = Request('https://net.zju.edu.cn/srun_portal_pc.php?url=&ac_id=3')
        with urlopen(req, data.encode('utf-8'), timeout=10) as response:
            content = response.read()
            cprint('Checking network status...\n', 'green')
            sleep(1)
            detection = Detection.Detection()
            if detection.is_connected():
                cprint('Login successful!', 'red')
            else:
                cprint('Connection failed.', 'green')

    except URLError as e:
        if hasattr(e, 'reason'):
            info = '[ERROR] Failed to reach the server.\nReason: ' + str(
                e.reason)
        elif hasattr(e, 'code'):
            info = '[ERROR] The server couldn\'t fulfill the request.\nError code: ' + str(
                e.code)
        else:
            info = '[ERROR] Unknown URLError'
        print(info)
    except Exception:
        import traceback
        print('Generic exception: ' + traceback.format_exc())
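
# A sketch of the same form POST using the requests library instead of urllib
# (an assumption for illustration only; requests is not used by the code above,
# and the URL and payload simply mirror the ones built in main()).
def login_with_requests(username, password):
    import requests  # assumed to be installed
    payload = {'action': 'login', 'username': username, 'password': password,
               'ac_id': '3', 'user_ip': '', 'nas_ip': '', 'user_mac': '',
               'save_me': '1', 'ajax': '1'}
    resp = requests.post('https://net.zju.edu.cn/srun_portal_pc.php?url=&ac_id=3',
                         data=payload, timeout=10)
    return resp.content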
Example #10
    def __process_det_file(self, det_file=''):
        """
        Defines what information to gather when processing each detection file.
        This shouldn't be called directly.
        Changing anything in here changes what information is gathered from each file.
        """
        with open(det_file, 'r') as file_text:
            lines = file_text.readlines()

        result = []
        for line in lines:
            aux = [float(val) for val in line.rstrip('\n').split(',')]
            result.append(aux)

        for i, det in enumerate(result):
            if len(det) > 5:
                detection = Detection(det[0], det[1], det[2], det[3], det[4], det[5], det[6:])
                result[i] = detection.return_dict()
            else:
                detection = Detection(det[0], det[1], det[2], det[3], det[4], 0, [])
                result[i] = detection.return_dict()

        return result
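
# A minimal sketch (hypothetical field names, not the project's actual class)
# of a Detection record compatible with __process_det_file above: six leading
# comma-separated values per line plus an optional tail of extra fields.
class _DetectionRow:
    def __init__(self, frame, track_id, x, y, score, flag, extras):
        self.frame, self.track_id = frame, track_id
        self.x, self.y, self.score, self.flag = x, y, score, flag
        self.extras = list(extras)

    def return_dict(self):
        # one plain dict per detection line
        return {'frame': self.frame, 'id': self.track_id, 'x': self.x,
                'y': self.y, 'score': self.score, 'flag': self.flag,
                'extras': self.extras}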
Example #11
def process_image(img):
    img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
    mydetect = Detection.Detection(img, picklefile)
    mydetect.multiple_scale_window(scales, y_start_stops)
    #print(Record.history_track)
    if len(Record.history_track) > 0:
        last_threshold = mydetect.process_heatmap(int(np.min(np.array(Record.history_track))))
    else:
        last_threshold = mydetect.process_heatmap(2)
    if last_threshold > 0:
        Record.history_track.append(last_threshold)
    else:
        Record.history_track = []
    if len(Record.history_track) > Record.cache_size:
        Record.history_track = Record.history_track[1:]
    return mydetect.draw_label_box()
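
# The function above assumes a module-level cache roughly like this (a
# hypothetical sketch inferred from the attribute accesses: history_track
# holds thresholds from recent frames, cache_size bounds its length).
class Record:
    history_track = []  # thresholds kept from the last few frames
    cache_size = 10     # assumed cap on the rolling history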
Example #12
def detect():
    if request.method == 'POST' and request.files['image']:
        img = request.files['image']
        img_name = img.filename
        create_new_folder(app.config['SRC_FOLDER'])
        create_new_folder(app.config['DEST_FOLDER'])
        saved_path = os.path.join(app.config['SRC_FOLDER'], img_name)
        saved_path1 = os.path.join(app.config['DEST_FOLDER'], img_name)
        app.logger.info("saving {}".format(saved_path))
        img.save(saved_path)
        img.save(saved_path1)
        detections = Detection.detectObject(saved_path, saved_path1)
        strings2 = list()
        for eachObject in detections:
            strings2.append(
                eachObject["name"] + " : " +
                "{:10.2f}".format(eachObject["percentage_probability"]))
        return render_template('detect.html',
                               sourceImage=img_name,
                               newImage=img_name,
                               data=strings2)
    else:
        return "Where is the image?"
Example #13
)
orimg = cv2.imread(sys.argv[1], 0)  # Read the image
height, width = orimg.shape[0:2]
img_resized = cv2.resize(orimg, (1300, int(1300 * (height / width))))
img = img_resized[:]
print(
    "................................Image Loading................................"
)
print("DONE")

box_list = list()

print(
    "...............................Detecting Images............................."
)
box_list, page_width = Detection.ImageDetect(img, box_list)
print("DONE")

for i in box_list:
    img[i.y1:i.y2, i.x1:i.x2] = np.ones((i.y2 - i.y1, i.x2 - i.x1)) * 255

print(
    ".................................Detecting Text.............................."
)
box_list = Detection.TextDetect(img, box_list)
print("DONE")

#for i in range(len(box_list)-1):
#	if(box_list[i].text==""):
#		del box_list[i]
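
# The commented-out clean-up above deletes entries from box_list while
# iterating over its indices, which skips elements and can raise IndexError.
# A safer filter (a sketch, assuming each box carries a .text attribute set by
# Detection.TextDetect):
box_list = [box for box in box_list if box.text != ""]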
Example #14
    def recast(self, delrec, geff, optype, useHeavyMeson=False):
        # we define the recast based on the expected signal

        if self.channel == "decay":
            if self.model == "EFT":  # For naive NoE based limits
                xi, EffLimtmp = de.GetNaiveDecayLimits(delrec, self.exp, 10,
                                                       geff, optype)
                xi, EffLim, EffLim_low = de.RecastDecayLimit(
                    xi, EffLimtmp, delrec, delrec, self.exp, geff, optype)
                xi_full, Lim_full = uf.CombineUpDown(xi, EffLim_low, EffLim,
                                                     self.combthr)
            else:  # Standard recasting case
                xi_full, Lim_full = de.FastDecayLimit(
                    self.exp, self.mx_ini, self.lim_ini, self.delini, delrec,
                    geff, optype, self.combthr, useHeavyMeson)
        elif self.channel == "heavymesondecay":
            if self.model == "EFT":
                xi, EffLimtmp = de.GetNaiveDecayLimits(delrec, self.exp, 10,
                                                       geff, optype, True)
                xi_heavy, EffLim_heavy, EffLim_heavy_low = de.RecastDecayLimit(
                    xi, EffLimtmp, delrec, delrec, self.exp, geff, optype,
                    True)
                xi_full, Lim_full = uf.CombineUpDown(xi_heavy,
                                                     EffLim_heavy_low,
                                                     EffLim_heavy,
                                                     self.combthr)
            else:
                xi_full, Lim_full = de.FastDecayLimit(
                    self.exp, self.mx_ini, self.lim_ini, self.delini, delrec,
                    geff, optype, self.combthr, useHeavyMeson)
        elif self.channel == "scattering":
            xi_full, Lim_full = de.FastScatLimit(self.exp, self.mx_ini,
                                                 self.lim_ini, self.delini,
                                                 delrec, geff, optype)
        elif self.channel == "monogam":
            xi_full, Lim_full = de.FastMonoPhoton(self.exp, self.mx_ini,
                                                  self.lim_ini, self.delini,
                                                  delrec, geff, optype)
        elif self.channel == "invisibledecayBmtoKm":
            if self.exp == "babar":
                xi_full, Lim_full = de.FastInvMesDecay("babar_BmtoKmnunu",
                                                       delrec, geff, optype)
            elif self.exp == "belle2":
                xi_full, Lim_full = de.FastInvMesDecay("belle2_BmtoKmnunu",
                                                       delrec, geff, optype)
        elif self.channel == "invisibledecayBmtoPim":
            xi_full, Lim_full = de.FastInvMesDecay("babar_BmtoPimnunu", delrec,
                                                   geff, optype)
        elif self.channel == "invisibledecayB0toPi0":
            xi_full, Lim_full = de.FastInvMesDecay("belle_B0toPi0nunu", delrec,
                                                   geff, optype)
        elif self.channel == "invisibledecayB0toK0":
            xi_full, Lim_full = de.FastInvMesDecay("belle_B0toK0nunu", delrec,
                                                   geff, optype)
        elif self.channel == "monojet_down":
            xi_full, Lim_full = de.FastMonoJet(self.exp, self.mx_ini,
                                               self.lim_ini, self.delini,
                                               delrec, geff, optype)
        elif self.channel == "monojet_up":
            xi_full, Lim_full = de.FastMonoJet(self.exp, self.mx_ini,
                                               self.lim_ini, self.delini,
                                               delrec, geff, optype)
        elif self.channel == "invisibledecayKL0toPi0":
            if self.exp == "e391a":
                xi_full, Lim_full = de.FastInvMesDecay("e391a_KL0toPi0nunu",
                                                       delrec, geff, optype)
            if self.exp == "na62":
                xi_full, Lim_full = de.FastInvMesDecay("na62_KL0toPi0nunu",
                                                       delrec, geff, optype)
        elif self.channel == "invisibledecayPi0":
            xi_full, Lim_full = de.FastInvMesDecay("na62_pi0toinvisible",
                                                   delrec, geff, optype)
        elif self.channel == "invisibledecayJPsi":
            xi_full, Lim_full = de.FastInvMesDecay("bes_JPsitoinvisible",
                                                   delrec, geff, optype)
        elif self.channel == "invisibledecayUpsilon":
            xi_full, Lim_full = de.FastInvMesDecay("babar_Upsilontoinvisible",
                                                   delrec, geff, optype)
        elif self.channel == "invisibledecayKptoPip":
            if self.exp == "na62":
                xi_full, Lim_full = de.FastInvMesDecay("na62_KptoPipa", delrec,
                                                       geff, optype)
            elif self.exp == "e949":
                xi_full, Lim_full = de.FastInvMesDecay("e949_KptoPipa", delrec,
                                                       geff, optype)
        elif self.channel == "cosmicrays":
            xi_full, Lim_low_full, Lim_high_full = de.FastCRLimit(
                "t2k", delrec, geff, optype)
            xi_full, Lim_full = uf.CombineUpDown(xi_full, Lim_low_full,
                                                 Lim_high_full)
        elif self.channel == "low_cooling":
            xi_full, Lim_full = de.FastSN1987Limit(self.lim_inifull, delrec,
                                                   geff, optype, False)
        elif self.channel == "high_cooling":
            xi_full, Lim_full = de.FastSN1987Limit(self.lim_inifull, delrec,
                                                   geff, optype, True)
        else:
            print(
                "Channel selected: ", self.channel,
                " is not currently implemented. Possible choices: \n",
                "'decay' : faser, mathusla, ship, charm, seaquest, seaquest_Phase2, lsnd \n",
                "'heavymesondecay' : ship \n",
                "'scattering' :  nova, miniboone, sbnd \n",
                "'missingE' : babar, belleII, atlas, lep \n",
                "'cosmicrays' : t2k (decay from cosmic ray showers into t2k), ",
                "cooling : for sn1987_low (pessimistic limit from SN1987 cooling), sn1987_high (optimistic limit from SN1987 cooling) ",
                "Invisible light meson decays: na62 (pi0decay and invisibledecayKptoPip) e949 (pi0decay and invisibledecayKptoPip), e391a (invisibledecayKL0toPi0)",
                "Invisible heavy meson decay: belle (invisibledecayB0toPi0 ,invisibledecayB0toK0) and belleII (invisibledecayBmtoKm)"
            )
            xi_full = np.logspace(-3., 1., 30)
            Lim_full = np.zeros(np.shape(xi_full))
        return xi_full, Lim_full
Example #15
    exit()

# The first step is to check if the image really has a palette with a circle at its center.
im = Defects.hasPalette(im)
if isinstance(im, str):
    exit()

print("Detecting circle...")

# detecting the central circle
# The program will try to recognize the central circle in 30 attempts.
# If the circle still isn't recognized, we rotate the image by 180 degrees.
# Through the radius of the circle we can estimate the size of the eggs.
params = None
for att in range(30):
    params = detect.detect_circle_mark(im)

    if params is None:

        if att == 29:
            for i in range(2):
                im = cv2.transpose(im)
                im = cv2.flip(im, 1)

            params = detect.detect_circle_mark(im)

            if params is None:
                print(IO.json_packing_error('ERR_003'))
                exit()
            else:
                break
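
# The retry-and-rotate logic above can be folded into a helper; a sketch that
# assumes, like the code above, that detect.detect_circle_mark returns None
# when no circle is found.
def find_circle_mark(image, attempts=30):
    for _ in range(attempts):
        found = detect.detect_circle_mark(image)
        if found is not None:
            return image, found
    # rotate the image by 180 degrees (two transpose + horizontal-flip steps) and retry once
    for _ in range(2):
        image = cv2.flip(cv2.transpose(image), 1)
    return image, detect.detect_circle_mark(image)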
Example #16
count = 1
while count < 45 or cv2.waitKey(1) == 13:
    (_, im) = webcam.genNextFrame()
    #time.sleep(0.38)
    cv2.imshow('OpenCV', im)
    count += 1
im = cv2.flip(im, 1, 0)

det.setScaleFactor(1.3)
det.setMinNeighbours(5)

gray = det.gray(im)
faces = det.detect(gray)
print(len(faces))

detection = Detection.Detection2D(scene=im)
for (x, y, w, h) in faces:
    detection.drawBoundingBox(x, y, w, h, 2, (255, 0, 0))
    #cv2.rectangle(im,(x,y),(x+w,y+h),(255,0,0),2)
    face = gray[y:y + h, x:x + w]
    detection.drawBoundingBox(x, y, w, h, 3, (0, 255, 0))
    #cv2.rectangle(im, (x, y), (x + w, y + h), (0, 255, 0), 3)

    pred, l, conf = rec.predict(face, im_width, im_height)
    print(pred, conf)
    #print("pred", prediction)
    if conf < 75:
        #cv2.putText(im,'%s - %.0f' % (names[pred],pred),(x-10, y-10), cv2.FONT_HERSHEY_PLAIN,1,(0, 255, 0))
        detection.putText(text="%s - %.0f" % (names[pred], conf),
                          font=cv2.FONT_HERSHEY_PLAIN,
                          color=(0, 255, 0))
Example #17
    Str = "comment1=cooking%20MCs;userdata=" + UserData + ";comment2=%20like%20a%20pound%20of%20bacon"
    Blocks = Padding.pad(Str, 16)
    PlainText = ""
    for Block in Blocks:
        PlainText += Block
    return CBCEncrypt(PlainText, Key, IV)


def DecryptData(CipherText, Key, IV):
    PlainText = CBCDecrypt(CipherText, Key, IV)
    if ";admin=true;" in PlainText:
        return True
    return False


Key = Detection.randKey()
IV = Detection.randKey()
Message = "Hello World My Name is Max Check"
List = []
for byte in Message:
    List.append(ord(byte))
print List[:16], "\t", List[16:]

cipherText = CBCEncrypt(Message, Key, IV)
byteList = []
for byte in cipherText:
    byteList.append(ord(byte))
print byteList[:16], "\t", byteList[16:]

byteList[0] = byteList[0] ^ 97 ^ 5
cipherText = ""
Example #18
    print "Error"
    return None

def findAll(Message, Key, blockSize, Rand):
    Chars = []
    for i in range(len(Message)):
        if i % blockSize == 0:
            newMessage = Message[i:]
        cipherText = ECB_Encrypt(newMessage, Key, "A" * offset + "A" * ((blockSize - (i % blockSize) - 1)),Rand)
        knownStr = "A" * offset + "A" * (blockSize - (i % blockSize) - 1)
        for j in range(i % blockSize):
            knownStr += Chars[j + (i / blockSize) * blockSize]
        print knownStr
        Chars.append(findByte(cipherText, knownStr, Key, Rand))
        print Chars
        
    return Chars

Key = Detection.randKey()
Randnum = random.randint(5,10)
Rand = os.urandom(Randnum)
Message = ""
file = open("ECBText","r")
for line in file:
    Message += line.strip("\n")
file.close()
Message = binascii.a2b_base64(Message)

blockSize, offset = BlockAndOffset(Message, Key, Rand)
print blockSize, offset
findAll(Message, Key, blockSize, Rand)
Example #19
    x = indices[1][i] - centerSmall[0]
    y = centerSmall[1] - indices[0][i]
    rho = np.sqrt(x**2 + y**2)
    theta = (np.arctan2(y, x))
    if theta < 0:
        theta = theta + 2 * np.pi
    points.append((rho, theta))

plt.scatter([point[1] for point in points], [point[0] for point in points],
            s=0.2)
plt.show()

# Ignoring the distorted (?) regions.
points_truncated = [
    point for point in points if 2 <= point[1] <= 4.5 and point[0] > 3105
]
#plt.scatter([point[1] for point in points_truncated], [point[0] for point in points_truncated], s=0.2)
#plt.show()

h, bin_edges = Detection.points_histogram(
    [point[0] for point in points_truncated])
#plt.hist([point[0] for point in points_truncated], bin_edges)
#plt.show()

# To do: the inclusion threshold would be a good thing to expose to the user
grooves = Detection.points_to_grooves(h, bin_edges, 1000, points_truncated)

for groove in grooves:
    plt.scatter(groove.get_theta_axis(), groove.get_rho_axis(), s=0.2)
    plt.show()
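
# Vectorized form of the per-pixel polar conversion above (a sketch; it assumes,
# as the loop does, that indices comes from something like np.nonzero on the
# image and that centerSmall holds the (x, y) centre).
xs = indices[1] - centerSmall[0]
ys = centerSmall[1] - indices[0]
rhos = np.hypot(xs, ys)
thetas = np.mod(np.arctan2(ys, xs), 2 * np.pi)  # wrap negative angles into [0, 2*pi)
points_vec = list(zip(rhos, thetas))            # same (rho, theta) tuples as points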
Example #20
feature_params = dict(maxCorners=500,
                      qualityLevel=0.3,
                      minDistance=7,
                      blockSize=7)

# Parameters for Lucas-Kanade optical flow
lk_params = dict(winSize=(15, 15),
                 maxLevel=2,
                 criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10,
                           0.03))

kernel = np.ones((7, 7), np.uint8)  #erosion kernel
smoothing_kernel = np.ones((25, 25), np.float32) / 25  #smoothing kernel

#detect a base skin color
skin_base = Detection.skin_detect(cap)

if skin_base.size != 0:
    #skin color range
    skin_lower = np.array([skin_base[0] - 20, 50, 50])
    skin_upper = np.array([skin_base[0] + 20, 255, 255])

    # Find corners of first frame
    ret, frame = cap.read()
    frame = cv2.flip(frame, 1)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    #set the length of motion necessary to perform action
    height, width, channels = frame.shape
    motion_length = width / 4
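
    # How these parameter dicts are typically consumed with standard OpenCV calls
    # (an illustrative sketch; p0, frame2 and gray2 are names introduced here,
    # not taken from the excerpt above).
    p0 = cv2.goodFeaturesToTrack(gray, mask=None, **feature_params)
    ret, frame2 = cap.read()
    gray2 = cv2.cvtColor(cv2.flip(frame2, 1), cv2.COLOR_BGR2GRAY)
    p1, status, err = cv2.calcOpticalFlowPyrLK(gray, gray2, p0, None, **lk_params)
    good_new = p1[status == 1]  # corner positions tracked into the new frame
    good_old = p0[status == 1]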
Example #21
def main():
    #parse command line arguments
    parser = argparse.ArgumentParser(description='Initialize autoturret.')
    parser.add_argument('target_class',
                        type=str,
                        help='Pick a target class from the list')
    #parser.add_argument('pin_file', type=str, help='file path to file containing pin info')
    parser.add_argument(
        '--fire',
        action='store_true',
        default=False,
        help='just track target instead of automatically firing',
        dest='fire')
    parser.add_argument('--showStream',
                        action='store_true',
                        default=False,
                        help="if you want to see it work.")

    results = parser.parse_args()
    USER_TARGET = results.target_class
    PIN_FILEPATH = 'pin_loc.txt'  #results.pin_file
    FIRE = results.fire
    SHOW_STREAM = results.showStream
    ACTIVE = True

    #import from file global constant pin locations
    pin_dict = Utils.parse_pinfile(PIN_FILEPATH)

    #initialize drivers
    print('Initializing Drivers....')
    m1 = Drivers.Motor1(pin_dict['M1A1'], pin_dict['M1A2'], pin_dict['M1B1'],
                        pin_dict['M1B2'])
    m2 = Drivers.Motor2(enable=pin_dict['en2'],
                        direction=pin_dict['dirpin2'],
                        pulse=pin_dict['pul2'])
    launcher = Drivers.Launcher(pin_dict['WaterPin'], pin_dict['AirPin'], 1,
                                .4)

    #initialize interface
    print("Setting up interface...")
    control = Interface.CannonControl(m1, m2, launcher)

    # import the correct version of tensorflow based on what's installed and
    # build an interpreter from the .tflite file
    if importlib.util.find_spec('tflite_runtime'):
        import tflite_runtime.interpreter as tflite
        interpreter = tflite.Interpreter(model_path='model/detect.tflite')
    else:
        import tensorflow as tf
        interpreter = tf.lite.Interpreter(model_path='model/detect.tflite')

    interpreter.allocate_tensors()

    #load labels
    with open('model/labelmap.txt', 'r') as f:
        labels = [line.strip() for line in f.readlines()]

    #initialize detection model
    print("Starting detection model...")
    model = Detection.TargetStream(USER_TARGET, interpreter)
    model.start()
    time.sleep(2)
    print("Model ready...")

    #define autotarget functionality

    #toss that in a loop. consider using threads
    while ACTIVE:
        if SHOW_STREAM:
            model.show_frame()

        if model.object_detected:
            location = model.target_location
            h, v, dist = Utils.auto_target((location))
            Utils.move(h, v, dist, control, 3)
            time.sleep(1)
Example #22

def getArg(ind, defValue):
    if ind < len(args):
        return args[ind]
    return defValue


diaPath = getArg(0, defDiaPath)
policyPath = getArg(1, defPolicyPath)

diaExposure = afwImage.ExposureF()
diaExposure.readFits(diaPath)
diaMaskedImage = diaExposure.getMaskedImage()
diaWCS = diaExposure.getWcs()

policy = lsst.pex.policy.Policy.createPolicy(policyPath)
if options.debugIO:
    policy.set("debugIO", True)

if options.verbosity > 0:
    print "Verbosity =", options.verbosity
    lsst.pex.logging.Trace_setVerbosity("lsst.detection", 9)

diaSources = Detection.detection(diaExposure, policy, 0)

for i in range(len(diaSources)):
    diaSource_i = diaSources[i]
    print diaSource_i.getColc(), " ", diaSource_i.getRowc(), " -> ", diaSource_i.getRa(), " ", diaSource_i.getDec()
Example #23
    )

    res = trainTicket.trainTicket(result)
    res = res.res
    res = [{'text': res[key], 'name': key, 'box': {}} for key in res]

    return res


if __name__ == '__main__':
    # threaded=False, processes=3
    runAngel = False
    path_model = r"./models/invoice_VAT.pb"
    path_craft_model = r"./models/weight.pb"
    path_direct_model = r""
    if runAngel:
        path_direct_model = r"Angle-model.pb"
    else:
        path_direct_model = None

    path_pbtxt = r"./labels_pbtxt/invoiceDetection.pbtxt"  # label file to load

    NUM_CLASSES = 35  # number of classes
    Detection = detect.invoiceDetection(path_model, path_craft_model,
                                        path_pbtxt, NUM_CLASSES,
                                        path_direct_model)  # build the detection predictor

    path_craft_model = r"./weight.pb"
    Detection_qdb = detect_qdb.Detection_craft(path_craft_model)

    app.run(host='0.0.0.0', port=7799, threaded=True)
Example #24
class CellFocusAuxillary(ZStackData):
    """
    Collects images at different focal planes of the cell.

    Records the xy position, objective_height, and img_folder in a pandas DataFrame (exported to csv).

    Images are organized into folders based on the time_data of acquisition.
    """

    _focus_dir = os.getcwd() + "\\focus_data"
    _temp_file = _focus_dir + "\\cell_data.csv"
    file_prefix = "Cell"


def get_images(fc, im):

    while True:
        fc.quickcheck()
        time.sleep(1.5)
        im.start_acquisition()


if __name__ == "__main__":
    CENTER = [3730, -1957]

    hd = CESystems.NikonEclipseTi()
    hd.start_system()
    det = Detection.CellDetector(hd)
    mov = Detection.Mover(CENTER)
    fc = Detection.FocusGetter(det, mov)
Example #25
    def sniffSlaac(self, buf, getInterface):
        eth = EthDecoder().decode(buf)
        ethChild = eth.child()
        ethChild2 = ethChild.child()
        try:
            if ethChild2.get_ip_protocol_number() == 58:
                destination_MAC_address = eth.get_ether_dhost()
                source_MAC_address = eth.get_ether_shost()
                source_MAC_address_final = ""
                destination_MAC_address_final = ""
                override_flag = False
                router_flag = False
                router_lifetime = "False"
                for x in range(6):
                    temp_decimal = source_MAC_address[x]
                    temp_hex = hex(temp_decimal)
                    source_MAC_address_final = source_MAC_address_final + temp_hex[
                        2:] + ":"
                    temp_decimal = destination_MAC_address[x]
                    temp_hex = hex(temp_decimal)
                    destination_MAC_address_final = destination_MAC_address_final + temp_hex[
                        2:] + ":"
                source_MAC_address_final = source_MAC_address_final[:-1].zfill(
                    2)
                destination_MAC_address_final = destination_MAC_address_final[:
                                                                              -1]
                target_link_layer_address = ""
                packetData = (ethChild2.get_originating_packet_data())
                packetHex = []
                payloadHex = []
                for data in packetData:
                    packetHex.append(hex(data))
                source_link_layer_address = ""
                target_address = ""
                ip_source_address = ethChild.get_source_address()
                ip_destination_address = ethChild.get_destination_address()
                ndp_message_number = ethChild2.get_type()
                contains_source, offset = self.check_ipv6_options(packetHex)

                if str(ndp_message_number) == "134":  #Router Advertisement
                    if str(contains_source) == "true-source":
                        for x in range(6):
                            source_link_layer_address = source_link_layer_address + packetHex[
                                x + offset + 1][2:].zfill(2) + ":"
                        target_address = "n/a"
                        source_link_layer_address = source_link_layer_address[:
                                                                              -1]
                        target_link_layer_address = "n/a"
                    else:
                        source_link_layer_address = "n/a"

                    payload_byte = ethChild2.child().get_bytes()
                    for payload_data in payload_byte:
                        payloadHex.append(hex(payload_data))
                    router_lifetime = payloadHex[2][2:] + payloadHex[3][2:]

                elif str(ndp_message_number) == "135":  #Neighbor Solicitation
                    for x in range(16):
                        target_address = target_address + packetHex[x][
                            2:].zfill(2)
                        if (x > 0):
                            if x % 2 != 0:
                                target_address = target_address + ":"
                    target_address = target_address[:-1]
                    target_link_layer_address = "n/a"
                    if str(contains_source) == "true-source":
                        for x in range(6):
                            source_link_layer_address = source_link_layer_address + packetHex[
                                x + offset + 1][2:].zfill(2) + ":"
                        source_link_layer_address = source_link_layer_address[:
                                                                              -1]
                    else:
                        source_link_layer_address = "n/a"

                elif str(ndp_message_number) == "136":  #Neighbor Advertisement
                    flags = hex(ethChild2.child().get_bytes()[0:1][0])
                    if flags == "0xa0":
                        override_flag = True
                        router_flag = True

                    if str(contains_source
                           ) == "true-source" and flags == "0xa0":
                        for x in range(6):
                            target_link_layer_address = target_link_layer_address + packetHex[
                                1 + offset + x][2:].zfill(2) + ":"
                        target_link_layer_address = target_link_layer_address[:
                                                                              -1]

                    else:
                        target_link_layer_address = "n/a"
                    for x in range(16):
                        target_address = target_address + packetHex[x][
                            2:].zfill(2)
                        if (x > 0):
                            if x % 2 != 0:
                                target_address = target_address + ":"
                    target_address = target_address[:-1]
                vlanId = self.check_vlanId(buf)

                message_details = SLAAC_Message.SLAAC_Message(
                    vlanId, ndp_message_number, source_link_layer_address,
                    ip_source_address, ip_destination_address,
                    source_MAC_address_final, destination_MAC_address_final,
                    target_address, target_link_layer_address, override_flag,
                    router_flag, router_lifetime, getInterface)

                detect_module = Detection.Detection()
                if str(message_details.get_ndp_message_number()
                       ) == "134":  #Last Hop Router Attack
                    detect_module.detect_rogue_advertisement(message_details)

                elif str(message_details.get_ndp_message_number()
                         ) == "135":  #Dos in DAD
                    if str(message_details.get_ip_source_address()) == "::":
                        detect_module.detect_dos_dad(message_details)

                elif str(message_details.get_ndp_message_number()
                         ) == "136":  #Neighbor Spoofing
                    detect_module.detect_neighbor_spoofing((message_details))

        except Exception:
            pass
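
    # The manual hex-formatting loops above could be written more compactly; a
    # hypothetical helper method added here for illustration, assuming the MAC
    # addresses arrive as sequences of integer byte values (as impacket's
    # get_ether_shost()/get_ether_dhost() return them).
    def _format_mac(self, byte_seq):
        # e.g. [0x00, 0x1a, 0x2b, ...] -> '00:1a:2b:...'
        return ":".join("{:02x}".format(b) for b in byte_seq)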
Example #26
    # runs the code every third frame to reduce load and make the laser slightly less jittery.
    if frameCount % 3 == 0:
        # Black background the game is drawn on
        img = cv2.imread('testSmall.jpg')

        # Blockers for the play area
        #middleBlocker = Blocker.Blocker(middleX - 21, middleY - 19, middleX - 20, middleY + 21, middleX + 19, middleY + 20,
                        #middleX + 19, middleY - 21, img)

        # HSV and blur
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        blur = ImageProcessingMethods.ourMedianBlur(hsv)
        #blur = cv2.medianBlur(hsv, 13)

        # Detection. Should return a box.
        red_boxes = Detection.detectionRed(blur)
        blue_boxes = Detection.detectionBlue(blur)

        # Create mirrors and blockers from the detected boxes
        mirrorBLockerList = []
        #red_boxes.append(middleBlocker)
        #middleBlocker.drawBlocker(img)

        for i in range(len(red_boxes)):
            point1, point2, point3, point4 = red_boxes[i]
            x1, y1 = point1
            x2, y2 = point2
            x3, y3 = point3
            x4, y4 = point4
            tempBlocker = Blocker.Blocker(x1, y1, x2, y2, x3, y3, x4, y4, frame)
            mirrorBLockerList.append(tempBlocker)
Example #27
    fracAVtoV = -0.016
    Mzp = 20.
    geffAV = {
        "gu11": fracAVtoV,
        "gd11": -fracAVtoV,
        "gd22": -fracAVtoV,
        "gl11": -fracAVtoV,
        "gl22": -fracAVtoV
    }

    # Let us use a bit more the inner functions and make the decay limits for a combined V and AV operator for CHARM
    exp = 'charm'
    Delini = 0.1
    Delf = 5
    xeff, Limeff = de.MakeEffLimits(exp, lim.charm_decay.mx_ini,
                                    lim.charm_decay.lim_ini, geffem, "V",
                                    Delini)
    xprod, Nprod = br.NProd(Delini, exp, geffem, "V")  #
    xifV, EffLimV = de.ShiftLimDel(xprod, Nprod, xeff, Limeff, Delf, Delini,
                                   exp, "V", geffem)
    xeff, Limeff = de.MakeEffLimits(exp, lim.charm_decay.mx_ini,
                                    lim.charm_decay.lim_ini, geffAV, "AV",
                                    Delini)
    xprod, Nprod = br.NProd(Delini, exp, geffAV, "AV")  #
    xifAV, EffLimAV = de.ShiftLimDel(xprod, Nprod, xeff, Limeff, Delf, Delini,
                                     exp, ("AV", "V"), (geffAV, geffem))
    xif, EffLim = uf.CombineLimits((xifV, xifAV), (EffLimV, EffLimAV))
    Dexp, Lexp, beamtype = de.GeomForExp(
        exp)  # Need to clean the above functions
    EffLimFin_Low = de.ShortLamLimit(xif, Delf, EffLim, Dexp, Lexp, beamtype,
                                     (geffAV, geffem), ("AV", "V"))
Example #28
def readVideo(RGB_videoname, th_videoname, outputFolder):
    time_ori = getOriginaltime(RGB_videoname, th_videoname)
    # thermal processing
    frame_IR = imageio.get_reader(th_videoname, 'ffmpeg')
    IR_nframes = frame_IR._meta['nframes']
    # RGB processing
    frame_RGB = imageio.get_reader(RGB_videoname, 'ffmpeg')
    RGB_nframes = frame_RGB._meta['nframes']
    for th_num in range(1195, 1197):
        sec_add = th_num / frame_IR._meta['fps']
        #sec_add = 13.265
        RGB_num = int((float(th_num) / IR_nframes) * RGB_nframes)
        image_IR = frame_IR.get_data(th_num)  #480*640*3
        image_RGB = frame_RGB.get_data(RGB_num)  #1080*1920*3
        # plt.imshow(image_RGB)
        # plt.show()
        img_temp = image_RGB.copy()
        bounding_boxes_th, bounding_boxes_RGB, human = Detection.Integrate(
            image_IR, image_RGB)
        bounding_boxes_B, object = Detection.hsv_thresholding(image_RGB)
        time_final = time_ori + datetime.timedelta(
            hours=0, minutes=0, seconds=sec_add)
        time_final_string = getTimestr(time_final)

        if human is True:
            ########################################################
            #############################thermal#####################################
            pixel_T = np.zeros((bounding_boxes_th.shape[0], 2))  #n*2
            for i in range(bounding_boxes_th.shape[0]):
                # img_temp = cv2.rectangle(img_temp, (bounding_boxes_th[i][0], bounding_boxes_th[i][1]),
                #                          (bounding_boxes_th[i][0] + bounding_boxes_th[i][2],
                #                           bounding_boxes_th[i][1] + bounding_boxes_th[i][3]),
                #                          (255, 0, 0), 1)
                # plt.imshow(img_temp)
                # plt.show()
                pixel_T[i, :] = np.array(
                    (bounding_boxes_th[i, 2] / 2 + bounding_boxes_th[i, 0],
                     bounding_boxes_th[i, 3] + bounding_boxes_th[i, 1]))
                text_file_th.write(
                    "%s %s %s %s" %
                    (time_final_string, str(pixel_T[i, 0]), str(
                        pixel_T[i, 1]), "H\n"))  #thermal folder + txt'H'
            #plt.savefig()
            ########################################################
            #################################RGB#####################################
            pixel_R = np.zeros((bounding_boxes_RGB.shape[0], 2))  # n*2
            for i in range(bounding_boxes_RGB.shape[0]):
                bounding_boxes_RGB[i][0] += 106
                bounding_boxes_RGB *= 1080 / 480
                pixel_R[i, :] = np.array(
                    (bounding_boxes_RGB[i, 2] / 2 + bounding_boxes_RGB[i, 0],
                     bounding_boxes_RGB[i, 3] + bounding_boxes_RGB[i, 1]))
                text_file_RGB.write(
                    "%s %s %s %s" %
                    (time_final_string, str(pixel_R[i, 0]), str(
                        pixel_R[i, 1]), "H\n"))  #RGB folder + txt'H'
                # img_temp = cv2.rectangle(img_temp, (bounding_boxes_RGB[i][0], bounding_boxes_RGB[i][1]),
                #                          (bounding_boxes_RGB[i][0] + bounding_boxes_RGB[i][2],
                #                           bounding_boxes_RGB[i][1] + bounding_boxes_RGB[i][3]),
                #                          (255, 0, 0), 1)
                # plt.imshow(img_temp)
                # plt.show()
            #plt.savefig()

        if object is True:
            pixel_R_B = np.zeros((bounding_boxes_B.shape[0], 2))  # n*2
            for i in range(bounding_boxes_B.shape[0]):
                pixel_R_B[i, :] = np.array(
                    (bounding_boxes_B[i, 2] / 2 + bounding_boxes_B[i, 0],
                     bounding_boxes_B[i, 3] / 2 + bounding_boxes_B[i, 1]))
                text_file_th.write(
                    "%s %s %s %s" %
                    (time_final_string, str(pixel_R_B[i, 0]),
                     str(pixel_R_B[i, 1]), "H\n"))  #RGB folder + txt'B'
Example #29
points_angular = Data.rectangular_to_angular(averaged_points,
                                             pixel_dimensions_m)

plt.figure('Radius Angle Data (pre-error min)'), plt.xlabel('angle [radians]'), plt.ylabel('r [m]'), plt.grid(True),\
    plt.scatter(Data.theta_in_points(points_angular), Data.r_in_points(points_angular), s=0.2)

points_angular = Data.minimize_point_error(points_angular)

plt.figure('Radius Angle Data (post-error min)'), plt.xlabel('angle [radians]'), plt.ylabel('r [m]'), plt.grid(True),\
    plt.scatter(Data.theta_in_points(points_angular), Data.r_in_points(points_angular), s=0.2)

points_rtime = Data.angular_to_rtime(points_angular, angular_velocity)

plt.figure('Radius Time Data'), plt.xlabel('t [s]'), plt.ylabel('r [m]'), plt.grid(True),\
    plt.scatter(Data.t_in_points(points_rtime), Data.r_in_points(points_rtime), s=0.2)

plt.show()

stylus = Conversion.Stylus(points_rtime)

plt.figure('Velocity'), plt.xlabel('t [s]'), plt.ylabel('v [m/s]'), \
    plt.grid(True), plt.scatter(stylus.get_time_axis(), stylus.get_velocity_axis(), s=0.2)

grooves = Detection.points_to_grooves(hist, bin_edges, inclusion_threshold,
                                      Data.points_to_tuples(points_pixel))

for i, groove in enumerate(grooves):
    plt.scatter(groove.get_theta_axis(), groove.get_rho_axis(),
                s=0.2), plt.show()
    # irregular_audio.append(Conversion.Stylus(groove, i + 1))
Example #30
                    default='11:39:0')

args = parser.parse_args()
set_time = [args.morning, args.afternoon, args.evening]
name = ''

while True:
    files = os.listdir('input')
    preProcess = PreProcess()
    index_new = preProcess.nameImage()
    d = datetime.now()
    cur_time = str(d.hour) + ':' + str(d.minute) + ':' + str(d.second)
    #capture
    for i in range(len(set_time)):
        if cur_time == set_time[i]:
            preProcess.captureImage()
            
    #resize & name
    if len(files)>0:
        for file in files:
            sleep(3)
            test = PreProcess(file=file, name=index_new)
            test.preImages()
            index_new += 1
    if index_new > 0:
        detect = Detection()
        detect.analyzing()
Example #31
import Detection

import sys
import rospy
if __name__ == '__main__':
    try:
        print("----------------------------------------------------------")
        print("Welcome to the MoveIt MoveGroup Python Interface Nursing")
        print("----------------------------------------------------------")
        print("Press Ctrl-D to exit at any time")
        print("")
        input("============ Press `Enter` to begin the process ...")
        tutorial = MoveGroupPythonInterfaceNursing()

        input("============ Go to the door knob ...")
        node = Detection(0.9, 1, 10)
        #patrol_test.move(4.61152219772, -5.15782785416, -0.37051665948, 0.928825820619)

        input("============ Standby position ...")
        tutorial.go_to_joint_state()

        input("============ Stretch arm ...")

        input("gripper_hold")
        tutorial.gripper_hold()

        input("gripper_release")
        tutorial.gripper_release()

        input(
            "============ Press `Enter` to execute a movement using a pose goal ..."
        )