Example #1
def getGame():
	'''Getting user inputs, creating a subfolder for the desired video'''

	game = input('Enter Choice of Game\n1.Kabaddi\n2.Football\n3.Cricket\n')
	if game not in ('1', '2', '3'):	#a substring test like "game in '123'" would wrongly accept ''
		input('Wrong Choice'); exit()
	vidName, extension = input('Enter Name of Video File: ').split('.')

	path = f'{os.getcwd()}/{vidName}'	#Path of the subfolder created for this video
	try:	#Creating a folder for the given video
		shutil.rmtree(path)
	except FileNotFoundError:	#Folder didn't exist yet
		pass
	finally:
		os.mkdir(path); os.chdir(path)

	#Opening video file; cv2.VideoCapture doesn't raise on a missing file, so check isOpened()
	vid = cv2.VideoCapture(f'{path}.{extension}')	#Video used for detecting the location of the scoreboard
	video = cv2.VideoCapture(f'{path}.{extension}')	#Video used for actual extraction
	if not vid.isOpened():
		input('No such file'); exit()

	D.load(game)
	main(vid, video, game)
	E.combine(vidName, extension, timeStamps, path)
Example #2
def main():
    global task_end_flag, task, DEBUG, task_running

    detector = Detector()

    with SerialPort('/dev/serial0', 9600, None) as sp:
        while True:
            if sp.receiveData():
                # Reset the detector
                detector.reset()
                try:
                    detect_type = sp.getReceive()['byte'][0]
                    if DEBUG:
                        print(detect_type)
                    # Wait / idle mode
                    if detect_type == 0:
                        if task is not None and task_running:
                            task_end_flag = True
                            task.join()
                            task_end_flag = False
                        task = Thread(target=waiting)
                        task.start()
                    # 3 numeric QR codes
                    elif detect_type == 1:
                        if task is not None and task_running:
                            task_end_flag = True
                            task.join()
                            task_end_flag = False
                        task = Thread(target=detect_qrcode, args=[detector, sp,])
                        task.start()
                    # 3 color blocks
                    elif detect_type == 2:
                        if task is not None and task_running:
                            task_end_flag = True
                            task.join()
                            task_end_flag = False
                        task = Thread(target=detect_color, args=[detector, sp,])
                        task.start()
                    # 3+3 numeric QR codes
                    elif detect_type == 3:
                        if task is not None and task_running:
                            task_end_flag = True
                            task.join()
                            task_end_flag = False
                        task = Thread(target=detect_qrcode, args=[detector, sp, 1])
                        task.start()
                    # 3 color barcodes
                    elif detect_type == 4:
                        if task is not None and task_running:
                            task_end_flag = True
                            task.join()
                            task_end_flag = False
                        task = Thread(target=detect_qrcode, args=[detector, sp, 2])
                        task.start()
                except (KeyError, IndexError):  # malformed packet: ignore it
                    pass
            else:
                if not task_running and DISPLAY:
                    task = Thread(target=display)
                    task.start()
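The stop-join-restart sequence above is repeated verbatim for every detect_type. A minimal refactoring sketch, relying on the same task, task_running, and task_end_flag globals the example already uses:

from threading import Thread

def switch_task(target, args=()):
    # Ask the current worker (which polls task_end_flag) to stop, wait for it,
    # then start the new worker thread.
    global task, task_end_flag
    if task is not None and task_running:
        task_end_flag = True
        task.join()
        task_end_flag = False
    task = Thread(target=target, args=args)
    task.start()

Each branch of main() would then reduce to a single call such as switch_task(detect_qrcode, (detector, sp, 1)).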
Example #3
def main(vid, video, game):
	'''Function to locate scoreboard, crop the image accordingly and call the function corresponding to the game every second'''

	log = open('events.txt', 'w')
	frRt = int(video.get(cv2.CAP_PROP_FPS))	#Framerate of the video
	
	i, k, box = 0, 0, []
	while(vid.isOpened()):	#Looping to locate the scoreboard
		ret, frame = vid.read()
		if not ret:
			break
		if i % (30*frRt) == 0:	#Checking for the scoreboard every 30s
			cv2.imwrite('Image.png',frame)
			b = D.detect()
			if b is not None:
				box.extend(b)	#Accumulating the locations of detected scoreboards to take the average over them
				k += 1
				if k == 10:	#Maximum number of scoreboards to take average over
					break
		i += 1
	if not box:
		input('Couldn\'t find scoreboard in the video'); exit()
	else:
		for i in range(1, k):	#Averaging over scoreboard locations
			for j in range(4):
				box[j] += box[4*i + j]
		box = list(map(lambda x: x//k, box[:4]))
	vid.release()
	
	gameFunc = {'1': kabaddi, '2': football, '3': cricket}[game]	#Per-game event handler
	events = {
			'1': {'Super Raids': 0},
			'2': {'Goals Scored by the Home Team': 0, 'Goals Scored by the Away Team': 0},
			'3': {'Wickets': 0, 'Sixes': 0, 'Fours': 0, '3s or 5s': 0}
			}[game]	#Event counters for the chosen game
	frNo = 0	#Frame number
	preScore = (0, 0)	#To keep track of score in the previous frame

	while(video.isOpened()):	#Looping over frames of the video
		ret, frame = video.read()
		if not ret:
			break
		if frNo % frRt == 0:	#Updating score every second
			cv2.imwrite('Image.png',frame)
			if D.detect() is not None:
				cv2.imwrite('Scoreboard.png', frame[box[1]:box[3], box[0]:box[2]])	#Saving the cropped scoreboard image
				score = O.ocr(preScore)
				gameFunc(score, preScore, frNo//frRt, log, events)
				preScore = score
		frNo += 1
	video.release()
	cv2.destroyAllWindows()

	print(events)
	for i,j in events.items():
		log.write(f'\n{i}: {j}')
	log.close()
Example #4
def dthvsth():
    thvals = np.linspace(0,np.pi,200)
    Bdlvvals_fine = []
    Bdlvvals_coarse = []
    Bdlyvals_fine = []
    Bdlyvals_coarse = []
    
    for th in thvals:
        rhat = np.array([np.sin(th),0,np.cos(th)])
        yhat = np.array([0,1,0])
        vhat = np.cross(rhat,yhat)
        dr = 1.0
        ivvals_fine = []
        ivvals_coarse = []
        iyvals_fine = []
        iyvals_coarse = []
        rvals = np.arange(0,1500,dr)
        for r in rvals:
            rvec = r*rhat/100.0
    
            Params.UseFineBField = False
            B = Detector.getBField(rvec[0],rvec[1],rvec[2])
            Bv = np.dot(vhat,B)
            ivvals_coarse.append(Bv)
            iyvals_coarse.append(B[1])
    
            Params.UseFineBField = True
            B = Detector.getBField(rvec[0],rvec[1],rvec[2])
            Bv = np.dot(vhat,B)
            ivvals_fine.append(Bv)
            iyvals_fine.append(B[1])
    
        intv_coarse = 2*np.sum(ivvals_coarse)-ivvals_coarse[0]-ivvals_coarse[-1]
        Bdlvvals_coarse.append(intv_coarse * dr/100/2)
        intv_fine = 2*np.sum(ivvals_fine)-ivvals_fine[0]-ivvals_fine[-1]
        Bdlvvals_fine.append(intv_fine * dr/100/2)
        inty_coarse = 2*np.sum(iyvals_coarse)-iyvals_coarse[0]-iyvals_coarse[-1]
        Bdlyvals_coarse.append(inty_coarse * dr/100/2)
        inty_fine = 2*np.sum(iyvals_fine)-iyvals_fine[0]-iyvals_fine[-1]
        Bdlyvals_fine.append(inty_fine * dr/100/2)
    
    plt.figure(2)
    plt.plot(thvals*180/np.pi,Bdlvvals_fine, '-b', linewidth=2, label='1 cm field')
    plt.plot(thvals*180/np.pi,Bdlvvals_coarse, '--r', linewidth=2, label='10 cm field')
    plt.plot(thvals*180/np.pi,Bdlyvals_fine, '-g', linewidth=2)
    plt.plot(thvals*180/np.pi,Bdlyvals_coarse, '--y', linewidth=2)
    plt.xlabel(r'$\theta$ (deg)')
    plt.title(r'$\int$ $B dl$ ($\phi=0$ plane)')
    plt.legend(fontsize='small')
    plt.text(60,0.35,"y direction")
    plt.text(60,9.2,"xz direction")
    
    plt.savefig('/home/users/bemarsh/public_html/backup/B-integrator/fine_vs_coarse/intBdl_allTheta_interp.png')

    plt.show()
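The 2*np.sum(...) - first - last expressions above implement the composite trapezoidal rule, integral ≈ dr*(f0/2 + f1 + ... + f(n-1)/2). An equivalent sketch using numpy directly (keeping the /100 cm-to-m conversion from the example):

import numpy as np

def trapz_integral(vals, dr):
    # np.trapz evaluates dr*(vals[0]/2 + vals[1] + ... + vals[-1]/2),
    # identical to (2*sum(vals) - vals[0] - vals[-1]) * dr/2
    return np.trapz(vals, dx=dr) / 100.0

# e.g. Bdlvvals_fine.append(trapz_integral(ivvals_fine, dr))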
Example #5
 def load(self):
     try:
         ruleJson = json.load(open(self.rfname))
         #self.program = ruleJson['program']
         if self._program != ruleJson['program']:
             return False
         if ('source' in ruleJson) != self.source:
             return False
         if 'source' in ruleJson and ruleJson['source'] == False:
             return False
         self.debug = 'debug' in ruleJson
         #self.url = self.scanner.urlroot + ruleJson['url']
         self.url = os.path.join(self.scanner.urlroot, ruleJson['url'])
         self.xurl = ruleJson['url']
         self.type = ruleJson['type']
         self.severity = ruleJson['severity']
         self.payload = ruleJson['payload']
         self.description = ruleJson['description']
         self.suggestion = ruleJson['suggestion']
         self.file = ruleJson['file']
         if self.source == False:
             self.method = ruleJson['method']
             if ruleJson['cookies']:
                 self.cookies = self.scanner.cookies
             else:
                 self.cookies = {}
             if ruleJson['headers']:
                 self.headers = self.scanner.headers
             else:
                 self.headers = {}
         if ruleJson['detector'] == 'Accurate':
             self.detector = Detector.Accurate(self)
             self.md5 = ruleJson['md5']
         elif ruleJson['detector'] == 'Fuzzy':
             self.detector = Detector.Fuzzy(self)
             self.basis = ruleJson['basis']
             if self.basis == 'keyword':
                 self.keyword = ruleJson['keyword']
         elif ruleJson['detector'] == 'Aduit':
             self.detector = Detector.Aduit(self)
             self.basis = ruleJson['basis']
             if self.basis == 'keyword':
                 self.keyword = ruleJson['keyword']
         elif ruleJson['detector'] == 'Custom':
             self.detector = getattr(Detector, ruleJson['detectorx'])(self)  # avoids exec
             self.customdata = ruleJson['customdata']
         return True
     except Exception:
         print >> sys.stderr, 'Rule error!', self.rfname, sys.exc_info()
         return False
Example #6
    def __init__(self, **options):

        if 'visualize' in options: self.visualize = options['visualize']
        else: self.visualize = False

        if 'wait' in options: self.wait = options['wait']
        else: self.wait = False

        # setup of the random number generator
        self.randEngine = G4.Ranlux64Engine()
        #    te = G4.HepRandom.getTheSeeds()
        G4.HepRandom.setTheEngine(self.randEngine)

        # creation/registering of the matter interaction physics
        G4.gRunManager.SetUserInitialization(G4.G4physicslists.LBE())

        # creation/registering of the detector constructor
        import Detector
        self.setup = Detector.Constructor()
        self.crystal = self.setup.calorimeter.logical
        self.hpge = Detector.MySD()
        self.crystal.SetSensitiveDetector(self.hpge)
        G4.gRunManager.SetUserInitialization(self.setup)

        # creation/registering of the source constructor
        import Generator
        self.hist = ROOT.TH1D("hist", "Energy deposit [keV]", 1500, 0, 3000.0)
        self.uaction = Generator.MyEventAction(self.hpge, self.hist)
        self.myPGA = Generator.MyPrimaryGeneratorAction()
        G4.gRunManager.SetUserAction(self.myPGA)
        G4.gRunManager.SetUserAction(self.uaction)

        G4.gRunManager.Initialize()
        #    G4.gVisManager.Initialize()
        #    G4.gUImanager.Initialize()
        if self.visualize:  # the default False previously passed the "!= None" test
            G4.gApplyUICommand("/vis/open %s 1600x1200-0+0" % self.visualize)
            G4.gApplyUICommand("/vis/scene/create")
            #      G4.gApplyUICommand("/vis/viewer/set/style surface")
            G4.gApplyUICommand("/vis/viewer/set/style wireframe")
            G4.gApplyUICommand("/vis/viewer/set/viewpointThetaPhi 90. 0.")
            G4.gApplyUICommand("/vis/scene/add/volume")
            G4.gApplyUICommand("/vis/sceneHandler/attach")
            G4.gApplyUICommand("/tracking/storeTrajectory 1")
            G4.gApplyUICommand("/vis/scene/add/trajectories")
            G4.gApplyUICommand("/vis/scene/endOfEventAction accumulate")
            G4.gApplyUICommand("/vis/enable true")
        print self.visualize
Example #7
def camera_setup_all(cameras, engines, labels):
    disabled_list = []
    for name, settings in cameras.items():
        camera = None
        try:
            print('Setup camera: %s' % name)
            if not settings["enabled"]:
                print('Camera: %s is disabled' % name)
                disabled_list.append(name)
                continue

            settings["mqtt_topic_image"] += name
            settings["mqtt_topic_detection"] += name

            # To get the text correctly placed in the detection,
            # create One detection per camera
            # Setup the detector
            settings["detector"] = Detector(engines[settings["model"]],
                                            settings["height"], labels)

            camera = Camera(name, settings["camera_url"],
                            settings["camera_snapshot"], settings["width"],
                            settings["height"])
            camera.open()
            settings["camera"] = camera
        except Exception:
            print('Setup camera: %s FAILED' % name)
            if camera:
                camera.close()

    for key in disabled_list:
        del cameras[key]
Example #8
    def make_answer(self, execute_string, name):
        #print(execute_string)
        try:
            self.cur.execute(execute_string)
            detector_data = self.cur.fetchall()
            self.conn.commit()
            hour_dict = {hour: [] for hour in range(1, 25)}

            #fill hour_dict of an object with data from the DB
            for detector in detector_data:
                measurements = detector[-24:]
                #the counter must restart for every detector; the original
                #reused one counter, so only the first detector was read
                for hour in range(1, 25):
                    if measurements[hour - 1] is not None:
                        hour_dict[hour].append(measurements[hour - 1])

            #compute the average hourly temperature of an object
            for time_stamp in hour_dict:
                if hour_dict[time_stamp]:
                    hour_dict[time_stamp] = round(
                        sum(hour_dict[time_stamp]) /
                        len(hour_dict[time_stamp]), 3)
            return Detector.Detector(name, hour_dict)

        except Exception as e:
            print('Rise update error', e)
            self.conn.commit()
Example #9
 def __init__(self, fftsize:int):
     #self.socket=socket.socket(socket.AF_INET, socket.SOCK_STREAM)
     #self.socket.bind(('127.0.0.1', 4096))
     self.processor=Preprocessers.DataProcessor()
     self.fftsize=fftsize
     self.goodlist={'F3': [], 'FC5': [], 'T7': [], 'F7': [], 'P7': [], 'P8': [], 'AF4': [], 'O2': [], 'O1': [], 'T8': [], 'AF3': [], 'FC6': [], 'F4': [], 'F8': []}
     self.firstlist={}
     self.subprocess_args=["python", "C:/Users/Gaurav/Documents/GitHub/KineticEEG/Tools/EmotivDataGetter.py"]
     self.detector=Detector.AverageBasedDetector(4)
Example #10
    def __init__(self,
                 expDir,
                 atmFile,
                 hwpFile,
                 bandID,
                 theta=None,
                 writeFile=False):

        channelFile = expDir + "channels.txt"
        cameraFile = expDir + "camera.txt"
        opticsFile = expDir + "opticalChain.txt"

        #Imports detector data
        self.det = dt.Detector(channelFile, cameraFile, bandID)
        self.freqs = np.linspace(self.det.flo, self.det.fhi,
                                 400)  #Frequency array of the detector
        """Creating the Optical Chain"""
        self.elements = []  #List of optical elements

        self.elements.append(
            opt.OpticalElement("CMB", self.det, 2.725, {"Absorb": 1}))  #CMB
        self.elements.append(opt.loadAtm(atmFile, self.det))  #Atmosphere
        self.elements += opt.loadOpticalChain(opticsFile,
                                              self.det,
                                              theta=theta)  #Optical Chain
        self.elements.append(
            opt.OpticalElement("Detector", self.det, self.det.bath_temp,
                               {"Absorb": 1 - self.det.det_eff}))  #Detector

        #Finds HWP
        try:
            self.hwpIndex = [e.name for e in self.elements].index("HWP")
        except ValueError:
            self.hwpIndex = None  # guard: the code below would otherwise hit an unset attribute
            print "No HWP in Optical Chain"

        #Adds HWP curves
        fs, T, rho, _, _ = np.loadtxt(hwpFile, dtype=np.float, unpack=True)
        if self.hwpIndex is not None:
            self.elements[self.hwpIndex].updateParams({
                "Freqs": fs,
                "EffCurve": T,
                "IPCurve": rho
            })

        #Calculates conversion from pW to Kcmb
        aniSpec = lambda x: th.aniPowSpec(1, x, Tcmb)
        self.toKcmb = 1 / intg.quad(aniSpec, self.det.flo, self.det.fhi)[0]
        #Conversion from pW to KRJ
        self.toKRJ = 1 / (th.kB * self.det.band_center * self.det.fbw)

        #Propagates Unpolarized Spectrum through each element
        self.propSpectrum()

        self.geta2()
        self.getA2()
        self.geta4()
        self.getA4()
Example #11
    def __init__(self):

        self.x_offset = 0
        self.y_offset = 0
        self.coords_ind = 0
        self.x_locs = []
        self.y_locs = []
        self.screen = pygame.display.set_mode((0, 0), pygame.FULLSCREEN)
        #self.screen = pygame.display.set_mode((600,400))
        self.myKc = kc.KeyController(self.screen)
        self.detect = Detector.Detector()
        self.__calibrate()
Example #12
def main(args):
    '''Generate the input for the next network using PNet or RNet'''
    size = args.input_size
    batch_size = config.batches
    min_face_size = config.min_face
    stride = config.stride
    thresh = config.thresh
    #Model paths
    model_path = ['../model/PNet/', '../model/RNet/', '../model/ONet']
    if size == 12:
        net = 'PNet'
        save_size = 24
    elif size == 24:
        net = 'RNet'
        save_size = 48
    #Image data directory
    base_dir = '../data/WIDER_train/'
    #Directory for the processed images
    data_dir = '../data/%d' % (save_size)
    neg_dir = os.path.join(data_dir, 'negative')
    pos_dir = os.path.join(data_dir, 'positive')
    part_dir = os.path.join(data_dir, 'part')
    for dir_path in [neg_dir, pos_dir, part_dir]:
        if not os.path.exists(dir_path):
            os.makedirs(dir_path)
    detectors = [None, None, None]
    PNet = FcnDetector(P_Net, model_path[0])
    detectors[0] = PNet
    if net == 'RNet':
        RNet = Detector(R_Net, 24, batch_size[1], model_path[1])
        detectors[1] = RNet
    basedir = '../data/'
    filename = '../data/wider_face_train_bbx_gt.txt'
    #read_annotation (in utils) maps each image in the file to its boxes
    data = read_annotation(base_dir, filename)
    mtcnn_detector = MtcnnDetector(detectors,
                                   min_face_size=min_face_size,
                                   stride=stride,
                                   threshold=thresh)
    save_path = data_dir
    save_file = os.path.join(save_path, 'detections.pkl')
    if not os.path.exists(save_file):
        #Wrap data in an iterator
        print('Loading data')
        test_data = TestLoader(data['images'])
        detections, _ = mtcnn_detector.detect_face(test_data)  #renamed from "detectors" to avoid clobbering the detector list
        print('Detection finished')

        with open(save_file, 'wb') as f:
            pickle.dump(detections, f, 1)
    print('Generating images')
    save_hard_example(save_size, data, neg_dir, pos_dir, part_dir, save_path)
Example #13
    def encode(self):
        # loop over the image paths
        if not self.imagePaths:
            print("ImagePath is empty")
            return

        for (i, imagePath) in enumerate(self.imagePaths):
            # extract the person name from the image path
            print("[Encode_INFO] Processimg image {}/{}".format(
                i + 1, len(self.imagePaths)))
            name = imagePath.split(os.path.sep)[-2]

            # load the input image and convert it from BGR (OpenCV ordering)
            # to dlib ordering (RGB)
            image = cv2.imread(imagePath)
            rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            img_copy = rgb.copy()

            # detect the face and get the transformation matrix list
            face_list, M = Detector.detect_face(rgb)

            top = face_list[0]['top']
            bottom = face_list[0]['bottom']
            left = face_list[0]['left']
            right = face_list[0]['right']
            boxes = [(top, right, bottom, left)]

            # align the face with M
            rgb_aligned = cv2.warpAffine(
                img_copy, M[0], (img_copy.shape[1], img_copy.shape[0]))
            alignedFace = rgb_aligned[top:bottom, left:right, :]
            rgb[top:bottom, left:right, :] = alignedFace

            # compute the facial embedding for the face
            encodings = face_recognition.face_encodings(rgb, boxes)

            # loop over the encodings
            for encoding in encodings:
                # add each encoding + name to our set of known names and encodings
                self.knownEncodings.append(encoding)
                self.knownNames.append(name)

        # dump the facial encodings + names to disk
        print("[Encode_INFO] serializing encodings...")
        data = {"encodings": self.knownEncodings, "name": self.knownNames}
        with open(self.outputPath, "wb") as f:
            f.write(pickle.dumps(data))

        return self.outputPath
Example #14
def display():
    global hsv, task_end_flag, task_running, DISPLAY
    task_running = True
    detector = Detector()
    #cv2.namedWindow("cam", 1)
    #video = "http://*****:*****@192.168.43.1:8081/"
    #cam = cv2.VideoCapture(video)
    cv2.namedWindow("cam", cv2.WINDOW_NORMAL)
    cam = cv2.VideoCapture(0)
    cam.set(cv2.CAP_PROP_FRAME_WIDTH, 320)
    cam.set(cv2.CAP_PROP_FRAME_HEIGHT, 240)
    cv2.setMouseCallback("cam", mouse_click)
    result_last = {}
    while True:
        # Read the current frame
        ret, frame = cam.read()
        #gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        # Run detection on the frame
        detector.run(frame)
        if detector.status and result_last != detector.result:
            result_last = detector.result.copy()
            for result in detector.result:
                if detector.status == 1:
                    print(f"检测到二维码> 类别: {result['type']}, 内容: {result['content']}")
                else:
                    print(f"检测到色块> 颜色: {result['content']}, 大小: {result['size']}")
        cv2.imshow("cam", detector.img)
        # Press ESC to quit
        if (cv2.waitKey(5) == 27):
            DISPLAY = False
            break
        if task_end_flag:
            break
    cam.release()
    cv2.destroyAllWindows()
    task_running = False
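mouse_click is defined elsewhere in this project; a minimal sketch of such a callback, assuming its job is to print the HSV value under the cursor (the hsv global is refreshed every frame above, so initialize hsv = None at module level):

import cv2

def mouse_click(event, x, y, flags, param):
    # standard OpenCV mouse-callback signature
    if event == cv2.EVENT_LBUTTONDOWN and hsv is not None:
        h, s, v = hsv[y, x]  # row index is y, column index is x
        print(f"HSV at ({x}, {y}): ({h}, {s}, {v})")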
Example #15
def setup():
    global detector, vs, frame_rate, prev, shotDetector, ppm, cp5, heightField, distField, backboard, rim, courtFloor, rects
    size(width, height)

    detector = Detector.Detector()
    vs = VideoStream(src=0).start()
    shotDetector = shot.Shot(2.35)
    frame_rate = 20
    prev = 0
    ppm = height / 5  # pixels per meter
    backboard = Rect(width - BACKBOARD_WIDTH * ppm - 5, height - TOP_BACKBOARD * ppm, BACKBOARD_WIDTH * ppm,
                     BACKBOARD_HEIGHT * ppm)
    rim = Rect(width - BACKBOARD_WIDTH * ppm - 5 - HOOP_D * ppm, height - HOOP_HEIGHT * ppm, HOOP_D * ppm, 5)
    courtFloor = Rect(0, height - 10, width, 10)
    rects = [backboard, rim, courtFloor]
Example #16
def getScatterAnglePDG(x, dt):
    # return thetax, thetay, yx, yy
    # given a velocity and timestep, compute a
    # deflection angle and deviation due to multiple scattering
    # Update the position/velocity and return deflection
    #
    # x is a 6-element vector (x,y,z,px,py,pz)
    # returns deflection angles thetax, thetay and
    # displacements yx, yy in two orthogonal directions to p

    p = x[3:]
    magp = np.linalg.norm(p) # must be in MeV
    E = np.sqrt(magp**2 + Params.m**2)
    v = p/E
    beta = np.linalg.norm(v)

    dx = (beta*2.9979e-1) * dt  # in m
    
    ## in the following we generate a random deflection angle theta
    ## and transverse displacement y for the given momentum. This is taken
    ## from the PDG review chapter on the Passage of Particles through Matter

    mat = Detector.getMaterial(x[0],x[1],x[2])

    X0 = Params.materials[mat][3]

    if X0<=0:
        return np.zeros(6)

    # rms of projected theta distribution.
    theta0 = 13.6/(beta*magp) * abs(Params.Q) * np.sqrt(dx/X0) * (1 + 0.038*np.log(dx/X0))
    
    # correlation coefficient between theta_plane and y_plane
    rho = 0.87
    
    getRandom = np.random.normal

    z1 = getRandom()
    z2 = getRandom()
    yx = z1*dx*theta0 * np.sqrt((1-rho**2)/3) + z2*rho*dx*theta0/np.sqrt(3)
    thetax = z2*theta0
    
    z1 = getRandom()
    z2 = getRandom()
    yy = z1*dx*theta0 * np.sqrt((1-rho**2)/3) + z2*rho*dx*theta0/np.sqrt(3)
    thetay = z2*theta0

    return thetax, thetay, yx, yy
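The two-z construction above is the PDG recipe for drawing a correlated (displacement, angle) pair: it yields std(theta) = theta0, std(y) = dx*theta0/sqrt(3), and corr(y, theta) = rho = 0.87. A standalone sanity check with toy values:

import numpy as np

rng = np.random.default_rng(0)
dx, theta0, rho = 0.01, 1e-3, 0.87  # toy step length (m) and scattering width (rad)
z1, z2 = rng.standard_normal((2, 100000))
y = z1*dx*theta0*np.sqrt((1 - rho**2)/3) + z2*rho*dx*theta0/np.sqrt(3)
theta = z2*theta0
print(np.corrcoef(y, theta)[0, 1])          # ~0.87
print(np.std(y) / (dx*theta0/np.sqrt(3)))   # ~1.0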
Example #17
def find_intrusion():

    current_time = 0

    # per_time: current working packet per time
    per_time = PacketPerTime({}, current_time)

    # per_time_interval: PacketsPerInterval - the last 5 PacketsPerTime
    per_time_interval = PacketsPerInterval([])

    # Brain object, packets to run on.
    brain, packets, ip_intrusions = JsonParser.json_get_brain_chunks(
        JsonParser.write_intrusion_packet_chunk)
    brain.generate_ip_country_map()

    intrusions = []

    for packet in packets:

        # translate to our object
        packet_chunk = parse(packet)

        # OR: just use packet as it is if we are running on our data set
        # packet_chunk = packet

        if packet_chunk.time == current_time:
            per_time.add_packet(packet_chunk)
            # Detector.find_new_ips([packet_chunk.sender, packet_chunk.receiver], brain.ip_set, brain.malicious_ips)
            continue

        to_update = per_time_interval.update(per_time)
        if to_update is not None:
            brain.update(to_update)

        brain_interval = brain.get_interval(current_time,
                                            PacketsPerInterval.INTERVAL)
        intrusions += Detector.detect_intrusion(brain_interval,
                                                per_time_interval,
                                                brain.ip_set,
                                                brain.malicious_ips)
        current_time += 1
        per_time = PacketPerTime({}, current_time)

    display_intrusions(intrusions, ip_intrusions)
Example #18
def multipleScatterKuhn(x, dt):
    # use the method from Kuhn paper

    if Detector.getMaterial(x[0],x[1],x[2])=='air':
        return np.zeros(6)


    p = x[3:]
    theta = getScatterAngleKuhn(x, dt)

    if theta==-1:
        return multipleScatterPDG(x,dt)

    vx = getNormVector(p)
    
    # deflection in momentum
    defl = np.linalg.norm(p) * (theta*vx)

    return np.append(np.zeros(3), defl)
Example #19
def traverseBField(t, x):
    # x is a 6-element vector (x,y,z,px,py,pz)
    # returns dx/dt
    #
    # if B is in Tesla, dt is in ns, p is in units of MeV/c,  then the basic eq is
    # dp/dt = (89.8755) Qv x B,
    
    
    dxdt = np.zeros(6)

    p = x[3:]
    magp = np.linalg.norm(p)
    E = np.sqrt(magp**2 + Params.m**2)
    v = p/E
    dxdt[:3] = v * 2.9979e-1

    B = Detector.getBField(x[0],x[1],x[2])

    dxdt[3:] = (89.8755) * Params.Q * np.cross(v,B)

    return dxdt
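traverseBField supplies dx/dt to an ODE integrator (Integrator.rk4 in Example #32). A self-contained classical RK4 sketch over the same equations, with a uniform 1 T field along z standing in for Detector.getBField and toy particle parameters standing in for Params:

import numpy as np

def rk4_step(f, t, x, dt):
    # classical 4th-order Runge-Kutta: x(t+dt) = x + dt/6*(k1 + 2k2 + 2k3 + k4)
    k1 = f(t, x)
    k2 = f(t + dt/2, x + dt/2*k1)
    k3 = f(t + dt/2, x + dt/2*k2)
    k4 = f(t + dt, x + dt*k3)
    return x + dt/6*(k1 + 2*k2 + 2*k3 + k4)

m, Q = 105.658, 1.0  # toy values: muon mass in MeV, unit charge

def toy_dxdt(t, x):
    p = x[3:]
    E = np.sqrt(p.dot(p) + m**2)
    v = p/E
    B = np.array([0., 0., 1.])  # uniform 1 T along z instead of Detector.getBField
    return np.concatenate([v*2.9979e-1, 89.8755*Q*np.cross(v, B)])

x = np.array([0., 0., 0., 1000., 0., 0.])  # 1 GeV/c along x
for _ in range(1000):
    x = rk4_step(toy_dxdt, 0.0, x, 0.2)     # dt = 0.2 ns, as in Example #32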
Example #20
def multipleScatterPDG(x, dt):
    # get the angles/displacements from above function and return the
    # net change in x=(x,y,z,px,py,pz)

    if Detector.getMaterial(x[0],x[1],x[2])=='air':
        return np.zeros(6)

    p = x[3:]

    vx = getNormVector(p)
    vy = np.cross(vx, p/np.linalg.norm(p))

    thetax, thetay, yx, yy = getScatterAnglePDG(x, dt)

    # transverse displacement
    disp = yx*vx + yy*vy
    
    # deflection in momentum
    defl = np.linalg.norm(p) * (thetax*vx + thetay*vy)

    return np.append(disp, defl)
Example #22
def doEnergyLoss(x, dt):
    ## returns new x after losing proper amount of energy according to Bethe-Bloch

    p = x[3:]
    magp = np.linalg.norm(p)
    E = np.sqrt(magp**2+Params.m**2)
    gamma = E/Params.m
    beta = magp/E
    me = 0.511  # electron mass in MeV
    
    Wmax = 2*me*beta**2*gamma**2/(1+2*gamma*me/Params.m + (me/Params.m)**2)
    K = 0.307075  # in MeV cm^2/mol

    mat = Detector.getMaterial(x[0],x[1],x[2])
    Z,A,rho,X0 = Params.materials[mat]
    I,a,k,x0,x1,Cbar,delta0 = Params.dEdx_params[mat]

    I = I/1e6  ## convert from eV to MeV

    xp = np.log10(magp/Params.m)
    if xp>=x1:
        delta = 2*np.log(10)*xp - Cbar
    elif xp>=x0:
        delta = 2*np.log(10)*xp - Cbar + a*(x1-xp)**k
    else:
        delta = delta0*10**(2*(xp-x0))

    # mean energy loss in MeV/cm
    dEdx = K*rho*Params.Q**2*Z/A/beta**2*(0.5*np.log(2*me*beta**2*gamma**2*Wmax/I**2) - beta**2 - delta/2)

    dE = dEdx * beta*2.9979e1 * dt

    if dE>(E-Params.m):
        return np.zeros(6)

    newmagp = np.sqrt((E-dE)**2-Params.m**2)
    x[3:] = p*newmagp/magp

    return x
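doEnergyLoss mutates the momentum components of x in place and returns the all-zero vector once the particle would lose more than its kinetic energy. A stepping-loop sketch showing where it would typically sit (assuming Params and the material tables are configured as in Example #35, and rk4_step from the sketch after Example #19):

import numpy as np

def propagate(x0, dt, nsteps):
    x = np.array(x0, dtype=float)
    for i in range(nsteps):
        x = rk4_step(traverseBField, i*dt, x, dt)  # bend in the B field first
        x = doEnergyLoss(x, dt)                    # then apply Bethe-Bloch loss
        if not np.any(x[3:]):                      # zero momentum: particle stopped
            break
    return x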
Example #23
def getKuhnScatteringParams(x, dt):

    mat = Detector.getMaterial(x[0],x[1],x[2])

    Z,A,rho,X0 = Params.materials[mat]

    z = abs(Params.Q)

    p = x[3:]
    magp = np.linalg.norm(p)
    v = p/np.sqrt(magp**2 + Params.m**2)
    beta = np.linalg.norm(v)

    ds = beta * 2.9979e1 * dt

    Xc = np.sqrt(0.1569 * z**2 * Z*(Z+1) * rho * ds / (magp**2 * beta**2 * A))
    b = np.log(6700*z**2*Z**(1./3)*(Z+1)*rho*ds/A / (beta**2+1.77e-4*z**2*Z**2))

    if b<3:
        if not Params.MSCWarning:
            print "Warning: something (probably Q) is too small! Using PDG MSC algorithm."
            Params.MSCWarning = True
        return -1,-1

    ## we want to solve the equation B-log(B) = b. Using Newton-Raphson

    B = b
    prevB = 2*B
    
    f = lambda x: x-np.log(x)-b
    fp = lambda x: 1-1./x

    while abs((B-prevB)/prevB)>0.001:
        prevB = B
        B = B - f(B)/fp(B)

        
    # use B+1 for correction at intermediate angles
    return Xc, B+1
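The loop above solves B - log(B) = b by Newton-Raphson, iterating B <- B - f(B)/f'(B) until the relative change falls below 0.1%. The same solve as a standalone snippet:

import math

def solve_B(b, tol=1e-3):
    # find B with B - ln(B) = b; the caller above guarantees b > 3
    B, prevB = b, 2.0*b
    while abs((B - prevB)/prevB) > tol:
        prevB = B
        B = B - (B - math.log(B) - b)/(1.0 - 1.0/B)
    return B

B = solve_B(5.0)
print(B, B - math.log(B))  # the second value should be ~5.0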
Example #24
def EyeDetection(account):
    global MY_IP, MY_PORT
    sleep = False
    counter = 0

    # AWS Public Key
    con = Connector.Connector(ip=MY_IP,
                              port=MY_PORT,
                              method=Connector.CONNECTED_TCP)
    print("Connected to " + MY_IP + ':' + str(MY_PORT))

    try:
        print("start")
        eye = det.Eye(
            cascade_path=
            "./opencv/haarcascades/haarcascade_eye_tree_eyeglasses.xml")
        for frame in eye.getFrame():
            eye.analyze(frame)

            percent = float(eye.sleepPercentage())

            if percent > 0.8: counter += 1
            else: counter = 0

            if counter > 15:
                sleep = True
                # This statement just test statement
            elif counter == 0:
                sleep = False

            if DataProcessing(con, sleep, account) == -1:
                return -1

    except KeyboardInterrupt:
        print("end")
        return 0
Example #25
    clip.write_videofile(mp4_path)
    cap = cv2.VideoCapture(mp4_path)
    framen = 0
    while (cap.isOpened()):
        ret, frame = cap.read()
        if not ret:
            break
        cv2.imwrite(os.path.join(test_path, str(framen) + ".jpg"), frame)
        framen += 1
    cap.release()
    cv2.destroyAllWindows()

results = os.path.join(os.getcwd(), "Data", "Source_Images",
                       "Test_Image_Detection_Results")
if len(os.listdir(results)) == 0:
    Detector.detect()

nFrames = len(os.listdir(results))
duration = clip.duration
fps = nFrames / duration

resultVid = os.path.join(os.getcwd(), "results.mp4")
tovid.conv(results, resultVid, fps)

import csv
with open(os.path.join(os.getcwd(), "Detection_Results.csv")) as csvfile:
    readCSV = csv.reader(csvfile, delimiter=',')

    rows = []
    skip = True  #skip the header row
    for row in readCSV:
Example #26
def runModel(expDir,
             bandID,
             hwpIndex=9,
             lensIP=.0004,
             theta=0.1308,
             writeFile=False,
             printChain=False):
    """ Gets A4 for specified experiment
    
    Parameters
    -------
    expDir : string
        Path to folder containing `channels.txt`, `camera.txt`, and `opticalChain.txt.`
    bandID : int (1 or 2)
        Frequency number for specified experiment
    hwpIndex : int
        Index for HWP to be placed at
    lensIP : float
        IP of lenses in telescope
    theta : float [rad]
        Incident angle (Small aperture)
    writeFile : bool
        If optical power file should be printed
        
    Returns
    --------
    det : Detector
        Detector object used for experiment
    elements : list of OpticalElements
        Optical chain created for experiment
    powOnDetector : float [pW]
        Polarized Power incident on detector
    powCMB : float [Kcmb]
        Equivalent power in Kcmb at the start of the telescope
    """
    channelFile = expDir + "channels.txt"
    cameraFile = expDir + "camera.txt"
    opticsFile = expDir + "opticalChain.txt"
    atmFile = "Atacama_1000um_60deg.txt"

    outputString = ""

    #Imports detector data
    det = dt.Detector(channelFile, cameraFile, bandID)
    """
        CREATES OPTICAL CHAIN
    """

    elements = []  #List of optical elements

    #CMB Element
    e = opt.OpticalElement()
    e.load("CMB", 2.725, 1)
    elements.append(e)

    #Atmosphere Element
    e = opt.OpticalElement()
    e.loadAtm(atmFile, det)
    elements.append(e)

    #Telescope elements
    elements += opt.loadOpticalChain(opticsFile,
                                     det,
                                     lensIP=lensIP,
                                     theta=theta)

    #Detector Element
    e = opt.OpticalElement()
    e.load("Detector", det.bath_temp, 1 - det.det_eff)
    elements.append(e)

    #Gets HWP index

    try:
        hwpIndex = [e.name for e in elements].index("HWP")
    except ValueError:  # no HWP in the chain: insert one at the given index
        e = opt.OpticalElement()
        e.load("HWP", elements[hwpIndex - 1].temp, 0)
        elements.insert(hwpIndex, e)

    #Inserts HWP at desired position
    # hwpIndex = 9      #-----SO
    # hwpIndex = 10        #-----Ebex
    # hwpIndex = 3         #-----pb

    freqs, UPspecs, UPout, PPout = ps.A4Prop(elements, det, hwpIndex)

    incPow = map(lambda x: th.powFromSpec(freqs, x), UPspecs)

    pW_per_Kcmb = th.dPdT(elements, det) * pW
    effs = [e.Eff(det.band_center) for e in elements[1:]]
    #    effs.insert(0, .979985868865*.9991602)
    #    print effs
    cumEff = reduce(lambda x, y: x * y, effs)
    print cumEff
    #######################################################
    ## Print table
    #######################################################
    outputString += "bandID: %d \t freq: %.2f GHz\n" % (det.bid,
                                                        det.band_center / GHz)
    outputString += "Name\t\t\tIncident UP(pW)\t\tUP Output (pW) \t\tPP Output (pW)\n"
    outputString += "-" * 70 + "\n"

    for i in range(len(elements)):
        outputString += "%-8s\t\t%e\t\t%e\t\t%e\n" % (
            elements[i].name, incPow[i] * pW, UPout[i] * pW, PPout[i] * pW)

    outputString += "\n%e pW / Kcmb\n" % pW_per_Kcmb
    outputString += "Telescope Efficiency: %e" % (cumEff)
    outputString += "\nFinal output up:\t%e pW \t %e Kcmb\n" % (
        sum(UPout) * pW, sum(UPout) * pW / pW_per_Kcmb)
    outputString += "Final output pp:\t%e pW \t %e Kcmb\n" % (
        sum(PPout) * pW, sum(PPout) * pW / pW_per_Kcmb)
    if printChain:
        print outputString

    if writeFile:
        fname = expDir + "%dGHz_opticalPowerTable.txt" % (det.band_center /
                                                          GHz)
        f = open(fname, 'w')
        f.write(outputString)
        f.close()
    return det, elements, sum(PPout) * pW, sum(PPout) * pW / pW_per_Kcmb
Example #27
from CrossSection import dSigdEr
import Detector
import ReactorSpectra  # used below but missing from the original import block
import numpy as np
from matplotlib import pyplot as plt

E_thr = np.linspace(0., 10., 200)
E_nu = np.linspace(0.001, 10., 1000)
E_R = np.linspace(0.001, 10., 1000)
dE_nu = E_nu[2] - E_nu[1]

reactor = ReactorSpectra.ReactorSpectra()
reactor.reactorPower = 3.e9
reactor.ComputeFullSpectrum(E_nu)
print(sum(reactor.fullSpectrum) * dE_nu)

detector = Detector.ArgonDetector()
detector.distance = 25.
detector.ComputeSolidAngleFraction()
print(detector.fluxFactor)

##################################################################
# Plot the spectrum of a roughly accurate combination of
# P239, P241, and U235, using the exponential 5th-order polynomial
# parameterization
plt.figure(1)
plt.plot(E_nu, reactor.fullSpectrum, '-k')
plt.axis([0.01, 10., 1.e14, 1.e22])
plt.yscale('log')
plt.xlabel('Neutrino energy (MeV)')
plt.ylabel('Neutrinos/MeV')
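ComputeSolidAngleFraction presumably folds the 1/(4*pi*d^2) geometric dilution between reactor and detector into fluxFactor (an assumption about what that attribute holds); a back-of-envelope check at 25 m:

import numpy as np

d_cm = 25.0 * 100.0                        # 25 m in cm
flux_per_nu = 1.0 / (4.0*np.pi*d_cm**2)    # isotropic point source: per-neutrino flux in cm^-2
print(flux_per_nu)                         # ~1.27e-8 cm^-2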
Example #28
    def recognize(self):
        frame_number = 0

        # loop over frames from the video file stream
        while True:
            # grab the next frame
            (grabbed, frame) = self.stream.read()
            # if the frame was not grabbed, then we have reached the end of the stream
            if not grabbed:
                break

            # convert the input frame from BGR to RGB then resize it to have a height of 720px (to speedup processing)
            rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            rgb = imutils.resize(rgb, height=720)

            frame_copy = rgb.copy()

            face_list, M = Detector.detect_face(rgb)

            boxes = []
            for i in range(len(face_list)):
                top = face_list[i]['top']
                bottom = face_list[i]['bottom']
                left = face_list[i]['left']
                right = face_list[i]['right']
                boxes.append((top, right, bottom, left))
                face_list[i]['frame'] = frame_number

                # align the face with M
                alignedFace = None
                try:
                    rgb_aligned = cv2.warpAffine(
                        frame_copy, M[i],
                        (frame_copy.shape[1], frame_copy.shape[0]))
                    alignedFace = rgb_aligned[top:bottom, left:right, :]
                except:
                    print("align error")
                try:
                    frame_copy[top:bottom, left:right, :] = alignedFace
                except:
                    print("size difference")

            face_encodings = face_recognition.face_encodings(frame_copy, boxes)

            for i, encoding in enumerate(face_encodings):
                # attempt to match each face in the input image to our known encodings
                matches = face_recognition.compare_faces(
                    self.data['encodings'], encoding, tolerance=0.4)

                # check to see if we have found a match
                if True in matches:
                    face_list[i]['mosaic'] = False

            self.detected_faces.extend(face_list)

            print("[Recog_INFO] Processing image {}".format(frame_number + 1),
                  "/", self.frame_length)
            frame_number += 1
        print("[Recog_INFO] serializing encodings...")
        data = self.detected_faces
        f = open("detected_faces.pickle", "wb")
        f.write(pickle.dumps(data))
        f.close()

        return self.detected_faces
Example #29
center = rp.centerOfDetector
distToDetect = np.linalg.norm(center)
normToDetect = center/distToDetect

detV = np.array([0., 1., 0.])
detW = np.cross(normToDetect, detV)

detWidth = rp.detWidth
detHeight = rp.detHeight
detDepth = rp.detDepth

detectorDict = {"norm":normToDetect, "dist":distToDetect, "v":detV, 
            "w":detW, "width":detWidth, "height":detHeight, "depth":detDepth}

# the four corners (only for drawing)
c1,c2,c3,c4 = Detector.getDetectorCorners(detectorDict)

intersects = []
ntotaltrajs = 0

mt = MilliTree()

if mode=="STATS":
    # if file already exists, check if we want to overwrite or append
    if os.path.isfile(outname):
        ow = 'q'
        while ow not in ('y', 'Y', 'n', 'N'):  # a substring test against 'yYnN' would accept ''
            ow = raw_input("Overwrite file? (y/n) ")
        if ow in ('y', 'Y'):
            txtfile = open(outname,'w')
            if rp.useCustomOutput:
Example #30
cnts = imutils.grab_contours(cnts)
cnts = sorted(cnts, key = cv2.contourArea, reverse = True)

displayCnt = None

for c in cnts:
	#Approximate the contour
	peri = cv2.arcLength(c, True)
	approx = cv2.approxPolyDP(c, 0.02*peri, True)

	if len(approx) == 4:
		displayCnt = approx
		break

#Sudoku extracted

warped = four_point_transform(gray, displayCnt.reshape(4,2))
blur = cv2.GaussianBlur(warped, (5,5), 0)

#Receives a vector with the Y coordinates of the grid lines
lines = Func.getLines(blur)
	
if len(lines) == 9:
	sudoku = Func.getNumbers(lines)
	sudokuList = Detector.defineNumbers(sudoku)	#sudoku only exists when all 9 lines were found
	Solver.solve(sudokuList)

cv2.waitKey(0)
cv2.destroyAllWindows()
Example #31
# Demo 3, convert the trained classifier into java format

import Detector
import Util

print 'Start conversion.'

cascadeClassifier = Detector.getCascadeClassifierFromFile(Util.DEFAULT_JSON_FILE)
Util.convertJsonFromPythonToJava(cascadeClassifier)

print 'Completed the conversion, please check file ', Util.DEFAULT_JSON_FILE_FOR_JAVA
Example #32
Params.BFieldType = 'cms'
Params.Q = 1.0
Params.MSCtype = 'none'
Params.EnergyLossOn = False
Params.Interpolate = True
Params.UseFineBField = False

z = 0
phi = 0
Bdl=0
Bdlvals = [0]
dr = 0.5
rvals = np.arange(0,901,dr)
for r in rvals:

    igd = Detector.getBField(r/100.0,0,0)[2]*dr/100 * 0.3/20 * 180/np.pi
    Bdl += igd
    Bdlvals.append(Bdl)

print "\nPredicted deflection for 20 GeV particle:",Bdl,"deg\n"

plt.plot(rvals,Bdlvals[:-1],'-b', linewidth=2, label=r"$\int$ $Bdl$ straight line")


x0 = np.array([0,0,0,20000,0,0])
dt = 0.2
nsteps = 1000
traj = Integrator.rk4(x0,Integrator.traverseBField,dt,nsteps,cutoff=9,cutoffaxis=3)

#trajRvals = traj[0,:]*100
trajRvals = np.linalg.norm(traj[:3,:],axis=0)*100
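The 0.3/20 * 180/pi factor in the loop encodes the standard small-angle bending relation theta[rad] ≈ 0.3*q*(integral B dl)[T*m] / p[GeV], here with p = 20 GeV. A standalone check of the conversion:

import numpy as np

q, p_GeV = 1.0, 20.0
Bdl_Tm = 10.0                                  # toy value for the field integral
theta_deg = 0.3*q*Bdl_Tm/p_GeV * 180.0/np.pi
print(theta_deg)                               # ~8.59 degrees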
Example #33
def f2Model(expDir, hwpi=9, writeFile=False):
    channelFile = expDir + "channels.txt"
    cameraFile = expDir + "camera.txt"
    opticsFile = expDir + "opticalChain.txt"
    atmFile = "Atacama_1000um_60deg.txt"

    outFile = expDir + "2f_out.txt"

    outString = ""
    outString += "bid\tf\tHWP_f\ta2 Ave\t\tA2\t\t\tA2\n"
    outString += "[]\t[GHz]\t[GHz]\t[]\t\t[pW]\t\t[Kcmb]\n"
    outString += "-" * 40 + "\n"

    band_centers = []
    a2s = []
    A2_pW = []
    A2_K = []
    for bandID in [1, 2]:

        #Imports detector data
        det = dt.Detector(channelFile, cameraFile, bandID)

        elements = []  #List of optical elements

        #CMB optical element
        e = opt.OpticalElement()
        e.load("CMB", 2.725, 1)
        elements.append(e)

        e = opt.OpticalElement()
        e.loadAtm(atmFile, det)
        elements.append(e)

        # Loads elements from Optical Chain file
        elements += opt.loadOpticalChain(opticsFile, det)

        e = opt.OpticalElement()
        e.load("Detector", det.bath_temp, 1 - det.det_eff)
        elements.append(e)

        # Checks if HWP is already in Optical chain.
        # If not, inserts it at index specified.
        try:
            hwpIndex = [e.name for e in elements].index("WP")
        except ValueError:
            hwpIndex = hwpi
            e = opt.OpticalElement()
            e.load("HWP", elements[-1].temp, 0)
            elements.insert(hwpIndex, e)

        #Inserts HWP at desired position
        # hwpIndex = 9      #-----SO
        # hwpIndex = 10        #-----Ebex
        # hwpIndex = 3         #-----pb

        ## Get closest hwp frequency to the band center
        bc = det.band_center / GHz
        posFreqs = [30, 40, 90, 150, 220, 230, 280]
        hwpFreq = reduce(
            lambda x, y: (x if (abs(x - bc) < abs(y - bc)) else y), posFreqs)

        incAngle = 8
        # Import mueller data file
        muellerDir = "Mueller_AR/"

        muellerFile = muellerDir + "Mueller_V2_nu%.1f_no3p068_ne3p402_ARcoat_thetain%.1f.txt" % (
            hwpFreq, incAngle)
        print "reading from:  \t %s" % muellerFile

        f, r = np.loadtxt(muellerFile,
                          dtype=np.float,
                          unpack=True,
                          usecols=[0, 2])

        #Interpolates a2 from data
        rho = interpolate.interp1d(f, r, kind="linear")
        x = np.linspace(det.flo, det.fhi, 400)
        y = rho(x)

        #Saves plot of rho (or a2) in bandwidth
        if writeFile:
            plt.plot(x / GHz, y)
            plt.savefig(expDir + "%.1fGHz_a2.pdf" % (det.band_center / GHz))
            plt.clf()

        # Gets average a2 value
        a2Ave = abs(intg.simps(y, x=x) / (det.fhi - det.flo))


        pW_per_Kcmb = th.dPdT(elements, det) * pW

        freqs, UPspecs, _, _ = ps.A4Prop(elements, det, hwpIndex)

        effs = lambda f: map(lambda x: x.Eff(f), elements[hwpIndex + 1:])
        cumEff = lambda f: reduce((lambda x, y: x * y), effs(f))

        # Incident power on the HWP
        hwpInc = UPspecs[hwpIndex]
        detIp = hwpInc * rho(freqs) * cumEff(freqs)
        # Polarized emission of HWP
        hwpEmis = th.weightedSpec(freqs, elements[hwpIndex].temp,
                                  elements[hwpIndex].pEmis) * cumEff(freqs)

        #2f power at the detector
        det2FPow = detIp + hwpEmis

        #Total A2 (W)
        A2 = abs(np.trapz(det2FPow, freqs))

        telEff = reduce((lambda x, y: x * y),
                        [e.Eff(det.band_center) for e in elements[2:]])
        print telEff
        band_centers.append(det.band_center / GHz)
        a2s.append(a2Ave)
        A2_pW.append(A2 * pW)
        A2_K.append(A2 * pW / pW_per_Kcmb)

        outString += "%d\t%.1f\t%.1f\t%.2e\t%.8e\t%.3f\n" % (
            det.bid, det.band_center / GHz, hwpFreq, a2Ave, A2 * pW / telEff,
            A2 * pW / pW_per_Kcmb)
    print outString

    if writeFile:

        f = open(outFile, 'w')
        f.write(outString)
        f.close()

    return band_centers, a2s, A2_pW, A2_K
Example #34
        fuzzer.setCertFuzz(vars.keyfile, vars.certfile)
    # WS-Sec
    if vars.wsseuser and vars.wssepass:
        fuzzer.setWSSEFuzz(vars.wsseuser, vars.wssepass)

    establishMethods(wrapper=wrapper, dictionary=vars.dictionary, automate=vars.automate, simultaneous=vars.simultaneous)
# if -h is used
elif (vars.target and not vars.wsdl and (not vars.endpoint and not vars.namespace)):
    vars.directory = genUtils.defineDirName(0)

    print "0) Basic Discovery (faster but less accurate)"
    print "1) Advanced Discovery (slower and more intrusive but more thorough and accurate)"
    print "2) Advanced Discovery (like #1) with port scanning first"
    probeChosen = input("\nProbe Type: ")

    detect = Detector.WSDLDetector()
    if (probeChosen == 0):
        detect.detect(vars.target, 'basic', 5)
    elif (probeChosen == 1):
        spiderChosen = raw_input("\nWould you like to Spider the target on top of the advanced probe: ")
        spiderChosen = spiderChosen.strip('\r')
        if y.search(spiderChosen):
            detect.detect(vars.target, 'advanced', 7, True)
        else:
            detect.detect(vars.target, 'advanced', 7)
    elif (probeChosen == 2):
        startPort = input("\nBeginning TCP port for scan: ")
        endPort = input("\nEnding TCP port for scan: ")
        if (startPort in range (0, 65534)) and (endPort in range (1, 65535)
                                                and startPort <= endPort):
            portScan = PortScanner.WrapPortScanner(vars.target, startPort, endPort)
Example #35
import sys  # sys.argv is used below but sys was not imported
import numpy as np
import matplotlib.pyplot as plt
import ROOT
import Params
import Integrator
import Detector
import Drawing
from MilliTree import MilliTree
import run_params as rp

suffix = sys.argv[1]

outname = "../output_{0}".format(suffix)

# must run at the beginning of main script. Loads the B field map into memory
Detector.LoadCoarseBField("../bfield/bfield_coarse.pkl")

# turn on CMS magnetic field and PDG multiple scattering
Params.BFieldType = rp.BFieldType
Params.MSCtype = 'pdg'
Params.MatSetup = 'cms'
# turn on dE/dx energy loss (Bethe-Bloch)
Params.EnergyLossOn = True
# charge and mass of the particle. Q in units of electric charge, m in MeV
Params.Q = rp.particleQ
Params.m = rp.particleM
#setting this False keeps the "stopped particle" warnings visible
Params.SuppressStoppedWarning = False
Params.RockBegins = rp.RockBegins

if rp.useCustomMaterialFunction:
Example #36
import Read_file
import Generate_model
import Detector
import PARAMETER
import os

if __name__ == '__main__':

    Read_file.allfiles(PARAMETER.PACKAGE_TRAIN)

    Generate_model.create_model()

    Read_file.allfiles(PARAMETER.PACKAGE_TEST)

    Detector.run()
Example #37
plt.rc('text', usetex=True)
plt.rc('font', size=15)

E_thr = np.linspace(0., 10., 200)
E_nu = np.linspace(0.0001, 10., 500)
E_R = np.linspace(0.0001, 8., 1000)
dE_R = E_R[2] - E_R[1]
dE_nu = E_nu[2] - E_nu[1]

reactor = ReactorSpectraVogel.ReactorSpectra()
reactor.reactorPower = 1.e9
reactor.ComputeFullSpectrum(E_nu)
print(sum(reactor.fullSpectrum) * dE_nu)

xedetector = Detector.XenonDetector()
ardetector = Detector.ArgonDetector()
gedetector = Detector.GeDetector()
pbdetector = Detector.PbWO4Detector()
aldetector = Detector.Al2O3Detector()
al_leu_detector = Detector.Al2O3Detector()
al_mox25_detector = Detector.Al2O3Detector()
bidetector = Detector.Bi4Ge3O12Detector()
xedetector.distance = 25.
gedetector.distance = 25.
ardetector.distance = 25.
pbdetector.distance = 25.
aldetector.distance = 25.
bidetector.distance = 25.
xedetector.ComputeSolidAngleFraction()
gedetector.ComputeSolidAngleFraction()
Example #38
for _ in range(FLAGS.training_iters):
    # time records 
    t0 = time.time()
    
    # get data and label from generator
    #batch_img, batch_joints = train_itr.next()
    
    #batch_img, batch_joints = sess.run([batch_img, batch_joints])
    batch_img, batch_joints = sess.run(next_ele)
    
    # normalize 
    batch_img = batch_img / 255 - 0.5
        
    # express label in the form of a heatmap
    batch_heatmap = dt.transform_joints_to_heatmap(FLAGS.image_size, FLAGS.hmap_size,
                                                        FLAGS.joint_gaussian_variance,
                                                        batch_joints)
    
    
    # go forward one step and prepare record
#     stage_loss, total_loss, op, cur_lr, stage_heatmap, global_step = sess.run([model.stage_loss, # loss of each stage
#               model.total_loss, # sum loss of all stages
#               model.train_op, # optimization, this must be done, otherwise the global step will not increment
#               #summary,# summary
#               model.cur_lr, # depreciating
#               model.stage_heatmap, # heatmaps of each stage
#               model.global_step # count steps
#               ],feed_dict={model.input_images: batch_img, model.gt_hmap_placeholder: batch_heatmap})
    stage_loss, total_loss, op, cur_lr, stage_heatmap, global_step = sess.run([model.stage_loss, # loss of each stage
              model.total_loss, # sum loss of all stages
              model.train_op, # optimization, this must be done, otherwise the global step will not increment
Example #39
def main(argv):

    
    """ Initial tracker
    """
    tracker = tracking_module.SelfTracker([FLAGS.webcam_height, FLAGS.webcam_width], FLAGS.input_size)

    """ Build network graph
    """
    model = cpm_model.CPM_Model(input_size=FLAGS.input_size,
                                heatmap_size=FLAGS.heatmap_size,
                                stages=FLAGS.cpm_stages,
                                joints=FLAGS.num_of_joints,
                                img_type=FLAGS.color_channel,
                                is_training=False)
    saver = tf.train.Saver()

    """ Get output node
    """
    output_node = tf.get_default_graph().get_tensor_by_name(name=FLAGS.output_node_names)

    device_count = {'GPU': 1} if FLAGS.use_gpu else {'GPU': 0}
    sess_config = tf.ConfigProto(device_count=device_count)
    sess_config.gpu_options.per_process_gpu_memory_fraction = 0.2
    sess_config.gpu_options.allow_growth = True
    sess_config.allow_soft_placement = True
    with tf.Session(config=sess_config) as sess:

        model_path_suffix = os.path.join(FLAGS.network_def,
                                         'input_{}_output_{}'.format(FLAGS.input_size, FLAGS.heatmap_size),
                                         'joints_{}'.format(FLAGS.num_of_joints),
                                         'stages_{}'.format(FLAGS.cpm_stages),
                                         'init_{}_rate_{}_step_{}'.format(FLAGS.init_lr, FLAGS.lr_decay_rate,
                                                                          FLAGS.lr_decay_step)
                                         )
        model_save_dir = os.path.join('models',
                                      'weights',
                                      model_path_suffix)
        print('Load model from [{}]'.format(os.path.join(model_save_dir, FLAGS.model_path)))

        # if the model is a pkl file, then load it, else just restore the pretrained cpm_hand by default
        # by here we can see the loading weight structure
        if FLAGS.model_path.endswith('pkl'):
            model.load_weights_from_file(FLAGS.model_path, sess, False)
        else:
            saver.restore(sess, 'models/weights/cpm_hand')

        # Check weights, this seems to be the part that print out weights
        for variable in tf.global_variables():
            with tf.variable_scope('', reuse=True):
                var = tf.get_variable(variable.name.split(':0')[0])
                print(variable.name, np.mean(sess.run(var)))

        # Create webcam instance
        if FLAGS.DEMO_TYPE in ['MULTI', 'SINGLE', 'Joint_HM']:
            print("REFUSEREFUSE\n\n\n\n\n\n\n\nrefule")
            cam = cv2.VideoCapture(FLAGS.cam_id)

        # Create kalman filters
        if FLAGS.use_kalman:
            kalman_filter_array = [cv2.KalmanFilter(4, 2) for _ in range(FLAGS.num_of_joints)]
            for _, joint_kalman_filter in enumerate(kalman_filter_array):
                joint_kalman_filter.transitionMatrix = np.array(
                    [[1, 0, 1, 0], [0, 1, 0, 1], [0, 0, 1, 0], [0, 0, 0, 1]],
                    np.float32)
                joint_kalman_filter.measurementMatrix = np.array([[1, 0, 0, 0], [0, 1, 0, 0]], np.float32)
                joint_kalman_filter.processNoiseCov = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]],
                                                               np.float32) * FLAGS.kalman_noise
        else:
            kalman_filter_array = None

        if FLAGS.DEMO_TYPE.endswith(('png', 'jpg')):
            print("TESTINGTESTING\n\n\n\n\n\n\n\nrTESGING")
            test_img = cpm_utils.read_image(FLAGS.DEMO_TYPE, [], FLAGS.input_size, 'IMAGE')

            #fixme: I haven't seen how it cut the box, I have put the coordinates here, but not using them currently
            b_box, b_height, b_width = dt.bounding_box_from_file(FLAGS.DEMO_TYPE)
            b_box = b_box[0][0]
            bb_box = dt.normalize_and_centralize_img(b_box[0], b_box[1], b_box[2], b_box[3], 10, b_height, b_width)
            #fixme: this is the end of my code
            test_img_resize = cv2.resize(test_img, (FLAGS.input_size, FLAGS.input_size))

            test_img_input = normalize_and_centralize_img(test_img_resize)

            t1 = time.time()

            #current_heatmap is the output after going through the last layer
            #input_images is a placeholder matrix for the image size
            # FIXME: what exactly does this sess.run run with? the two arguments?
            #predict_heatmap.shape = (1,32,32,22)
            #stage_heatmap.shape = (1,32,32,22); each of the 1*32*32 entries seems to be the same 22-length array
            predict_heatmap, stage_heatmap_np = sess.run([model.current_heatmap,
                                                          output_node,
                                                          ],
                                                         feed_dict={model.input_images: test_img_input}
                                                         )
            #frame per second
            print('fps: %.2f' % (1 / (time.time() - t1)))

            print(stage_heatmap_np[0], "stage heatmap")
            print(stage_heatmap_np[0].shape)
            #print(predict_heatmap, "predict heatmap")
            #print(predict_heatmap.shape)

            tmp_img = cv2.resize(stage_heatmap_np[0], (FLAGS.input_size, FLAGS.input_size))
            print(tmp_img, "tmp_img")
            print(tmp_img.shape)
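
            # Added illustration (my assumption about what correct_and_draw_hand
            # does internally): each joint's pixel location can be recovered by
            # taking the argmax over that joint's heatmap channel.
            for joint_num in range(FLAGS.num_of_joints):
                joint_row, joint_col = np.unravel_index(
                    np.argmax(tmp_img[:, :, joint_num]),
                    (FLAGS.input_size, FLAGS.input_size))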

            correct_and_draw_hand(test_img,
                                  cv2.resize(stage_heatmap_np[0], (FLAGS.input_size, FLAGS.input_size)),
                                  kalman_filter_array, tracker, tracker.input_crop_ratio, test_img)

            # Show visualized image
            #demo_img = visualize_result(test_img, stage_heatmap_np, kalman_filter_array, tracker, tracker.input_crop_ratio, test_img.copy())
            # local_img = visualize_result(full_img, stage_heatmap_np, kalman_filter_array, tracker, crop_full_scale, test_img_copy)
            cv2.imshow('demo_img', test_img.astype(np.uint8))
            cv2.waitKey(0)

        elif FLAGS.DEMO_TYPE in ['SINGLE', 'MULTI']:
            print("SINGLE\n\n\n\n\n\n\nMULTI")
            while True:
                # Prepare input image
                _, full_img = cam.read()

                # joint_detections feeds the previous frame's detections back into
                # the tracker; it is all zeros on the first frame.
                test_img = tracker.tracking_by_joints(full_img, joint_detections=joint_detections)
                crop_full_scale = tracker.input_crop_ratio
                test_img_copy = test_img.copy()

                # White balance
                test_img_wb = utils.img_white_balance(test_img, 5)
                test_img_input = normalize_and_centralize_img(test_img_wb)

                # Inference
                t1 = time.time()
                stage_heatmap_np = sess.run([output_node],
                                            feed_dict={model.input_images: test_img_input})
                print('FPS: %.2f' % (1 / (time.time() - t1)))

                local_img = visualize_result(full_img, stage_heatmap_np, kalman_filter_array, tracker, crop_full_scale,
                                             test_img_copy)

                cv2.imshow('local_img', local_img.astype(np.uint8))
                cv2.imshow('global_img', full_img.astype(np.uint8))
                if cv2.waitKey(1) == ord('q'): break


        elif FLAGS.DEMO_TYPE == 'Joint_HM':
            print("JOINTHM\n\n\n\n\n\n\nJOINTHM")
            while True:
                # Prepare input image
                test_img = cpm_utils.read_image([], cam, FLAGS.input_size, 'WEBCAM')
                test_img_resize = cv2.resize(test_img, (FLAGS.input_size, FLAGS.input_size))

                test_img_input = normalize_and_centralize_img(test_img_resize)

                # Inference
                t1 = time.time()
                stage_heatmap_np = sess.run([output_node],
                                            feed_dict={model.input_images: test_img_input})
                print('FPS: %.2f' % (1 / (time.time() - t1)))

                demo_stage_heatmap = stage_heatmap_np[len(stage_heatmap_np) - 1][0, :, :,
                                     0:FLAGS.num_of_joints].reshape(
                    (FLAGS.heatmap_size, FLAGS.heatmap_size, FLAGS.num_of_joints))
                demo_stage_heatmap = cv2.resize(demo_stage_heatmap, (FLAGS.input_size, FLAGS.input_size))

                vertical_imgs = []
                tmp_img = None
                joint_coord_set = np.zeros((FLAGS.num_of_joints, 2))

                for joint_num in range(FLAGS.num_of_joints):
                    # Concat until 4 img
                    if (joint_num % 4) == 0 and joint_num != 0:
                        vertical_imgs.append(tmp_img)
                        tmp_img = None

                    demo_stage_heatmap[:, :, joint_num] *= (255 / np.max(demo_stage_heatmap[:, :, joint_num]))

                    # Plot color joints
                    if np.min(demo_stage_heatmap[:, :, joint_num]) > -50:
                        joint_coord = np.unravel_index(np.argmax(demo_stage_heatmap[:, :, joint_num]),
                                                       (FLAGS.input_size, FLAGS.input_size))
                        joint_coord_set[joint_num, :] = joint_coord
                        color_code_num = (joint_num // 4)

                        joint_color = list(
                            map(lambda x: x + 35 * (joint_num % 4), FLAGS.joint_color_code[color_code_num]))
                        cv2.circle(test_img, center=(joint_coord[1], joint_coord[0]), radius=3, color=joint_color,
                                   thickness=-1)

                    # Put text
                    tmp = demo_stage_heatmap[:, :, joint_num].astype(np.uint8)
                    tmp = cv2.putText(tmp, 'Min:' + str(np.min(demo_stage_heatmap[:, :, joint_num])),
                                      org=(5, 20), fontFace=cv2.FONT_HERSHEY_COMPLEX, fontScale=0.3, color=150)
                    tmp = cv2.putText(tmp, 'Mean:' + str(np.mean(demo_stage_heatmap[:, :, joint_num])),
                                      org=(5, 30), fontFace=cv2.FONT_HERSHEY_COMPLEX, fontScale=0.3, color=150)
                    tmp_img = np.concatenate((tmp_img, tmp), axis=0) \
                        if tmp_img is not None else tmp

                # Plot FLAGS.limbs
                for limb_num in range(len(FLAGS.limbs)):
                    if np.min(demo_stage_heatmap[:, :, FLAGS.limbs[limb_num][0]]) > -2000 and np.min(
                            demo_stage_heatmap[:, :, FLAGS.limbs[limb_num][1]]) > -2000:
                        x1 = joint_coord_set[FLAGS.limbs[limb_num][0], 0]
                        y1 = joint_coord_set[FLAGS.limbs[limb_num][0], 1]
                        x2 = joint_coord_set[FLAGS.limbs[limb_num][1], 0]
                        y2 = joint_coord_set[FLAGS.limbs[limb_num][1], 1]
                        length = ((x1 - x2) ** 2 + (y1 - y2) ** 2) ** 0.5
                        if length < 10000 and length > 5:
                            deg = math.degrees(math.atan2(x1 - x2, y1 - y2))
                            polygon = cv2.ellipse2Poly((int((y1 + y2) / 2), int((x1 + x2) / 2)),
                                                       (int(length / 2), 3),
                                                       int(deg),
                                                       0, 360, 1)
                            color_code_num = limb_num // 4
                            limb_color = list(
                                map(lambda x: x + 35 * (limb_num % 4), FLAGS.joint_color_code[color_code_num]))

                            cv2.fillConvexPoly(test_img, polygon, color=limb_color)

                if tmp_img is not None:
                    tmp_img = np.pad(tmp_img, ((0, vertical_imgs[0].shape[0] - tmp_img.shape[0]), (0, 0)),
                                     'constant', constant_values=(0, 0))
                    vertical_imgs.append(tmp_img)

                # Concat horizontally
                output_img = None
                for col in range(len(vertical_imgs)):
                    output_img = np.concatenate((output_img, vertical_imgs[col]), axis=1) if output_img is not None else \
                        vertical_imgs[col]

                output_img = output_img.astype(np.uint8)
                output_img = cv2.applyColorMap(output_img, cv2.COLORMAP_JET)
                test_img = cv2.resize(test_img, (300, 300), interpolation=cv2.INTER_LANCZOS4)
                cv2.imshow('hm', output_img)
                cv2.moveWindow('hm', 2000, 200)
                cv2.imshow('rgb', test_img)
                cv2.moveWindow('rgb', 2000, 750)
                if cv2.waitKey(1) == ord('q'): break
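
# Usage sketch (added; the script name and flag spellings are inferred from the
# FLAGS references above, so treat them as assumptions):
#   python demo.py --DEMO_TYPE SINGLE --cam_id 0 --use_kalman True
#   python demo.py --DEMO_TYPE hand.jpg        # single-image mode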
示例#40
0
import Detector
import ReactorSpectra
import numpy as np
from matplotlib import pyplot as plt
from InelasticAnalysisLib import RateVsEnergy

E_thr = np.linspace(0., 10., 200)
E_nu = np.linspace(0.01, 10., 500)
E_R = np.linspace(1.e-11, 10., 500)
dE_nu = E_nu[1] - E_nu[0]  # uniform spacing of the neutrino energy grid

reactor = ReactorSpectra.ReactorSpectra()
reactor.reactorPower = 3.e9
reactor.ComputeFullSpectrum(E_nu)
print(sum(reactor.fullSpectrum) * dE_nu)
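
# Added cross-check (sketch; assumes reactor.fullSpectrum is sampled on the
# E_nu grid): the same integral via the trapezoidal rule, which is less
# sensitive to grid spacing than the plain Riemann sum above.
print(np.trapz(reactor.fullSpectrum, E_nu))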

xedetector = Detector.XenonDetector()
ardetector = Detector.ArgonDetector()
gedetector = Detector.GeDetector()
xedetector.distance = 25.
gedetector.distance = 25.
ardetector.distance = 25.
xedetector.ComputeSolidAngleFraction()
gedetector.ComputeSolidAngleFraction()
ardetector.ComputeSolidAngleFraction()
print(xedetector.fluxFactor)
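
# Quick inverse-square sanity check (added sketch; assumes fluxFactor scales as
# 1/distance**2 for a point source and is refreshed by ComputeSolidAngleFraction):
# xedetector.distance = 50.
# xedetector.ComputeSolidAngleFraction()
# print(xedetector.fluxFactor * 4.)  # should roughly match the 25 m value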

##################################################################
# Plot the spectrum of a roughly accurate combination of
# Pu239, Pu241, and U235, using the exponential of a 5th-order
# polynomial parameterization
print('Plotting reactor neutrino spectrum...')