Code Example #1
    def __init__(self, ne, ngq, method):
        logger.debug('Read a sparse matrix NetCDF file')

        self.ne = ne
        self.ngq = ngq
        self.method = method
        
        fname = __file__.split('/')[-1]
        fdir = __file__.rstrip(fname)

        if method.upper() == 'AVG':
            # Average the boundary of elements for the Spectral Element Method
            spmat_fpath = fdir + 'spmat_avg_ne%dngq%d.nc'%(ne, ngq)

        elif method.upper() == 'COPY':
            # Copy from UP to EPs at the boundary of elements
            spmat_fpath = fdir + 'spmat_copy_ne%dngq%d.nc'%(ne, ngq)

        elif method.upper() == 'IMPVIS':
            # Implicit Viscosity
            # High-Order Elliptic Filter
            spmat_fpath = fdir + 'spmat_impvis_ne%dngq%d.nc'%(ne, ngq)

        else:
            raise ValueError("The method must be one of 'AVG', 'COPY', 'IMPVIS'")

        self.ncf = ncf = nc.Dataset(spmat_fpath, 'r', format='NETCDF4')
        self.dsts = ncf.variables['dsts'][:]
        self.srcs = ncf.variables['srcs'][:]
        self.wgts = ncf.variables['weights'][:]
Code Example #2
 def send(self, params):
     result = HTTPsClient(self.__url).send(params)
     # printing the dict directly can garble Chinese (non-ASCII) characters,
     # so serialize it with json.dumps(..., ensure_ascii=False) instead
     logger.debug(json.dumps(params, ensure_ascii=False))
     logger.debug(json.dumps(result, ensure_ascii=False))
     return result
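For reference, a minimal sketch of what ensure_ascii=False changes in the logged output:

import json

print(json.dumps({'name': '张三'}))                      # -> {"name": "\u5f20\u4e09"}
print(json.dumps({'name': '张三'}, ensure_ascii=False))  # -> {"name": "张三"}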
Code Example #3
    def extract_local_sparse_matrix(self, target_rank):
        logger.debug('Extract local sparse matrix for rank%d'%target_rank)
        
        t_rank = target_rank
        dsts = self.dsts
        srcs = self.srcs
        wgts = self.wgts
        rank_dsts = self.rank_dsts
        rank_srcs = self.rank_srcs


        t_rank_dsts = (rank_dsts == t_rank)   # bool type array
        t_rank_srcs = (rank_srcs == t_rank)

        local_idxs = np.where( t_rank_dsts & t_rank_srcs )[0]
        send_idxs = np.where( np.invert(t_rank_dsts) & t_rank_srcs )[0]
        recv_idxs = np.where( t_rank_dsts & np.invert(t_rank_srcs) )[0]

        arr_dict = dict()
        arr_dict['spmat_size'] = self.spmat_size

        arr_dict['local_dsts'] = dsts[local_idxs]
        arr_dict['local_srcs'] = srcs[local_idxs]
        arr_dict['local_wgts'] = wgts[local_idxs]

        arr_dict['send_ranks'] = rank_dsts[send_idxs]
        arr_dict['send_dsts'] = dsts[send_idxs]
        arr_dict['send_srcs'] = srcs[send_idxs]
        arr_dict['send_wgts'] = wgts[send_idxs]

        arr_dict['recv_ranks'] = rank_srcs[recv_idxs]
        arr_dict['recv_dsts'] = dsts[recv_idxs]

        return arr_dict
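A toy illustration (made-up values) of the local/send/recv split computed above:

import numpy as np

rank_dsts = np.array([0, 0, 1, 1])
rank_srcs = np.array([0, 1, 0, 1])
t_rank = 0
local_idxs = np.where((rank_dsts == t_rank) & (rank_srcs == t_rank))[0]  # [0]: dst and src both local
send_idxs  = np.where((rank_dsts != t_rank) & (rank_srcs == t_rank))[0]  # [2]: src local, dst remote
recv_idxs  = np.where((rank_dsts == t_rank) & (rank_srcs != t_rank))[0]  # [1]: dst local, src remote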
Code Example #4
    def __init__(self, myrank, spfile, ranks, lids):
        # Classify destination, source, and weight entries from the sparse matrix
        logger.debug('Classify the meta indices grouped for rank%d'%myrank)

        dsts = spfile.dsts
        srcs = spfile.srcs
        wgts = spfile.wgts

        rank_dsts = ranks[dsts]                 # rank number of destinations
        rank_srcs = ranks[srcs]                 # rank number of sources
        myrank_dsts = (rank_dsts == myrank)     # bool type array
        myrank_srcs = (rank_srcs == myrank)

        local_idxs = np.where( myrank_dsts & myrank_srcs )[0]
        send_idxs = np.where( np.invert(myrank_dsts) & myrank_srcs )[0]
        recv_idxs = np.where( myrank_dsts & np.invert(myrank_srcs) )[0]

        dsw_list = [(dsts[i],srcs[i],wgts[i]) for i in local_idxs]
        rdsw_list = [(rank_dsts[i],dsts[i],srcs[i],wgts[i]) for i in send_idxs]
        rds_list = [(rank_srcs[i],dsts[i],srcs[i]) for i in recv_idxs]


        # packaging variables
        self.data_dict = data_dict = dict()

        data_dict['myrank'] = myrank
        data_dict['lids'] = lids
        data_dict['dsw_list'] = dsw_list
        data_dict['rdsw_list'] = rdsw_list
        data_dict['rds_list'] = rds_list
Code Example #5
    def read_sparse_matrix(self):
        logger.debug('Read a NetCDF file as sparse matrix')

        ne = self.ne
        ngq = self.ngq
        method = self.method
        ranks = self.ranks

        # derive the data directory from this module's location
        # (mirrors the reader in Code Example #1; fdir is otherwise undefined here)
        fname = __file__.split('/')[-1]
        fdir = __file__.rstrip(fname)

        if method.upper() == 'AVG':
            # Average the boundary of elements for the Spectral Element Method
            spmat_fpath = fdir + 'spmat_avg_ne%dngq%d.nc'%(ne, ngq)

        elif method.upper() == 'COPY':
            # Copy from UP to EPs at the boundary of elements
            spmat_fpath = fdir + 'spmat_copy_ne%dngq%d.nc'%(ne, ngq)

        elif method.upper() == 'IMPVIS':
            # Implicit Viscosity
            # High-Order Elliptic Filter
            spmat_fpath = fdir + 'spmat_impvis_ne%dngq%d.nc'%(ne, ngq)

        else:
            raise ValueError("The method must be one of 'AVG', 'COPY', 'IMPVIS'")

        spmat_ncf = nc.Dataset(spmat_fpath, 'r', format='NETCDF4')
        self.spmat_size = len( spmat_ncf.dimensions['spmat_size'] )
        self.dsts = spmat_ncf.variables['dsts'][:]
        self.srcs = spmat_ncf.variables['srcs'][:]
        self.wgts = spmat_ncf.variables['weights'][:]

        self.rank_dsts = ranks[self.dsts]   # rank number of destinations
        self.rank_srcs = ranks[self.srcs]   # rank number of sources
Code Example #6
File: box_door.py Project: qybing/deep_sort_yolov3
def is_near_door(center_mass, door):
    '''
    Args:
        center_mass: (dict) person bounding boxes, keyed by track id
        door: (list) door coordinates
    Returns:
        True or False: whether anyone is near the door
    '''
    door_h_half = int(int((door[1] + door[3]) / 2) * DOOR_HIGH)
    near_door = True
    for key, value in center_mass.items():
        # tlwh1 = np.asarray(value[-1], dtype=float)
        tlwh1 = np.asarray(value, dtype=float)
        x2, y2 = tlwh1[0:2]
        x3, y3 = tlwh1[2:]
        if (door[1] < y3 < door[3] and door[0] < x3 < door[2]) or y2 <= door_h_half:
            near_door = True
            break
        else:
            near_door = False
    if near_door:
        logger.debug('person near door')
    else:
        logger.debug('person away from door')
    return near_door
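A hypothetical call matching the signature above (DOOR_HIGH comes from the project's config; all values here are made up):

center_mass = {'7': [120.0, 80.0, 180.0, 260.0]}   # track id -> (x0, y0, x1, y1)
door = [100, 60, 220, 300]                          # (x0, y0, x1, y1)
if is_near_door(center_mass, door):
    pass  # e.g. skip counting this track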
Code Example #7
File: sms_service_handler.py Project: tigerinsky/sms
    def send_sms(self, request):
        logger.info('send sms, request[%s]' % request)
        if request.mobile == '' or request.content == '':
            logger.warning('param error request[%s]' % request)
            return PARAM_ERROR

        param = self.__build_param(request)
        result = http.request(SEND_SMS_URL, param, 'GET', 10)
        if not result:
            return HTTP_REQUEST_ERROR

        logger.debug('result:%s' % result)
        root = None
        sid = request.sid
        errno = 0
        task_id = ''
        valid = 0
        dbstatus = 0
        try:
            root = ET.fromstring(result)
            status = root.find('returnstatus').text
            if status != 'Success':
                msg = root.find('message').text
                logger.warning('send failed, msg[%s]' % msg)
                errno = SEND_FAILED
            else:
                task_id = root.find('taskID').text
                dbstatus = 1
                valid = 1
        except ET.ParseError:
            logger.warning("invalid result[%s] request[%s]" % (result, request))
            errno = THIRD_SERVER_ERROR
        except Exception as e:
            logger.warning('xml parse exception[%s], result[%s] request[%s]' % (e, result, request))
Code Example #8
File: decorator.py Project: luoqingfu/ui_checker
 def wrapper(*args, **kwargs):
     t0 = time.time()
     parent_action = inspect.stack()[1][4][0].strip()
     back = func(*args, **kwargs)
     _exec_time = time.time() - t0
     logger.debug(parent_action + '---' + func.__name__ + '---' +
                  str("%.3fs" % _exec_time))
     return back
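The snippet shows only the inner wrapper. A self-contained sketch of the enclosing decorator could look like this (the name timed and the logger setup are assumptions, not the project's code):

import functools
import inspect
import logging
import time

logger = logging.getLogger(__name__)

def timed(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        t0 = time.time()
        # source line of the call site, as in the original wrapper
        parent_action = inspect.stack()[1][4][0].strip()
        back = func(*args, **kwargs)
        _exec_time = time.time() - t0
        logger.debug(parent_action + '---' + func.__name__ + '---' +
                     str("%.3fs" % _exec_time))
        return back
    return wrapper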
Code Example #9
 def setUpClass(cls):
     logger.debug("setUp...")
     # cls.driver = Chrome.normal()
     cls.driver = Chrome.headless()
     # cls.driver = webdriver.Firefox()
     cls.driver.implicitly_wait(10)
     cls.driver.maximize_window()
     page = LoginPage(cls.driver)
     page.login()
Code Example #10
File: server.py Project: lixiangyuan98/dnsrelay
    def _handle(self, data: bytes, source_addr: tuple) -> None:
        """Handle the received data.

        Parse the received data into a DNS message, look up the requested
        domain name in the local database or a foreign DNS server, and send
        the result back to the user.

        Args:

            data: Received data.
            source_addr: Source host addr.
        """

        message = Message.from_bytes(data)
        question = message.Questions[0]
        if question is None:
            logger.error('{addr} Format error'.format(addr=source_addr))
            return
        if question.QTYPE == RR_type.A:
            rdata = self._cache.get(question.get_QNAME(), 'A')
            if rdata is not None:
                if rdata != b'\x00\x00\x00\x00':
                    logger.info('{addr} Found A of {name}'.format(
                        name=question.get_QNAME(), addr=source_addr))
                    header = message.Header
                    header.RA = b'\x01'
                    header.QDCOUNT = b'\x00\x01'
                    header.ANCOUNT = b'\x00\x01'
                    header.QR = b'\x01'
                    ttl = self._cache.get_ttl(question.get_QNAME()) \
                            if self._cache.get_ttl(question.get_QNAME()) != -1 else 0
                    answer = ResourceRecord(b'\xc0\x0c', RR_type.A, RR_class.IN, ttl, 4, 
                            self._cache.get(question.get_QNAME(), 'A'))
                    response = Message(header, [question], [answer], [], []).get_bytes()
                else:
                    logger.warning('{addr} Blocked {name}'.format(
                        name=question.get_QNAME(), addr=source_addr))
                    header = message.Header
                    header.RA = b'\x01'
                    header.QDCOUNT = b'\x00\x00'
                    header.ANCOUNT = b'\x00\x00'
                    header.QR = b'\x01'
                    header.RCODE = b'\x03'
                    response = Message(header, [], [], [], []).get_bytes()
            else:
                logger.debug('{addr} Forward to remote DNS server: {name}'.format(
                    name=question.get_QNAME(), addr=source_addr))
                response = self._forward(data)
        else:
            logger.debug('{addr} Forward to remote DNS server: {name}'.format(
                name=question.get_QNAME(), addr=source_addr))
            response = self._forward(data)
        self._socket.sendto(response, source_addr)
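For orientation, a hypothetical receive loop that would dispatch into _handle (the socket setup and the server instance are assumptions; the project's actual loop may differ):

import socket

sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind(('0.0.0.0', 53))
while True:
    data, source_addr = sock.recvfrom(2048)
    server._handle(data, source_addr)   # server: an instance of the class above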
Code Example #11
    def send_to_slave(self, comm, target_rank, comm_rank):
        '''
        This routine is designed to work in a pseudo-MPI environment.
        target_rank is the real rank number.
        comm_rank is used only for communication.
        '''

        mtl = LocalMPITables(target_rank, self.spfile, self.ranks, self.lids)

        logger.debug('Send local tables to rank%d'%target_rank)
        req = comm.isend(mtl.data_dict, dest=comm_rank, tag=tag)
        logger.debug('after Send local tables to rank%d'%target_rank)

        return req
Code Example #12
    def send_to_slave(self, comm, target_rank, comm_rank):
        '''
        This routine is designed to work in a pseudo-MPI environment.
        target_rank is the real rank number.
        comm_rank is used only for communication.
        '''

        cgl = CubeGridLocalIndices(target_rank, \
                self.csfile, self.partition, self.ranks)

        logger.debug('Send local indices to rank%d'%target_rank)
        req = comm.isend(cgl.data_dict, dest=comm_rank, tag=tag)

        return req
Code Example #13
    def distribute_local_sparse_matrix(self, comm):
        logger.debug('Distribute local sparse matrices')

        if self.myrank == 0:
            req_list = list()
            
            for target_rank in xrange(1, self.nproc):
                arr_dict = self.extract_local_sparse_matrix(target_rank)
                req = comm.isend(arr_dict, dest=target_rank, tag=10)
                req_list.append(req)

            for req in req_list: req.wait()

        else:
            self.arr_dict = comm.recv(source=0, tag=10)
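The same master/worker scatter pattern in isolation, as a minimal mpi4py sketch (run with e.g. mpiexec -n 4; the payload is made up):

from mpi4py import MPI

comm = MPI.COMM_WORLD
if comm.Get_rank() == 0:
    req_list = [comm.isend({'rank': r}, dest=r, tag=10)
                for r in range(1, comm.Get_size())]
    for req in req_list:
        req.wait()
else:
    arr_dict = comm.recv(source=0, tag=10)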
Code Example #14
File: server.py Project: lixiangyuan98/dnsrelay
    def _forward(self, data: bytes) -> bytes:
        """Forward the message to remote server."""

        remote_socket = socket.socket(type=socket.SOCK_DGRAM)
        remote_socket.sendto(data, (self._remote_host, 53))
        response, _ = remote_socket.recvfrom(2048)
        logger.info('Receive from remote DNS server: {data}'.format(data=response))
        message = Message.from_bytes(response)
        for answer in message.Answers:
            if answer.TYPE == RR_type.A and answer.TTL != b'\x00\x00':
                # cache the record
                self._cache.put(message.Questions[0].get_QNAME(), 
                  'A', answer.RDATA)
                self._cache.set_ttl(message.Questions[0].get_QNAME(), 
                  int(answer.TTL.hex(), 16))
                logger.debug('Cache the record {answer}, TTL={ttl}'
                  .format(answer=ip.get_ipv4_from_bytes(answer.RDATA), 
                  ttl=int(answer.TTL.hex(), 16)))
        return response
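The TTL decoding used above, in isolation: int(b.hex(), 16) on a big-endian byte string is equivalent to int.from_bytes(b, 'big').

ttl_bytes = b'\x00\x00\x0e\x10'
assert int(ttl_bytes.hex(), 16) == int.from_bytes(ttl_bytes, 'big') == 3600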
Code Example #15
    def __init__(self, ne, ngq):
        logger.debug('Read a grid indices NetCDF file')

        self.ne = ne
        self.ngq = ngq

        fname = __file__.split('/')[-1]
        fdir = __file__.rstrip(fname)
        fpath = fdir + 'cs_grid_ne%dngq%d.nc'%(ne, ngq)
        self.ncf = ncf = nc.Dataset(fpath, 'r', format='NETCDF4')

        self.ep_size = len( ncf.dimensions['ep_size'] )
        self.up_size = len( ncf.dimensions['up_size'] )
        self.gq_indices = ncf.variables['gq_indices'][:]  # (ep_size,5)
        self.is_uvps = ncf.variables['is_uvps'][:]        # (ep_size)
        self.uids = ncf.variables['uids'][:]              # (ep_size)
        self.gids = ncf.variables['gids'][:]              # (up_size)
        self.xyzs = ncf.variables['xyzs'][:]              # (up_size,3)
        self.latlons = ncf.variables['latlons'][:]        # (up_size,2)
        self.alpha_betas = ncf.variables['alpha_betas'][:]# (ep_size,2)
Code Example #16
def start():
    manager = Mymanager()
    manager.start()
    model_filename = 'model_data/mars-small128.pb'
    CreateBoxEncoder = manager.CreateBoxEncoder(model_filename, batch_size=1)
    yolo = manager.YOLO()
    video_mes = VIDEO_NAME
    if ENVIRO and os.environ.get(DOCKER_ID):
        video_mes = video_mes[
                    int(os.environ[DOCKER_ID]) * PROCESS_NUM - PROCESS_NUM:int(os.environ[DOCKER_ID]) * PROCESS_NUM]
    logger.debug('video_url count: {}, urls: {}'.format(len(video_mes), video_mes))
    gpu_process = PROCESS_NUM
    if 0 < len(video_mes) < gpu_process:
        gpu_process = len(video_mes)
    logger.debug('process num {}'.format(gpu_process))
    if len(video_mes) > 0:
        # urls = [video_mes[i:i + step] for i in range(0, len(video_mes), step)]
        logger.debug('process loading')
        urls = video_mes
        with ProcessPoolExecutor(max_workers=gpu_process) as pool:
            for url in urls:
                pool.submit(rec_start, url, yolo, CreateBoxEncoder)
    else:
        logger.error('No stream was read')
    logger.error('game over')
Code Example #17
def producer(url, q):
    while True:
        i = 0
        logger.debug('rtmp: {} read+'.format(url))
        video_capture = cv2.VideoCapture(url)
        ret_val, image = video_capture.read()
        if not video_capture.isOpened() or not ret_val:
            logger.warning('{} url is: {} {}'.format(url, video_capture.isOpened(), ret_val))
            video_capture.release()  # release the failed capture before retrying
            continue
        logger.debug('rtmp: {} load finish'.format(url))
        while True:
            i += 1
            ret, frame = video_capture.read()
            if not ret:
                break
            if i % TIMES != 0 or image is None:
                continue
            if FX != 1 or FY != 1:
                try:
                    logger.debug('{}: {} fps image resize'.format(url, i))
                    frame = cv2.resize(frame, (0, 0), fx=FX, fy=FY)
                except Exception as e:
                    logger.error(e)
                    logger.error('image is bad')
                    break
            if q.full():
                q.get()
            q.put(frame)
            logger.info('frame {} saved to queue (size {})'.format(i, q.qsize()))
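Hypothetical wiring for the producer above: a bounded multiprocessing queue, so the q.full()/q.get() pair in producer() keeps only the freshest frames (the maxsize and URL are made up):

from multiprocessing import Process, Queue

q = Queue(maxsize=30)
p = Process(target=producer, args=('rtmp://example/stream', q), daemon=True)
p.start()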
Code Example #18
 def tearDown(self):
     params = {"id": insertUser.user_id}
     url = config.get("server",
                      "host") + self.__module + '/' + 'deleteUserById'
     result = HTTPsClient(url).send(params)
     logger.debug(json.dumps(params, ensure_ascii=False))
     logger.debug(json.dumps(url, ensure_ascii=False))
     logger.debug(json.dumps(result, ensure_ascii=False))
Code Example #19
    def __init__(self, target_rank, csfile, partition, ranks):
        logger.debug('Make local indices of rank%d'%target_rank)

        local_nelem = partition.nelems[target_rank]
        local_ep_size = local_nelem*csfile.ngq*csfile.ngq

        local_gids = np.where(ranks == target_rank)[0]        # (local_ep_size,), int32
        local_is_uvps = csfile.is_uvps[local_gids]            # (local_ep_size,), bool
        local_gq_indices = csfile.gq_indices[local_gids,:]    # (local_ep_size,5), int32
        local_alpha_betas = csfile.alpha_betas[local_gids,:]  # (local_ep_size,2), float64

        local_uids = csfile.uids[local_gids]                  # (local_ep_size,), int32
        local_xyzs = csfile.xyzs[local_uids,:]                # (local_ep_size,3), float64
        local_latlons = csfile.latlons[local_uids,:]          # (local_ep_size,2), float64

        local_up_size = local_is_uvps.sum()


        # Packaging variables
        self.data_dict = data_dict = dict()

        data_dict['ne'] = csfile.ne
        data_dict['ngq'] = csfile.ngq
        data_dict['nproc'] = partition.nproc
        data_dict['myrank'] = target_rank

        data_dict['ep_size'] = csfile.ep_size
        data_dict['up_size'] = csfile.up_size 
        data_dict['local_ep_size'] = local_ep_size
        data_dict['local_up_size'] = local_up_size

        data_dict['local_gids'] = local_gids 
        data_dict['local_uids'] = local_uids
        data_dict['local_is_uvps'] = local_is_uvps
        data_dict['local_gq_indices'] = local_gq_indices
        data_dict['local_alpha_betas'] = local_alpha_betas
        data_dict['local_latlons'] = local_latlons
        data_dict['local_xyzs'] = local_xyzs
Code Example #20
File: data.py Project: EugenePY/DeepMarket
    def init(self):
        logger.debug("Initial the database")

        try:
            CREDIT = json.load(open('./util/vcap.json'))[
                'cloudantDB']['credentials']
            self.server = couchdb.Server(CREDIT['url'])
            self.server.resource.credentials = (CREDIT['username'],
                                                CREDIT['password'])
            try:
                db = self.server.create(self.db_name)
                logger.debug("Create a new database " + self.db_name)
            except Exception:
                # the database already exists, so open it instead
                db = self.server[self.db_name]
                logger.debug("Use existing database " + self.db_name)

            logger.debug("Create database successfully")
            self.__dict__.update(db.__dict__)
        except Exception:
            print('cannot find the credentials, please bind a CloudantDB Service')

        return self
Code Example #21
    def __init__(self, comm, root=0):
        #data_dict = comm.recv(source=root, tag=tag)
        req = comm.irecv(source=root, tag=tag)
        data_dict = req.wait()

        myrank = data_dict['myrank']
        lids = data_dict['lids']
        dsw_list = data_dict['dsw_list']
        rdsw_list = data_dict['rdsw_list']
        rds_list = data_dict['rds_list']


        #-----------------------------------------------------
        # Destination, source, weight from the sparse matrix
        # Generate the meta indices grouped by rank
        # local_group: {dst:[(src,wgt),...]}
        # send_group:  {rank:{dst:[(src,wgt),...]),...}
        # recv_group:  {rank:{dst:[src,...]),...}
        #-----------------------------------------------------

        #---------------------------------------
        # local_group
        #---------------------------------------
        logger.debug('Make a local_group')

        local_group = OrderedDict([(dst, [(s,w) for (d,s,w) in val]) \
                for (dst, val) in groupby(dsw_list, lambda x:x[0])])
        local_src_size = len(dsw_list)
        local_buf_size = len(local_group)

        #---------------------------------------
        # send_group
        #---------------------------------------
        logger.debug('Make a send_group')

        sorted_rdsw_list = sorted(rdsw_list, key=lambda x:x[0])
        send_group_tmp = OrderedDict([(rank, [(d,s,w) for (r,d,s,w) in val]) \
                for (rank, val) in groupby(sorted_rdsw_list, lambda x:x[0])])

        send_group = OrderedDict()
        for rank, dsw_list in send_group_tmp.items():
            send_group[rank] = OrderedDict([(dst, [(s,w) for (d,s,w) in val]) \
                for (dst, val) in groupby(dsw_list, lambda x:x[0])])

        #---------------------------------------
        # recv_group
        #---------------------------------------
        logger.debug('Make the recv_group')

        sorted_rds_list = sorted(rds_list, key=lambda x:x[0])
        recv_group_tmp = OrderedDict([(rank, [(d,s) for (r,d,s) in val]) \
                for (rank, val) in groupby(sorted_rds_list, lambda x:x[0])])

        recv_group = OrderedDict()
        for rank, ds_list in recv_group_tmp.items():
            recv_group[rank] = OrderedDict([(dst, [s for (d,s) in val]) \
                for (dst, val) in groupby(ds_list, lambda x:x[0])])


        #-----------------------------------------------------
        # Make the send_schedule, send_dsts, send_srcs, send_wgts
        #-----------------------------------------------------
        logger.debug('Make the send_schedule, send_dsts, send_srcs, send_wgts')

        send_schedule = np.zeros((len(send_group),3), 'i4')  #(rank,start,size)

        #---------------------------------------
        # send_schedule
        #---------------------------------------
        send_buf_seq = 0
        for seq, rank in enumerate( sorted(send_group.keys()) ):
            start = send_buf_seq
            size = len(send_group[rank])
            send_schedule[seq][:] = (rank, start, size)
            send_buf_seq += size

        send_buf_size = send_buf_seq
        send_buf = np.zeros(send_buf_size, 'i4')    # global dst index

        #---------------------------------------
        # send local indices in myrank
        # directly go to the recv_buf, not to the send_buf
        #---------------------------------------
        send_dsts, send_srcs, send_wgts = list(), list(), list()
        send_seq = 0
        for dst, sw_list in local_group.items():
            for src, wgt in sw_list:
                send_dsts.append(send_seq)      # buffer index
                send_srcs.append(lids[src])     # local index
                send_wgts.append(wgt)
            send_seq += 1

        #---------------------------------------
        # send indices for the other ranks
        #---------------------------------------
        send_seq = 0
        for rank, dst_dict in send_group.items():
            for dst, sw_list in dst_dict.items():
                for src, wgt in sw_list:
                    send_dsts.append(send_seq)
                    send_srcs.append(lids[src])
                    send_wgts.append(wgt)

                send_buf[send_seq] = dst     # for diagnostics
                send_seq += 1


        #-----------------------------------------------------
        # Make the recv_schedule, recv_dsts, recv_srcs
        #-----------------------------------------------------
        logger.debug('Make the recv_schedule, recv_dsts, recv_srcs')

        recv_schedule = np.zeros((len(recv_group),3), 'i4')  #(rank,start,size)


        #---------------------------------------
        # recv_schedule
        #---------------------------------------
        recv_buf_seq = local_buf_size
        for seq, rank in enumerate( sorted(recv_group.keys()) ):
            start = recv_buf_seq
            size = len(recv_group[rank])
            recv_schedule[seq][:] = (rank, start, size)
            recv_buf_seq += size

        recv_buf_size = recv_buf_seq


        # recv indices
        recv_buf_list = list(local_group.keys())      # destinations
        for rank in recv_group.keys():
            recv_buf_list.extend( recv_group[rank].keys() )

        recv_buf = np.array(recv_buf_list, 'i4')
        assert recv_buf_size == len(recv_buf)
        unique_dsts = np.unique(recv_buf)

        recv_dsts, recv_srcs = [], []
        for dst in unique_dsts:
            for bsrc in np.where(recv_buf==dst)[0]:
                recv_dsts.append(lids[dst])     # local index
                recv_srcs.append(bsrc)          # buffer index


        #-----------------------------------------------------
        # Public variables for diagnostic
        #-----------------------------------------------------
        self.local_group = local_group
        self.send_group = send_group
        self.recv_group = recv_group

        self.send_buf = send_buf    # global dst index
        self.recv_buf = recv_buf    # global dst index


        #-----------------------------------------------------
        # Public variables
        #-----------------------------------------------------
        self.myrank         = myrank

        self.local_src_size = local_src_size
        self.send_buf_size  = send_buf_size
        self.recv_buf_size  = recv_buf_size
                                                         
        self.send_schedule  = send_schedule              # (rank,start,size)
        self.send_dsts      = np.array(send_dsts, 'i4')  # to buffer   
        self.send_srcs      = np.array(send_srcs, 'i4')  # from local  
        self.send_wgts      = np.array(send_wgts, 'f8')                
                                                                           
        self.recv_schedule  = recv_schedule              # (rank,start,size)
        self.recv_dsts      = np.array(recv_dsts, 'i4')  # to local    
        self.recv_srcs      = np.array(recv_srcs, 'i4')  # from buffer 
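One subtlety in the grouping code above: itertools.groupby only merges adjacent equal keys, which is why rdsw_list and rds_list are sorted by rank before grouping. A minimal demonstration:

from itertools import groupby

pairs = [(2, 'a'), (1, 'b'), (2, 'c')]
unsorted_groups = [(k, [v for _, v in g]) for k, g in groupby(pairs, lambda x: x[0])]
# -> [(2, ['a']), (1, ['b']), (2, ['c'])]  (the two 2-keys are NOT merged)
sorted_groups = [(k, [v for _, v in g]) for k, g in groupby(sorted(pairs), lambda x: x[0])]
# -> [(1, ['b']), (2, ['a', 'c'])]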
Code Example #22
    def save_netcdf(self, cubegrid, base_dir, target_method, nc_format):
        logger.debug('Save the MPI tables of rank%d to NetCDF'%self.myrank)

        ncf = nc.Dataset(base_dir + '/nproc%d_rank%d.nc'%(self.nproc,self.myrank), 'w', format=nc_format)
        ncf.description = 'MPI index tables with the SFC partitioning on the cubed-sphere'
        ncf.target_method = target_method

        ncf.ne = cubegrid.ne
        ncf.ngq = cubegrid.ngq
        ncf.nproc = cubegrid.nproc
        ncf.myrank = self.myrank
        ncf.ep_size = cubegrid.ep_size
        ncf.up_size = cubegrid.up_size
        ncf.local_ep_size = cubegrid.local_ep_size
        ncf.local_up_size = cubegrid.local_up_size

        ncf.createDimension('local_ep_size', cubegrid.local_ep_size)
        ncf.createDimension('send_sche_size', len(self.send_schedule))
        ncf.createDimension('recv_sche_size', len(self.recv_schedule))
        ncf.createDimension('send_size', len(self.send_dsts))
        ncf.createDimension('recv_size', len(self.recv_dsts))
        ncf.createDimension('3', 3)

        vlocal_gids = ncf.createVariable('local_gids', 'i4', ('local_ep_size',))
        vlocal_gids.long_name = 'Global index of local points'

        vbuf_sizes = ncf.createVariable('buf_sizes', 'i4', ('3',))
        vbuf_sizes.long_name = '[local_src_size, send_buf_size, recv_buf_size]'

        vsend_schedule = ncf.createVariable('send_schedule', 'i4', ('send_sche_size','3',))
        vsend_schedule.long_name = '[rank, start, size]'
        vrecv_schedule = ncf.createVariable('recv_schedule', 'i4', ('recv_sche_size','3',))
        vrecv_schedule.long_name = '[rank, start, size]'

        vsend_dsts = ncf.createVariable('send_dsts', 'i4', ('send_size',))
        vsend_dsts.long_name = 'Destination index for local and send buffer'
        vsend_srcs = ncf.createVariable('send_srcs', 'i4', ('send_size',))
        vsend_srcs.long_name = 'Source index for local and send buffer'
        vsend_wgts = ncf.createVariable('send_wgts', 'f8', ('send_size',))
        vsend_wgts.long_name = 'Weight value for local and send buffer'

        vrecv_dsts = ncf.createVariable('recv_dsts', 'i4', ('recv_size',))
        vrecv_dsts.long_name = 'Destination index for recv buffer'
        vrecv_srcs = ncf.createVariable('recv_srcs', 'i4', ('recv_size',))
        vrecv_srcs.long_name = 'Source index for recv buffer'


        vlocal_gids[:]    = self.local_gids[:]
        vbuf_sizes[:]     = (self.local_src_size, \
                             self.send_buf_size, \
                             self.recv_buf_size)
        vsend_schedule[:] = self.send_schedule[:]
        vrecv_schedule[:] = self.recv_schedule[:]
        vsend_dsts[:]     = self.send_dsts[:]
        vsend_srcs[:]     = self.send_srcs[:]
        vsend_wgts[:]     = self.send_wgts[:]
        vrecv_dsts[:]     = self.recv_dsts[:]
        vrecv_srcs[:]     = self.recv_srcs[:]

        ncf.close()

        logger.debug('Save the MPI tables of rank%d to NetCDF...OK'%self.myrank)
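A sketch of reading one of the files written above back in (the file name is an example):

import netCDF4 as nc

ncf = nc.Dataset('nproc4_rank0.nc', 'r')
local_gids = ncf.variables['local_gids'][:]
local_src_size, send_buf_size, recv_buf_size = ncf.variables['buf_sizes'][:]
send_schedule = ncf.variables['send_schedule'][:]   # rows of (rank, start, size)
ncf.close()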
Code Example #23
    def __init__(self, ne, ngq, nproc, myrank, is_rotate=False, homme_style=False):
        self.ne = ne
        self.ngq = ngq
        self.nproc = nproc
        self.myrank = myrank

        if not is_rotate:
            self.lat0 = 0
            self.lon0 = 0


        #-----------------------------------------------------
        # Read the grid indices
        #-----------------------------------------------------
        fname = __file__.split('/')[-1]
        fdir = __file__.rstrip(fname)   # data directory next to this module
        cs_fpath = fdir + 'cs_grid_ne%dngq%d.nc'%(ne, ngq)
        cs_ncf = nc.Dataset(cs_fpath, 'r', format='NETCDF4')

        ep_size = len( cs_ncf.dimensions['ep_size'] )
        up_size = len( cs_ncf.dimensions['up_size'] )
        gq_indices = cs_ncf.variables['gq_indices'][:]  # (ep_size,5)
        is_uvps = cs_ncf.variables['is_uvps'][:]        # (ep_size)
        uids = cs_ncf.variables['uids'][:]              # (ep_size)
        gids = cs_ncf.variables['gids'][:]              # (up_size)
        xyzs = cs_ncf.variables['xyzs'][:]              # (up_size,3)
        latlons = cs_ncf.variables['latlons'][:]        # (up_size,2)
        alpha_betas = cs_ncf.variables['alpha_betas'][:]# (ep_size,2)

        #mvps = cs_ncf.variables['mvps'][:]              # (ep_size,4)
        #nbrs = cs_ncf.variables['nbrs'][:]              # (up_size,8)


        #-----------------------------------------------------
        # Set the rank and local indices
        #-----------------------------------------------------
        logger.debug('Set the rank and local indices')

        partition = CubePartition(ne, nproc, homme_style)
        local_nelem = partition.nelems[myrank]

        local_ep_size = local_nelem*ngq*ngq

        local_gids = np.zeros(local_ep_size, 'i4')
        local_uids = np.zeros(local_ep_size, 'i4')
        local_is_uvps = np.zeros(local_ep_size, 'bool')
        local_gq_indices = np.zeros((local_ep_size,5), 'i4')
        local_alpha_betas = np.zeros((local_ep_size,2), 'f8')
        local_latlons = np.zeros((local_ep_size,2), 'f8')
        local_xyzs = np.zeros((local_ep_size,3), 'f8')


        # MPI rank at each grid point
        gq_eijs = gq_indices[:,:3] - 1
        idxs = gq_eijs[:,0]*ne*ne + gq_eijs[:,1]*ne + gq_eijs[:,2] 
        ranks = partition.elem_proc.ravel()[idxs]

        local_gids[:] = np.where(ranks == myrank)[0]
        local_is_uvps[:] = is_uvps[local_gids]
        local_gq_indices[:] = gq_indices[local_gids,:]
        local_alpha_betas[:] = alpha_betas[local_gids,:]

        local_uids[:] = uids[local_gids]
        local_xyzs[:] = xyzs[local_uids,:]
        local_latlons[:] = latlons[local_uids,:]


        # Local index at each grid point
        lids = np.zeros(ep_size, 'i4')
        for proc in xrange(nproc):
            local_ep_size_tmp = partition.nelems[proc]*ngq*ngq
            idxs = np.where(ranks == proc)[0]
            lids[idxs] = np.arange(local_ep_size_tmp, dtype='i4')
        

        #-----------------------------------------------------
        # Public variables
        #-----------------------------------------------------
        self.partition = partition
        self.ranks = ranks
        self.lids = lids

        self.ep_size = ep_size
        self.up_size = up_size
        self.local_ep_size = local_ep_size
        self.local_up_size = local_is_uvps.sum()
        self.local_nelem = local_nelem

        self.local_gids = local_gids
        self.local_uids = local_uids
        self.local_is_uvps = local_is_uvps
        self.local_gq_indices = local_gq_indices
        self.local_alpha_betas = local_alpha_betas
        self.local_latlons = local_latlons
        self.local_xyzs = local_xyzs
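A toy illustration of the per-rank local-index assignment above (made-up ranks array):

import numpy as np

ranks = np.array([0, 1, 0, 1, 1])
lids = np.zeros(ranks.size, 'i4')
for proc in range(2):
    idxs = np.where(ranks == proc)[0]
    lids[idxs] = np.arange(idxs.size, dtype='i4')
# lids -> [0, 0, 1, 1, 2]: each point's index within its own rank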
Code Example #24
    def __init__(self, cubegrid, method):
        self.cubegrid = cubegrid
        self.method = method        # method represented by the sparse matrix

        self.ne = ne = cubegrid.ne
        self.ngq = ngq = cubegrid.ngq
        self.nproc = nproc = cubegrid.nproc
        self.myrank = myrank = cubegrid.myrank
        self.ranks = ranks = cubegrid.ranks
        self.lids = lids = cubegrid.lids
        

        #-----------------------------------------------------
        # Read the sparse matrix
        #-----------------------------------------------------
        if method.upper() == 'AVG':
            # Average the boundary of elements for the Spectral Element Method
            spmat_fpath = fdir + 'spmat_avg_ne%dngq%d.nc'%(ne, ngq)

        elif method.upper() == 'COPY':
            # Copy from UP to EPs at the boundary of elements
            spmat_fpath = fdir + 'spmat_copy_ne%dngq%d.nc'%(ne, ngq)

        elif method.upper() == 'IMPVIS':
            # Implicit Viscosity
            # High-Order Elliptic Filter
            spmat_fpath = fdir + 'spmat_impvis_ne%dngq%d.nc'%(ne, ngq)

        else:
            raise ValueError("The method must be one of 'AVG', 'COPY', 'IMPVIS'")

        spmat_ncf = nc.Dataset(spmat_fpath, 'r', format='NETCDF4')
        spmat_size = len( spmat_ncf.dimensions['spmat_size'] )
        dsts = spmat_ncf.variables['dsts'][:]
        srcs = spmat_ncf.variables['srcs'][:]
        wgts = spmat_ncf.variables['weights'][:]


        #-----------------------------------------------------
        # Destination, source, weight from the sparse matrix
        # Generate the meta indices grouped by rank
        # local_group: {dst:[(src,wgt),...]}
        # send_group:  {rank:{dst:[(src,wgt),...]),...}
        # recv_group:  {rank:{dst:[src,...]),...}
        # All dictionaries are OrderedDicts.
        #-----------------------------------------------------
        logger.debug('Generate the meta indices grouped by rank')

        rank_dsts = ranks[dsts]                 # rank number of destinations
        rank_srcs = ranks[srcs]                 # rank number of sources
        myrank_dsts = (rank_dsts == myrank)     # bool type array
        myrank_srcs = (rank_srcs == myrank)

        local_idxs = np.where( myrank_dsts & myrank_srcs )[0]
        send_idxs = np.where( np.invert(myrank_dsts) & myrank_srcs )[0]
        recv_idxs = np.where( myrank_dsts & np.invert(myrank_srcs) )[0]

        #---------------------------------------
        # local_group
        #---------------------------------------
        local_dsts = dsts[local_idxs]
        local_srcs = srcs[local_idxs]
        local_wgts = wgts[local_idxs]
        local_src_size = local_dsts.size
        '''
        dsw_list = [(dsts[i],srcs[i],wgts[i]) for i in local_idxs]
        local_group = OrderedDict([(dst, [(s,w) for (d,s,w) in val]) \
                for (dst, val) in groupby(dsw_list, lambda x:x[0])])
        local_src_size = len(dsw_list)
        local_buf_size = len(local_group)
        '''

        #---------------------------------------
        # send_group
        #---------------------------------------
        send_ranks = rank_dsts[send_idxs]
        send_dsts = dsts[send_idxs]
        send_srcs = srcs[send_idxs]
        send_wgts = wgts[send_idxs]

        # sort by destination rank so that np.unique's return_index
        # yields contiguous per-rank slices below
        sort_idx = np.argsort(send_ranks)
        send_ranks = send_ranks[sort_idx]
        send_dsts = send_dsts[sort_idx]
        send_srcs = send_srcs[sort_idx]
        send_wgts = send_wgts[sort_idx]
        '''
        rdsw_list = [(rank_dsts[i],dsts[i],srcs[i],wgts[i]) for i in send_idxs]
        sorted_rdsw_list = sorted(rdsw_list, key=lambda x:x[0])
        send_group_tmp = OrderedDict([(rank, [(d,s,w) for (r,d,s,w) in val]) \
                for (rank, val) in groupby(sorted_rdsw_list, lambda x:x[0])])

        send_group = OrderedDict()
        for rank, dsw_list in send_group_tmp.items():
            send_group[rank] = OrderedDict([(dst, [(s,w) for (d,s,w) in val]) \
                for (dst, val) in groupby(dsw_list, lambda x:x[0])])
        '''

        #---------------------------------------
        # recv_group
        #---------------------------------------
        recv_ranks = rank_srcs[recv_idxs]
        recv_dsts = dsts[recv_idxs]
        recv_srcs = srcs[recv_idxs]
        '''
        rds_list = [(rank_srcs[i],dsts[i],srcs[i]) for i in recv_idxs]
        sorted_rds_list = sorted(rds_list, key=lambda x:x[0])
        recv_group_tmp = OrderedDict([(rank, [(d,s) for (r,d,s) in val]) \
                for (rank, val) in groupby(sorted_rds_list, lambda x:x[0])])

        recv_group = OrderedDict()
        for rank, ds_list in recv_group_tmp.items():
            recv_group[rank] = OrderedDict([(dst, [s for (d,s) in val]) \
                for (dst, val) in groupby(ds_list, lambda x:x[0])])
        '''


        #-----------------------------------------------------
        # Make the send_schedule, send_dsts, send_srcs, send_wgts
        #-----------------------------------------------------
        logger.debug('Make the send_schedule, send_dsts, send_srcs, send_wgts')

        #---------------------------------------
        # size and allocation
        #---------------------------------------
        r_uniques, r_indices, r_counts = \
                np.unique(send_ranks, return_index=True, return_counts=True)

        send_schedule_size = r_uniques.size
        send_buf_size = np.unique(send_dsts).size
        send_map_size = local_dsts.size + send_dsts.size

        send_schedule = np.zeros((send_schedule_size,3), 'i4') #(rank,start,size)
        send_dsts_map = np.zeros(send_map_size, 'i4')   # separate names: do not shadow send_dsts
        send_srcs_map = np.zeros(send_map_size, 'i4')
        send_wgts_map = np.zeros(send_map_size, 'f8')
        send_buf = np.zeros(send_buf_size, 'i4')    # global dst index

        #---------------------------------------
        # send_schedule
        #---------------------------------------
        send_buf_seq = 0
        for i, (rank, r_start, r_size) in enumerate(zip(r_uniques, r_indices, r_counts)):
            r_end = r_start + r_size

            start = send_buf_seq
            size = np.unique(send_dsts[r_start:r_end]).size
            send_schedule[i][:] = (rank, start, size)
            send_buf_seq += size

        if send_buf_size != send_buf_seq:
            logger.error("Error: send_buf_size(%d) != send_buf_seq(%d)"%(send_buf_size, send_buf_seq))
            raise SystemError

        #---------------------------------------
        # send local indices in myrank
        # directly go to the recv_buf, not to the send_buf
        #---------------------------------------
        # assumes the matrix entries are stored sorted by dst
        d_uniques, d_indices, d_counts = \
                np.unique(local_dsts, return_index=True, return_counts=True)

        seq = 0
        recv_buf_seq = 0
        for d_start, d_size in zip(d_indices, d_counts):
            d_end = d_start + d_size

            send_dsts_map[seq:seq+d_size] = recv_buf_seq
            send_srcs_map[seq:seq+d_size] = lids[local_srcs[d_start:d_end]]
            send_wgts_map[seq:seq+d_size] = local_wgts[d_start:d_end]

            seq += d_size
            recv_buf_seq += 1

        #---------------------------------------
        # send indices for the other ranks
        #---------------------------------------
        send_buf_seq = 0
        for r_start, r_size in zip(r_indices, r_counts):
            r_end = r_start + r_size

            # group this rank's entries by destination
            d_sort = np.argsort(send_dsts[r_start:r_end])
            r_dsts = send_dsts[r_start:r_end][d_sort]
            r_srcs = send_srcs[r_start:r_end][d_sort]
            r_wgts = send_wgts[r_start:r_end][d_sort]

            d_uniques, d_indices, d_counts = \
                    np.unique(r_dsts, return_index=True, return_counts=True)

            for dst, d_start, d_size in zip(d_uniques, d_indices, d_counts):
                d_end = d_start + d_size

                send_dsts_map[seq:seq+d_size] = send_buf_seq
                send_srcs_map[seq:seq+d_size] = lids[r_srcs[d_start:d_end]]
                send_wgts_map[seq:seq+d_size] = r_wgts[d_start:d_end]

                send_buf[send_buf_seq] = dst    # for diagnostics
                seq += d_size
                send_buf_seq += 1

        if seq != send_map_size:
            logger.error("Error: seq(%d) != send_map_size(%d)"%(seq, send_map_size))
            raise SystemError

        if send_buf_seq != send_buf_size:
            logger.error("Error: send_buf_seq(%d) != send_buf_size(%d)"%(send_buf_seq, send_buf_size))
            raise SystemError


        #-----------------------------------------------------
        # Make the recv_schedule, recv_dsts, recv_srcs
        #-----------------------------------------------------
        logger.debug('Make the recv_schedule, recv_dsts, recv_srcs')

        #---------------------------------------
        # sorting
        #---------------------------------------
        sort_idx = np.argsort(recv_ranks)
        recv_ranks = recv_ranks[sort_idx]
        recv_dsts = recv_dsts[sort_idx]
        recv_srcs = recv_srcs[sort_idx]

        #---------------------------------------
        # size and allocation
        #---------------------------------------
        r_uniques, r_indices, r_counts = \
                np.unique(recv_ranks, return_index=True, return_counts=True)

        recv_schedule_size = r_uniques.size
        unique_local_dsts = np.unique(local_dsts)
        recv_buf_local_size = unique_local_dsts.size
        recv_buf_size = recv_buf_local_size + sum(
                np.unique(recv_dsts[s:s+c]).size
                for s, c in zip(r_indices, r_counts))
        recv_map_size = recv_buf_size

        recv_schedule = np.zeros((recv_schedule_size,3), 'i4') #(rank,start,size)
        recv_dsts_map = np.zeros(recv_map_size, 'i4')   # separate names: do not shadow recv_dsts
        recv_srcs_map = np.zeros(recv_map_size, 'i4')
        recv_buf = np.zeros(recv_buf_size, 'i4')    # global dst index

        #---------------------------------------
        # recv_schedule
        #---------------------------------------
        recv_buf_seq = recv_buf_local_size
        for i, (rank, r_start, r_size) in enumerate(zip(r_uniques, r_indices, r_counts)):
            r_end = r_start + r_size

            start = recv_buf_seq
            size = np.unique(recv_dsts[r_start:r_end]).size
            recv_schedule[i][:] = (rank, start, size)
            recv_buf_seq += size

        if recv_buf_size != recv_buf_seq:
            logger.error("Error: recv_buf_size(%d) != recv_buf_seq(%d)"%(recv_buf_size, recv_buf_seq))
            raise SystemError

        #---------------------------------------
        # recv indices
        #---------------------------------------
        recv_buf[:recv_buf_local_size] = unique_local_dsts[:]   # destinations

        buf_seq = recv_buf_local_size
        for r_start, r_size in zip(r_indices, r_counts):
            r_end = r_start + r_size

            d_uniques = np.unique(recv_dsts[r_start:r_end])
            recv_buf[buf_seq:buf_seq+d_uniques.size] = d_uniques
            buf_seq += d_uniques.size

        seq = 0
        for dst in np.unique(recv_buf):
            for bsrc in np.where(recv_buf==dst)[0]:
                recv_dsts_map[seq] = lids[dst]      # local index
                recv_srcs_map[seq] = bsrc           # buffer index
                seq += 1


        #-----------------------------------------------------
        # Public variables for diagnostic
        #-----------------------------------------------------
        self.send_buf = send_buf    # global dst index
        self.recv_buf = recv_buf    # global dst index


        #-----------------------------------------------------
        # Public variables
        #-----------------------------------------------------
        self.spmat_size = spmat_size
        self.local_gids = cubegrid.local_gids

        self.local_src_size = local_src_size
        self.send_buf_size = send_buf_size
        self.recv_buf_size = recv_buf_size

        self.send_schedule = send_schedule          # (rank,start,size)
        self.send_dsts = send_dsts_map              # to buffer
        self.send_srcs = send_srcs_map              # from local
        self.send_wgts = send_wgts_map

        self.recv_schedule = recv_schedule          # (rank,start,size)
        self.recv_dsts = recv_dsts_map              # to local
        self.recv_srcs = recv_srcs_map              # from buffer
Code Example #25
 def tearDownClass(cls):
     logger.debug("tearDown...")
     page = BasePage(cls.driver)
     page.logout()
     cls.driver.quit()
Code Example #26
def out_or_in(center_mass, door, in_house, disappear_box, in_out_door):
    '''
    Determine whether people are entering or leaving through the door.
    Args:
        center_mass: collection of person bounding boxes, keyed by track id
        door: door coordinates
        in_house:
        disappear_box:
        in_out_door:
    '''
    door_h_half = int(int((door[1] + door[3]) / 2) * DOOR_HIGH)

    def is_out_door(key, way):
        if in_out_door['into_door_per'] > in_out_door['out_door_per']:
            if key in in_house.keys():
                del in_house[key]
            in_out_door['out_door_per'] += 1
            logger.info('{} id:{} after out of door: {}'.format(
                way, key, in_out_door['out_door_per']))
        else:
            in_out_door['into_door_per'] = in_out_door['out_door_per'] = 0

    for key, value in disappear_box.items():
        logger.debug('id:{} box length:{}'.format(key, len(value)))
        tlwh0 = np.asarray(value[0], dtype=float)
        tlwh1 = np.asarray(value[-1], dtype=float)

        dis_x0, dis_y0 = tlwh0[:2]
        dis_x1, dis_y1 = tlwh0[2:]

        dis_x2, dis_y2 = tlwh1[:2]
        dis_x3, dis_y3 = tlwh1[2:]
        near_door = (door[1] < dis_y2 < dis_y3 <= door[3]
                     and door_h_half > dis_y2)
        real_near_door = (door[1] < dis_y2 < dis_y3 <= door[3]
                          and door[0] < dis_x2 < dis_x3 < door[2])
        box0 = [dis_x0, dis_y0, dis_x1, dis_y1]
        box1 = [dis_x2, dis_y2, dis_x3, dis_y3]

        dis_inter0, dis_iou0 = iou(box0, door)
        dis_inter1, dis_iou1 = iou(box1, door)
        if dis_inter1 == 0 and len(value) < 3:
            continue
        # is_has = key not in disappear_id.keys()
        is_has = True
        x_in_door = door[0] < dis_x2 < door[2]

        if dis_inter1 >= dis_inter0 and door_h_half >= dis_y2 \
                and door_h_half >= dis_y0 and door[3] >= dis_y3 \
                and is_has:
            is_out_door(key, 'one')
        elif dis_x2 >= door[0] and dis_y2 >= door[1] and \
                door[3] >= dis_y3 and door_h_half >= dis_y2 and \
                is_has:
            is_out_door(key, 'two')
        elif door_h_half >= dis_y2 and door_h_half >= dis_y0 and \
                door[3] >= dis_y3 and is_has:
            is_out_door(key, 'three')
        elif near_door and is_has:
            is_out_door(key, 'four')
        elif real_near_door and is_has:
            is_out_door(key, 'five')
        else:
            pass

    disappear_box.clear()
    logger.debug('center_mass:{}'.format(center_mass.keys()))
    for key, value in center_mass.items():
        tlwh0 = np.asarray(value[0], dtype=float)
        tlwh1 = np.asarray(value[-1], dtype=float)

        x0, y0 = tlwh0[:2]
        x1, y1 = tlwh0[2:]

        x2, y2 = tlwh1[:2]
        x3, y3 = tlwh1[2:]

        box0 = [x0, y0, x1, y1]
        box1 = [x2, y2, x3, y3]
        inter0, iou0 = iou(box0, door)
        inter1, iou1 = iou(box1, door)
        door_wide = door[2] - door[0]
        if inter1 == 0 and inter0 > 0 and door_h_half > y0 and \
                y1 < door[3] and key not in in_house.keys():
            in_house[key] = box0
            in_out_door['into_door_per'] += 1
            logger.info('3333333 id: {} after into of door: {}'.format(
                key, in_out_door['into_door_per']))

        if inter1 == 0 and inter0 > 0 and door_h_half > y2 and \
                door[1] < y3 < door[3] and door[0] - door_wide < x0 < door[2] + door_wide and \
                key not in in_house.keys():
            in_house[key] = box0
            in_out_door['into_door_per'] += 1
            logger.info('4444444 id: {} after into of door: {}'.format(
                key, in_out_door['into_door_per']))
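iou() is not shown in this excerpt; a common (intersection_area, iou) implementation consistent with how it is called above would be the following sketch (the project's actual definition may differ):

def iou(box_a, box_b):
    # boxes are (x0, y0, x1, y1)
    xa, ya = max(box_a[0], box_b[0]), max(box_a[1], box_b[1])
    xb, yb = min(box_a[2], box_b[2]), min(box_a[3], box_b[3])
    inter = max(0.0, xb - xa) * max(0.0, yb - ya)
    area_a = (box_a[2] - box_a[0]) * (box_a[3] - box_a[1])
    area_b = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1])
    return inter, inter / float(area_a + area_b - inter)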
Code Example #27
    def make_mpi_tables(self):
        '''
        Destination, source, weight from the sparse matrix
        Generate the meta indices grouped by rank
        local_group: {dst:[(src,wgt),...]}
        send_group:  {rank:{dst:[(src,wgt),...]),...}
        recv_group:  {rank:[dst,...],...}
        All dictionaries are OrderedDicts.
        '''
        logger.debug('Make MPI tables')

        lids = self.lids
        arr_dict = self.arr_dict

        self.spmat_size = arr_dict['spmat_size']

        #---------------------------------------
        # local_group
        #---------------------------------------
        local_dsts = arr_dict['local_dsts']
        local_srcs = arr_dict['local_srcs']
        local_wgts = arr_dict['local_wgts']

        dsw_list = [(d,s,w) for d,s,w in zip(local_dsts,local_srcs,local_wgts)]
        local_group = OrderedDict([(dst, [(s,w) for (d,s,w) in val]) \
                for (dst, val) in groupby(dsw_list, lambda x:x[0])])

        local_src_size = len(dsw_list)
        local_buf_size = len(local_group)

        #---------------------------------------
        # send_group
        #---------------------------------------
        send_ranks = arr_dict['send_ranks']
        send_dsts = arr_dict['send_dsts']
        send_srcs = arr_dict['send_srcs']
        send_wgts = arr_dict['send_wgts']

        rdsw_list = [(r,d,s,w) for r,d,s,w in \
                zip(send_ranks,send_dsts,send_srcs,send_wgts)]

        sorted_rdsw_list = sorted(rdsw_list, key=lambda x:x[0])
        send_group_tmp = OrderedDict([(rank, [(d,s,w) for (r,d,s,w) in val]) \
                for (rank, val) in groupby(sorted_rdsw_list, lambda x:x[0])])

        send_group = OrderedDict()
        for rank, dsw_list in send_group_tmp.items():
            send_group[rank] = OrderedDict([(dst, [(s,w) for (d,s,w) in val]) \
                for (dst, val) in groupby(dsw_list, lambda x:x[0])])

        #---------------------------------------
        # recv_group
        #---------------------------------------
        recv_ranks = arr_dict['recv_ranks']
        recv_dsts = arr_dict['recv_dsts']

        rd_list = [(r,d) for r,d in zip(recv_ranks,recv_dsts)]

        sorted_rd_list = sorted(rd_list, key=lambda x:x[0])
        recv_group = OrderedDict([(rank, np.unique([d for (r,d) in val])) \
                for (rank, val) in groupby(sorted_rd_list, lambda x:x[0])])


        #-----------------------------------------------------
        # Make the send_schedule, send_dsts, send_srcs, send_wgts
        #-----------------------------------------------------
        logger.debug('Make the send_schedule, send_dsts, send_srcs, send_wgts')

        #---------------------------------------
        # size and allocation
        #---------------------------------------
        send_sche_size = len(send_group)
        send_buf_size = np.unique(send_dsts).size
        send_map_size = local_dsts.size + send_dsts.size

        send_schedule = np.zeros((send_sche_size,3), 'i4')  #(rank,start,size)
        send_dsts = np.zeros(send_map_size, 'i4')
        send_srcs = np.zeros(send_map_size, 'i4')
        send_wgts = np.zeros(send_map_size, 'f8')
        send_buf = np.zeros(send_buf_size, 'i4')    # global dst index

        #---------------------------------------
        # send_schedule
        #---------------------------------------
        send_buf_seq = 0
        for seq, rank in enumerate( send_group.keys() ):
            start = send_buf_seq
            size = len(send_group[rank])
            send_schedule[seq][:] = (rank, start, size)
            send_buf_seq += size

        if send_buf_size != send_buf_seq:
            logger.error("Error: send_buf_size(%d) != send_buf_seq(%d)"%(send_buf_size, send_buf_seq))
            raise SystemError

        #---------------------------------------
        # send local indices in myrank
        # directly go to the recv_buf, not to the send_buf
        #---------------------------------------
        seq = 0
        recv_buf_seq = 0
        for dst, sw_list in local_group.items():
            for src, wgt in sw_list:
                send_dsts[seq] = recv_buf_seq
                send_srcs[seq] = lids[src]
                send_wgts[seq] = wgt
                seq += 1

            recv_buf_seq += 1

        #---------------------------------------
        # send indices for the other ranks
        #---------------------------------------
        send_buf_seq = 0
        for rank, dst_dict in send_group.items():
            for dst, sw_list in dst_dict.items():
                for src, wgt in sw_list:
                    send_dsts[seq] = send_buf_seq
                    send_srcs[seq] = lids[src]
                    send_wgts[seq] = wgt
                    seq += 1

                send_buf[send_buf_seq] = dst     # for diagnostics
                send_buf_seq += 1

        if seq != send_map_size:
            logger.error("Error: seq(%d) != send_map_size(%d)"%(seq, send_map_size))
            raise SystemError

        if send_buf_seq != send_buf_size:
            logger.error("Error: send_buf_seq(%d) != send_buf_size(%d)"%(send_buf_seq, send_buf_size))
            raise SystemError

        #-----------------------------------------------------
        # Make the recv_schedule, recv_dsts, recv_srcs
        #-----------------------------------------------------
        logger.debug('Make the recv_schedule, recv_dsts, recv_srcs')

        #---------------------------------------
        # size and allocation
        #---------------------------------------
        recv_sche_size = len(recv_group)
        recv_buf_size = local_buf_size \
                + np.sum([d_unique.size for d_unique in recv_group.values()])
        recv_map_size = recv_buf_size

        recv_schedule = np.zeros((recv_sche_size,3), 'i4') #(rank,start,size)
        recv_dsts = np.zeros(recv_map_size, 'i4')
        recv_srcs = np.zeros(recv_map_size, 'i4')


        #---------------------------------------
        # recv_schedule
        #---------------------------------------
        recv_buf_seq = local_buf_size
        for seq, (rank,d_unique) in enumerate( recv_group.items() ):
            start = recv_buf_seq
            size = d_unique.size
            recv_schedule[seq][:] = (rank, start, size)
            recv_buf_seq += size

        #---------------------------------------
        # recv indices
        #---------------------------------------
        recv_buf_list = list(local_group.keys())      # destinations
        for rank, d_unique in recv_group.items():
            recv_buf_list.extend(d_unique)
        recv_buf = np.array(recv_buf_list, 'i4')

        unique_dsts = np.unique(recv_buf)
        seq = 0
        for dst in unique_dsts:
            for bsrc in np.where(recv_buf==dst)[0]:
                recv_dsts[seq] = lids[dst]      # local index
                recv_srcs[seq] = bsrc           # buffer index
                seq += 1


        #-----------------------------------------------------
        # Public variables for diagnostic
        #-----------------------------------------------------
        self.local_group = local_group
        self.send_group = send_group
        self.recv_group = recv_group

        self.send_buf = send_buf    # global dst index
        self.recv_buf = recv_buf    # global dst index


        #-----------------------------------------------------
        # Public variables
        #-----------------------------------------------------
        self.local_src_size = local_src_size
        self.send_buf_size = send_buf_size
        self.recv_buf_size = recv_buf_size

        self.send_schedule = send_schedule          # (rank,start,size)
        self.send_dsts = np.array(send_dsts, 'i4')  # to buffer
        self.send_srcs = np.array(send_srcs, 'i4')  # from local
        self.send_wgts = np.array(send_wgts, 'f8')

        self.recv_schedule = recv_schedule          # (rank,start,size)
        self.recv_dsts = np.array(recv_dsts, 'i4')  # to local
        self.recv_srcs = np.array(recv_srcs, 'i4')  # from buffer
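For reference, a minimal sketch of how these schedules might drive the actual exchange with mpi4py (the function name `exchange` and the field array `var` are hypothetical, and filling the first local_buf_size slots of the receive buffer from the local sparse-matrix part is omitted for brevity):

import numpy as np
from mpi4py import MPI

def exchange(comm, sche, var):
    # Pack the send buffer: weighted sums of local source points,
    # following send_dsts (buffer slot), send_srcs (local id), send_wgts.
    sbuf = np.zeros(sche.send_buf_size, 'f8')
    np.add.at(sbuf, sche.send_dsts, var[sche.send_srcs] * sche.send_wgts)

    # Post non-blocking sends/receives, one per (rank, start, size) row.
    rbuf = np.zeros(sche.recv_buf_size, 'f8')
    reqs = [comm.Isend(sbuf[start:start + size], dest=rank)
            for rank, start, size in sche.send_schedule]
    reqs += [comm.Irecv(rbuf[start:start + size], source=rank)
             for rank, start, size in sche.recv_schedule]
    MPI.Request.Waitall(reqs)

    # Unpack: accumulate buffer entries back onto local points,
    # following recv_dsts (local id) and recv_srcs (buffer slot).
    result = np.zeros_like(var)
    np.add.at(result, sche.recv_dsts, rbuf[sche.recv_srcs])
    return result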
コード例 #28
0
    def on_page(self, subject):
        actual_subject = self.find_element(By.XPATH,
                                           '//*[@id="iframe"]/div/h1').text
        logger.debug("page_obj subject:%s---config subject:%s"
                     % (actual_subject, subject))
        return subject in actual_subject
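A hedged sketch of the page-object wiring this method assumes (the BasePage/HomePage classes and the URL are hypothetical; find_element with a By locator is the standard Selenium 4 call):

from selenium import webdriver
from selenium.webdriver.common.by import By

class BasePage(object):
    def __init__(self, driver):
        self.driver = driver

    def find_element(self, by, locator):
        # Thin wrapper so page objects can call self.find_element(...) directly.
        return self.driver.find_element(by, locator)

class HomePage(BasePage):
    def on_page(self, subject):
        actual_subject = self.find_element(By.XPATH,
                                           '//*[@id="iframe"]/div/h1').text
        return subject in actual_subject

driver = webdriver.Chrome()
driver.get('https://example.com')  # hypothetical URL
assert HomePage(driver).on_page('Home')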
コード例 #29
0
def main(yolo, url, CreateBoxEncoder, q):
    producer = None
    if KAFKA_ON:
        ip_port = '{}:{}'.format(KAFKA_IP, KAFKA_PORT)
        producer = KafkaProducer(bootstrap_servers=ip_port)
        logger.debug('open kafka')
    # Definition of the parameters
    max_cosine_distance = 0.3
    nn_budget = None
    nms_max_overlap = 1.0
    metric = nn_matching.NearestNeighborDistanceMetric("cosine", max_cosine_distance, nn_budget)
    tracker = Tracker(metric)
    door = get_door(url)
    # init tracking state variables
    center_mass = {}
    miss_ids = []
    disappear_box = {}
    person_list = []
    in_house = {}
    in_out_door = {"out_door_per": 0, "into_door_per": 0}
    only_id = str(uuid.uuid4())
    logger.debug('rtmp: {} load finish'.format(url))
    last_person_num = 0
    last_monitor_people = 0
    while True:
        t1 = time.time()
        if q.empty():
            continue
        frame = q.get()
        image = Image.fromarray(frame[..., ::-1])  # bgr to rgb
        boxs, scores_ = yolo.detect_image(image)
        t2 = time.time()
        # print('5====={}======{}'.format(os.getpid(), round(t2 - t1, 4)))
        now = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
        logger.debug("box_num: {}".format(len(boxs)))
        features = CreateBoxEncoder.encoder(frame, boxs)
        # The upstream deep_sort demo pins every detection score to 1.0;
        # here the real YOLO confidences are kept instead:
        # detections = [Detection(bbox, 1.0, feature) for bbox, feature in zip(boxs, features)]
        detections = [Detection(bbox, scores_, feature) for bbox, scores_, feature in zip(boxs, scores_, features)]

        # Run non-maxima suppression.
        boxes = np.array([d.tlwh for d in detections])
        scores = np.array([d.confidence for d in detections])
        indices = preprocessing.non_max_suppression(boxes, nms_max_overlap, scores)
        detections = [detections[i] for i in indices]

        # Call the tracker
        tracker.predict()
        tracker.update(detections)
        # store the person ids seen in real time
        track_id_list = []
        high_score_ids = {}
        # if len(tracker.tracks) == 0:
        #     logger.info('sleep {}'.format(SLEEP_TIME))
        #     sleep(SLEEP_TIME)
        #     continue
        for track in tracker.tracks:
            # If a tracked target does not reappear within MAX_AGE (20) frames,
            # treat it as lost and stage its id for removal
            if track.time_since_update == MAX_AGE:
                miss_id = str(track.track_id)
                miss_ids.append(miss_id)
            if not track.is_confirmed() or track.time_since_update > 1:
                continue
            # If the person id already exists, append its box to center_mass;
            # otherwise create a new key (person id) whose value is the box list
            track_id = str(track.track_id)
            bbox = track.to_tlbr()
            near_door = is_near_door({track_id: bbox}, door)
            if track.score >= 0.92 and not near_door:
                high_score_ids[track_id] = [[int(bbox[0]), int(bbox[1]), int(bbox[2]), int(bbox[3])]]

            track_id_list.append(track_id)

            if track_id in center_mass:
                center_ = center_mass.get(track_id)
                if len(center_) > 49:
                    center_.pop(0)
                center_.append([int(bbox[0]), int(bbox[1]), int(bbox[2]), int(bbox[3])])
            else:
                center_mass[track_id] = [[int(bbox[0]), int(bbox[1]), int(bbox[2]), int(bbox[3])]]
            logger.info('id:{}, score:{}'.format(track_id, track.score))

        for miss_id in miss_ids:
            if miss_id in center_mass:
                disappear_box[miss_id] = center_mass[miss_id]
                del center_mass[miss_id]
        miss_ids.clear()

        # Entry/exit decision at the door
        out_or_in(center_mass, door, in_house, disappear_box, in_out_door)
        # Relatively high-confidence detections, used to report the live head count
        box_score_person = [score for score in scores_ if score > 0.72]
        person_sum = in_out_door['into_door_per'] - in_out_door['out_door_per']
        # if person_sum <= len(high_score_ids) and not near_door:
        if person_sum <= len(high_score_ids):
            # When the high-confidence count exceeds the in/out difference,
            # correct the entry count and reset the exit count to 0
            if person_sum == len(high_score_ids) == 1:
                pass
                # print('person_sum == len(high_score_ids) == 1')
            else:
                logger.warning('reset in_out_door person')
                in_out_door['out_door_per'] = 0
                in_out_door['into_door_per'] = len(high_score_ids)
                in_house.update(high_score_ids)
                # print('high score:{}'.format(high_score_ids))
                logger.warning(
                    '22222222-id: {} after into of door: {}'.format(in_house.keys(), in_out_door['into_door_per']))
                person_sum = len(high_score_ids)
        if in_out_door['into_door_per'] == in_out_door['out_door_per'] > 0:
            in_out_door['into_door_per'] = in_out_door['out_door_per'] = 0
        if len(person_list) > 100:
            person_list.pop(0)
        person_list.append(person_sum)
        # Extract the camera id from the url
        pattern = str(url)[7:].split(r"/")
        logger.debug('pattern {}'.format(pattern[VIDEO_CONDE]))
        video_id = pattern[VIDEO_CONDE]
        logger.info('object tracking cost {}'.format(time.time() - t1))
        # When the whole window is zeros, reset the in/out counters and all state dicts
        if person_list.count(0) == len(person_list) == 101:
            logger.debug('long time person is 0')
            in_out_door['into_door_per'] = 0
            in_out_door['out_door_per'] = 0
            in_house.clear()
            logger.warning('All Clear')
        # When the counts change, push the person info to the front-end module
        if (last_person_num != person_sum or last_monitor_people != len(box_score_person)) and producer:
            monitor_people_num = len(box_score_person)
            logger.debug("person-sum:{} monitor-people_num:{}".format(person_sum, monitor_people_num))
            save_to_kafka(TOPIC_SHOW, now, person_sum, url, producer, video_id, monitor_people_num, only_id)
            if last_person_num > 0 and person_sum == 0:
                only_id = str(uuid.uuid4())

            if last_person_num == 0 and person_sum > 0:
                save_to_kafka(TOPIC_NVR, now, person_sum, url, producer, video_id, len(box_score_person), only_id)

            # last_time = int(time.time())
            last_person_num = person_sum
            last_monitor_people = len(box_score_person)
        # When the condition is met, send info to the NVR module (the TOPIC_NVR send above)

        logger.info('url:{} into_door_per: {}'.format(url, in_out_door['into_door_per']))
        logger.info('url:{} out_door_per: {}'.format(url, in_out_door['out_door_per']))
        logger.info('url:{} in_house: {}'.format(url, in_house))
        logger.info('url:{} monitor_people_num: {}'.format(url, len(box_score_person)))
        logger.info('url:{} person_sum: {}'.format(url, person_sum))
        logger.info('GPU image load cost {}'.format(time.time() - t1))
        t3 = time.time()
        fps = 1.0 / round(t3 - t1, 4)
        print('6====={}======{}'.format(os.getpid(), round(round(t3 - t1, 4) * 1000, 2)))
        logger.debug("fps= %f" % fps)
コード例 #30
0
    def generateTableData(self, path, rows):
        ''' Columns: ['method', 'owning thread', 'thread PID', 'base-branch rank', 'compare-branch rank',
            'base-branch method cost', 'compare-branch method cost', 'cost diff (compare - base)',
            'cost increase ratio (%)', 'base-branch call count', 'compare-branch call count', 'rank change'] '''
        logger.debug("self.cmp_cost:\n" + str(self.cmp_cost))
        logger.debug("self.base_cost:\n" + str(self.base_cost))
        if self.base_cost != 0:
            ratio = format(
                float(self.cmp_cost - self.base_cost) / float(self.base_cost),
                '.2%')
        else:
            ratio = self.cmp_cost
        data = []
        add_rows = rows[:]  # copy the header row; aliasing rows here would double the suffix added below
        add_rows[0] = add_rows[0] + "- ratio: " + str(ratio)
        add_flag = 0
        for cmp_obj in self.order_cmp_keys:
            # cmp_obj is a method newly added in the compare branch
            if cmp_obj not in self.order_base_keys:
                add_flag = 1
                method = cmp_obj
                base_index = "-"
                cmp_index = self.order_cmp_keys.index(cmp_obj)
                base_time = 0
                cmp_time = self.order_cmp_values[cmp_index]
                cmp_call_times = self.cmp_call_times.get(cmp_obj, "-")
                cmp_thread = self.cmp_method_thread.pop(cmp_obj, "-")
                base_call_times = 0
                diff = cmp_time
                rate = format(1.0, '.2%')  # a brand-new method counts as a 100% increase
                rank_change = cmp_index
                content = (method, str(cmp_thread), str(base_index),
                           str(cmp_index), str(base_time), str(cmp_time),
                           str(diff), str(rate), str(base_call_times),
                           str(cmp_call_times), str(rank_change))
                data.append(content)
        if add_flag == 1:
            data.insert(0, add_rows)
        rows[0] = rows[0] + "- 系数: " + str(ratio)
        data.append(rows)
        for base_obj in self.order_base_keys:
            method = base_obj
            base_index = self.order_base_keys.index(base_obj)  # rank of base_obj in the base branch
            if base_obj in self.order_cmp_keys:
                # base_obj still exists in the compare branch
                cmp_index = self.order_cmp_keys.index(base_obj)
                base_call_times = self.base_call_times.get(base_obj, "-")
                cmp_call_times = self.cmp_call_times.get(base_obj, "-")
            else:
                # base_obj has been removed in the compare branch
                cmp_index = "-"
                base_call_times = self.base_call_times.get(base_obj, "-")
                cmp_call_times = 0
            base_thread = self.base_method_thread.pop(base_obj, "-")

            base_time = self.order_base_values[base_index]
            if cmp_index == "-":
                cmp_time = 0
                rank_change = base_index
            else:
                cmp_time = self.order_cmp_values[cmp_index]
                rank_change = base_index - cmp_index
            diff = cmp_time - base_time
            try:
                # -100% means base_obj has been removed in the compare branch
                rate = format(float(diff) / float(base_time), '.2%')
            except ZeroDivisionError:
                rate = "error"
            content = (method, str(base_thread), str(base_index),
                       str(cmp_index), str(base_time), str(cmp_time),
                       str(diff), str(rate), str(base_call_times),
                       str(cmp_call_times), str(rank_change))
            data.append(content)
        self.generateTable(path, rows, data)
        logger.debug("self.base_cost-self.cmp_cost:\n" +
                     str(self.base_cost - self.cmp_cost))
        logger.debug("self.base_method_thread:\n" +
                     str(self.base_method_thread))
        logger.debug("self.cmp_method_thread:\n" + str(self.cmp_method_thread))