Example #1
0
def get_data(request_meta, hash_table_ref):
    """ Extract data from the global hash given a request object.

    Walks one nested-dict level per populated request attribute
    (key name + delimiter + value) and returns the leaf datum, or
    None when the key path does not resolve.
    """

    # Traverse the hash key structure to find data
    # @TODO rather than iterate through REQUEST_META_BASE &
    #   REQUEST_META_QUERY_STR look only at existing attributes

    logging.debug(__name__ + "::Attempting to pull data for request {0}".
        format(str(request_meta)))
    for key_name in REQUEST_META_BASE + REQUEST_META_QUERY_STR:
        # Skip attributes that are missing or falsy on the request.
        key = getattr(request_meta, key_name, None)
        if not key:
            continue

        full_key = key_name + HASH_KEY_DELIMETER + key
        # BUG FIX: dict.has_key() was removed in Python 3, so the old
        # hasattr(..., 'has_key') probe always failed there and the
        # function unconditionally returned None.  Membership via `in`
        # behaves identically on Python 2 and 3.
        if isinstance(hash_table_ref, dict) and full_key in hash_table_ref:
            hash_table_ref = hash_table_ref[full_key]
        else:
            return None

    # Ensure that an interface that does not rely on keyed values is returned
    # all data must be in interfaces resembling lists.  If we are still
    # sitting on an iterable container the key path did not reach a datum.
    if not hasattr(hash_table_ref, '__iter__'):
        return hash_table_ref
    else:
        return None
Example #2
0
def user_request(user, metric):
    """ View for requesting metrics for a single user.

    Resolves a MediaWiki user name to a numeric ID (when the path
    segment looks like a name) and redirects to the equivalent
    /cohorts/ URL.  On lookup failure redirects to all_cohorts with
    error code 3.
    """

    user = str(escape(user))
    url = request.url.split(request.url_root)[1]

    # If it is a user name convert to ID
    if search(MW_UNAME_REGEX, user):
        # Extract project from query string
        # @TODO `project` should match what's in REQUEST_META_QUERY_STR
        project = request.args.get('project', 'enwiki')
        logging.debug(__name__ + '::Getting user id from name.')
        conn = dl.Connector(instance='slave')
        # SECURITY FIX: bind the user name as a query parameter instead of
        # string-formatting it into the SQL (injection risk).  The schema
        # name cannot be bound, so `project` is still formatted in -- it
        # should be validated upstream.  (%s paramstyle assumes a MySQLdb-
        # style DB-API driver -- confirm against dl.Connector.)
        conn._cur_.execute('SELECT user_id FROM {0}.user WHERE '
                           'user_name = %s'.format(project), (user,))
        try:
            user_id = str(conn._cur_.fetchone()[0])
            url = sub(user, user_id, url)
        except Exception:
            logging.error(error_codes[3])
            return redirect(url_for('all_cohorts') + '?error=3')

    url = sub('user', 'cohorts', url)
    return redirect(url)
Example #3
0
def main():
    """Application entry point: configure Qt, build the manager objects,
    expose them to QML as context properties, start the background
    threads, and run the Qt event loop.  Never returns; exits the
    process with the application's return code.
    """
    # Use Qt's virtual keyboard for on-screen text input.
    os.environ["QT_IM_MODULE"] = "qtvirtualkeyboard"

    app = QtWidgets.QApplication(sys.argv)

    # class instance
    chartManager1 = ChartManager1()
    chartManager2 = ChartManager2()
    chartManager3 = ChartManager3()
    alarmManager = AlarmManager()
    # patient = Patient()
    userInput = UserInput()
    modeSelect = ms.ModeSelect()

    # connect signal -- stop the worker objects cleanly on application quit.
    app.aboutToQuit.connect(chartManager1.stop)
    app.aboutToQuit.connect(chartManager2.stop)
    app.aboutToQuit.connect(chartManager3.stop)
    app.aboutToQuit.connect(modeSelect.stop)
    app.aboutToQuit.connect(alarmManager.stop)

    dp = 0

    # start thread
    # NOTE(review): only chartManager1 is started here, although
    # chartManager2/3 are connected to aboutToQuit above -- confirm they
    # are started elsewhere (e.g. driven from QML).
    chartManager1.start()
    modeSelect.start()
    alarmManager.start()

    engine = QtQml.QQmlApplicationEngine()
    ctx = engine.rootContext()

    # Context properties must be set before engine.load() so QML sees them
    # at component creation time.
    ctx.setContextProperty("ChartManager1", chartManager1)
    ctx.setContextProperty("ChartManager2", chartManager2)
    ctx.setContextProperty("ChartManager3", chartManager3)

    ctx.setContextProperty("ModeSelect", modeSelect)
    # ctx.setContextProperty("Patient", patient)
    ctx.setContextProperty("UserInput", userInput)
    ctx.setContextProperty("AlarmManager", alarmManager)

    ctx.setContextProperty("dp", dp)
    ctx.setContextProperty("fs", False)

    # if redis exists take the userinput
    # (PARAMS is assumed to be a JSON document in redis -- TODO confirm)
    if config.useredis:
        params = config.r.get("PARAMS")
        params = json.loads(params)
        ctx.setContextProperty("Params", params)

    if config.args.fullscreen:
        logging.debug("Runnin in full screen")
        ctx.setContextProperty("fs", True)

    # engine.load('main.qml')
    engine.load('./qml/MainQt.qml')
    # Loading can fail (e.g. QML error); exit with a non-zero code.
    if not engine.rootObjects():
        sys.exit(-1)

    sys.exit(app.exec_())
Example #4
0
def testlog(num):
    for x in range(num):


        if x%3==0:
            logging.error('错误')
        if x%4==0:
            logging.fatal('致命错误')
        else:
            logging.debug(str(x))

        sleep(3)
Example #5
0
 def _add_book_to_db(self, book: Book) -> Book:
     """Persist *book*, refresh it from the database, and return it.

     On a SQLAlchemy failure the error is logged, the HTTP status is
     set to 500, and the (unpersisted) book is returned unchanged.
     The session is always closed.
     """
     db = DataBase()
     session = Session(db.engine)
     try:
         session.add(book)
         session.commit()
         session.refresh(book)
     except SQLAlchemyError as error:
         # BUG FIX: the original logged str(SQLAlchemyError) -- the
         # exception *class*, not the caught instance -- and did so at
         # debug level; log the actual error at error level instead.
         # (Also dropped a stray logging.critical("2") debug leftover.)
         logging.error("Error in _add_book_to_db: {error}".format(
             error=str(error)))
         self.set_status(500)
     finally:
         session.close()
     return book
Example #6
0
File: obj.py Project: mirny/hc.py
 def __init__(self, netloc, *cachedirs):
     """Build a directory-listing model for *netloc* from the cache.

     Scans the cached entries found under *cachedirs* and produces
     self.rows -- (href, text, size, mtime) tuples -- plus directory
     and file counts and a formatted total size.

     NOTE(review): the encode/replace/unquote sequences below mix bytes
     and str in a Python-2 style; confirm behavior before running under
     Python 3.
     """
     logging.debug(netloc)
     # Remember whether the original host carried a "www." prefix.
     Loc.www = 'www.' if netloc.startswith('www.') else ''
     # Percent-encode the location but keep literal '?' characters.
     self.netloc = Netloc(quote(netloc.encode('utf-8'))\
                          .replace('%3F','?'))
     logging.debug(self.netloc)
     self.cacheloc = self.netloc.converted
     self.title = os.path.split(self.cacheloc)[0] + os.sep + '*.*'
     self.dirscount = 0
     self.filescount = 0
     self.sum_size = 0
     self.rows = []
     # First row is always the parent-directory ("[..]") link.
     self.rows.append(('/' + self.cacheloc.parent.converted,
                       '[..]',
                       '[DIR]',
                       ''))
     self.listcache = self.cacheloc.listdir(*cachedirs)
     for cacheitem in self.listcache.keys():
         if cacheitem.isdir:
             netitem = cacheitem.converted.encode('utf-8')
             a_href = '/' + netitem.replace('?', '%3F')
             # Label is the last path component; entries ending in '/'
             # use the component before the trailing slash.
             a_text = '[%s]' % unquote(
                 netitem.split('/')[-(1 + netitem.endswith('/'))])
             self.rows.append((a_href, a_text, '[DIR]', ''))
             self.dirscount += 1
         else:
             a_href = cacheitem.converted.url
             # A '?' marks a query-style cache name; otherwise use the
             # final path segment.  An empty name means the index page.
             netfile = a_href.split('?', 1)[1] if '?' in a_href else \
                       a_href.split('/')[-1]
             a_text = unquote(netfile.encode('utf-8')) if netfile else \
                      'INDEX'
             fpath = cacheitem.getabspath(self.listcache[cacheitem][0])
             size = os.path.getsize(fpath)
             self.sum_size += size
             fsize = formatbytes(size, config.formatbytes)
             ftime = datetime.datetime.fromtimestamp(
                 os.path.getmtime(fpath))\
                 .strftime(config.formattime)
             self.rows.append((a_href, a_text, fsize, ftime))
             self.filescount += 1
     self.rows.sort()
     self.sum_fsize = formatbytes(self.sum_size, config.formatbytes)
Example #7
0
def set_data(request_meta, data, hash_table_ref):
    """
        Given request meta-data and a dataset create a key path in the global
        hash to store the data.

        Required keys (REQUEST_META_BASE) abort with a logged error when
        missing; optional keys (REQUEST_META_QUERY_STR) are included only
        when present and truthy.
    """

    key_sig = list()

    # Build the key signature
    for key_name in REQUEST_META_BASE: # These keys must exist
        key = getattr(request_meta, key_name)
        if key:
            key_sig.append(key_name + HASH_KEY_DELIMETER + key)
        else:
            logging.error(__name__ + '::Request must include %s. '
                                     'Cannot set data %s.' % (
                key_name, str(request_meta)))
            return

    for key_name in REQUEST_META_QUERY_STR: # These keys may optionally exist
        key = getattr(request_meta, key_name, None)
        if key:
            key_sig.append(key_name + HASH_KEY_DELIMETER + key)

    logging.debug(__name__ + "::Adding data to hash @ key signature = {0}".
        format(str(key_sig)))
    # For each key in the key signature add a nested key to the hash;
    # the final key stores the data itself.
    last_item = key_sig[-1]
    for key in key_sig:
        if key != last_item:
            # BUG FIX: dict.has_key() was removed in Python 3, so the old
            # hasattr(..., 'has_key') probes always failed there and every
            # level was clobbered with a fresh OrderedDict.  isinstance/`in`
            # is equivalent on both Python 2 and 3.
            if not (isinstance(hash_table_ref, dict) and
                    key in hash_table_ref and
                    isinstance(hash_table_ref[key], dict)):
                hash_table_ref[key] = OrderedDict()

            hash_table_ref = hash_table_ref[key]
        else:
            hash_table_ref[key] = data
 def process(self, schedule_file_name=None):
     """Reload the schedule and (re)start the controller task.

     Parameters:
         schedule_file_name: path of the schedule file to read; defaults
             to the watched file (self.watch_file).

     Cancels any previously scheduled task before starting a new one on
     self.loop.
     """
     if schedule_file_name is None:
         schedule_file_name = self.watch_file
     # NOTE(review): "FileChangeHnadler" typo is inside the runtime log
     # string, so it is left untouched here.
     logging.debug('FileChangeHnadler.process: Processing {}'.format(schedule_file_name))
     schedule = read_schedule(schedule_file_name)
     #Stop current run_schedule
     if self.async_task is not None:
         logging.debug('Stopping previous async_task')
         self.async_task.cancel()
         # NOTE(review): asyncio.wait_for(...) returns a coroutine that is
         # never awaited here, so this line does not actually wait for the
         # cancellation -- confirm whether loop.run_until_complete (or an
         # await in an async context) was intended.
         asyncio.wait_for(self.async_task, 100, loop=self.loop)
         del self.async_task
         self.async_task = None
     #Start new run_schedule
     logging.debug('FileChangeHandler.process: Starting new async_task')
     self.async_task = asyncio.ensure_future(self.controller_function(schedule, self.loop, *self.args), loop=self.loop)
     logging.debug('FileChangeHandler.process: Return from processing')
     return
 def process(self, schedule_file_name=None):
     """Reload the schedule and (re)start the controller task.

     Parameters:
         schedule_file_name: path of the schedule file to read; defaults
             to the watched file (self.watch_file).

     NOTE(review): this appears to be a line-wrapped duplicate of an
     identical `process` definition earlier in this file; if both live in
     the same class, the later definition silently wins.
     """
     if schedule_file_name is None:
         schedule_file_name = self.watch_file
     # NOTE(review): "FileChangeHnadler" typo is inside the runtime log
     # string, so it is left untouched here.
     logging.debug('FileChangeHnadler.process: Processing {}'.format(
         schedule_file_name))
     schedule = read_schedule(schedule_file_name)
     #Stop current run_schedule
     if self.async_task is not None:
         logging.debug('Stopping previous async_task')
         self.async_task.cancel()
         # NOTE(review): asyncio.wait_for(...) returns a coroutine that is
         # never awaited, so the cancellation is not actually waited on --
         # confirm the intended synchronization.
         asyncio.wait_for(self.async_task, 100, loop=self.loop)
         del self.async_task
         self.async_task = None
     #Start new run_schedule
     logging.debug('FileChangeHandler.process: Starting new async_task')
     self.async_task = asyncio.ensure_future(self.controller_function(
         schedule, self.loop, *self.args),
                                             loop=self.loop)
     logging.debug('FileChangeHandler.process: Return from processing')
     return
    async def run_schedule(self, schedule, loop):
        """Drive the lights and screen through each talk's phases.

        For every row of *schedule*, computes the phase boundaries
        (starting warning, talk start, talk warning, questions, question
        warning, end), and at each boundary that is still in the future
        swaps the controller's active coroutines and sleeps until the
        boundary.  Boundaries already in the past are skipped, so joining
        mid-talk jumps straight to the current phase.  After the last row
        it idles in "empty schedule" mode for six hours.
        """
        logging.debug('run_schedule: calling controller.stop_all()')
        self.controller.stop_all() #This is currently unnecessary with the processes cancelling in the processes method. Which is better?
        for i, row in enumerate(schedule):
            logging.info('running {} at {}'.format(row['name'], row['start_time']))
            # Phase boundaries, all offsets from the row's start time.
            # config.*_WARNING values lead their corresponding boundary.
            starting_time =  row['start_time']-config.STARTING_WARNING
            start_time =     row['start_time']
            warning_time =   row['start_time'] + row['talk_length'] - config.TALK_WARNING
            questions_time = row['start_time'] + row['talk_length']
            q_warning_time = row['start_time'] + row['talk_length'] + row['question_length'] - config.QUESTION_WARNING
            end_time =       row['start_time'] + row['talk_length'] + row['question_length']

            if seconds_until(starting_time) > 0:
                logging.debug('nothing until {}'.format(start_time))
                self.controller.stop_all()
                self.controller.start([light_controls.stop, self.screen.stop(row['name'], row['title'], start_time)])
                await asyncio.sleep(seconds_until(starting_time), loop=loop)
            if seconds_until(start_time) > 0: #before talk start
                logging.debug('start until {}'.format(start_time))
                self.controller.stop_all()
                self.controller.start([light_controls.starting, self.screen.starting(row['name'], row['title'], start_time)])
                await asyncio.sleep(seconds_until(start_time), loop=loop)
            if seconds_until(warning_time) > 0: #before talk warning
                logging.debug('speaking until {}'.format(warning_time))
                self.controller.stop_all()
                self.controller.start([light_controls.speaking, self.screen.speaking(row['name'], row['title'], questions_time)])
                await asyncio.sleep(seconds_until(warning_time), loop=loop)
            if seconds_until(questions_time) > 0: #before question time
                logging.debug('speaking warning until {}'.format(questions_time))
                self.controller.stop_all()
                self.controller.start([light_controls.speaking_warning, self.screen.speaking_warning(row['name'], row['title'], questions_time)])
                await asyncio.sleep(seconds_until(questions_time), loop=loop)
            if seconds_until(q_warning_time) > 0: #before question warning
                logging.debug('questions until {}'.format(q_warning_time))
                self.controller.stop_all()
                self.controller.start([light_controls.questions, self.screen.questions(row['name'], row['title'], end_time)])
                await asyncio.sleep(seconds_until(q_warning_time), loop=loop)
            if seconds_until(end_time) > 0: #before end of talk
                logging.debug('questions warning until {}'.format(end_time))
                self.controller.stop_all()
                self.controller.start([light_controls.questions_warning, self.screen.questions_warning(row['name'], row['title'], end_time)])
                await asyncio.sleep(seconds_until(end_time), loop=loop)
            logging.debug('end of talk. stopping all coroutines')
            self.controller.stop_all()

        logging.debug('empty schedule. Disco!')
        self.controller.stop_all()
        self.controller.start([light_controls.empty_schedule, self.screen.empty_schedule()])
        await asyncio.sleep(60*60*6, loop=loop)
        self.controller.stop_all()
Example #11
0
def get_num_block_statistics():
    """Tally basic-block counts across every program's feature dirs.

    Walks config.PROGRAMS, keeps the per-binary directories matching the
    configured architectures/optimisation levels, then reads each
    functions_list.csv to build:
      * block_num_dict: {block count -> number of functions with it}
      * a list of unique function names (only its length is returned)

    Returns:
        (block_num_dict, number_of_unique_function_names)
    """
    index_uuid = dict()
    index_count = 0
    for program in config.PROGRAMS:
        dirs = os.listdir(os.path.join(config.FEA_DIR, program, \
                            config.CFG_DFG_GEMINIFEA_VULSEEKERFEA))

        logging.debug('original dirs:{}\n{}'.format(dirs, len(dirs)))

        # Keep only directories for the configured arch/opt-level combos.
        dirs = [d for d in dirs if filter_by_arch_opt_levels(d)]

        logging.debug('PROGRAMS: {}, ARCHS: {}, OPT_LEVELS: {}'.format( \
                        config.PROGRAMS, config.ARCHS, config.OPT_LEVELS))
        logging.debug('filtered dirs:{}\n{}'.format(dirs, len(dirs)))
        for d in dirs:
            index_uuid.setdefault(str(index_count), os.path.join(program, d))
            index_count += 1

        logging.debug('index_uuid: {}'.format(index_uuid))
        logging.debug('index_count: {}'.format(index_count))

    func_list_arr = []
    func_list_dict = defaultdict(list)
    block_num_dict = defaultdict(int)

    for k, v in index_uuid.items():
        program, v = v.split(os.sep)
        cur_functions_list_file = os.path.join(config.FEA_DIR, program, \
                    config.CFG_DFG_GEMINIFEA_VULSEEKERFEA, v, 'functions_list.csv')
        if not os.path.exists(cur_functions_list_file):
            logging.error('No functions_list.csv in {}'.format(v))
            # BUG FIX: without this `continue`, open() below raised on the
            # missing file right after logging, aborting the whole scan.
            continue
        with open(cur_functions_list_file, 'r') as fp:
            logging.debug('Gen dataset: {}'.format(cur_functions_list_file))
            for line in csv.reader(fp):
                if line[0] == '':
                    continue
                # line[1] is the function's basic-block count.
                block_num_dict[int(line[1])] += 1
                # Optionally keep only functions whose block count lies in
                # [block_num_min, block_num_max] (module-level globals).
                if block_num_max > 0:
                    if not (block_num_min <= int(line[1]) <= block_num_max):
                        continue
                if line[0] not in func_list_dict:
                    func_list_arr.append(line[0])
                value = os.path.join(line[4], \
                        config.CFG_DFG_GEMINIFEA_VULSEEKERFEA, line[5], line[0])
                func_list_dict[line[0]].append(value)
    return block_num_dict, len(func_list_arr)
Example #12
0
#                     # datefmt='%a, %d %b %Y %H:%M:%S',
#                     datefmt='%H:%M:%S',
#                     filename='myapp1.log',
#                     filemode='w')

from multiprocessing import Process
from time import sleep

from config import logging
from logging_t2 import testlog

if __name__ == '__main__':
    # Exercise the shared logging configuration from a child process and
    # the parent at the same time.
    worker = Process(target=testlog, args=(10, ))
    print('开始 Process')
    worker.start()

    for step in range(20):
        print('for loop:', step)

        # Multiples of 3 log an error; independently, multiples of 4 log a
        # fatal record while everything else emits one record per level.
        if step % 3 == 0:
            logging.error('错误')
        if step % 4 == 0:
            logging.fatal('致命错误')
        else:
            logging.debug('This is debug message')
            logging.info('This is info message')
            logging.warning('This is warning message')

        sleep(2)

    worker.join()
    print('结束 Process')