Example #1
    def handle_blocked(self, test, result):
        if result['custom_launchpad_bug']:
            return False
        m = re.search(r'Blocked by "(\S+)" test\.', result['comment'])
        if m:
            blocked_test_group = m.group(1)
        else:
            logger.debug('Blocked result #{0} for test {1} does '
                         'not have upstream test name in its '
                         'comments!'.format(result['id'],
                                            test['custom_test_group']))
            return False

        if not result['version']:
            logger.debug('Blocked result #{0} for test {1} does '
                         'not have version, can\'t find upstream '
                         'test case!'.format(result['id'],
                                             test['custom_test_group']))
            return False

        bug_link = None
        blocked_test = self.get_test_by_group(blocked_test_group,
                                              result['version'])
        if not blocked_test:
            return False
        logger.debug('Test {0} was blocked by failed test {1}'.format(
            test['custom_test_group'], blocked_test_group))

        blocked_results = self.project.get_results_for_test(
            blocked_test['id'])

        # Since we manually add results to failed tests with statuses
        # ProdFailed, TestFailed, etc. and attach bugs links to them,
        # we could skip original version copying. So look for test
        # results with target version, but allow to copy links to bugs
        # from other results of the same test (newer are checked first)
        if not any(br['version'] == result['version'] and
                   br['status_id'] in self.failed_statuses
                   for br in blocked_results):
            logger.debug('Did not find result for test {0} with version '
                         '{1}!'.format(blocked_test_group, result['version']))
            return False

        for blocked_result in sorted(blocked_results,
                                     key=lambda x: x['id'],
                                     reverse=True):
            if blocked_result['status_id'] not in self.failed_statuses:
                continue

            if blocked_result['custom_launchpad_bug']:
                bug_link = blocked_result['custom_launchpad_bug']
                break

        if bug_link is not None:
            result['custom_launchpad_bug'] = bug_link
            self.project.add_raw_results_for_test(test['id'], result)
            logger.info('Added bug {0} to blocked result of {1} test.'.format(
                bug_link, test['custom_test_group']))
            return bug_link
        return False
Example #2
    def discover_functions(self):
        self.self_cursor.execute("SELECT pro_oid,func_name,id FROM function_name WHERE {0}={1} AND alive".format(self.sub_fk, self.id))
        local_funcs = self.self_cursor.fetchall()
        try:
            self.prod_cursor.execute("""SELECT p.oid AS pro_oid,p.proname AS funcname,p.proretset,t.typname,l.lanname
FROM pg_proc p
LEFT JOIN pg_namespace n ON n.oid = p.pronamespace
JOIN pg_type t ON p.prorettype=t.oid
JOIN pg_language l ON p.prolang=l.oid
WHERE (p.prolang <> (12)::oid)
AND n.oid=(SELECT oid FROM pg_namespace WHERE nspname='public')""")
        except Exception as e:
            logger.error("Cannot execute function discovery query: {0}".format(e.pgerror))
            return
        prod_funcs = self.prod_cursor.fetchall()
        # Local functions with no production counterpart have been dropped.
        for l_func in local_funcs:
            for p_func in prod_funcs:
                if l_func[0] == p_func[0] and l_func[1] == p_func[1]:
                    break
            else:
                logger.info("Retired function {0} in schema {1}".format(l_func[1], self.db_fields['sch_name']))
                old_func = FunctionName(l_func[2])
#               old_func.populate()
                old_func.retire()
        # Production functions with no local counterpart are new.
        for p_func in prod_funcs:
            for l_func in local_funcs:
                if p_func[0] == l_func[0] and p_func[1] == l_func[1]:
                    break
            else:
                logger.info("Created new function: {0} in schema {1}".format(p_func[1], self.db_fields['sch_name']))
                new_func = FunctionName()
                new_func.set_fields(sn_id=self.id, pro_oid=p_func[0], func_name=p_func[1], proretset=p_func[2], prorettype=p_func[3], prolang=p_func[4])
                new_func.create()
                new_func.truncate()
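Note the for/else pattern above: the `else` block runs only when the inner loop finishes without hitting `break`, i.e. when no matching production object was found. A tiny standalone illustration:

# for/else: the else clause executes only if the loop was never broken.
for item in [1, 2, 3]:
    if item == 99:
        break
else:
    print('no match found')  # runs, because break never fired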
Example #3
    def post(self, aid):

        # Image upload
        if self.request.files:
            files_body = self.request.files['file']
            file_ = files_body[0]
            # Handle the file extension
            file_extension = parse_file_extension(file_)

            # Create the upload directory
            base_dir = config.UPLOADS_DIR['csv_dir']
            if not os.path.exists(base_dir):
                os.makedirs(base_dir)
            logger.info('new dir ---------- {}'.format(base_dir))

            new_file_name = '{}{}'.format(time.time(), file_extension)
            new_file = os.path.join(base_dir, new_file_name)

            # Back up a previously uploaded file with the same name
            if os.path.isfile(new_file):
                bak_file_name = '{}bak{}'.format(time.time(), file_extension)
                bak_file = os.path.join(base_dir, bak_file_name)
                os.rename(new_file, bak_file)

            # Write the uploaded body to the new file
            with open(new_file, 'w') as w:
                w.write(file_['body'])

            key = 'aid:{}'.format(aid)
            with open(new_file, 'r') as f:
                for line in csv.reader(f):
                    row = rdb.lpush(key, line[0])
                    logger.info('redis lpush key-- {} resp-- {}'.format(key, row))
        self.redirect('/admin/awards')
Example #4
    def discover_indexes(self):
        self.self_cursor.execute("SELECT obj_oid,idx_name,id FROM index_name WHERE tn_id={0} AND alive".format(self.id))
        local_idxs = self.self_cursor.fetchall()
        try:
            self.prod_cursor.execute("""SELECT i.indexrelid,c.relname,i.indisunique,i.indisprimary
FROM pg_index i
JOIN pg_class c ON i.indexrelid=c.oid
WHERE i.indrelid={0}""".format(self.db_fields['obj_oid']))
        except Exception as e:
            logger.error("Cannot execute index discovery query: {0}".format(e.pgerror))
            return
        prod_idxs = self.prod_cursor.fetchall()
        # Local indexes with no production counterpart have been dropped.
        for l_idx in local_idxs:
            for p_idx in prod_idxs:
                if l_idx[0] == p_idx[0] and l_idx[1] == p_idx[1]:
                    break
            else:
                logger.info("Retired index {0} in table {1}".format(l_idx[1], self.db_fields['tbl_name']))
                old_idx = IndexName(l_idx[2])
                old_idx.retire()
        # Production indexes with no local counterpart are new.
        for p_idx in prod_idxs:
            for l_idx in local_idxs:
                if l_idx[0] == p_idx[0] and l_idx[1] == p_idx[1]:
                    break
            else:
                logger.info("Create new index {0} in table {1}".format(p_idx[1], self.db_fields['tbl_name']))
                new_index = IndexName()
                new_index.set_fields(tn_id=self.id, obj_oid=p_idx[0], idx_name=p_idx[1], is_unique=p_idx[2], is_primary=p_idx[3])
                new_index.create()
                new_index.truncate()
Example #5
 def set_ds_info(self):
     """Log the available space for a given disk."""
     msg = "[Disk Space Available on %s] : %d percent free space - it is %s." % (
         self.__disk_name,
         self.__percent_available,
         h.get_human_val_of_disk_space(self.__available))
     logger.info(msg)
Example #6
    def message(self, msg):
        if msg['type'] not in ('chat', 'normal'):
            logger.debug('Strange message type: %(type)s' % msg)
            return
        #logger.info('Message from %(from)s: %(body)s' % msg)
        
        msg_text = msg['body'].strip()
        # msg['from'] is a JID object
        # http://sleekxmpp.com/api/xmlstream/jid.html
        from_user = msg['from'].bare 
        logger.info('FROM:' + from_user)
        logger.info('MSG:' + msg_text)

        try:
            if (from_user in settings.accept_command_from) and msg_text.startswith("$"):
                resp = commands.execute(msg_text[1:])
                msg.reply('\n'+resp).send()
            else:
                msg.reply(msg_text).send()
                #self.send_message(  mto=msg['from'],
                #                    mtype='chat',
                #                    mbody=msg_text,
                #                    mhtml='''<a href="http://www.google.co.jp">%s</a>'''% (msg_text))
        except Exception:
            exc = traceback.format_exc()
            msg.reply(exc).send()
Example #7
 def get_current_user(self):
     super(BaseRequestHandler, self).get_current_user()
     user = self.session.get('user')
     logger.info('{}: Login user {}'.format(self.__class__.__name__, user))
     if not user:
         return None
     return user
Example #8
	def connect(self):
		while not self.conn:
			self.conn = self.__connect()
			if not self.conn:
				interval = random.randint(0, s.get('reconnect_interval', MAX_INTERVAL))
				logger.info('connection will be established in %ss' % interval)
				time.sleep(interval)
Example #9
	def read(self, filename, quiet=False):
		self.filename = filename
		if not quiet:
			try:
				logger.info('processing file: %s' % filename.decode(settings.DECODING).encode(settings.ENCODING))
			except UnicodeDecodeError:
				logger.info('processing file ...')
Example #10
    def write_tree(self, dest_path):
        ''' Create a directory tree from the root node.
        write_tree(dest_path)
        Deletes the existing tree at dest_path first, if present.
        '''

        self.delete_if_dir_exists(dest_path)
        dest_path, name = os.path.split(dest_path)
        self.root.name = name

        def make(dest_path, node):
            dest_path = os.path.join(dest_path, node.name)
            if node.type == PATH_TYPES.FOLDER:
                os.makedirs(dest_path)
            elif node.type == PATH_TYPES.FILE:
                with open(dest_path, 'a'):
                    pass  # touch an empty file

            for child in node.children:
                make(dest_path, child)
        make(dest_path, self.root)
        logger.info('tree.write_tree() completed [{}]'.format(dest_path))
Example #11
	def exec_dcfg(self):

		# first exec support script to install system tables.
		try:
			logger.info("============MYSQL DEFAULT CONFIG===========")
			logger.info("install system db.")
			os.chdir(mysql_home)
			exec_command('./scripts/mysql_install_db --user=mysql')

			logger.info("copy boot script to correct directory.")
			exec_command('cp ' + mysql_boot_script + ' /etc/init.d/')

			# sed config
			exec_command('sed -i -e "46s/basedir=/basedir=\/opt\/magima\/mysql/g" /etc/init.d/mysql.server')

			exec_command('sed -i -e "47s/datadir=/datadir=\/opt\/magima\/mysql\/data/g" /etc/init.d/mysql.server')

			exec_command("/etc/init.d/mysql.server start")
			exec_command("/etc/init.d/mysql.server status")
			exec_command("/etc/init.d/mysql.server stop")
			logger.info("==============TOMCAT DEFAULT CONFIG==============")
			logger.info("copy tomcat bootscript to /etc/init.d/")
			exec_command("cp " + tomcat_bootstrap + " /etc/init.d/tomcat6")
			exec_command("sudo /etc/init.d/tomcat6 start")
			exec_command("sudo /etc/init.d/tomcat6 status")
			exec_command("sudo /etc/init.d/tomcat6 stop")

		except OSError as oserr:
			logger.error("os error: %s " % str(oserr))
			sys.exit(1)
Example #12
    def discover_tables(self):
        self.self_cursor.execute("SELECT obj_oid,tbl_name,id FROM table_name WHERE {0}={1} AND alive".format(self.sub_fk, self.id))
        local_tbls = self.self_cursor.fetchall()
        try:
            self.prod_cursor.execute("""SELECT r.oid,r.relname,
CASE WHEN h.inhrelid IS NULL THEN 'f'::boolean ELSE 't'::boolean END AS has_parent
FROM pg_class r
LEFT JOIN pg_inherits h ON r.oid=h.inhrelid
WHERE r.relkind='r'
AND r.relnamespace=(SELECT oid FROM pg_namespace WHERE nspname='public')""")
        except Exception as e:
            logger.error("Cannot execute tables discovery query: {0}".format(e.pgerror))
            return
        prod_tbls = self.prod_cursor.fetchall()
        # Local tables with no production counterpart have been dropped.
        for l_table in local_tbls:
            for p_table in prod_tbls:
                if l_table[0] == p_table[0] and l_table[1] == p_table[1]:
                    break
            else:
                logger.info("Retired table {0} in schema {1}".format(l_table[1], self.db_fields['sch_name']))
                old_table = TableName(l_table[2])
#               old_table.populate()
                old_table.retire()
        # Production tables with no local counterpart are new.
        for p_table in prod_tbls:
            for l_table in local_tbls:
                if p_table[0] == l_table[0] and p_table[1] == l_table[1]:
                    break
            else:
                logger.info("Created new table: {0} in schema {1}".format(p_table[1], self.db_fields['sch_name']))
                new_table = TableName()
                new_table.set_fields(sn_id=self.id, tbl_name=p_table[1], obj_oid=p_table[0], has_parent=p_table[2])
                new_table.create()
                new_table.truncate()
Example #13
def r_file(filename, mode=None, content="", action=""):

	is_exists_file = os.path.exists(filename)

	if action == "create":
		try:
			if is_exists_file:
				os.remove(filename)
			os.mknod(filename)
			logger.info("File created OK.")

			with open(filename, 'w+') as f:
				f.write(content)

		except OSError as e:
			logger.error("filename: %s " % filename + str(e))
			sys.exit(1)
Example #14
    def wrapper(self, *args, **kwargs):
        sign = self.get_argument('sign', None)
        if not sign:
            raise ValueError(404)
        appid = self.get_argument('appid', None)
        if not appid:
            raise ValueError(405)

        model_base = BaseModel()
        arguments = sorted(self.request.arguments.items(), key=lambda x: x[0])
        result_string = ''.join([k + v[0] for k, v in arguments if k != 'sign'])
        appsecret = model_base.getAppSercet(appid)
        if not appsecret:
            raise ValueError(405)

        def default(*args):
            raise ValueError(403)

        def md5Method(result_string, appsecret):
            return hashlib.md5(appsecret + result_string + appsecret).hexdigest()

        switch = {
            'md5': md5Method,
        }

        mysign = switch.get(self.get_argument('sign_method', None), default)(result_string, appsecret)
        logger.info("sign:%s" % mysign)
        if mysign != sign:
            raise ValueError(402)
        return func(*args, **kwargs)
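This `wrapper` references an enclosing `func`, so it is presumably the inner function of a signature-checking decorator. A minimal sketch of the assumed outer shape (the name `check_sign` is hypothetical, not from the original code):

def check_sign(func):
    # Wrap a handler method with the signature validation above.
    def wrapper(self, *args, **kwargs):
        # ... signature checks as in Example #14 ...
        return func(self, *args, **kwargs)  # note: the original omits `self` here
    return wrapper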
Example #15
    def get_check_create_test_run(self, plan, cases):
        plan = self.project.plans.get(plan.id)
        suite_cases = self.suite.cases()
        run_name = self.get_run_name()
        runs = plan.runs.find_all(name=run_name)
        run = self.check_need_create_run(plan,
                                         runs,
                                         suite_cases)

        if run is None:
            logger.info('Run not found in plan "{}", create: "{}"'.format(
                plan.name, run_name))

            # Create new test run with cases from test suite
            suite_cases = self.get_suite_cases()

            if not suite_cases:
                logger.error('Empty test cases set.')
                return None

            # suite_cases = self.suite.cases.find(type_id=type_ids[0])
            run = Run(name=run_name,
                      description=self.run_description,
                      suite_id=self.suite.id,
                      milestone_id=self.milestone.id,
                      config_ids=[],
                      case_ids=[x.id for x in suite_cases]
                      )
            plan.add_run(run)
            logger.debug('Run created "{}"'.format(run_name))
        return run
Example #16
    def get(self):
        uid = self.current_user
        type_ = self.get_argument('type', None)
        if not type_:
            self.set_status(400)
            result = dict(code=40011, msg=u'missing "type" parameter')
            return self.jsonify(result)

        keep_info = self.keep_map(type_)

        key = "uid:{}:keep:{}".format(uid, type_)
        times = rdb.incr(key)
        if times == 1:
            rdb.expire(key, get_to_tomorrow())
        else:
            logger.warning('already tried {} times'.format(times))
            result = dict(code=40010, msg=u'You can only {} once a day!'.format(keep_info['name']))
            return self.jsonify(result)

        try:
            row = Pet.keep(uid=uid, score=keep_info['score'])
            logger.info('keep pet {}'.format(row))
        except Exception as e:
            self.set_status(500)
            logger.error('keep pet error {}'.format(e))
            result = dict(code=40012, msg=u'Server update error, please try again later!')
            return self.jsonify(result)
Example #17
	def send(self, from_addr, to_addrs, subj, content):
		msg = MIMEText(content)

		msg['Subject'] = subj
		msg['From'] = from_addr
		msg['To'] = to_addrs
		logger.info("sending messages to {0}".format(to_addrs))
		self.smtp.sendmail(from_addr, to_addrs, msg.as_string())
Example #18
 def connect(self):
     while not self.is_connected:
         self.smb_conn = self.__connect()
         if not self.smb_conn:
             interval = random.randint(0, ss.get("reconnect_interval", MAX_INTERVAL))
             logger.info("connection will be established in %ss" % interval)
             time.sleep(interval)
     return self.smb_conn
Example #19
def is_exists_rpmpkg(pkg_name):

	if pkg_name:
		command_line_str = "rpm -qa " + pkg_name
		if not os.popen(command_line_str).read():
			logger.info("%s is not exists!" % (pkg_name))
			return False
		return True
Example #20
	def __init__(self, smtphost=None, smtpport=None, smtpuser=None, smtppass=None):
		super(BaseMailSender, self).__init__()
		self.smtphost = smtphost
		self.smtpport = smtpport
		self.smtpuser = smtpuser
		self.smtppass = smtppass

		self.smtp = smtplib.SMTP(self.smtphost, self.smtpport)
		logger.info("connected to smtp server {0}".format(self.smtphost))
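A hedged usage sketch combining Examples #17 and #20, assuming `send` is a method of the same BaseMailSender class; the host and addresses below are placeholders, not values from the original code:

sender = BaseMailSender(smtphost='localhost', smtpport=25)
sender.send('from@example.com', 'to@example.com', 'Test subject', 'Test body')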
Example #21
 def export(self):
     '''Write key=value pairs from the compiled data dict, then the raw data, to self.filename.'''
     logger.info("Writing output to: %s."%self.filename)
     with open(self.filename, 'w') as f:
         for k,v in self._compile_data_in_a_single_dic().items():
             f.write('%s=%s\n'%(k,v))
         f.write('\n')
         f.write(self.data)
Example #22
    def __init__(self):
        super(Application, self).__init__(
            handlers=router.Route.get_routes(),
            **config.SETTINGS
        )
        logger.info('init the db conn {}'.format(id(db)))
        tornado.ioloop.PeriodicCallback(self.ping_db, 60 * 1000).start()

        self.session_manager = session_manager
Example #23
 def parse(self):
     logger.info("Parsing: %s"%self._filename)
     tree = ET.parse(self._filename)
     root = tree.getroot()
     xpath = "Header/Instrument"
     elems = root.findall(xpath)
     for elem in elems:
         logger.info("Elem: %s = %s"%(xpath,elem.text))
         setattr(Data, xpath, elem.text)
Example #24
def make_bug_statistics(tests_results, operation_systems):
    bugs = {}
    for os in operation_systems:
        for result in tests_results[os['distro']]:
            bug = result.launchpad_bug
            if not bug:
                continue    # Bug is not linked to the test case result.
            distro = os['distro']
            if bug not in bugs:
                bugs[bug] = {}
                bugs[bug]['distro'] = {}
                bugs[bug]['count'] = 0
                bugs[bug]['status'] = result.launchpad_bug_status
                bugs[bug]['importance'] = result.launchpad_bug_importance
                bugs[bug]['title'] = result.launchpad_bug_title
            if distro not in bugs[bug]['distro']:
                bugs[bug]['distro'][distro] = {}
            bugs[bug]['count'] += 1
            bugs[bug]['distro'][distro][result.url] = {}
            bugs[bug]['distro'][distro][result.url]['status'] = result.status
            bugs[bug]['distro'][distro][result.url]['group'] = result.group

    bugs_sorted = sorted(bugs.keys(), key=lambda x: bugs[x]['count'],
                         reverse=True)

    if bugs_sorted:
        bugs_link_file = os_path.join(LOGS_DIR, 'bugs_link_stat.html')
        with open(bugs_link_file, 'w') as fout:
            fout.write("<b>Summary of bugs in TestRail at {0}</b><br>"
                       .format(time.strftime("%c")))
            for bug in bugs_sorted:
                jresults = ""
                for distro in bugs[bug]['distro'].keys():
                    jresults += " {0}: ".format(distro)
                    num = 1
                    bugs_distro = bugs[bug]['distro'][distro]
                    for res in bugs_distro:
                        jresults += (
                            '<a href={res} title="{hint}">{num}</a> '
                            .format(res=res,
                                    hint=bugs_distro[res]['group'],
                                    num=num))
                        num += 1
                line = ('[{affected} test case(s)] [{importance}] [{status}] '
                        '<a href="{link}">{title}</a> [{jresults}]<br>\n'
                        .format(affected=bugs[bug]['count'],
                                importance=bugs[bug]['importance'],
                                status=bugs[bug]['status'],
                                link=bug,
                                title=bugs[bug]['title'],
                                jresults=jresults))
                fout.write(line)

        logger.info("Bug statistics saved to: {0}".format(bugs_link_file))
    else:
        logger.info("No linked to test cases bugs found")
Example #25
	def set_up(cls, *args, **kwargs):
		key_path = kwargs.get('key_path', ds['key_path'])
		try:
			with open(key_path, 'r') as f:
				cls.key = f.read()
				logger.info("the key to be used for decrypting is located at %s" % key_path)
				return cls.key
		except IOError as e:
			logger.error("unable to access the key specified at %s" % key_path)
			raise e
Example #26
    def run(self):
        self.set_alarm_mode()
        logger.info("Daemon is going to run.")
        while True:
            for _disk in const.DISKS:
                self.calculations_disk_manager(_disk)
                self.alarm_manager()

            logger.info( "Daemon is going to sleep for a %d sec\n\n" % const.DAEMON_SLEEP)
            time.sleep(const.DAEMON_SLEEP)
Example #27
def runLadder(userid, player):
    logger.info('running the ladder for %s/%s' % (userid, player))
    challenger = (userid, player)
    
    if Result.objects.count() == 0:  # hack to handle empty ladder
        runMatch(challenger, challenger)
        return
    
    ladder = buildLadder()
    runGauntlet(challenger, ladder)
Example #28
def create_project(category):
	rm = RecordManager()
	project_id, last_source = rm.last_record_id+1, rm.last_source
	logger.info("creating a project for category: " + category.encode('utf-8'))
	try:
		file_index = re.match(settings.FILE_INDEX_PATTERN, last_source, re.UNICODE).group(1)
		src_file = settings.SOURCE_FILE_TEMPLATE.format(index=int(file_index)+1)
	except AttributeError as e:
		logger.error("source file saved in the database is not legal")
		raise e
Example #29
 def generate(self):
     for test_run in self.test_runs_stats:
         test_run_stats = test_run.bugs_statistics
         self.bugs_statistics[test_run['id']] = dict()
         for bug, tests in test_run_stats.items():
             if bug in self.bugs_statistics[test_run['id']]:
                 self.bugs_statistics[test_run['id']][bug].update(tests)
             else:
                 self.bugs_statistics[test_run['id']][bug] = tests
         logger.info('Found {0} linked bug(s)'.format(
             len(self.bugs_statistics[test_run['id']])))
Example #30
        def serve_forever():
            logger.info('starting server')
            try:
                server.start_accepting()
                server._stopped_event.wait()
            except KeyboardInterrupt:
                pass
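`server.start_accepting()` and `server._stopped_event` match the internals of a gevent server, so `server` here is presumably a gevent StreamServer created in the enclosing scope. A minimal sketch of that assumed setup (address and handler are placeholders):

from gevent.server import StreamServer

def handle(sock, address):
    sock.sendall(b'hello\n')  # trivial connection handler

server = StreamServer(('127.0.0.1', 8000), handle)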
Example #31
    def load_target_table_name(self):
        try:
            # Fetch the config row once instead of re-running the same query five times.
            row = self.conn_mysql.execute(
                'select * from t_extract_config where l_task_id = {}'.format(
                    self.get_param('taskid')))[0]
            self.source_orm_name = row[1]
            self.target_orm_name = row[2]
            self.source_table_name = row[3]
            self.target_table_name = row[4]
            self.l_auto_model = row[10]

            self.conn_mysql.close()

        except Exception:
            logger.info('{} error while loading constants'.format(self.get_param('taskid')))
Example #32
 def __connect(self):
     conn_cnt = 0
     logger.info('trying to connect to sqlserver on %s:%s' %
                 (s.get('host'), s.get('port')))
     while conn_cnt < s.get('reconnect_cnt', 3):
         try:
             conn = pymssql.connect(host=s.get('host'), port=s.get('port'), user=s.get('user'),\
              password=s.get('password'), database=s.get('database'), charset=s.get('charset'))
             return conn
         except Exception:  # TODO: catch a more specific exception
             conn_cnt += 1
             logger.debug('connecting failed, times to reconnect: %d' %
                          conn_cnt)
Example #33
def to_cross_star_threshold(dataset):
    if isinstance(dataset, pd.DataFrame) and not dataset.empty:
        try:
            logger.info("variety %s dates %s: writing threshold data to table cross_star_threshold",
                        dataset['varieties'][0], dataset['date'].tolist())
            dataset.to_sql("cross_star_threshold",
                           conn,
                           if_exists='append',
                           index=False)
        except Exception:
            logger.error("table cross_star_threshold already contains this data, write failed")
        else:
            logger.info("data successfully written to table cross_star_threshold")
Example #34
def job_runner(job_id, input_notebook, output_notebook,
               output_dir, parameters, **papermill_args):
    """
    Task to execute notebooks.

    Parameters
    ----------
    job_id: str, uuid4
        the job id
    input_notebook: str
        location of input notebook to run
    output_notebook: str
        location to put output_notebook
    output_dir: str
        directory for the job's output
    parameters: dict
        notebook parameters
    papermill_args: **kwargs
        extra parameters to pass to papermill execution
    """
    log_context = dict(
        parameters=parameters, input_notebook=input_notebook,
        output_notebook=output_notebook, output_dir=output_dir,
        papermill_args=papermill_args
    )

    job_status = jobs.JobStatus.RUNNING

    # Execute Notebook
    try:
        logger.info('notebooks.executing.started', extra=log_context)

        papermill.execute_notebook(
            input_notebook,
            output_notebook,
            parameters=parameters,
            **papermill_args
        )

        job_status = jobs.JobStatus.SUCCESS
        log_context.update(dict(job_status=job_status))
        logger.info('notebooks.executing.finished', extra=log_context)

    except PapermillExecutionError as e:
        job_status = jobs.JobStatus.FAILED
        log_context.update(dict(job_status=job_status))
        logger.exception('notebooks.executing.error', extra=log_context)
        raise e

    return {
        "job_status": job_status,
        "output_notebook": output_notebook
    }
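A hedged usage sketch; the paths and parameter values below are placeholders, not values from the original code:

result = job_runner(
    job_id='job-0001',
    input_notebook='input.ipynb',
    output_notebook='output.ipynb',
    output_dir='/tmp/jobs/job-0001',
    parameters={'alpha': 0.1},
)
print(result['job_status'])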
Example #35
 def cre_sql(self):
     df1 = pd.read_sql(
         'select * from hs_trade.{} where rownum < 5'.format(
             self.source_name), engine_source)
     column_list = df1.columns.values.tolist()
     columns = ','.join(column_list)
     sql = "self.config.conn_source.execute('select {0} from hs_trade.{1}')".format(
         columns, self.source_name)
     self.sql = sql
     logger.info("generated SQL: {}".format(sql))
Example #36
    def validate_continua(self, intervals):
        last_xmax = 0
        global_xmin = 0  # should start from 0
        global_xmax = intervals[-1][settings.INTERVAL_KEY_XMAX]
        for i, interval in enumerate(intervals, start=0):
            lineno = interval[settings.INTERVAL_KEY_LINENO]
            if interval[settings.INTERVAL_KEY_XMIN] > global_xmax:
                logger.info("value xmin %f at line %d is not in the range" %
                            (interval[settings.INTERVAL_KEY_XMIN], lineno))
                intervals[i][settings.INTERVAL_KEY_XMIN] = last_xmax

            if interval[settings.INTERVAL_KEY_XMAX] > global_xmax:
                logger.info("value xmax %f at line %d is not in the range" %
                            (interval[settings.INTERVAL_KEY_XMAX], lineno))
                try:
                    next_xmin = intervals[i + 1][settings.INTERVAL_KEY_XMIN]
                    if next_xmin > global_xmax:  # however if the next one was over the global xmax again...
                        raise IndexError
                except IndexError:
                    next_xmin = last_xmax
                intervals[i][settings.INTERVAL_KEY_XMAX] = next_xmin

            if last_xmax != interval[settings.INTERVAL_KEY_XMIN]:  # broken
                logger.info("time line is broken at line %d" % lineno)
                intervals[i][settings.INTERVAL_KEY_XMIN] = last_xmax

            if interval[settings.INTERVAL_KEY_XMIN] > interval[
                    settings.INTERVAL_KEY_XMAX]:  # overlapped
                logger.info(
                    "value xmin is bigger than the value of xmax at line %d" %
                    lineno)
                intervals[i][settings.INTERVAL_KEY_XMAX] = interval[
                    settings.INTERVAL_KEY_XMIN]

            last_xmax = interval[settings.INTERVAL_KEY_XMAX]
Example #37
    def write_new_student(self, telegram: str, email: str):
        conn = self.conn_to_db()
        cursor = conn.cursor()
        if cursor:
            # email add validation
            insert_student_command = "INSERT INTO students (telegram, email) VALUES (%s, %s)"
            try:

                logger.info(
                    f"Trying execute command {insert_student_command} ")
                cursor.execute(insert_student_command, (telegram, email))
                conn.commit()
                logger.info(
                    f"Command {insert_student_command} was executed successfully"
                )
                cursor.close()
                logger.info("Cursor closed ")
                conn.close()
                logger.info("Connection closed")
                return True
            except Exception as e:
                logger.error(e)
                raise Exception(e)
        else:
            logger.error("Something wrong with Postgres - cursor is None")
            raise Exception("Something wrong with Postgres - cursor is None")
Example #38
 def check_hooked_tensor_sequence(self, name):
     if self._seq_layernames is None:
         return
     ntensors = len(self._seq_layernames)
     idx = self._seq_layernames.index(name)
     if idx == ntensors - self._hook_checked_idx - 1:
         self._hook_checked_idx += 1
         if idx == 0:
             self._hook_checked_idx = 0
     else:
         logger.info(
             'Hook check error: %s should be at index %d, but it ran at %d',
             name, self._hook_checked_idx, idx)
         raise RuntimeError('hook sequence check failed')
Example #39
async def distributor(request):
    response = {}
    pool = request.app['postgres']
    phone = request.match_info['phone'][-10:]
    lock = int(request.match_info['lock']) != 0
    if len(phone) in settings.PHONE_LENGTH:
        distributor, phone = await database.select_distributor(phone, pool)
        logger.info(f'{phone} {distributor}')
        if lock:
            await database.distributors[distributor].acquire()
        response = {'phone': phone, 'distributor': distributor, 'locked': lock}
    return web.Response(status=200,
                        text=json.dumps(response),
                        content_type='application/json')
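This handler reads `phone` and `lock` from `request.match_info`, so it is presumably registered with a route containing those path parameters. A minimal sketch of the assumed registration (the path template is hypothetical):

app.router.add_get('/distributor/{phone}/{lock}', distributor)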
Example #40
def find_element(content):
    elements = content.xpath(patterns.XPATH_LEVEL_1)
    for element in elements:
        if element.xpath(patterns.XPATH_LEVEL_2):
            continue
        element_text = "".join(
            [text.strip() for text in element.xpath("text()")])
        if element_text:
            element_path = element.getroottree().getpath(element)
            element_id = element.xpath("@id")
            element_id = element_id[0] if element_id else "Not Found"
            logger.info(
                'Element text: "{}". Element path: "{}". Element #id: "{}"\n'.
                format(element_text, element_path, element_id))
Example #41
def alarm_set():
    """Handling for the alarm-set form.

    """
    logger.info("ALARM FORM: {}".format(request.form))
    alarm_time = request.form.get("time")
    if not alarm_time:
        logger.warning("Time not received")
        return redirect('/')

    hour, minute = alarm_time.split(":")
    DB.set_alarm_time(int(hour), int(minute))

    return redirect('/')
Example #42
    def _dec(*args, **kwargs):
        blogger = kwargs.get('blogger')
        pb = kwargs.get('posterboard')

        if blogger is None:
            logger.info("Attempt to access PB without blogger o.O")
            return HttpResponseForbidden('Please specify a blogger first.')

        # Find the posterboard that matches the given title
        if pb is not None:
            pb = blogger.posterboard_set.get(title=pb)

        kwargs['posterboard'] = pb
        return func(*args, **kwargs)
Example #43
def get_existing_people():
    logger.info("Getting existing profiles.")
    q = rq_prefixes + """
    select ?p
    where
    {
        ?p a foaf:Person
    }
    """
    vstore = backend.get_store()
    out = []
    for row in vstore.query(q):
        out.append(row.p)
    return out
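The query relies on an `rq_prefixes` string defined elsewhere; a minimal sketch of what it presumably contains (only the foaf prefix is actually needed here):

rq_prefixes = """
PREFIX foaf: <http://xmlns.com/foaf/0.1/>
"""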
Example #44
def job_runner(job_id, input_notebook, output_notebook,
               output_dir, parameters, **papermill_args):
    """
    Task to execute notebooks. This task changes the working directory to
    {output_dir} and back to the original directory after the function exits,
    hence the @preserve_cwd decorator.

    Parameters
    ----------
    job_id: str, uuid4
        the job id
    input_notebook: str
        location of input notebook to run
    output_notebook: str
        location to put output_notebook
    output_dir: str
        directory the job runs in and writes output to
    parameters: dict
        notebook parameters
    papermill_args: **kwargs
        extra parameters to pass to papermill execution
    """
    log_context = dict(
        parameters=parameters, input_notebook=input_notebook,
        output_notebook=output_notebook, output_dir=output_dir,
        papermill_args=papermill_args
    )

    job_status = jobs.JobStatus.RUNNING

    try:
        logger.info('notebooks.executing.started', extra=log_context)

        _assert_path_notexist(output_dir)
        os.makedirs(output_dir)

        os.chdir(output_dir)

        papermill.execute_notebook(
            input_notebook,
            output_notebook,
            parameters=parameters,
            **papermill_args
        )

        job_status = jobs.JobStatus.SUCCESS
        logger.info('notebooks.executing.finished', extra=log_context)

    except PapermillExecutionError as e:
        job_status = jobs.JobStatus.FAILED
        logger.exception('notebooks.executing.error', extra=log_context)
        raise e
Example #45
def train_with_single(dnn,
                      dataset,
                      data_dir,
                      nworkers,
                      lr,
                      batch_size,
                      nsteps_update,
                      max_epochs,
                      num_steps=1):
    torch.cuda.set_device(0)
    trainer = DLTrainer(0,
                        nworkers,
                        dist=False,
                        batch_size=batch_size,
                        is_weak_scaling=True,
                        ngpus=1,
                        data_dir=data_dir,
                        dataset=dataset,
                        dnn=dnn,
                        lr=lr,
                        nworkers=nworkers,
                        prefix='singlegpu',
                        num_steps=num_steps)
    iters_per_epoch = trainer.get_num_of_training_samples() // (
        nworkers * batch_size * nsteps_update)

    times = []
    display = 40 if iters_per_epoch > 40 else iters_per_epoch - 1
    for epoch in range(max_epochs):
        if dnn == 'lstm':
            hidden = trainer.net.init_hidden()
        for i in range(iters_per_epoch):
            s = time.time()
            trainer.optimizer.zero_grad()
            for j in range(nsteps_update):
                if dnn == 'lstm':
                    _, hidden = trainer.train(1, hidden=hidden)
                else:
                    trainer.train(1)
            trainer.update_model()
            times.append(time.time() - s)
            if i % display == 0 and i > 0:
                time_per_iter = np.mean(times)
                throughput = batch_size * nsteps_update / time_per_iter
                logger.info(
                    'Time per iteration including communication: %f. Speed: %f images/s',
                    time_per_iter, throughput)
                trainer.log_info(time_per_iter, throughput)
                times = []
Example #46
def ISEQUAL(env):
    """
    判断该周期是是否平盘,即十字星
    :param t_data:该周期数据,类数组
    :return:
    """
    from . import KOPEN, KCLOSE, data_gap, REF, DOJI_STAR_THRESHOLD
    threshold = REF(env, DOJI_STAR_THRESHOLD)[0]
    open_price = KOPEN(env)
    close_price = KCLOSE(env)
    data = [open_price, close_price]
    d1, d2, gd, r_gd = data_gap(data)
    logger.info("开盘价:%s, 收盘价:%s, 差值:%s, 差值比:%s", d1, d2, gd, r_gd)
    result = (threshold - abs(r_gd)) >= 0
    return result.all().tolist(), ["%.6f" % r_gd[0]], threshold
Example #47
 def __init__(self, project, run_id, check_blocked=False):
     self.project = project
     self.run = self.project.get_run(run_id)
     self.tests = self.project.get_tests(run_id)
     self.results = self.get_results()
     logger.info('Found TestRun "{0}" on "{1}" with {2} tests and {3} '
                 'results'.format(self.run['name'],
                                  self.run['config'] or 'default config',
                                  len(self.tests), len(self.results)))
     self.blocked_statuses = [self.project.get_status(s)['id']
                              for s in TestRailSettings.stauses['blocked']]
     self.failed_statuses = [self.project.get_status(s)['id']
                             for s in TestRailSettings.stauses['failed']]
     self.check_blocked = check_blocked
     self._bugs_statistics = {}
Example #48
def update_user(tg_user_id, field, amount, inc=False):
    logger.info("Updating user %d's %s by %d" % (tg_user_id, field, amount))

    if field not in ['heart', 'faith', 'heart_gifted']:
        raise Exception('Unknown field %s' % field)

    if inc:
        update = {'$inc': {field: amount}}
    else:
        update = {'$set': {field: amount}}

    disciples.find_one_and_update({'tg_user_id': tg_user_id},
                                  update,
                                  upsert=True,
                                  return_document=ReturnDocument.AFTER)
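A minimal sketch of the module-level objects this function assumes; the connection URI and database/collection names are hypothetical:

from pymongo import MongoClient, ReturnDocument

client = MongoClient('mongodb://localhost:27017')
disciples = client['bot_db']['disciples']  # MongoDB collection of users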
Example #49
def handle_message(message):
    logger.info(message.text)
    if message.reply_to_message and message.from_user.id != message.reply_to_message.from_user.id:
        if message.text in ('❤️', '🖤'):
            change_field_with_reply(message, 'heart')
        else:
            change_field_with_reply(message, 'faith')
    elif message.text == '!topfaith':
        top_faith_command(message)
    elif message.text == '!topheart':
        top_heart_command(message)

    logger.info("%d, @%s (%d): %s" %
                (message.chat.id, message.from_user.username,
                 message.from_user.id, message.text))
Example #50
    def bugs_statistics(self):
        if self._bugs_statistics != {}:
            return self._bugs_statistics
        logger.info('Collecting stats for TestRun "{0}" on "{1}"...'.format(
            self.run['name'], self.run['config'] or 'default config'))

        for test in self.tests:
            logger.debug('Checking "{0}" test...'.format(
                test['title'].encode('utf8')))
            test_results = sorted(
                self.project.get_results_for_test(test['id'], self.results),
                key=lambda x: x['id'], reverse=True)

            linked_bugs = []

            for result in test_results:
                if result['status_id'] in self.blocked_statuses:
                    if self.check_blocked:
                        new_bug_link = self.handle_blocked(test, result)
                        if new_bug_link:
                            linked_bugs.append(new_bug_link)
                            break
                    if result['custom_launchpad_bug']:
                        linked_bugs.append(result['custom_launchpad_bug'])
                        break
                if result['status_id'] in self.failed_statuses \
                        and result['custom_launchpad_bug']:
                    linked_bugs.append(result['custom_launchpad_bug'])

            bug_ids = set([re.search(r'.*bug/(\d+)/?', link).group(1)
                           for link in linked_bugs
                           if re.search(r'.*bug/(\d+)/?', link)])

            for bug_id in bug_ids:
                if bug_id in self._bugs_statistics:
                    self._bugs_statistics[bug_id][test['id']] = {
                        'group': test['custom_test_group'] or 'manual',
                        'config': self.run['config'] or 'default'
                    }

                else:
                    self._bugs_statistics[bug_id] = {
                        test['id']: {
                            'group': test['custom_test_group'] or 'manual',
                            'config': self.run['config'] or 'default'
                        }
                    }
        return self._bugs_statistics
Example #51
 def run(self):
     target_data = self.load_target_data()
     if target_data is not None:
         self.len_target = len(target_data)
     self.len_source = len(self.load_source_data())
     self.compare_data()
     compare_data = self.update_and_delete()
     if compare_data is not None:
         self.delete_number = len(compare_data[0])
         self.update_number = len(compare_data[1])
         self.validate(compare_data[1])
     create_data = self.create_data()
     self.create_number = len(create_data)
     self.validate(create_data)
     self.save_datas()
     logger.info('{} extraction complete'.format(self.config.target_table_name))
Example #52
def write_file(filename, content):
    '''
    Write file to args.output_dir
    Create the directory if it doesn't exist
    '''
    output_dir = args['output_dir']
    out_filepath = os.path.join(output_dir, filename)
    if not os.path.exists(os.path.dirname(out_filepath)):
        try:
            os.makedirs(os.path.dirname(out_filepath))
        except OSError as exc:  # Guard against race condition
            if exc.errno != errno.EEXIST:
                raise
    logger.info("Writing new XML to file: %s" % out_filepath)
    with open(out_filepath, "w") as f:
        f.write(content)
Example #53
async def select_smsc(phone, pool):
    """Выбока SMS шлюза из БД"""
    phone = phone[-10:]
    phone = (settings.LOCAL_CODE + phone) if len(phone) < max(settings.PHONE_LENGTH) else phone
    ARGS = (datetime.datetime.now()-datetime.timedelta(days=1), phone[:3], int(phone[3:]), )
    logger.info(f'smsc {ARGS}')
    smsc, channel, sended = None, None, None
    with open(f'{settings.SQL_DIR}/select_smsc.sql', 'r') as sql:
        SQL = ''.join(sql.readlines())
    async with (await pool.acquire()) as db:
        async with (await db.cursor()) as c:
            await c.execute(SQL, ARGS)
            smsc = await c.fetchone()
            smsc, channel, sended = smsc[1], smsc[2] % 5060, smsc[3]
    phone = (settings.REGIO_CODE + phone)
    return smsc, channel, sended, phone
Example #54
 def call_pro(self):
     logger.info('starting stored procedure execution')
     conn = cx_Oracle.connect('{}/{}@{}/{}'.format(MD_SERVER_USER,
                                                   MD_SERVER_PASS,
                                                   MD_SERVER_ADDRESS,
                                                   MD_SERVICE_NAME))
     cursor = conn.cursor()
     cursor.callproc('p_update_t_bsc_pooldata_dbf')
     if TODAY == time.strftime('%Y%m%d', time.localtime(time.time())):
         cursor.callproc("PKG_POOL_IMPORT.SP_POOL_IMPORT_MOD_FTP", [0])
     else:
         cursor.callproc("PKG_POOL_IMPORT.SP_POOL_IMPORT_MOD_FTP", [1])
     cursor.callproc('PKG_POOL_IMPORT.SP_POOL_IMPORT_MOD_O32')
     cursor.close()
     conn.close()
     logger.info('stored procedure execution finished')
Example #55
def test_run(args):
    command = ('python ./logloss_beraf/__init__.py stability '
               '--features \"{0}/resources/test_features.csv\" '
               '--features_max_num 5 '
               '--min_beta_threshold 0.2 '
               '--annotation \"{0}/resources/test_annotation.csv\" '
               '--sample_name_column Sample_Name '
               '--class_column Type '
               '--bootstrap_iterations 10 --sampling 0.9'.format(
                   os.path.dirname(__file__)))

    logger.info("Initiating command \n %s" % command)

    os.system(command)

    logger.info("Test run finished successfuly")
Example #56
    def check_api_key(self, request):
        """Check if API Key is valid."""
        if "api_key" in request.headers:
            hash_val = hashlib.sha256(
                str.encode(request.headers.get("api_key"))).hexdigest()
            user = user_manager.all(params={"HashedAPI": hash_val})
            if not user:
                return False
            if len(user) > 1:
                logger.info(f"Hash Key collision - {hash_val}")
                return False

            matched_user = user[0]
            self.username = matched_user["Username"]
            return True
        return False
Example #57
 def OnRspUserLogin(self, pRspUserLogin, pRspInfo, nRequestID, bIsLast):
     logger.info('Trader OnRspUserLogin:{}'.format(pRspInfo))
     if pRspInfo.ErrorID == 0:  # Success
         logger.info('GetTradingDay:{}'.format(self.GetTradingDay()))
     # Send the settlement confirmation request
     self.requestID += 1
     req = ApiStruct.SettlementInfoConfirm(BrokerID=self.brokerID,
                                           InvestorID=self.userID)
     self.ReqSettlementInfoConfirm(req, self.requestID)
     # Record references
     if not self.strategy:
         return
     self.strategy.session_id = pRspUserLogin.SessionID
     self.strategy.front_id = pRspUserLogin.FrontID
     for k in self.strategy.order_refs:
         self.strategy.order_refs[k] = pRspUserLogin.MaxOrderRef
Example #58
def shape():
    shape_components = 58

    shape_model = pca.PCAModel(model_shape_file)
    texture_model = pca.PCAModel(model_texture_file)

    logger.info('using %s shape_components', shape_components)
    image_filename = '/data/imm_face_db/01-1m.jpg'

    dataset_module = import_dataset_module('ibug')

    dst_image = reconstruction.reconstruct_shape_texture(
        dataset_module, shape_model, texture_model, image_filename,
        shape_components)

    cv2.imwrite('/data/reconstructed.png', dst_image)
Example #59
async def select_distributor(phone, pool):
    """Выборка дистрибьютора из БД"""
    phone = phone[-10:]
    phone = (settings.LOCAL_CODE + phone) if len(phone) < max(settings.PHONE_LENGTH) else phone
    ARGS = (phone[:3], int(phone[3:]))
    with (await distributors_semaphore):  # Lock in case we have to hit the DB
        distributor = tuple(d[1] for d in tuple(filter(lambda x:
                                                       (x[0][0] == ARGS[0]) and
                                                       (x[0][1] <= ARGS[1]
                                                        <= x[0][2]),
                                                       distributors_cache.items())))
        if not distributor:  # Not found in the cache
            logger.info(f'{phone} is missing from the cache.')
            with open(f'{settings.SQL_DIR}/select_distributor.sql', 'r') as sql:
                SQL = ''.join(sql.readlines())
            # async with aiopg.create_pool(dsn=settings.PG_DSN) as pool:
            async with (await pool.acquire()) as db:
                async with (await db.cursor()) as c:
                    await c.execute(SQL, ARGS)
                    try:
                        distributor, cut_code, distributor_id, a, b = await c.fetchone()
                        distributors_cache.update(
                            {(ARGS[0], a, b): (distributor, cut_code, distributor_id)})
                    except Exception as e:
                        logger.info(e)
                        distributor, cut_code, distributor_id = 'inter_city', None, 0
        else:
            logger.info(f'{distributor} {phone} found in the cache.')
            distributor, cut_code, distributor_id = distributor[0]

    async with distributors_cache_semaphore:  # Lock in case we have to hit the DB
        if distributor not in distributors.keys():  # Not found in the cache
            logger.info(f'No semaphore for {distributor}')
            with open(f'{settings.SQL_DIR}/distributors.sql') as sql:
                SQL = ''.join(sql.readlines())
            async with (await pool.acquire()) as db:
                async with (await db.cursor()) as c:
                    await c.execute(SQL)
                    async for d in c:
                        if d[0] not in distributors.keys():
                            distributors[d[0]] = asyncio.BoundedSemaphore(d[1])
                        else:
                            distributors[d[0]]._value = d[1]

    phone = (settings.REGIO_CODE + phone)  # Normalize the phone number to its final form
    logger.info(f'{phone} {distributor}')
    return distributor, phone
Example #60
def ssgd_with_horovod(dnn, dataset, data_dir, nworkers, lr, batch_size, nsteps_update, max_epochs, nwpernode, pretrain, num_steps = 1):
    rank = hvd.rank()
    torch.cuda.set_device(rank%nwpernode)
    if rank != 0:
        pretrain = None
    trainer = DLTrainer(rank, nworkers, dist=False, batch_size=batch_size, is_weak_scaling=True, ngpus=1, data_dir=data_dir, dataset=dataset, dnn=dnn, lr=lr, nworkers=nworkers, prefix='allreduce', pretrain=pretrain, num_steps=num_steps, tb_writer=writer)

    init_epoch = torch.ones(1) * trainer.get_train_epoch()
    init_iter = torch.ones(1) * trainer.get_train_iter()
    trainer.set_train_epoch(int(hvd.broadcast(init_epoch, root_rank=0)[0]))
    trainer.set_train_iter(int(hvd.broadcast(init_iter, root_rank=0)[0]))

    optimizer = hvd.DistributedOptimizer(trainer.optimizer, named_parameters=trainer.net.named_parameters())
    hvd.broadcast_parameters(trainer.net.state_dict(), root_rank=0)
    trainer.update_optimizer(optimizer)
    iters_per_epoch = trainer.get_num_of_training_samples() // (nworkers * batch_size * nsteps_update)

    times = []
    display = 20 if iters_per_epoch > 20 else iters_per_epoch-1
    for epoch in range(max_epochs):
        hidden = None
        if dnn == 'lstm':
            hidden = trainer.net.init_hidden()
        for i in range(iters_per_epoch):
            s = time.time()
            optimizer.zero_grad()
            for j in range(nsteps_update):
                if j < nsteps_update - 1 and nsteps_update > 1:
                    optimizer.local = True
                else:
                    optimizer.local = False
                if dnn == 'lstm':
                    _, hidden = trainer.train(1, hidden=hidden)
                else:
                    trainer.train(1)
            if dnn == 'lstm':
                optimizer.synchronize()
                torch.nn.utils.clip_grad_norm_(trainer.net.parameters(), 0.25)
            elif dnn == 'lstman4':
                optimizer.synchronize()
                torch.nn.utils.clip_grad_norm_(trainer.net.parameters(), 400)
            trainer.update_model()
            times.append(time.time()-s)
            if i % display == 0 and i > 0: 
                time_per_iter = np.mean(times)
                logger.info('Time per iteration including communication: %f. Speed: %f images/s', time_per_iter, batch_size * nsteps_update / time_per_iter)
                times = []