Example #1
    def push_to_prd(self):
        if self.trans_files == []:
            logger.warn('no data to push')
            return False
        ip = envi['prd_sftp_ip']
        port = envi['prd_sftp_port']
        usr = envi['prd_sftp_user']
        pwd = envi['prd_sftp_pass']
        src = envi['data_src_name']
        rdir = envi['prd_data_dir']
        try:
            target = paramiko.Transport((ip, port))
            target.connect(username=usr, password=pwd)
            sftp = paramiko.SFTPClient.from_transport(target)
            # transfer each file via sftp put
            for f in self.trans_files:
                sftp.put(self.ldir + f, rdir + f)
            logger.info('pushing to prd finished')
        except Exception as e:
            logger.error(str(e))
            send_mail(src + u'自动同步失败', str(e))  # subject: "automatic sync failed"
            return False
        target.close()
        send_mail(src + u'自动同步成功', u'请使用md5文件校验数据文件*.tar.gz')  # subject: "automatic sync succeeded"; body: "verify the *.tar.gz data files with the md5 file"
        return True
Example #2
def send_mail(sub, content, receiver):
    if receiver == ['']:
        logger.warn('mail[' + sub + '] fail, empty receive list!')
        return False

    if not send_mail_int(sub, content, receiver):
        send_mail_int_nologin(sub, content, receiver)
def get_html_content(http_url):
    """
    获取网页对象
    :param http_url:地址
    :return: BeautifulSoup对象,日期字符串 as 2017/07/01
    """
    try:
        # get last month's date strings, e.g. 2017/07/01 and 2017/07/31
        last_month_start, last_month_end = get_last_month()
        post_data = [
            ("curr_id", "8849"),
            ("smlID", "300060"),
            ("st_date", last_month_start),
            ("end_date", last_month_end),
            ("interval_sec", "Daily"),
            ("sort_col", "date"),
            ("sort_ord", "DESC"),
            ("action", "historical_data")
        ]
        headers = {"X-Requested-With": "XMLHttpRequest"}

        status, html = util_urllib.post_content_utf8(http_url, post_data, headers)

        # print(status, html)
        if status == 200:
            return BeautifulSoup(html, "html.parser"), last_month_start
        else:
            logger.warn("status:" + str(status))
    except Exception as e:
        logger.error(e)
        return None, ""
    logger.error("status:" + status + " url:" + http_url)
    return None, ""
Example #4
    def pull_from_cmm(self):
        ip = envi['cmm_sftp_ip']
        port = envi['cmm_sftp_port']
        usr = envi['cmm_sftp_user']
        pwd = envi['cmm_sftp_pass']
        src = envi['data_src_name']
        p = envi['cmm_data_pattern']
        try:
            target = paramiko.Transport((ip, port))
            target.connect(username=usr, password=pwd)
            sftp = paramiko.SFTPClient.from_transport(target)
            logger.debug("connect to cmm's sftp server")
            # to find files for transfaring
            for rdir in envi['cmm_data_dir']:
                for f in sftp.listdir(rdir):
                    if f not in self.loc_files and re.match(p, f) != None:
                        self.trans_files.append(f)
                        # transfar via sftp get
                        sftp.get(rdir + f, self.ldir + f)
                        logger.debug('file: <' + str(f) + '> transfared')

            if self.trans_files == []:
                logger.warn('no data to pull')
                send_mail(src + u'未更新', u'请咨询数据提供技术人员')  # "data not updated; please contact the provider's technical staff"
            else:
                logger.info('pulling finished: ' + str(self.trans_files))
        except Exception as e:
            logger.error(str(e))
            send_mail(src + u'自动同步失败', str(e))  # "automatic sync failed"
            return False
        target.close()
        return True
Example #5
def send_mail_ext(sub, content, receiver):
    if receiver == ['']:
        logger.warn('mail[' + sub + '] fail, empty receive list!')
        return False

    if envi['smtp_server2'] == '':
        return send_mail(sub, content, receiver)

    sender = "netmon<%s>" % envi['smtp_usr2']
    msg = MIMEText(content, 'plain', 'gbk')
    msg['Subject'] = Header(sub, 'gbk')
    msg['From'] = sender

    try:
        smtp = smtplib.SMTP()
        smtp.connect(envi['smtp_server2'])
        smtp.login(envi['smtp_usr2'], envi['smtp_pwd2'])
        smtp.sendmail(sender, receiver, msg.as_string())
        smtp.quit()
    except Exception as e:
        logger.error(str(e))
        return False
    return True
Example #6
    def login(self):
        if self.name == '':
            logger.warn(self.ip + ' No device with this IP!')
            return None
        if self.login_mode == 22010:
            return self.cisco_ssh_login1()
        elif self.login_mode == 22012:
            return self.cisco_ssh_login2()
        elif self.login_mode == 22020:
            return self.h3c_ssh_login1()
        elif self.login_mode == 22022:
            return self.h3c_ssh_login2()
        elif self.login_mode == 22030:
            return self.huawei_ssh_login1()
        elif self.login_mode == 22032:
            return self.huawei_ssh_login2()
        elif self.login_mode in (23010, 23040):
            return self.cisco_tel_login1()
        elif self.login_mode in (23011, 23041):
            return self.cisco_tel_login2()
        elif self.login_mode in (23012, 23042):
            return self.cisco_tel_login3()
        elif self.login_mode in (23020, 23030):
            return self.h3c_tel_login1()
        elif self.login_mode in (23021, 23031):
            return self.h3c_tel_login2()
        elif self.login_mode in (23022, 23032):
            return self.h3c_tel_login3()
        elif self.login_mode == 22052:
            return self.junos_ssh_login1()
        elif self.login_mode == 22062:
            return self.dell_ssh_login1()
        elif self.login_mode == 23062:
            return self.dell_tel_login1()
        else:
            logger.error(self.ip + ' Error login_mode!')
            return None
Example #7
def send_mail(sub, content):
    receiver = envi['mail_receiver']
    if receiver == ['']:
        logger.warn('mail[' + sub + '] fail, empty receive list!')
        return False

    sender = "netmon<%s>" % envi['smtp_usr']
    msg = MIMEText(content, 'plain', 'gbk')
    msg['Subject'] = Header(sub, 'gbk')
    msg['From'] = sender

    try:
        smtp = smtplib.SMTP()
        smtp.connect(envi['smtp_server'])
        smtp.login(envi['smtp_usr'], envi['smtp_pwd'])
        smtp.sendmail(sender, receiver, msg.as_string())
        smtp.quit()
    except Exception as e:
        logger.error(str(e))
        return False
    else:
        logger.info('success sending mail [' + sub + '] to ' + str(receiver))
        return True
Example #8
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import threading, class_netsav
from db_fun import xgetall
from my_log import logger
from netmon_env import envi

# maximum number of worker threads
thread_count = envi['threads']

# count the valid rows in the router table
sql = "select app from router where app>1"
all = len(xgetall(sql))
if all == 0:
    logger.warn('No device for config-saving!')
    exit()


# worker function run by each thread
def c_save(ip):
    r = class_netsav.NetSav(ip)
    r.save()


start = 0
while start < all:
    sql = "select rip from router where app>1 limit %d, %d" % (start,
                                                               thread_count)
    ret = xgetall(sql)
    threads = []
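The example is cut off at this point; below is a minimal sketch of how the loop presumably continues, following the usual thread-per-row pattern (the loop body is an assumption, not part of the original snippet):

    # assumed continuation: one thread per router row in this LIMIT window
    for row in ret:
        t = threading.Thread(target=c_save, args=(row[0],))
        t.start()
        threads.append(t)
    # wait for the batch to finish before fetching the next window
    for t in threads:
        t.join()
    start += thread_count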
Example #9
# -*- coding: utf-8 -*-

import threading, class_netmon
from db_fun import xgetall
from my_log import logger
from netmon_env import envi

# maximum number of worker threads
thread_count = envi['threads']

# fetch all matching rows from the router table
sql = "select rip from router where app=3"
ret = xgetall(sql)
all = len(ret)
if all == 0:
    logger.warn('No router for monitoring!')
    exit()


# worker function run by each thread
def rt_mon(ip):
    r = class_netmon.NetMon(ip)
    r.mon()


# spawn threads to monitor the routers in batches
start = 0
end = thread_count
while start < all:
    #print '----------'
    threads = []
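This snippet is also truncated; a minimal sketch of the likely remainder, reusing the start/end batch bounds defined above (again an assumption, not original code):

    # assumed continuation: monitor the current slice of routers in parallel
    for row in ret[start:end]:
        t = threading.Thread(target=rt_mon, args=(row[0],))
        t.start()
        threads.append(t)
    for t in threads:
        t.join()
    start = end
    end += thread_count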
Example #10
    def mon(self):
        if self.target == ():
            logger.warn(self.ip + ' No target to monitor!')
            return

        if device_netmon.get(self.corp) is None:
            logger.error(self.ip + ' Error : unsupported device to monitor!')
            return

        obj = self.login()
        if obj is None:
            logger.error(self.ip +
                         ' Error : monitor failed due to login error!')
            return

        success = device_netmon.get(self.corp).get('succ')
        fail = device_netmon.get(self.corp).get('fail')
        pingCmd = device_netmon.get(self.corp).get('ping')

        list_tdes = []
        list_rtts = []
        try:
            for line in self.target:
                cmd = pingCmd % line[0]
                logger.debug(self.ip + ' ' + cmd)
                obj.sendline(cmd)
                rtt = 0
                i = obj.expect([success, fail, pexpect.TIMEOUT], timeout=10)
                if i == 2:
                    logger.error(self.ip + " Command runs abnormal!")
                    obj.close()
                    return
                if i == 1:
                    if line[2] > 0:
                        msg = self.name + line[1] + u':线路中断'  # "line down"
                        my_alert(msg, line[1])
                if i == 0:
                    rtt = int(obj.after.split(' ')[2]) + 1
                    if line[2] == 0:
                        msg = self.name + line[1] + u':线路恢复' + str(rtt) + 'ms'  # "line restored"
                        my_alert(msg, line[1])
                    elif line[2] > RTT_MAX:
                        if rtt < RTT_MAX:
                            msg = self.name + line[1] + u': 线路延时恢复' + str(
                                rtt) + 'ms'  # "line latency back to normal"
                            my_alert(msg, line[1])
                    else:
                        if rtt > RTT_MAX:
                            msg = self.name + line[1] + u': 线路延时过高' + str(
                                rtt) + 'ms'  # "line latency too high"
                            my_alert(msg, line[1])

                list_tdes.append(line[1])
                list_rtts.append(rtt)
            obj.close()
            # batch-update line RTT values to reduce database access
            mupdate(list_tdes, list_rtts)
        except Exception as e:
            logger.error(self.ip + ' ' + str(e))
            obj.close()
            return
        else:
            logger.info(self.ip + " monitoring finished!")
            return

    def train(self):

        self.train_data = processors[self.task_name](self.tokenizer, "train", self.config)
        self.dev_data = processors[self.task_name](self.tokenizer, "dev", self.config)

        logger.info("---"*20)
        self.batch_size = self.train_data.batch_size
        self.nums_tags = len(self.train_data.get_tags())
        self.tag_to_id = self.train_data.tag_to_id
        self.id_to_tag = self.train_data.id_to_tag
        self.train_length = len(self.train_data.data)

        self.dev_batch = self.dev_data.iteration()
        self.dev_length = len(self.dev_data.data)

        logger.info("-"*50)
        logger.info("train data:\t %s", self.train_length)
        logger.info("dev data:\t %s", self.dev_length)
        logger.info("nums of tags:\t %s", self.nums_tags)
        logger.info("tag_to_id: {}".format(self.tag_to_id))
        logger.info("id_to_tag: {}".format(self.id_to_tag))

        self.creat_model()
        with tf.Session() as sess:
            with tf.device("/cpu:0"):
                # load an existing checkpoint if one is available
                if not os.path.exists(self.checkpoint_dir):
                    os.makedirs(self.checkpoint_dir)
                ckpt = tf.train.get_checkpoint_state(self.checkpoint_dir)
                if ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path):
                    logger.info("restore model")    # 加载预训练模型
                    self.saver.restore(sess, ckpt.model_checkpoint_path)
                else:
                    sess.run(tf.global_variables_initializer())

                t_vars = tf.trainable_variables()
                (assignment_map, initialized_variable_names) = \
                    modeling.get_assignment_map_from_checkpoint(
                        t_vars, self.init_checkpoint)
                tf.train.init_from_checkpoint(self.init_checkpoint, assignment_map)

                self.model_input = {
                    "input_ids": self.input_ids,
                    "segment_ids": self.segment_ids,
                    "input_mask": self.input_mask,
                    "dropout": self.dropout
                }

                self.model_output = {
                    "logits": self.logits,
                    "length": self.length,
                    "pre_paths": self.paths
                }

                for i in range(self.max_epoch):
                    logger.info("-"*50)
                    logger.info("epoch {}".format(i))
                    self.steps = 0

                    for batch in self.train_data.get_batch():
                        self.steps += 1
                        global_steps, loss, logits, acc, length = self.train_step(sess, batch)

                        if self.steps % 1 == 0:
                            logger.info("[->] epoch {}: step {}/{}\tloss {:.4f}\tacc {:.5f}".format(
                                i,self.steps, len(self.train_data.batch_data), loss, acc))

                        if self.steps % 20 == 0:
                            self.evaluate(sess)

                        if self.steps - self.last_improved > self.require_improvement:
                            logger.warn("No optimization for a long time, auto-stopping...")
                            break
                logger.info("training finished!!!")

    task_name = ARGS.task
    if task_name not in processors:
        print("Task not found: %s" % (task_name))
        exit()

    if ARGS.entry == "train":

        para = {
            "lstm_dim": 128,
            "max_epoch": 40,
            "train_batch": 16,
            "dev_batch": 256,
            "require_improvement": 1000
        }

        logger.warn("--------" * 10)
        logger.warn("\npara : \n {para}".format(
            para=json.dumps(para, indent=4, ensure_ascii=False)))
        base_config = {"task_name": task_name,
                        "mode": "bert" ,
                        "lstm_dim": 128,
                        "embedding_size": 50,
                        "max_epoch": 10,
                        "train_batch": 16,
                        "dev_batch": 128,
                        "learning_rate": 5e-5,
                        "require_improvement": 500,
                        "bert_config": "bert_model/bert_config.json",
                        "init_checkpoint": "bert_model/bert_model.ckpt",
                        "vocab_dir": "bert_model/vocab.txt",
                        "checkpoint_dir": "./result/{task_name}/ckpt_model/{model_version}".format(task_name=task_name,model_version = time.strftime('%Y%m%d')),# %Y%m%d%H%M%S