def __init__(self, group=None, target=None, name=None, args=(), kwargs=None):
    """Initialise the SQL backend thread and optionally auto-create tables."""
    super(Sql, self).__init__(group=group, target=target, name=name,
                              args=args, kwargs=kwargs)
    cls_name = self.__class__.__name__
    self._logger.debug(
        Tools.create_log_msg(cls_name, None,
                             LogCommon.IS_SUBCLASS.format(cls_name,
                                                          issubclass(Sql, Backend))))
    self._logger.info(
        Tools.create_log_msg(logmsg.SQLBACKEND, None, logmsg.SQLBACKEND_STARTED))
    # Device-task table is only created when auto-creation is enabled in the config.
    self.DeviceTasks = None
    if c.conf.BACKEND.Sqlite.AutoCreateDb:
        self.DeviceTasks = Sql.create_tables(dbname='devicetasks',
                                             fields=Tools.get_task_plugins(),
                                             refName='Tasks')
def first_attempt(self, conn, conn_addr):
    """Push the local bootstrap config file to a newly seen device via SCP.

    On SSH/auth failure the device is removed from the seen-devices map so
    the next inbound connection starts over.

    :param conn: accepted socket of the inbound outbound-ssh connection
    :param conn_addr: (ip, port) tuple of the remote peer
    """
    self._logger.info(Tools.create_log_msg(logmsg.OSSH_SERVICE, conn_addr[0],
                                           logmsg.OSSH_CONN_ATTEMPT.format(
                                               c.oss_seen_devices[conn_addr[0]]['attempt'], conn_addr)))
    try:
        transport = paramiko.Transport(conn)
        transport.connect(username=c.conf.YAPT.DeviceUsr,
                          password=Tools.get_password(pwd_type=c.YAPT_PASSWORD_TYPE_DEVICE))
        with SCPClient(transport=transport) as scp:
            scp.put(c.conf.SERVICES.Ossh.LocalConfigFile,
                    remote_path=c.conf.SERVICES.Ossh.RemoteConfigFile)
        transport.close()
        conn.close()
    except (BadHostKeyException, AuthenticationException, SSHException) as e:
        # BUGFIX: exceptions have no ``.message`` attribute on Python 3 -- use str(e).
        self._logger.info(Tools.create_log_msg(logmsg.OSSH_SERVICE, conn_addr[0],
                                               logmsg.OSSH_FILE_PROV_FAILED.format(conn_addr[0], str(e))))
        self._logger.info(Tools.create_log_msg(logmsg.OSSH_SERVICE, conn_addr[0],
                                               logmsg.OSSH_CLOSE_CONN))
        conn.close()
        # Forget the device under the lock (``with`` replaces acquire/try/finally).
        with c.oss_seen_devices_lck:
            del c.oss_seen_devices[conn_addr[0]]
        return
def probe_device_not_alive(self, device, timeout):
    """Wait until *device* stops answering probes (i.e. it goes down to reboot).

    :param device: sample device whose ``deviceConnection`` is probed
    :param timeout: seconds to sleep between two probe attempts
    :return: last probe result (False once the device is no longer alive)
    """
    # NOTE(review): the initial probe uses a fixed 5s timeout instead of the
    # *timeout* argument -- presumably intentional, confirm with callers.
    alive = device.deviceConnection.probe(timeout=5)
    max_attempts = self.grp_cfg.TASKS.Provision.Software.RebootProbeCounter
    attempt = 0
    while alive:
        # Give up once the configured probe budget is exhausted.
        if attempt > max_attempts:
            self.update_task_state(new_task_state=c.TASK_STATE_FAILED,
                                   task_state_message=c.TASK_STATE_MSG_FAILED)
            break
        alive = device.deviceConnection.probe(1)
        attempt += 1
        Tools.emit_log(task_name=self.task_name, sample_device=device,
                       message=logmsg.SW_PROBE_DEV.format(timeout))
        self.update_task_state(
            new_task_state=c.TASK_STATE_REBOOTING,
            task_state_message=logmsg.SW_PROBE_WAIT_REBOOT.format(str(attempt)))
        time.sleep(timeout)
    return alive
def update_device_task_state(self, device_serial=None, is_callback=None, task_name=None, task_state=None):
    """Persist a task-state change for a device and broadcast it via AMQP.

    :param device_serial: serial number identifying the device row (``owner``)
    :param is_callback: unused here; kept for interface compatibility
    :param task_name: column (task) to update
    :param task_state: dict carrying the new ``taskStateMsg``
    :return: tuple (success flag, affected row count or error text)
    """
    key = {task_name: task_state['taskStateMsg']}
    try:
        query = self.DeviceTasks.update(**key). \
            where(self.DeviceTasks.owner == device_serial)
    except KeyError as ke:
        # BUGFIX: Python 3 exceptions have no ``.message`` attribute -- use str(ke).
        self._logger.info(
            Tools.create_log_msg(logmsg.SQLBACKEND, None,
                                 'Key <{0}> not found'.format(str(ke))))
        return False, 'Key <{0}> not found'.format(str(ke))
    try:
        rows = query.execute()
        # Notify interested consumers about the state change.
        message = AMQPMessage(
            message_type=c.AMQP_MSG_TYPE_DEVICE_UPDATE_TASK_STATE,
            payload=[device_serial, task_name, task_state],
            source=c.AMQP_PROCESSOR_BACKEND)
        self.amqpCl.send_message(message=message)
        database.close()
        return True, rows
    except OperationalError as oe:
        # BUGFIX: same ``.message`` issue for peewee's OperationalError.
        self._logger.info(
            Tools.create_log_msg(logmsg.SQLBACKEND, None, str(oe)))
        database.close()
        return False, str(oe)
def get_config_template_data(self, serialnumber=None, templateName=None, groupName=None, isRaw=None):
    """Load a device's config template, either as raw text or a Jinja2 template.

    :param serialnumber: device serial used for group lookup and logging
    :param templateName: unused here; kept for interface compatibility
    :param groupName: device group whose settings name the template dir/file
    :param isRaw: when truthy return the template file contents as a string
    :return: tuple (success flag, template / text / error)
    """
    status, data = self.get_group_data(serialnumber=serialnumber, groupName=groupName)
    if status:
        grp_cfg = Tools.create_config_view(config_type=c.CONFIG_TYPE_GROUP, stream=data)
        t_dir = grp_cfg.TASKS.Provision.Configuration.DeviceConfTemplateDir
        t_file = grp_cfg.TASKS.Provision.Configuration.DeviceConfTemplateFile
        if isRaw:
            try:
                with open(t_dir + t_file) as tfile:
                    t_data = tfile.read()
                return True, t_data
            except IOError as ioe:
                # BUGFIX: IOError has no ``.message`` attribute on Python 3.
                return False, str(ioe)
        else:
            try:
                env = Environment(autoescape=False, loader=FileSystemLoader(t_dir),
                                  trim_blocks=True, lstrip_blocks=True)
                self.logger.info(
                    Tools.create_log_msg(self.name, serialnumber,
                                         'Found template <{0}>)'.format(t_file)))
                return True, env.get_template(t_file)
            except (TemplateNotFound, IOError) as err:
                # NOTE(review): TemplateNotFound presumably lacks errno/strerror/
                # filename -- this log line only fully works for IOError; confirm.
                self.logger.info(Tools.create_log_msg(self.name, serialnumber,
                                                      'Error({0}): {1} --> {2})'.format(
                                                          err.errno, err.strerror, err.filename)))
                return False, err
    else:
        return status, data
def __action_func_inner(self, hostname, modelname, param):
    """Connect to *hostname* over SSH and execute model *modelname* on it.

    :param hostname: key into self.hosts["HOST"]
    :param modelname: model package name; "CheckConnect" only verifies login
    :param param: opaque parameter forwarded to the model's action()
    """
    # Log text is user-facing Chinese: "module: {mod}, host: {host}".
    self.mylog.info("------模块:{mod},主机:{host}".format(host=hostname, mod=modelname))
    host = self.hosts["HOST"][hostname]
    # Merge shared PUBLIC settings into this host's entry.
    Tools().packHost(self.hosts["PUBLIC"], host)
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    if host["connect_type"] == 1:
        # Login with username/password.
        connect = Tools().connectWithPWD(ssh, host["ip"], host["username"], host["password"])
    else:
        # Login with an RSA private-key file (optionally passphrase protected).
        pkey = paramiko.RSAKey.from_private_key_file(
            password=host["passphrase"], filename=host["key_filename"])
        connect = Tools().connectWithRSA(ssh, host["ip"], host["username"], pkey)
    if connect:
        if modelname == "CheckConnect":
            # "Connection successful" -- connectivity check only, no model run.
            self.mylog.info("连接成功")
        else:
            # Dynamically load model.<name>.<name> and run its ModelClass action.
            m = importlib.import_module("model." + modelname + "." + modelname)
            m.ModelClass(self.mylog).action(ssh, hostname, param, host)
        ssh.close()
    else:
        # NOTE(review): ``cri`` looks like a project shortcut for critical() --
        # confirm it exists on the custom logger. Message: "connection failed: <ip>".
        self.mylog.cri("连接失败:" + host["ip"])
def __init__(self):
    """Read the tool configuration and derive grid/pagination settings."""
    self.__tools = Tools()
    self.__config = self.__tools.read_config()
    cfg = self.__config
    self.repos = cfg['repos']
    self.threads = cfg['threads']
    self.grid_size = cfg['grid_size']
    # A page shows a grid_size x grid_size square of repositories.
    self.repos_per_page = int(math.pow(self.grid_size, 2))
def v2_runner_on_ok(self, res):
    """Ansible callback: a playbook task finished successfully."""
    task = res._task.get_name()
    self.update_task_state(new_task_state=c.TASK_STATE_PROGRESS,
                           task_state_message=task)
    Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,
                   message=logmsg.PLAYBOOK_TASK_OK.format(task))
def v2_runner_on_unreachable(self, res):
    """Ansible callback: the target host was unreachable for this task."""
    task = res._task.get_name()
    self.update_task_state(new_task_state=c.TASK_STATE_FAILED,
                           task_state_message=task)
    Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,
                   message=logmsg.ERROR_UNREACHABLE.format(task))
def __init__(self, group=None, target=None, name=None, args=(), kwargs=None):
    """Logviewer service thread: tails *args[0]* and publishes to the UI queue."""
    super(TailfSvc, self).__init__(group=group, target=target, name=name,
                                   args=args, kwargs=kwargs)
    self.logger = c.logger
    # args[0] carries the path of the log file to observe.
    self.log_file = args[0]
    for text in ('Successfully started Logviewer service',
                 'Observing log file <{0}>'.format(self.log_file)):
        self.logger.info(Tools.create_log_msg('LOGVIEWER', None, text))
    self.clp = ClientProcessor(exchange=c.conf.AMQP.Exchange,
                               routing_key=c.AMQP_PROCESSOR_UI,
                               queue=c.AMQP_PROCESSOR_UI)
def get_group_data_file(self, serialnumber=None, group=None, isRaw=None):
    """Read and parse the local YAML group configuration file.

    :param serialnumber: device serial, used for logging only
    :param group: group name; resolved to <dir><group><suffix>
    :param isRaw: unused here; kept for interface compatibility
    :return: (True, parsed YAML data) on success, (False, None) otherwise
    """
    grp_dir = c.conf.STORAGE.Local.DeviceGrpFilesDir
    filename = group + c.CONFIG_FILE_SUFFIX_GROUP
    full_path = grp_dir + filename
    # Guard clause: bail out early when the group file is missing.
    if not (os.path.exists(full_path) and os.path.isfile(full_path)):
        self.logger.info(
            Tools.create_log_msg(self.name, serialnumber,
                                 logmsg.LOCAL_GRP_CFG_FILE_MISS.format(full_path)))
        return False, None
    try:
        with open(full_path, 'r') as fp:
            datavars = yaml.safe_load(fp)
    except IOError:
        self.logger.info(Tools.create_log_msg(self.name, serialnumber,
                                              logmsg.LOCAL_GRP_CFG_FILE_NOK.format(serialnumber,
                                                                                   full_path)))
        return False, None
    self.logger.info(
        Tools.create_log_msg(self.name, serialnumber,
                             logmsg.LOCAL_DEV_CFG_FILE_OK.format(filename)))
    return True, datavars
def __init__(self, group=None, target=None, name=None, args=(), kwargs=None):
    """File-watcher service: observe the directory of *args[0]* for log events.

    args: [0]=watched file path, [1]=log plugin, [2]=service status
    """
    super(FileServiceThread, self).__init__(group=group, target=target, name=name,
                                            args=args, kwargs=kwargs)
    self.file_path = args[0].strip()
    self.log_plugin = args[1]
    self.status = args[2]
    # Watchdog observes the containing directory, filtered by file pattern.
    self.watched_dir = os.path.split(self.file_path)[0]
    self.logger = c.logger
    self.patterns = [self.file_path]
    self._stop_service = threading.Event()
    handler = FileHandler(self.log_plugin, patterns=self.patterns)
    for text in (logmsg.FILESVC_INIT,
                 logmsg.FILESVC_WATCHED_DIR.format(self.watched_dir),
                 logmsg.FILESVC_PATTERN.format(', '.join(self.patterns))):
        self.logger.info(Tools.create_log_msg(logmsg.FILESVC, None, text))
    self.observer = Observer()
    self.observer.schedule(handler, self.watched_dir, recursive=True)
def __init__(self):
    """Read the configuration and build Travis and Github API clients."""
    self.__tools = Tools()
    self.__config = self.__tools.read_config()
    cfg = self.__config
    # API tokens come straight from the user's config file.
    self.__travis_token = cfg['travis_token']
    self.__github_token = cfg['github_token']
    self.__travis_client = Travis(self.__travis_token)
    self.__github_client = Github(self.__github_token)
def prepare_vnf_boostrap_config(self, serialnumber=None, grp_cfg=None, vnf_type=None):
    """Render and archive the bootstrap configuration for a VNF device.

    :param serialnumber: device ossh id used for config/template lookup
    :param grp_cfg: group config view providing the config history dir
    :param vnf_type: VNF flavour; Juniper devices get ossh parameters injected
    :return: generated config file name, or None on device-data lookup failure
    """
    now = datetime.datetime.now().strftime('%Y-%m-%d-%H%M')
    status, data = Tools.get_config(lookup_type=c.CONFIG_LOOKUP_TYPE_GET_DEVICE_CFG,
                                    serialnumber=None, deviceOsshId=serialnumber)
    if status:
        if vnf_type == c.VNF_TYPE_JUNIPER:
            # NOTE(review): self-assignment is a no-op -- looks like the value was
            # meant to come from somewhere else; confirm intended source.
            data['device']['ossh_secret'] = data['device']['ossh_secret']
            data['device']['ossh_ip'] = c.conf.SERVICES.Ossh.ServiceBindAddress
            data['device']['ossh_port'] = c.conf.SERVICES.Ossh.ServiceListenPort
        heading = "## Last changed: " + now + "\n"
        data['heading'] = heading
        status, template = Tools.get_config(lookup_type=c.CONFIG_LOOKUP_TYPE_GET_TEMPLATE,
                                            serialnumber=None, deviceOsshId=serialnumber,
                                            path=data['yapt']['bootstrap_template_dir'],
                                            file=data['yapt']['bootstrap_template_file'])
        if status:
            config = template.render(data, deviceId=serialnumber)
            _device_config_file = '{0}-bootstrap-{1}.conf'.format(serialnumber, now)
            # BUGFIX/idiom: context manager closes the file even if write() fails.
            with open(grp_cfg.TASKS.Provision.Configuration.ConfigFileHistory +
                      _device_config_file, 'w') as target:
                target.write(config)
            return _device_config_file
    else:
        self.logger.info(Tools.create_log_msg(logmsg.CONF_DEV_CFG, serialnumber,
                                              logmsg.CONF_DEV_CFG_DEV_DATA_ERROR))
        return None
def handler(self, chan, host, port):
    """Bidirectionally forward data between SSH channel *chan* and (host, port).

    Opens a TCP connection to the requested target and shuttles bytes in both
    directions until either side closes.

    :param chan: paramiko channel of the forwarded-tcpip request
    :param host: target host to connect to
    :param port: target port to connect to
    """
    sock = socket.socket()
    try:
        sock.connect((host, port))
    except Exception as e:
        # Target unreachable: log and drop the forwarding request.
        self.logger.info(Tools.create_log_msg(logmsg.SSHFWD, self.sample_device.deviceSerial,
                                              logmsg.SSHFWD_REQ_FAILED.format(host, port, e)))
        return
    self.logger.info(Tools.create_log_msg(logmsg.SSHFWD, self.sample_device.deviceSerial,
                                          logmsg.SSHFWD_REQ_CONNECTED.format(chan.origin_addr,
                                                                             chan.getpeername(),
                                                                             (host, port))))
    # Relay loop: whichever side is readable gets copied to the other side.
    # A zero-length read signals EOF and terminates the loop.
    while True:
        r, w, x = select.select([sock, chan], [], [])
        if sock in r:
            data = sock.recv(1024)
            if len(data) == 0:
                break
            chan.send(data)
        if chan in r:
            data = chan.recv(1024)
            if len(data) == 0:
                break
            sock.send(data)
    chan.close()
    sock.close()
    self.logger.info(Tools.create_log_msg(logmsg.SSHFWD, self.sample_device.deviceSerial,
                                          logmsg.SSHFWD_REQ_CLOSED.format(chan.origin_addr)))
def __init__(self, smarthome):
    """Plugin constructor: set up logging (for old cores), config and helpers.

    :param smarthome: smarthome/core instance (not referenced directly here)
    """
    # BUGFIX: the original compared version strings lexicographically, so e.g.
    # '1.10' <= '1.5' evaluated True as strings. Compare numeric components.
    try:
        core_version = tuple(int(p) for p in VERSION.split('.', 2)[:2])
    except ValueError:
        # Non-numeric version components: fall back to "old core" behaviour.
        core_version = (0, 0)
    if core_version <= (1, 5):
        # NOTE(review): presumably cores <= 1.5 do not provide a plugin logger
        # attribute -- confirm against the framework docs.
        self.logger = logging.getLogger(__name__)
    self._project = self.get_parameter_value('project')
    self._apikey = self.get_parameter_value('apikey')
    self._tools = Tools()
    self.lock = threading.Lock()
def __init__(self, group=None, target=None, name=None, args=(), kwargs=None):
    """OSSH server thread: bind the listening socket for outbound-ssh devices.

    :param name: OSSHServer thread name
    :param args: [0]=logmodule name, [1]=source plugin / normalizer, [2]=status
    """
    super(OSSHServiceThread, self).__init__(group=group, target=target, name=name,
                                            args=args, kwargs=kwargs)
    self._logger = c.logger
    self._logmodule, self._normalizer, self.status = args[0], args[1], args[2]
    self._stop_service = threading.Event()
    self._ssh_server_bind_address = c.conf.SERVICES.Ossh.ServiceBindAddress
    self._ssh_server_listen_port = c.conf.SERVICES.Ossh.ServiceListenPort
    self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # Allow quick restarts of the service on the same address/port.
    self._sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    try:
        self._sock.bind((self._ssh_server_bind_address, self._ssh_server_listen_port))
    except socket.error as bind_err:
        self._logger.info(Tools.create_log_msg(logmsg.OSSH_SERVICE,
                                               self._ssh_server_bind_address,
                                               logmsg.OSSH_BIND_FAILED.format(bind_err.strerror)))
        self._logger.info(
            Tools.create_log_msg(logmsg.OSSH_SERVICE, self._ssh_server_bind_address,
                                 logmsg.OSSH_BIND_FAILED_1))
        sys.exit(1)
def create_hornet_queue(self, queue):
    """Create a HornetQ JMS queue on the Space server via its REST API.

    :param queue: name of the queue to create
    :return: True when the queue exists afterwards (created or already there)
    """
    header = {'Content-Type': 'application/hornetq.jms.queue+xml'}
    body = '<queue name="{0}"><durable>false</durable></queue>'.format(queue)
    response = self.post('api/hornet-q/queues', header, body)
    # No response at all means the POST itself failed.
    if response is None:
        return False
    # 201: created now; 412: already present -- both mean the queue exists.
    if response.status_code == 201:
        outcome, ok = logmsg.SPACEPLG_Q_CREATED.format(queue), True
    elif response.status_code == 412:
        outcome, ok = logmsg.SPACEPLG_Q_ALREADY.format(queue), True
    else:
        outcome, ok = logmsg.SPACEPLG_Q_FAILED.format(queue), False
    self.logger.info(Tools.create_log_msg(logmsg.SPACE, None, outcome))
    return ok
def __init__(self, normalizer, svc_cfg):
    """Outbound-ssh service plugin constructor."""
    super(Ossh, self).__init__(normalizer=normalizer, svc_cfg=svc_cfg)
    is_sub = issubclass(Ossh, Service)
    self.logger.debug(Tools.create_log_msg(logmsg.OSSH_SERVICE, None,
                                           LogCommon.IS_SUBCLASS.format(logmsg.OSSH_SERVICE,
                                                                        is_sub)))
    self.logger.info(Tools.create_log_msg(logmsg.OSSH_SERVICE, None, logmsg.OSSH_START))
    # Server thread is created later, when the service actually starts.
    self.ossh_svc_t = None
def v2_runner_on_failed(self, res, ignore_errors=False):
    """Ansible callback: a playbook task failed."""
    task = res._task.get_name()
    self.update_task_state(new_task_state=c.TASK_STATE_FAILED,
                           task_state_message=task)
    Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,
                   message=logmsg.PLAYBOOK_ERROR.format(task))
def __init__(self, normalizer, svc_cfg):
    """Webhook service plugin constructor."""
    super(Webhook, self).__init__(normalizer=normalizer, svc_cfg=svc_cfg)
    self._stop_service = multiprocessing.Event()
    # Worker process handle; spawned when the service is started.
    self.p = None
    self.logger.debug(
        Tools.create_log_msg(logmsg.WEBHOOK_SERVICE, None,
                             LogCommon.IS_SUBCLASS.format(logmsg.WEBHOOK_SERVICE,
                                                          issubclass(Webhook, Service))))
    self.logger.info(
        Tools.create_log_msg(logmsg.WEBHOOK_SERVICE, None, logmsg.WEBHOOK_START))
def send_message(self, message, routing_key):
    """Serialize *message* and publish it; log invalid messages instead."""
    # Guard clause: anything that is not a proper AMQPMessage goes to the
    # error logger (isinstance already rejects None).
    if not isinstance(message, AMQPMessage):
        Tools.amqp_send_error_to_logger(routing_key=routing_key, body_decoded=message)
        return
    Tools.amqp_send_to_logger(routing_key=routing_key, body_decoded=message)
    self.send_message_amqp(jsonpickle.encode(message), routing_key=routing_key)
def run(self):
    """Accept outbound-ssh connections and dispatch provisioning attempts.

    First connection from an unknown, trusted device triggers first_attempt();
    a second connection (before reboot) triggers second_attempt(); anything
    beyond that, or an untrusted peer, is refused.
    """
    try:
        self._sock.listen(5)
        self._logger.info(Tools.create_log_msg(logmsg.OSSH_SERVICE, None,
                                               logmsg.OSSH_LISTEN.format(self._ssh_server_bind_address,
                                                                         self._ssh_server_listen_port)))
        self.status = c.SVC_STARTED
    except Exception as e:
        self._logger.info(
            Tools.create_log_msg(logmsg.OSSH_SERVICE, None, logmsg.OSSH_LISTEN_FAILED.format(e)))
        sys.exit(1)
    while not self._stop_service.is_set():
        sock, sock_addr = self._sock.accept()
        sock.settimeout(60)
        self._logger.info(
            Tools.create_log_msg(logmsg.OSSH_SERVICE, sock_addr[0],
                                 logmsg.OSSH_NEW_CONN.format(sock_addr)))
        # Verify device identity (DMI) before doing anything with the socket.
        isTrusted, deviceId = self.check_for_dmi(sock, sock_addr)
        if isTrusted:
            if sock_addr[0] not in c.oss_seen_devices:
                # First contact: register the device under the lock, then push
                # the bootstrap config in a worker thread.
                c.oss_seen_devices_lck.acquire()
                try:
                    c.oss_seen_devices[sock_addr[0]] = {'attempt': 1, 'rebooted': False,
                                                        'socket': sock}
                finally:
                    c.oss_seen_devices_lck.release()
                thr = threading.Thread(target=self.first_attempt, args=(sock, sock_addr,))
                thr.start()
            elif sock_addr[0] in c.oss_seen_devices and c.oss_seen_devices[sock_addr[0]]['attempt'] == 1 and not \
                    c.oss_seen_devices[sock_addr[0]]['rebooted']:
                # Second contact before reboot: bump the attempt counter and
                # remember the fresh socket, then run the second attempt.
                c.oss_seen_devices_lck.acquire()
                try:
                    c.oss_seen_devices[sock_addr[0]]['attempt'] = c.oss_seen_devices[sock_addr[0]][
                                                                      'attempt'] + 1
                    c.oss_seen_devices[sock_addr[0]]['socket'] = sock
                finally:
                    c.oss_seen_devices_lck.release()
                thr = threading.Thread(target=self.second_attempt,
                                       args=(sock, sock_addr, deviceId,))
                thr.start()
            else:
                # Connection limit reached for this device.
                self._logger.info(Tools.create_log_msg(logmsg.OSSH_SERVICE, sock_addr[0],
                                                       logmsg.OSSH_CONN_LIMIT.format(sock_addr[0])))
                sock.close()
        else:
            # Identity verification failed: refuse the connection.
            self._logger.info(Tools.create_log_msg(logmsg.OSSH_SERVICE, sock_addr[0],
                                                   logmsg.OSSH_ERROR_VERIFY))
            sock.close()
def __init__(self, group=None, target=None, name=None, args=(), kwargs=None):
    """Service processor: AMQP server adapter hosting the service plugins."""
    super(ServiceProcessor, self).__init__(group=group, target=target, name=name,
                                           args=args, kwargs=kwargs)
    cls_name = self.__class__.__name__
    self._logger.debug(Tools.create_log_msg(cls_name, None,
                                            LogCommon.IS_SUBCLASS.format(
                                                cls_name,
                                                issubclass(ServiceProcessor,
                                                           AMQPBlockingServerAdapter))))
    self._logger.info(Tools.create_log_msg(cls_name, None, 'Starting {0}'.format(name)))
    # Plugin registry built from the configured service plugin list.
    self.registry = ServicePluginFactory(c.conf.SERVICES.Plugins).registry
def init(self, **params):
    """PHS first-contact endpoint: return the bootstrap init config for a device.

    :param params: request parameters; ``uid`` carries the device serial
    :return: rendered init XML on success, otherwise None
    """
    self.deviceIP = cherrypy.request.headers['Remote-Addr']
    self.sn_nr = params['uid']
    status, data = Tools.get_config(
        lookup_type=c.CONFIG_LOOKUP_TYPE_GET_DEVICE_CFG,
        serialnumber=self.sn_nr, deviceOsshId=None)
    if status:
        try:
            self.device_type = data['yapt']['device_type']
            self.service_chain = data['yapt']['service_chain']
        except KeyError as ke:
            # BUGFIX: Python 3 exceptions have no ``.message`` attribute.
            self.logger.info('{0} {1}'.format(
                logmsg.PHS_SERVICE + ':',
                logmsg.PHS_CONF_KEY_ERROR.format(str(ke))))
            return
        _boostrap_init_file = c.conf.SERVICES.Phs.InitConfPath + self.device_type + '.xml'
        if os.path.isfile(_boostrap_init_file):
            response = None
            try:
                env = Environment(autoescape=select_autoescape(['xml']),
                                  loader=FileSystemLoader(
                                      c.conf.SERVICES.Phs.InitConfPath),
                                  trim_blocks=True, lstrip_blocks=True)
                response = env.get_template(self.device_type + '.xml').render(
                    serial=self.sn_nr)
            except (TemplateNotFound, IOError) as err:
                self.logger.info(
                    Tools.create_log_msg(
                        logmsg.PHS_SERVICE, self.sn_nr,
                        logmsg.PHS_TEMPLATE_ERROR.format(
                            err.errno, err.strerror, err.filename)))
            # Returns None when template rendering failed above.
            return response
        else:
            self.logger.info(
                Tools.create_log_msg(
                    logmsg.PHS_SERVICE, self.sn_nr,
                    logmsg.PHS_BOOSTRAP_FILE_FAILED.format(
                        _boostrap_init_file)))
    else:
        self.logger.info(
            Tools.create_log_msg(logmsg.PHS_SERVICE, self.sn_nr,
                                 logmsg.PHS_DEV_CONF_FAILED))
def dep_init_configuration(self):
    """Bind a Config utility onto the device connection before config tasks run."""
    serial = self.sample_device.deviceSerial
    self.logger.info(
        Tools.create_log_msg(self.task_name, serial,
                             'Processing configuration task deps for configuration task'))
    self.logger.info(
        Tools.create_log_msg(self.task_name, serial, logmsg.INIT_EXTEND_CFG))
    self.sample_device.deviceConnection.bind(cu=Config)
def notification(self, **params):
    """Handle ZTP bootstrap-server notifications posted by a device.

    Parses the XML notification body and reacts per notification type; a
    completed bootstrap hands the device to the source plugin unless the
    OSSH service plugin is configured to pick it up later.

    :param params: request parameters; ``uid`` carries the device serial
    """
    cherrypy.response.status = 201
    body = cherrypy.request.body.read()
    xml_body = etree.fromstring(body)
    ns_orig = 'http://juniper.net/zerotouch-bootstrap-server'
    for item in xml_body.iter('{' + ns_orig + '}notification-type'):
        if item.text == c.PHS_NOTIFICATION_CONF_APPLIED:
            # NOTE(review): ``.format`` is applied to create_log_msg's result
            # here, not to PHS_STAGE1_SUCCESS -- looks like a misplaced paren.
            self.logger.info(
                Tools.create_log_msg(logmsg.PHS_SERVICE, self.sn_nr,
                                     logmsg.PHS_STAGE1_SUCCESS).format(
                    params['uid']))
        elif item.text == c.PHS_NOTIFICATION_CONF_FAILED:
            self.logger.info(
                Tools.create_log_msg(
                    logmsg.PHS_SERVICE, self.sn_nr,
                    logmsg.PHS_STAGE1_FAILED.format(params['uid'])))
        elif item.text == c.PHS_NOTIFICATION_BOOTSTRAP_COMPLETED:
            self.logger.info(
                Tools.create_log_msg(
                    logmsg.PHS_SERVICE, self.sn_nr,
                    logmsg.PHS_BOOTSTRAP_SUCCESS.format(params['uid'])))
            if c.SERVICEPLUGIN_OSSH in self.service_chain:
                # The OSSH plugin will take over this device next.
                self.logger.info(
                    Tools.create_log_msg(
                        logmsg.PHS_SERVICE, self.sn_nr,
                        logmsg.PHS_SEC_SVC.format(c.SERVICEPLUGIN_OSSH,
                                                  c.SERVICEPLUGIN_OSSH)))
                return
            else:
                # Normalize the device and announce it on the AMQP bus.
                sample_device = self._source_plugin.run_normalizer(
                    datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
                    device=self.deviceIP)
                # NFX JDM facts return empty serial number so we have to add it here
                if self.device_type == 'nfx':
                    sample_device.deviceSerial = self.sn_nr
                message = AMQPMessage(
                    message_type=c.AMQP_MSG_TYPE_DEVICE_ADD,
                    payload=sample_device,
                    source=c.AMQP_PROCESSOR_SVC)
                self._source_plugin.send_message(message=message)
        elif item.text == c.PHS_NOTIFICATION_BOOTSTRAP_FAILED:
            self.logger.info(
                Tools.create_log_msg(
                    logmsg.PHS_SERVICE, self.sn_nr,
                    logmsg.PHS_BOOSTRAP_FAILED.format(params['uid'])))
def prepare_device_config(self, sample_device=None):
    """Render the device configuration from its template and archive it.

    :param sample_device: device object carrying group and config data
    :return: dict with status/sample_device/configfilename, or None when the
             device has no configuration data at all
    """
    version = sample_device.softwareVersion
    now = datetime.datetime.now().strftime('%Y-%m-%d-%H%M')
    grp_cfg = Tools.create_config_view(
        config_type=c.CONFIG_TYPE_GROUP, stream=sample_device.deviceGroupData)
    if c.SERVICEPLUGIN_OSSH in sample_device.deviceServicePlugin:
        # NOTE(review): self-assignment of ossh_secret is a no-op; kept for
        # fidelity -- confirm the intended source of the value.
        sample_device.deviceConfigData['device'][
            'ossh_secret'] = sample_device.deviceConfigData['device']['ossh_secret']
        sample_device.deviceConfigData['device'][
            'ossh_ip'] = c.conf.SERVICES.Ossh.ServiceBindAddress
        sample_device.deviceConfigData['device'][
            'ossh_port'] = c.conf.SERVICES.Ossh.ServiceListenPort
    if sample_device.deviceConfigData:
        heading = "## Last changed: " + now + "\n"
        heading += "version " + version + ";"
        sample_device.deviceConfigData['heading'] = heading
        status, data = Tools.get_config(
            lookup_type=c.CONFIG_LOOKUP_TYPE_GET_TEMPLATE,
            sample_device=sample_device)
        if status:
            config = data.render(sample_device.deviceConfigData)
            sample_device.deviceConfiguration = config
            _device_config_file = '{0}-{1}.conf'.format(
                sample_device.deviceSerial, now)
            # BUGFIX/idiom: context manager closes the history file even when
            # write() raises.
            with open(grp_cfg.TASKS.Provision.Configuration.ConfigFileHistory +
                      _device_config_file, 'w') as target:
                target.write(sample_device.deviceConfiguration)
            return {
                'status': True,
                'sample_device': sample_device,
                'configfilename': _device_config_file
            }
        else:
            # Template lookup failed; `data` carries the error.
            return {
                'status': False,
                'sample_device': sample_device,
                'configfilename': data
            }
    else:
        self.logger.info(
            Tools.create_log_msg(logmsg.CONF_DEV_CFG,
                                 sample_device.deviceSerial,
                                 logmsg.CONF_DEV_CFG_DEV_DATA_ERROR))
        return None
def __init__(self, group=None, target=None, name=None, args=(), kwargs=None):
    """Task processor: AMQP server adapter driving provisioning tasks."""
    super(TaskProcessor, self).__init__(group=group, target=target, name=name,
                                        args=args, kwargs=kwargs)
    cls_name = self.__class__.__name__
    self._logger.debug(Tools.create_log_msg(cls_name, None,
                                            LogCommon.IS_SUBCLASS.format(
                                                cls_name,
                                                issubclass(TaskProcessor,
                                                           AMQPBlockingServerAdapter))))
    self._logger.info(Tools.create_log_msg(cls_name, None, 'Starting {0}'.format(name)))
    # Local import -- presumably to avoid a circular import at module load; confirm.
    from lib.tasks.tasktools import Configuration
    self._configurator = Configuration()
    self._backendp = BackendClientProcessor(exchange='',
                                            routing_key=c.AMQP_RPC_BACKEND_QUEUE)
    self._svcp = ServiceClientProcessor(exchange='',
                                        routing_key=c.AMQP_RPC_SERVICE_QUEUE)
def run_task(self):
    """Create a firewall policy on Junos Space for the sample device.

    Renders the policy template, posts it via the Space REST client and
    tracks the task state based on the HTTP status code.
    """
    self.logger.info(
        Tools.create_log_msg(
            self.task_name, self.sample_device.deviceSerial,
            logmsg.POLICY_INIT.format(self.sample_device.deviceName)))
    try:
        with open(
                c.conf.JUNOSSPACE.TemplateDir +
                self.grp_cfg.TASKS.Provision.Policy.PolicyTemplate,
                'r') as f:
            # BUGFIX: the template engine expects the template source text,
            # not a file object -- read the file first.
            template = Template(f.read())
        BODY = template.render(
            name=self.sample_device.deviceName,
            type=self.grp_cfg.TASKS.Provision.Policy.LookupType)
        response = c.SRC.add_fw_policy(BODY)
    except IOError as ioe:
        # BUGFIX: IOError has no ``.message`` attribute on Python 3.
        self.logger.info(
            Tools.create_log_msg(
                self.task_name, self.sample_device.deviceSerial,
                logmsg.POLICY_TEMP_FILE_NOK.format(
                    self.grp_cfg.TASKS.Provision.Policy.PolicyTemplate,
                    str(ioe))))
        self.sample_device.deviceTasks.taskState[
            self.task_name] = c.TASK_STATE_FAILED
        self.shared[c.TASK_SHARED_STATE] = c.TASK_STATE_RESULT_FAILURE
        return
    if response.status_code == 500:
        self.logger.info(
            Tools.create_log_msg(
                self.task_name, self.sample_device.deviceSerial,
                logmsg.POLICY_CREATE_NOK.format(
                    self.sample_device.deviceName, response.status_code,
                    response.text)))
        self.sample_device.deviceTasks.taskState[
            self.task_name] = response.text
        self.shared[c.TASK_SHARED_STATE] = c.TASK_STATE_RESULT_FAILURE
    elif response.status_code == 200:
        self.sample_device.deviceTasks.taskState[
            self.task_name] = c.TASK_STATE_DONE
        self.shared[c.TASK_SHARED_STATE] = c.TASK_STATE_RESULT_DONE
        # Give Space time to settle before the next REST call.
        time.sleep(c.conf.JUNOSSPACE.RestTimeout)
    else:
        self.logger.info(
            Tools.create_log_msg(
                self.task_name, self.sample_device.deviceSerial,
                logmsg.POLICY_UNKNOWN_CODE.format(response.status_code)))
class PaidLeave:
    """Monthly compensatory-leave ("倒休") accounting for one target month.

    Reads per-month Excel exports (overtime, leave, previous summary), computes
    each user's remaining comp-time, optionally emails a summary and writes the
    month's summary workbook.
    """

    def __init__(self, target):
        # target: date/datetime within the month being processed.
        self.target = target
        self.tools = Tools()
        if SEND_EMAIL:
            # Mailer is only constructed when email sending is enabled.
            self.email = Email()

    def get_overtime(self):
        """Overtime newly accrued in the target month, per user (hours)."""
        data = {}  # {user: time}
        month_str = self.tools.get_month_str(self.target)
        filename = "data/%s/overtime.xlsx" % month_str
        _data = self.tools.get_excel_data(filename, ["姓名", "起始时间", "结束时间"], 1)
        i = 0
        for name in _data.get("姓名"):
            # NOTE(review): when name is falsy, `i` is NOT advanced, so later
            # rows read shifted cells -- looks like an off-by-one bug; confirm.
            if not name:
                continue
            start_time_val = _data.get("起始时间")[i]
            end_time_val = _data.get("结束时间")[i]
            # Cells may be parsed datetimes or "%Y-%m-%d %H:%M" strings.
            if type(start_time_val) == datetime:
                start_time = start_time_val
            else:
                start_time = datetime.strptime(start_time_val, "%Y-%m-%d %H:%M")
            if type(end_time_val) == datetime:
                end_time = end_time_val
            else:
                end_time = datetime.strptime(end_time_val, "%Y-%m-%d %H:%M")
            # NOTE(review): ``.seconds`` ignores the days component of the
            # delta; spans over 24h would be truncated -- total_seconds()?
            del_time = round((end_time - start_time).seconds / 3600, 1)
            # A user may have multiple overtime records; accumulate them.
            _time = data.get(name, 0)
            data[name] = _time + del_time
            i += 1
        return data

    def get_last_remaining(self):
        """Comp-time remaining from the previous month, per user (hours)."""
        data = {}  # {user: time}
        month_str = self.tools.get_month_str(self.tools.get_last_month_dt(self.target))
        filename = "data/%s/all.xlsx" % month_str
        _data = self.tools.get_excel_data(filename, ["姓名", "剩余可用"])
        i = 0
        for name in _data.get("姓名"):
            _time = data.get(name, 0)
            data[name] = _time + _data.get("剩余可用")[i]
            i += 1
        return data

    def get_used_overtime(self):
        """Comp-time consumed in the target month, per user (hours)."""
        data = {}  # {user: time}
        month_str = self.tools.get_month_str(self.target)
        filename = "data/%s/leave.xlsx" % month_str
        excel_data = self.tools.get_excel_data(filename,
                                               ["发起人姓名", "请假天数", "请假类型", "审批结果"])
        i = 0
        for name in excel_data.get("发起人姓名"):
            # Only approved ("同意") comp-leave ("倒休") requests count; a
            # leave day equals 8 hours.
            if "倒休" in excel_data.get("请假类型")[i] and "同意" in excel_data.get("审批结果")[i]:
                # NOTE(review): local ``time`` shadows a common module name.
                time = data.get(name, 0)
                data[name] = time + 8 * float(excel_data.get("请假天数")[i])
            i += 1
        return data

    def get_paid(self):
        """Overtime paid out (instead of banked) in the target month, per user."""
        data = {}  # {user: time}
        month_str = self.tools.get_month_str(self.target)
        filename = "data/%s/overtime.xlsx" % month_str
        excel_data = self.tools.get_excel_data(filename, ["姓名",
                                                          "起始时间", "结束时间", "是否支付"], 1)
        i = 0
        if not len(excel_data):
            return {}
        for name in excel_data.get("姓名"):
            # NOTE(review): same potential `i` desync on falsy names as in
            # get_overtime() -- confirm.
            if not name:
                continue
            # "是否支付" == 1 marks records that were paid out in cash.
            if excel_data.get("是否支付")[i] == 1:
                start_time_val = excel_data.get("起始时间")[i]
                end_time_val = excel_data.get("结束时间")[i]
                if type(start_time_val) == datetime:
                    start_time = start_time_val
                else:
                    start_time = datetime.strptime(start_time_val, "%Y-%m-%d %H:%M")
                if type(end_time_val) == datetime:
                    end_time = end_time_val
                else:
                    end_time = datetime.strptime(end_time_val, "%Y-%m-%d %H:%M")
                del_time = round((end_time - start_time).seconds / 3600, 1)
                # A user may have multiple paid records; accumulate them.
                _time = data.get(name, 0)
                data[name] = _time + del_time
            i += 1
        return data

    def get_all_users_data(self):
        """Load the static user directory (guid -> user dict) from JSON."""
        users = {}
        with open("data/user.json") as fp:
            users = json.load(fp)
        return users

    def work(self):
        """Aggregate all sources per user and emit the month's summary."""
        user_data = self.get_all_users_data()
        last_remaining = self.get_last_remaining()
        paid = self.get_paid()
        overtime = self.get_overtime()
        used_overtime = self.get_used_overtime()
        for user_guid, user in user_data.items():
            name = user.get("name")
            user["paid"] = paid.get(name, 0)
            user["overtime"] = overtime.get(name, 0)
            user["used_overtime"] = used_overtime.get(name, 0)
            user["last_remaining"] = last_remaining.get(name, 0)
            # remaining = carry-over + new overtime - used - paid out.
            user["remaining"] = user["last_remaining"] + user["overtime"] - user["used_overtime"] - user["paid"]
        self.generate_excel(user_data)

    def generate_excel(self, user_data):
        """Print/email per-user summaries and write the month's summary workbook."""
        first_date = self.target.replace(day=1)
        # Last calendar day of the previous month.
        last_end_date = first_date - timedelta(days=1)
        last_end_date_str = "%s月%s日" % (last_end_date.month, last_end_date.day)
        excel_data = [["姓名", "截止%s剩余" % last_end_date_str, "%s月份新增" % self.target.month,
                       "%s月份已用" % self.target.month, "%s月份支付" % self.target.month,
                       "剩余可用"], ]
        index = 0
        user_data_list = user_data.values()
        user_data_list = sorted(user_data_list, key=name_sortor)
        for user in user_data_list:
            # Skip users with no activity and no balance at all.
            if user.get("paid") == 0 and user.get("overtime") == 0 \
                    and user.get("used_overtime") == 0 and user.get("last_remaining") == 0 \
                    and user.get("remaining") == 0:
                continue
            last_month_date = self.tools.get_last_month_dt(self.target)
            old_end_day = last_end_date.day
            # Last calendar day of the target month.
            after_month = self.target + relativedelta(months=1)
            end_date = after_month - timedelta(days=1)
            params = {
                "name": user.get("name"),
                "last_remaining": user.get("last_remaining"),
                "overtime": user.get("overtime"),
                "used_overtime": user.get("used_overtime"),
                "paid": user.get("paid"),
                "remaining": user.get("remaining"),
                "year": self.target.year,
                "month": self.target.month,
                "end_day": end_date.day,
                "old_month": last_month_date.month,
                "old_end_day": old_end_day
            }
            content = TEMPLATE.format(**params)
            print("==========================================================")
            print(content)
            if SEND_EMAIL:
                print("----------------------------------------------------------")
                # Normalize legacy mail domain to the current one.
                receiver = user.get("email").replace("linuxdeepin.com", "deepin.com")
                print("sending email to: ", receiver)
                try:
                    if user.get("name") in USER_FILTER:
                        print("user is in the filter list, skip sending...")
                    else:
                        # Subject: "<year>-<month> comp-leave statistics" (Chinese).
                        subject = "%s年%s月调休统计" % (self.target.year, self.target.month)
                        self.email.send(receiver, subject, content)
                except SMTPRecipientsRefused as e:
                    print("failed to sending email")
                    print(e)
                except Exception as e:
                    # NOTE(review): duplicates the handler above; a single broad
                    # handler would suffice. Kept byte-identical.
                    print("failed to sending email")
                    print(e)
                print("finish.")
                print("==========================================================")
            col = [user.get("name"), user.get("last_remaining"), user.get("overtime"),
                   user.get("used_overtime"), user.get("paid"), user.get("remaining")]
            excel_data.append(col)
            index += 1
        self.tools.write_to_execl("data/%s/all.xlsx" % self.tools.get_month_str(self.target),
                                  excel_data)
def __init__(self, target):
    """Remember the target month and build helpers.

    :param target: date/datetime within the month being processed
    """
    self.target = target
    self.tools = Tools()
    if SEND_EMAIL:
        # The mailer attribute only exists when email sending is enabled.
        self.email = Email()