def __init__(self, *args):
    QDialog.__init__(self, *args)
    self.setupUi(self)
    self.setWindowIcon(QIcon(":/icons/qtsixa.png"))

    self.b_edit.setEnabled(False)
    self.b_remove.setEnabled(False)
    self.listDev.setColumnWidth(0, 150)

    self.connect(self.listDev, SIGNAL("itemSelectionChanged()"), self.func_changedListDev)
    self.connect(self.listProf, SIGNAL("currentRowChanged(int)"), self.func_changedListProf)
    self.connect(self.tabWidget, SIGNAL("currentChanged(int)"), self.func_changedTab)
    self.connect(self.b_add, SIGNAL("clicked()"), self.func_Add)
    self.connect(self.b_remove, SIGNAL("clicked()"), self.func_Remove)
    self.connect(self.b_edit, SIGNAL("clicked()"), self.func_Edit)

    home = os.getenv("HOME")
    if not os.path.exists(home + "/.qtsixa2/profiles/"):
        os.mkdir(home + "/.qtsixa2/profiles/")
    if not os.path.exists(home + "/.qtsixa2/pics/"):
        os.mkdir(home + "/.qtsixa2/pics/")
    if not os.path.exists(home + "/.qtsixa2/.setup_profiles"):
        os.system("cp /usr/share/qtsixa/profiles/* " + home + "/.qtsixa2/profiles/")
        if os.path.exists(home + "/.qtsixa2/profiles/KDE"):
            os.mknod(home + "/.qtsixa2/.setup_profiles")
    if not os.path.exists(home + "/.qtsixa2/.setup_pics"):
        os.system("cp /usr/share/qtsixa/pics/* " + home + "/.qtsixa2/pics/")
        if os.path.exists(home + "/.qtsixa2/pics/KDE.png"):
            os.mknod(home + "/.qtsixa2/.setup_pics")

    self.func_refreshList()
def check_dirs():
    if not os.path.isdir(templates_dir):
        os.mkdir(templates_dir)
    if not os.path.isdir(test_dir):
        os.mkdir(test_dir)
    if not os.path.isfile(state_file_path):
        os.mknod(state_file_path)
def _generate_zip(self, path, package, version, buf):
    filename = get_filename(package, version)
    filepath = os.path.join(path, filename)
    os.mknod(filepath)
    with open(filepath, 'w') as f:
        f.write(buf)
    return filename
def data2val_txt(val_txt_path, filename, class_id):
    if not os.path.exists(val_txt_path):
        os.mknod(val_txt_path)
    val_txt = open(val_txt_path, 'w')
    val_txt.write(filename + ' ' + str(class_id))
    val_txt.write('\n')
    val_txt.close()
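Many of the snippets in this collection call os.mknod() with a single argument just to create an empty regular file. That usage is Linux-specific and raises OSError(EEXIST) if the file already exists; a minimal sketch of a portable equivalent follows (the helper name touch_file is illustrative, not from the source):

import pathlib

def touch_file(path):
    # Equivalent to the bare os.mknod(path) pattern above, but portable:
    # creates the file if missing and is a no-op when it already exists.
    pathlib.Path(path).touch(exist_ok=True)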
def pick_training_set(dataset_name, roidb, rel_db):
    global trigger_int
    pick_dir = "./line_pick/"
    PR_file = pick_dir + dataset_name
    if not os.path.isdir(pick_dir):
        os.mkdir(pick_dir)
    if not os.path.isfile(PR_file):
        os.mknod(PR_file)
    PR_obj = open(PR_file, "a+")
    PR_obj.write("[")
    PR_obj.flush()
    for i in range(len(roidb)):
        image_path = roidb[i]["image"]
        boxes = roidb[i]["boxes"]
        vis_image(image_path, boxes)
        #print rel_db[i]["image"]
        #print roidb[i]["image"]
        #print trigger_int
        #print "reldb: ", str(rel_db[i])
        if trigger_int == 1:
            print "pick", rel_db[i]["image"]
            PR_obj.write(str(rel_db[i]) + ",")
            PR_obj.flush()
        else:
            print "discard"
    PR_obj.write("]")
    PR_obj.flush()
    PR_obj.close()
def create_test_files(self):
    """Create a minimal test case including all supported file types
    """
    # File
    self.create_regular_file('empty', size=0)
    # 2600-01-01 > 2**64 ns
    os.utime('input/empty', (19880895600, 19880895600))
    self.create_regular_file('file1', size=1024 * 80)
    self.create_regular_file('flagfile', size=1024)
    # Directory
    self.create_regular_file('dir2/file2', size=1024 * 80)
    # File owner
    os.chown('input/file1', 100, 200)
    # File mode
    os.chmod('input/file1', 0o7755)
    os.chmod('input/dir2', 0o555)
    # Block device
    os.mknod('input/bdev', 0o600 | stat.S_IFBLK, os.makedev(10, 20))
    # Char device
    os.mknod('input/cdev', 0o600 | stat.S_IFCHR, os.makedev(30, 40))
    # Hard link
    os.link(os.path.join(self.input_path, 'file1'),
            os.path.join(self.input_path, 'hardlink'))
    # Symlink
    os.symlink('somewhere', os.path.join(self.input_path, 'link1'))
    if xattr.is_enabled():
        xattr.setxattr(os.path.join(self.input_path, 'file1'), 'user.foo', b'bar')
        xattr.setxattr(os.path.join(self.input_path, 'link1'), 'user.foo_symlink',
                       b'bar_symlink', follow_symlinks=False)
    # FIFO node
    os.mkfifo(os.path.join(self.input_path, 'fifo1'))
    if has_lchflags:
        os.lchflags(os.path.join(self.input_path, 'flagfile'), stat.UF_NODUMP)
def extract_item(self, item, restore_attrs=True, dry_run=False):
    if dry_run:
        if b'chunks' in item:
            for _ in self.pipeline.fetch_many([c[0] for c in item[b'chunks']],
                                              is_preloaded=True):
                pass
        return
    dest = self.cwd
    if item[b'path'].startswith('/') or item[b'path'].startswith('..'):
        raise Exception('Path should be relative and local')
    path = os.path.join(dest, item[b'path'])
    # Attempt to remove existing files, ignore errors on failure
    try:
        st = os.lstat(path)
        if stat.S_ISDIR(st.st_mode):
            os.rmdir(path)
        else:
            os.unlink(path)
    except OSError:
        pass
    mode = item[b'mode']
    if stat.S_ISDIR(mode):
        if not os.path.exists(path):
            os.makedirs(path)
        if restore_attrs:
            self.restore_attrs(path, item)
    elif stat.S_ISREG(mode):
        if not os.path.exists(os.path.dirname(path)):
            os.makedirs(os.path.dirname(path))
        # Hard link?
        if b'source' in item:
            source = os.path.join(dest, item[b'source'])
            if os.path.exists(path):
                os.unlink(path)
            os.link(source, path)
        else:
            with open(path, 'wb') as fd:
                ids = [c[0] for c in item[b'chunks']]
                for data in self.pipeline.fetch_many(ids, is_preloaded=True):
                    fd.write(data)
                fd.flush()
                self.restore_attrs(path, item, fd=fd.fileno())
    elif stat.S_ISFIFO(mode):
        if not os.path.exists(os.path.dirname(path)):
            os.makedirs(os.path.dirname(path))
        os.mkfifo(path)
        self.restore_attrs(path, item)
    elif stat.S_ISLNK(mode):
        if not os.path.exists(os.path.dirname(path)):
            os.makedirs(os.path.dirname(path))
        source = item[b'source']
        if os.path.exists(path):
            os.unlink(path)
        os.symlink(source, path)
        self.restore_attrs(path, item, symlink=True)
    elif stat.S_ISCHR(mode) or stat.S_ISBLK(mode):
        os.mknod(path, item[b'mode'], item[b'rdev'])
        self.restore_attrs(path, item)
    else:
        raise Exception('Unknown archive item type %r' % item[b'mode'])
def exportData(self):
    print("export data begin...\n")
    print self.url
    begin = time.time()
    try:
        os.remove(self.file_name)
    except:
        os.mknod(self.file_name)
    print(self.url)
    msg = urllib2.urlopen(self.url).read()
    print(msg)
    obj = json.loads(msg)
    num = obj["hits"]["total"]
    start = 0
    end = num / self.size + 1
    while start < end:
        tmpData = self.data
        tmpData["from"] = start * self.size
        tmpData["size"] = self.size
        request = urllib2.Request(self.url,
                                  headers={'Content-Type': 'application/json'},
                                  data=json.dumps(tmpData))
        # msg = urllib2.urlopen(self.request + "?from=" + str(start * self.size) + "&size=" + str(self.size)).read()
        msg = urllib2.urlopen(request).read()
        self.writeFile(msg)
        start += 1
    print("export data end!!!\n total consuming time:" + str(time.time() - begin) + "s")
def init_config_defaults(self):
    assert False, "TODO: implementing default values for existing settings"
    if self.settings.config_file:
        rc_file = self.settings.config_file
    else:
        rc_file = os.path.join(os.path.expanduser('~'), '.' + self.DEFAULT_RC)
    assert not os.path.exists(rc_file), "File exists: %s" % rc_file
    os.mknod(rc_file)
    self.settings = confparse.load_path(rc_file)
    if config_key:
        setattr(settings, config_key, confparse.Values())
        self.rc = getattr(rc, config_key)
    else:
        self.rc = settings
    "Default some global settings: "
    self.settings.set_source_key('config_file')
    self.settings.config_file = Application.DEFAULT_RC
    "Default program specific settings: "
    self.rc.dbref = Application.DEFAULT_DB
    v = raw_input("Write new config to %s? [Yn]")
    if not v.strip() or v.lower().strip() == 'y':
        self.settings.commit()
        print "File rewritten. "
    else:
        print "Not writing file. "
def download_file(self, filepath, key_name, bucket_name):
    all_bucket_name_list = [i.name for i in self.conn.get_all_buckets()]
    if bucket_name not in all_bucket_name_list:
        print 'Bucket %s does not exist, please try again' % (bucket_name)
        return
    bucket = self.conn.get_bucket(bucket_name)
    all_key_name_list = [i.name for i in bucket.get_all_keys()]
    if key_name not in all_key_name_list:
        print 'File %s does not exist, please try again' % (key_name)
        return
    key = bucket.get_key(key_name)
    if not os.path.exists(os.path.dirname(filepath)):
        print 'Filepath %s does not exist, please create it and try again' % (filepath)
        return
    if os.path.exists(filepath):
        while True:
            d_tag = raw_input('File %s already exists, are you sure you want to overwrite it (Y/N)? ' % (key_name)).strip()
            if d_tag not in ['Y', 'N'] or len(d_tag) == 0:
                continue
            elif d_tag == 'Y':
                os.remove(filepath)
                break
            elif d_tag == 'N':
                return
    os.mknod(filepath)
    try:
        key.get_contents_to_filename(filepath)
    except Exception:
        pass
def create_test_files(self):
    """Create a minimal test case including all supported file types
    """
    # File
    self.create_regular_file('empty', size=0)
    self.create_regular_file('file1', size=1024 * 80)
    # Directory
    self.create_regular_file('dir2/file2', size=1024 * 80)
    # File owner
    os.chown('input/file1', 100, 200)
    # File mode
    os.chmod('input/file1', 0o7755)
    os.chmod('input/dir2', 0o555)
    # Block device
    os.mknod('input/bdev', 0o600 | stat.S_IFBLK, os.makedev(10, 20))
    # Char device
    os.mknod('input/cdev', 0o600 | stat.S_IFCHR, os.makedev(30, 40))
    if xattr.is_enabled():
        xattr.setxattr(os.path.join(self.input_path, 'file1'), 'user.foo', b'bar')
    # Hard link
    os.link(os.path.join(self.input_path, 'file1'),
            os.path.join(self.input_path, 'hardlink'))
    # Symlink
    os.symlink('somewhere', os.path.join(self.input_path, 'link1'))
    # FIFO node
    os.mkfifo(os.path.join(self.input_path, 'fifo1'))
def log_data(self, field_var_name, filename, tid=None):
    """
    log data into a file

    Changelog
    20160308: insert external tid for data to have same tid
    """
    if self.log_flag:
        filename_new = filename + time.strftime("%Y%m%d") + '.csv'
        if not os.path.isfile(filename_new):
            # create a new file and write the header line
            os.mknod(filename_new)
            os.chmod(filename_new, 0o755)
            fil = open(filename_new, 'w+')
            # header: join the selected field names
            header = ",".join(field_var_name)
            fil.write(header + "\n")
        else:
            # append; make the file writable first if necessary
            if not os.access(filename_new, os.W_OK):
                os.chmod(filename_new, 0o755)
            fil = open(filename_new, 'a+')
        # pass register_dict to the file write;
        # check whether an external tid was supplied
        if tid is not None:
            self.register_dict['tid'] = str(tid)
        field_var = [str(self.register_dict[i]) for i in field_var_name]
        output_text = ",".join(field_var)
        fil.write(output_text + "\n")
        fil.close()
    else:
        # logging disabled
        pass
def _create_expiring_tracker_object(self, object_path):
    try:
        # Check if gsexpiring volume is present in ring
        if not any(d.get('device', None) == self.expiring_objects_account
                   for d in self.object_ring.devs):
            raise Exception("%s volume not in ring" %
                            self.expiring_objects_account)

        # Check if gsexpiring is mounted.
        expiring_objects_account_path = \
            os.path.join(self.devices, self.expiring_objects_account)
        mount_check = self._diskfile_router['junk'].mount_check
        if mount_check and not do_ismount(expiring_objects_account_path):
            raise Exception("Path %s doesn't exist or is not a mount "
                            "point" % expiring_objects_account_path)

        # Create object directory
        object_dir = os.path.dirname(object_path)
        try:
            mkdirs(object_dir)
        except OSError as err:
            mkdirs(object_dir)  # handle race

        # Create zero-byte file
        try:
            os.mknod(object_path)
        except OSError as err:
            if err.errno != errno.EEXIST:
                raise
    except Exception as e:
        self.logger.error("Creation of tracker object %s failed: %s" %
                          (object_path, str(e)))
def fusion_num(fusion_file, result_dir):
    fusion_str = ""
    train_list = []
    test_list = []
    for f in os.listdir(result_dir):
        f_l = f.split("_")
        if f_l[-1] == "train":
            train_list.append(f)
        elif f_l[-1] == "test":
            test_list.append(f)
    train_list.sort()
    test_list.sort()
    for re_file in train_list:
        re_obj = open(result_dir + "/" + re_file)
        re_content = re_obj.read()
        fusion_str += re_file + "\n" + re_content + "\n\n"
    for re_file in test_list:
        re_obj = open(result_dir + "/" + re_file)
        re_content = re_obj.read()
        fusion_str += re_file + "\n" + re_content + "\n\n"
    if not os.path.isfile(fusion_file):
        os.mknod(fusion_file)
    fu_fobj = open(fusion_file, "w")
    fu_fobj.write(fusion_str)
    fu_fobj.close()
def reinit_comlog(loggername, loglevel, logfile, logtype,
                  maxlogsize, bprint_console, gen_wf=False):
    # too many args; pylint: disable=R0913
    """
    Reinitialize comlog; the parameters have the same meaning as in init_comlog.

    reinit_comlog resets the logging parameters for the whole process, but note
    that loggername must not be the same as the one used before: reusing the
    same loggername raises ValueError.
    """
    global G_INITED_LOGGER
    if loggername in G_INITED_LOGGER:
        msg = 'loggername:%s has already been initialized!!!' % loggername
        raise ValueError(msg)
    G_INITED_LOGGER.append(loggername)
    loggerman = _LoggerMan()
    loggerman._reset_logger(logging.getLogger(loggername))
    if os.path.exists(logfile) is False:
        if platforms.is_linux():
            os.mknod(logfile)
        else:
            with open(logfile, 'w+') as fhandle:
                fhandle.write('----Windows File Creation ----\n')
    elif os.path.isfile(logfile) is False:
        raise err.LoggerException(
            'The log file exists, but it\'s not a regular file'
        )
    loggerman._config_filelogger(
        loglevel, logfile, logtype,
        maxlogsize, bprint_console, gen_wf
    )  # pylint: disable=w0212
    info('-' * 20 + 'Log Reinitialized Successfully' + '-' * 20)
    return
def test_new_file_found_for_staging(self):
    stdout = sys.stdout
    try:
        new_stdout = StringIO()
        sys.stdout = new_stdout
        result = self.gutil._git_status()
        self.assertIn('nothing to commit, working directory clean', result)
        change_set = self.gutil.change_set
        new_file = self.random_filename()
        self.gutil.change_set = [new_file]
        os.mknod(new_file)
        self.assertNotIn('nothing to commit, working directory clean',
                         self.gutil._git_status())
        self.gutil._clean_unstaged_files()
        self.assertIn('nothing to commit, working directory clean',
                      self.gutil._git_status())
        new_dirname = self.random_filename(size=10)
        os.mkdir(new_dirname)
        os.mknod('{}/{}'.format(new_dirname, new_file))
        self.assertNotIn('nothing to commit, working directory clean',
                         self.gutil._git_status())
        self.gutil._clean_unstaged_files()
        self.assertIn('nothing to commit, working directory clean',
                      self.gutil._git_status())
        self.gutil.change_set = change_set
    finally:
        sys.stdout = stdout
def setUp(self):
    self.test_file = self.params.get('tmp_file', default='/tmp/dummy')
    self.duration = self.params.get('duration', default='30')
    self.threads = self.params.get(
        'threads', default=cpu.online_cpus_count())
    self.size = self.params.get(
        'memory_to_test', default=int(0.9 * memory.meminfo.MemFree.m))
    smm = SoftwareManager()
    for package in ['gcc', 'libtool', 'autoconf', 'automake', 'make']:
        if not smm.check_installed(package) and not smm.install(package):
            self.cancel("Failed to install %s, which is needed for "
                        "the test to be run" % package)
    if not os.path.exists(self.test_file):
        try:
            os.mknod(self.test_file)
        except OSError:
            self.cancel("Skipping test since test file creation failed")
    loc = ["https://github.com/stressapptest/"
           "stressapptest/archive/master.zip"]
    tarball = self.fetch_asset("stressapp.zip", locations=loc, expire='7d')
    archive.extract(tarball, self.workdir)
    self.sourcedir = os.path.join(self.workdir, 'stressapptest-master')
    os.chdir(self.sourcedir)
    process.run('./configure', shell=True)
    build.make(self.sourcedir)
def get_fs_info(self, path):
    fs_type = None
    uuid = None
    label = None
    devpth = None
    tmpd = None
    try:
        st_dev = os.stat(path).st_dev
        dev = os.makedev(os.major(st_dev), os.minor(st_dev))
        tmpd = tempfile.mkdtemp()
        devpth = "%s/dev" % tmpd
        os.mknod(devpth, 0o400 | stat.S_IFBLK, dev)
    except:
        raise
    ret = {}
    pairs = {'LABEL': 'label', 'UUID': 'uuid', 'FS_TYPE': 'fs_type'}
    for (blkid_n, my_n) in pairs.items():
        cmd = ['blkid', '-s%s' % blkid_n, '-ovalue', devpth]
        print(cmd)
        try:
            output = subprocess.Popen(cmd, stdout=subprocess.PIPE).communicate()[0]
            ret[my_n] = output.rstrip()
        except Exception as e:
            os.unlink(devpth)
            os.rmdir(tmpd)
            raise UnsupportedException("Unable to determine %s for %s" %
                                       (blkid_n, path))
    os.unlink(devpth)
    os.rmdir(tmpd)
    return ret
def insert_module():
    print '****** Insert Module ******\n'
    print '1: insmod chdev.ko'
    subprocess.call(['sudo', 'insmod', 'chdev.ko'])
    print 'success\n'
    ps = subprocess.Popen(('dmesg'), stdout=subprocess.PIPE)
    output = subprocess.check_output(('tail', '-n 5'), stdin=ps.stdout)
    ps.wait()
    numbers = [int(n) for n in output.split() if n.isdigit()]
    major = numbers[1]
    print 'MAJOR NUMBER:', major, '\n'
    print '2: mknod'
    filename = "/dev/mychdev"
    mode = 0777 | stat.S_IFCHR
    dev = os.makedev(major, 0)
    os.mknod(filename, mode, dev)
    subprocess.call('sudo chmod 777 /dev/mychdev', shell=True)
    print 'success'
def __create_minimal_dev(self):
    """Create a minimal /dev so that we don't corrupt the host /dev"""
    origumask = os.umask(0000)
    devices = (("null", 1, 3, 0666),
               ("urandom", 1, 9, 0666),
               ("random", 1, 8, 0666),
               ("full", 1, 7, 0666),
               ("ptmx", 5, 2, 0666),
               ("tty", 5, 0, 0666),
               ("zero", 1, 5, 0666))
    links = (("/proc/self/fd", "/dev/fd"),
             ("/proc/self/fd/0", "/dev/stdin"),
             ("/proc/self/fd/1", "/dev/stdout"),
             ("/proc/self/fd/2", "/dev/stderr"))
    for (node, major, minor, perm) in devices:
        if not os.path.exists(self._instroot + "/dev/" + node):
            os.mknod(self._instroot + "/dev/" + node,
                     perm | stat.S_IFCHR, os.makedev(major, minor))
    for (src, dest) in links:
        if not os.path.exists(self._instroot + dest):
            os.symlink(src, self._instroot + dest)
    os.umask(origumask)
def write_script(script, project_dict, template, filename=False):
    """Writes the scripts"""
    import stat
    #os.chdir("/lustre/naasc/elastufk/Imaging/test/")
    # script directory: .../calibrated or Imaging/
    project_path = project_dict['project_path']
    os.chdir(project_path)
    if template is False:
        if project_dict['project_type'] == 'Imaging':
            os.chdir('sg_ouss_id/group_ouss_id/member_ouss_id/calibrated/')
        else:
            os.chdir('Imaging/')
    else:
        filename = 'scriptForImaging_template.py'
    if not filename:
        if not os.path.isfile('scriptForImaging.py'):
            mode = 0666 | stat.S_IRUSR
            os.mknod('scriptForImaging.py', mode)
        filename = 'scriptForImaging.py'
    else:
        mode = 0666 | stat.S_IRUSR
        os.mknod(filename, mode)
    imscript = open(filename, 'w')
    imscript.writelines(script)
    imscript.close()
    print '\n\nYour ' + filename + ' is in ' + os.getcwd()
def setDynamicNetwork():
    """Write the dynamic network settings to the config file"""
    if QString(getLinuxOperationSystemType()).contains("Ubuntu"):
        return setUbuntuDynamicNetwork()
    if not os.path.exists("/etc/sysconfig/network-scripts/"):
        os.system("mkdir -p /etc/sysconfig/network-scripts")
    if not os.path.exists("/etc/sysconfig/network-scripts/ifcfg-br0"):
        os.mknod("/etc/sysconfig/network-scripts/ifcfg-br0")
    #delBrCmd = "sed -i '/BOOTPROTO/,$'d /etc/sysconfig/network-scripts/ifcfg-br0"
    delBrCmd = "echo > /etc/sysconfig/network-scripts/ifcfg-br0"
    #content = "BOOTPROTO=dhcp"
    content = "BOOTPROTO=dhcp\\nDEVICE=br0\\nONBOOT=yes\\nTYPE=Bridge\\nPEERNTP=yes\\ncheck_link_down(){\\n return 1; \\n}"
    #addCmd = "sed -i '$ a\\%s' /etc/sysconfig/network-scripts/ifcfg-%s" % (content, ethNameList[0])
    addCmd = "sed -i '$ a\\%s' /etc/sysconfig/network-scripts/ifcfg-br0" % (content)
    delOk = os.system(delBrCmd)
    addOk = os.system(addCmd)
    os.system(str("persist /etc/sysconfig/network-scripts/ifcfg-br0"))
    if delOk != 0 or addOk != 0:
        return False
    return True
def make_dir_files(root, dirs, files):
    # Create the directories first, then an empty file for each name.
    for d in dirs:
        os.mkdir(os.path.join(root, d))
    for f in files:
        os.mknod(os.path.join(root, f))
def makeDMNode(root="/"):
    major = minor = None
    for (fn, devname, val) in (("/proc/devices", "misc", "major"),
                               ("/proc/misc", "device-mapper", "minor")):
        f = open(fn)
        lines = f.readlines()
        f.close()
        for line in lines:
            try:
                (num, dev) = line.strip().split(" ")
            except:
                continue
            if dev == devname:
                if val == "major":
                    major = int(num)
                else:
                    minor = int(num)
                break
    # print "major is %s, minor is %s" % (major, minor)
    if major is None or minor is None:
        return
    mkdirChain(root + "/dev/mapper")
    try:
        os.mknod(root + "/dev/mapper/control", stat.S_IFCHR | 0600,
                 os.makedev(major, minor))
    except:
        pass
def create_file(data_path, file_list, file_name):
    """
    Create the training file train.txt/test.txt
    :param data_path: parent path of the image files
    :param file_list: list generated from the synset
    :param file_name: label file to write
    :return:
    """
    try:
        cnt = 0
        if not os.path.isfile(file_name):
            os.mknod(file_name)
        file_txt = open(file_name, mode='w')
        for class_name in file_list:
            print class_name
            file_path = os.path.join(data_path, class_name)
            print file_path
            if not os.path.isdir(file_path):
                print file_path, 'directory does not exist'
                continue
            for path in os.listdir(file_path):
                file_txt.write(os.path.join(class_name, path) + " " + str(cnt) + "\n")
            cnt += 1
    except Exception, e:
        print "Execution failed", e
def sw_svlan_check(sw_file='sw_test.txt', olt_result_file='result/olt.txt',
                   sw_result_file='result/sw.txt', fail_file='result/fail.log'):
    for f in [fail_file, olt_result_file, sw_result_file]:
        if os.path.exists(f):
            os.remove(f)
        os.mknod(f)
    with open(sw_file) as devices:
        sw = {}
        for device in devices:
            sip, olt = [x.strip() for x in device.split(',', 1)]
            sw.setdefault(sip, set()).add(olt)
    for k, v in sw.items():
        svlan_olt = {}
        for i in v:
            mark = "fail"
            records = {}
            olt_ip, factory, area = [x.strip() for x in i.split(',')]
            if factory.lower() == 'zte':
                mark, records = zte(olt_ip)
            elif factory.lower() == 'hw':
                mark, records = huawei(olt_ip)
            olt_check_out(i, mark, records, fail_file=fail_file,
                          result_file=olt_result_file)
            if mark == 'success':
                for svlan in records.keys():
                    svlan_olt.setdefault(svlan, set()).add(i)
        sw_check_out(k, svlan_olt, result_file=sw_result_file)
    with open(sw_file, 'a') as fsw:
        fsw.write('all devices checked\n')
    print 'all devices checked'
def olt_svlan_check(olts_file='olt_test.txt', fail_file='result/fail.log',
                    result_file='result/olt.txt'):
    """TODO: Docstring for svlan.

    :olts_file: TODO
    :result_file: TODO
    :returns: TODO
    """
    for f in [fail_file, result_file]:
        if os.path.exists(f):
            os.remove(f)
        os.mknod(f)
    with open(olts_file) as olts:
        for olt in olts:
            mark = "fail"
            records = {}
            olt = olt.strip('\n')
            print olt
            ip, factory, area = [x.strip() for x in olt.split(',')]
            if factory.lower() == "zte":
                mark, records = zte(ip)
            elif factory.lower() == "hw":
                mark, records = huawei(ip)
            olt_check_out(olt, mark, records, result_file=result_file,
                          fail_file=fail_file)
def __init__(self):
    self.jobType2Tested = {
        'tps': 'n',
        'srpm': 'n',
        'tier1': 'n',
        'tier2': 'n',
        'regression': 'n',
        'fj': 'n',
        'virt': 'n',
    }
    self.type2Tested = ''
    self.errataName = ''
    self.errataLname = ''
    self.yang = ''
    self.__parseArgs()
    ErrataInfo.__init__(self, self.errataName)
    self.resultPath = "./result"
    self.jobStatePath = '%s/%s.%s' % (self.resultPath,
                                      self.errataId, 'jobstate')
    if not os.path.exists(self.jobStatePath):
        os.mknod(self.jobStatePath)
    self.jobState = ConfigObj(self.jobStatePath, encoding="utf8")
def data2train_txt(train_txt_path, train_path):
    train_image_path = []
    if os.path.exists(train_txt_path):
        print 'train_txt_path already exists'
        sys.exit()
    os.mknod(train_txt_path)
    train_txt = open(train_txt_path, 'w')
    class_name = os.listdir(train_path)
    class_num = len(os.listdir(train_path))
    #print class_num
    i = 0
    for every_class in class_name:
        if i == class_num:
            print 'all classes have been written'
            break
        cur_class_path = train_path + every_class + '/'
        for item in os.listdir(cur_class_path):
            cur_img_path = train_path + every_class + '/' + item
            train_image_path.append(cur_img_path + ' ' + str(every_class))
            #train_txt.write(cur_img_path + ' ' + str(every_class))
            #train_txt.write('\n')
        i += 1
    random.shuffle(train_image_path)
    for item in train_image_path:
        train_txt.write(item)
        train_txt.write('\n')
    train_txt.close()
def edit_config_file():
    if not os.path.isfile(_SETTINGS_FILE):
        os.mknod(_SETTINGS_FILE)
        show_help_dlg("<b>No <i>.sshplus</i> config file found, we created one "
                      "for you!\n\nPlease edit the file and reload the "
                      "config.</b>\n\n%s" % _EDIT_CONFIG, error=True)
    os.spawnvp(os.P_NOWAIT, 'xdg-open', ['xdg-open', _SETTINGS_FILE])
    os.wait3(os.WNOHANG)
def mknod(self, path, mode, dev):
    self._assert_access(path, os.W_OK)
    os.mknod("." + path, mode, dev)
while True:
    temp = input("Set the initial position? (Y/N)")
    if temp == 'Y' or temp == 'y':
        break

Quat_Relative_Zero_Point = Set_Initial_Pos()  # get the initial state values
RSs2Js = cal_RSsJs(Quat_Relative_Zero_Point[0, 0], Quat_Relative_Zero_Point[0, 1],
                   Quat_Relative_Zero_Point[0, 2], Quat_Relative_Zero_Point[0, 3])

# whether to record the data
Flag_Data_Record = input("Record the data?(Y/N)")
if Flag_Data_Record == 'Y' or Flag_Data_Record == 'y':
    Flag_Data_Record = True
    # use the current time as the filename and create an empty .csv file
    Current_Time = time.strftime('%Y%m%d_%H%M%S', time.localtime(time.time()))
    os.mknod('%s.csv' % Current_Time)
else:
    Flag_Data_Record = False

'''plt.figure('Pos')
plt.ion()
plt.figure("Euler Angle")
plt.ion()
plt.ylabel('Angle/deg')
plt.xlabel('counts/times')
plt.title('Euler angle')
plt.ylim(-100, 100)'''

time_start = time.time()
while (1):
    Get_Quat(Quat)
                    rtscts=1)
print(ser.isOpen())

# Step II: write the data into the CSV file while capturing
# 1. create a folder
path_dir = path_root + "/" + dir_name
if dir_name not in os.listdir(path_root):
    os.mkdir(path_dir)
# 2. create the CSV file
path_file = path_dir + '/' + FILL_NAME + '.csv'
content_title = "time," + Sensor_for_save + "," + "\n"  # the title row of the table
if not os.path.exists(path_file):
    os.mknod(path_file)
    with io.open(path_file, 'w') as file_Object:
        file_Object.write(content_title)

time_mem = 0
while True:
    data_recvc = ser.readline().decode('utf-8')
    if check_json_format(data_recvc):
        if not data_recvc:
            pass
        else:
            data_dict = json.loads(data_recvc)
            # print('sensor_name : ', data_dict['sensor_name'], end=" ")
            # print('time : ', data_dict['time'], end=" ")
            # print('value : ', data_dict['value'])
                    metavar='f', type=str, action="store",
                    dest="filename", default="scoring.csv")
result = parser.parse_args()

# now we get the filename for our new csv
if result.filename is None or not result.filename:
    filename = "/scoring.csv"
else:
    filename = "/" + result.filename

# make sure our outfile exists and is empty
if os.path.exists(path + filename):
    os.remove(path + filename)
os.mknod(path + filename)

# open file
with open(path + "/scoring.txt") as read, open(path + filename, "a") as outfile:
    # first, print a header line to our file
    outfile.write(
        "capability name, widely deployed, used by tools, used by clients, "
        "future, complete, stable, discoverable, documented, "
        "required in last release, foundational, atomic, proximity, non-admin\n"
    )
    # read in every line in the file
    for line in read:
        if ":" in line and "*" in line:
            # convert line to proper csv format
            line = line.replace("] [", ",").replace("[", ",").replace("]*", "")
            line = line.split(",")[:-1]
            line = ",".join(line)
            line = " ".join(line.split())
def dents(ubifs, inodes, dent_node, path='', perms=False):
    inode = inodes[dent_node.inum]
    dent_path = os.path.join(path, dent_node.name)

    if dent_node.type == UBIFS_ITYPE_DIR:
        try:
            if not os.path.exists(dent_path):
                os.mkdir(dent_path)
                if perms:
                    set_file_perms(dent_path, inode)
        except Exception as e:
            ubifs.log.write('DIR Fail: %s' % e)
        if 'dent' in inode:
            for dnode in inode['dent']:
                dents(ubifs, inodes, dnode, dent_path, perms)

    elif dent_node.type == UBIFS_ITYPE_REG:
        try:
            if inode['ino'].nlink > 1:
                if 'hlink' not in inode:
                    inode['hlink'] = dent_path
                    buf = process_reg_file(ubifs, inode, dent_path)
                    write_reg_file(dent_path, buf)
                else:
                    os.link(inode['hlink'], dent_path)
            else:
                buf = process_reg_file(ubifs, inode, dent_path)
                write_reg_file(dent_path, buf)
            if perms:
                set_file_perms(dent_path, inode)
        except Exception as e:
            ubifs.log.write('FILE Fail: %s' % e)

    elif dent_node.type == UBIFS_ITYPE_LNK:
        try:
            os.symlink('%s' % inode['ino'].data, dent_path)
        except Exception as e:
            ubifs.log.write('SYMLINK Fail: %s : %s' % (inode['ino'].data, dent_path))

    elif dent_node.type in [UBIFS_ITYPE_BLK, UBIFS_ITYPE_CHR]:
        try:
            dev = struct.unpack('<II', inode['ino'].data)[0]
            if perms:
                os.mknod(dent_path, inode['ino'].mode, dev)
                set_file_perms(path, inode)
            else:
                write_reg_file(dent_path, str(dev))
                if perms:
                    set_file_perms(dent_path, inode)
        except Exception as e:
            ubifs.log.write('DEV Fail: %s : %s' % (dent_path, e))

    elif dent_node.type == UBIFS_ITYPE_FIFO:
        try:
            os.mkfifo(dent_path, inode['ino'].mode)
            if perms:
                set_file_perms(dent_path, inode)
        except Exception as e:
            ubifs.log.write('FIFO Fail: %s : %s' % (dent_path, e))

    elif dent_node.type == UBIFS_ITYPE_SOCK:
        try:
            write_reg_file(dent_path, '')
            if perms:
                set_file_perms(dent_path, inode)
        except Exception as e:
            ubifs.log.write('SOCK Fail: %s' % dent_path)
def create_log_save_path(target_path):
    top_path = os.path.split(target_path)[0]
    if not os.path.exists(top_path):
        os.mkdir(top_path)
    if not os.path.exists(target_path):
        os.mknod(target_path)
def _setup_devices(self):
    if self.config['internal_dev_setup']:
        util.rmtree(self.make_chroot_path("dev"), selinux=self.selinux,
                    exclude=self.mounts.get_mountpoints())
        util.mkdirIfAbsent(self.make_chroot_path("dev", "pts"))
        util.mkdirIfAbsent(self.make_chroot_path("dev", "shm"))
        prevMask = os.umask(0000)
        devFiles = [
            (stat.S_IFCHR | 0o666, os.makedev(1, 3), "dev/null"),
            (stat.S_IFCHR | 0o666, os.makedev(1, 7), "dev/full"),
            (stat.S_IFCHR | 0o666, os.makedev(1, 5), "dev/zero"),
            (stat.S_IFCHR | 0o666, os.makedev(1, 8), "dev/random"),
            (stat.S_IFCHR | 0o444, os.makedev(1, 9), "dev/urandom"),
            (stat.S_IFCHR | 0o666, os.makedev(5, 0), "dev/tty"),
            (stat.S_IFCHR | 0o600, os.makedev(5, 1), "dev/console"),
            (stat.S_IFCHR | 0o666, os.makedev(5, 2), "dev/ptmx"),
            (stat.S_IFCHR | 0o666, os.makedev(10, 237), "dev/loop-control"),
            (stat.S_IFCHR | 0o600, os.makedev(10, 57), "dev/prandom"),
            (stat.S_IFCHR | 0o600, os.makedev(10, 183), "dev/hwrng"),
        ]
        for i in range(self.config['dev_loop_count']):
            devFiles.append(
                (stat.S_IFBLK | 0o666, os.makedev(7, i),
                 "dev/loop{loop_number}".format(loop_number=i)))
        kver = os.uname()[2]
        self.root_log.debug("kernel version == %s", kver)
        for i in devFiles:
            src_path = "/" + i[2]
            chroot_path = self.make_chroot_path(i[2])
            if util.cmpKernelVer(kver, '2.6.18') >= 0 and src_path == '/dev/ptmx':
                continue
            # create node, but only if it exists on the host too,
            # except for loop devices, which only show up on the host
            # after they are first used
            if os.path.exists(src_path) or "loop" in src_path:
                try:
                    os.mknod(chroot_path, i[0], i[1])
                except OSError as e:
                    # If mknod gives us a permission error, fall back to a
                    # different strategy of using a bind mount from root to
                    # host. This won't work for the loop devices, so just
                    # skip them in this case.
                    if e.errno == errno.EPERM:
                        if os.path.exists(src_path):
                            self.mounts.add_device_bindmount(src_path)
                        continue
                    else:
                        raise
                # Further adjustments if we created a new node instead of
                # bind-mounting an existing one:
                # set context. (only necessary if host running selinux enabled.)
                # fails gracefully if chcon not installed.
                if self.selinux:
                    util.do(
                        ["chcon", "--reference=" + src_path, chroot_path],
                        raiseExc=0, shell=False, env=self.env)
                if src_path in ('/dev/tty', '/dev/ptmx'):
                    os.chown(chroot_path, pwd.getpwnam('root')[2],
                             grp.getgrnam('tty')[2])
        os.symlink("/proc/self/fd/0", self.make_chroot_path("dev/stdin"))
        os.symlink("/proc/self/fd/1", self.make_chroot_path("dev/stdout"))
        os.symlink("/proc/self/fd/2", self.make_chroot_path("dev/stderr"))
        if os.path.isfile(self.make_chroot_path('etc', 'mtab')) or \
           os.path.islink(self.make_chroot_path('etc', 'mtab')):
            os.remove(self.make_chroot_path('etc', 'mtab'))
        os.symlink("../proc/self/mounts", self.make_chroot_path('etc', 'mtab'))
        # symlink /dev/fd in the chroot for everything except RHEL4
        if util.cmpKernelVer(kver, '2.6.9') > 0:
            os.symlink("/proc/self/fd", self.make_chroot_path("dev/fd"))
        os.umask(prevMask)
        os.symlink("pts/ptmx", self.make_chroot_path('/dev/ptmx'))
def test_context_basics():
    with tempfile.TemporaryDirectory() as temp_dir:

        def cmdb(*paths):
            return os.path.join(temp_dir, 'cmdb', *paths)

        # set up a fake cmdb
        os.mkdir(cmdb())
        os.mknod(cmdb('root.json'))
        os.makedirs(cmdb('accounts'))
        os.makedirs(cmdb('accounts', 'tenant'))
        os.makedirs(cmdb('accounts', 'tenant-second'))
        os.makedirs(cmdb('accounts', 'account-name-1', 'config'))
        os.makedirs(cmdb('accounts', 'account-name-2', 'config'))
        os.mknod(cmdb('accounts', 'tenant', 'tenant.json'))
        os.mknod(cmdb('accounts', 'tenant-second', 'tenant.json'))
        os.makedirs(cmdb('product', 'config'))
        os.makedirs(cmdb('product', 'config', 'solutions', 'environment'))
        os.makedirs(cmdb('product', 'config', 'solutions', 'environment', 'segment'))

        with open(cmdb('product', 'config', 'solutions', 'environment',
                       'environment.json'), 'wt+') as f:
            json.dump({"Name": "environment_name"}, f)
        with open(cmdb('product', 'config', 'solutions', 'environment', 'segment',
                       'segment.json'), 'wt+') as f:
            json.dump({"Name": "segment_name"}, f)
        with open(cmdb('product', 'config', 'product.json'), 'wt+') as f:
            json.dump({"Name": "product_name"}, f)
        with open(cmdb('accounts', 'account-name-1', 'config', 'account.json'),
                  'wt+') as f:
            json.dump({"Name": "account-name-1"}, f)
        with open(cmdb('accounts', 'account-name-2', 'config', 'account.json'),
                  'wt+') as f:
            json.dump({"Name": "account-name-2"}, f)

        context = Context(cmdb())
        assert context.root_dir == cmdb()

        with pytest.raises(NoRootFileError):
            Context(temp_dir)

        with pytest.raises(MultipleTenantsFoundError):
            AccountLevel(cmdb('accounts', 'account-name-1'))
        os.remove(cmdb('accounts', 'tenant-second', 'tenant.json'))

        account_level = AccountLevel(cmdb('accounts', 'account-name-1'))
        assert account_level.props['Name'] == 'account-name-1'
        assert account_level.tenant_dir == cmdb('accounts', 'tenant')
        assert account_level.level_file_path == cmdb('accounts', 'account-name-1',
                                                     'config', 'account.json')

        tenant_level = TenantLevel(cmdb('accounts', 'tenant'))
        assert tenant_level.level_file_path == cmdb('accounts', 'tenant',
                                                    'tenant.json')

        with pytest.raises(NoLevelFileError):
            ProductLevel(cmdb('accounts', 'account-name-1'),
                         config=dict(account="account-name-1"))
        with pytest.raises(MultipleAccountsFoundError):
            ProductLevel(cmdb('product'))

        product_level = ProductLevel(cmdb('product'),
                                     config=dict(account="account-name-1"))
        assert product_level.tenant_dir == cmdb('accounts', 'tenant')
        assert product_level.account_dir == cmdb('accounts', 'account-name-1')

        product_level = ProductLevel(cmdb('product'),
                                     config=dict(account="account-name-2"))
        assert product_level.tenant_dir == cmdb('accounts', 'tenant')
        assert product_level.account_dir == cmdb('accounts', 'account-name-2')

        with pytest.raises(SpecifiedAccountNotFoundError):
            ProductLevel(cmdb('product'), config=dict(account="account-name-3"))

        environment_level = EnvironmentLevel(
            cmdb('product', 'config', 'solutions', 'environment'),
            config=dict(account='account-name-1'))
        assert environment_level.tenant_dir == cmdb('accounts', 'tenant')
        assert environment_level.account_dir == cmdb('accounts', 'account-name-1')
        assert environment_level.props['Environment'] == 'environment'

        segment_level = SegmentLevel(cmdb('product', 'config', 'solutions',
                                          'environment', 'segment'),
                                     config=dict(account='account-name-2'))
        assert segment_level.tenant_dir == cmdb('accounts', 'tenant')
        assert segment_level.account_dir == cmdb('accounts', 'account-name-2')
        assert segment_level.props['Environment'] == 'environment'
        assert segment_level.props['Segment'] == 'segment'

        os.remove(cmdb('accounts', 'account-name-1', 'config', 'account.json'))

        product_level = ProductLevel(cmdb('product'),
                                     config=dict(account='account-name-2'))
        assert product_level.tenant_dir == cmdb('accounts', 'tenant')
        assert product_level.account_dir == cmdb('accounts', 'account-name-2')

        with pytest.raises(SpecifiedAccountNotFoundError):
            ProductLevel(cmdb('product'), config=dict(account='account-name-1'))

        product_level = ProductLevel(cmdb('product'))
        assert product_level.tenant_dir == cmdb('accounts', 'tenant')
        assert product_level.account_dir == cmdb('accounts', 'account-name-2')

        os.remove(cmdb('accounts', 'account-name-2', 'config', 'account.json'))
        with pytest.raises(NoAccountsFoundError):
            ProductLevel(cmdb('product'))

        os.remove(cmdb('accounts', 'tenant', 'tenant.json'))
        with pytest.raises(NoTenantFoundError):
            ProductLevel(cmdb('product'))
def mknod(self, path, mode, dev):
    print(f"SSS->mknod: {path}\n")
    return os.mknod(self.get_full_path(path), mode, dev)
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
from torch.optim.lr_scheduler import MultiStepLR, ReduceLROnPlateau

from models import SSIM
from models import *
from models.Discriminator import DiscriminatorNet, DiscriminatorNet_mnist
from models.HidingUNet import UnetGenerator, UnetGenerator_mnist

# save a copy of the code for each training run
if args.train:
    cur_time = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime())
    args.save_path += cur_time + '/'
    os.makedirs(args.save_path + 'images', exist_ok=True)
    os.makedirs(args.save_path + 'checkpiont', exist_ok=True)
    os.makedirs(args.save_path + 'models', exist_ok=True)
    os.mknod(args.save_path + "models/main.py")
    os.mknod(args.save_path + "models/HidingUNet.py")
    os.mknod(args.save_path + "models/Discriminator.py")
    shutil.copyfile('main.py', args.save_path + "models/main.py")
    shutil.copyfile('models/HidingUNet.py', args.save_path + 'models/HidingUNet.py')
    shutil.copyfile('models/Discriminator.py', args.save_path + 'models/Discriminator.py')

# device = 'cuda' if torch.cuda.is_available() else 'cpu'

# Preparing Data
print('==> Preparing data..')
if args.dataset == 'cifar10':
    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
def _make_dev_file(file_abs_path, dev, mode):
    devmajor = os.major(dev)
    devminor = os.minor(dev)
    new_dev = os.makedev(devmajor, devminor)
    os.mknod(file_abs_path, mode, new_dev)
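A minimal usage sketch for _make_dev_file, assuming its arguments come from os.lstat() of an existing device node; the source path /dev/null and the target path are illustrative only, and creating device nodes normally requires root (CAP_MKNOD):

import os
import stat

st = os.lstat('/dev/null')
if stat.S_ISCHR(st.st_mode) or stat.S_ISBLK(st.st_mode):
    # st_rdev carries the packed device number, st_mode the type and
    # permission bits that os.mknod expects.
    _make_dev_file('/tmp/null-copy', st.st_rdev, st.st_mode)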
            print(e)
        print('Exported user info to \033[31;1m{}\033[0m successfully'.format(_ExportCsv))
    if (_Linfo[0] != 'find' and _Linfo[0] != 'search' and _Linfo[0] != 'exit'
            and _Linfo[0] != 'quit' and _Linfo[0] != 'export'):
        print('Unknown command; enter help for usage.')

def _ExitSystem():
    _QuaryResult = str(input('Do you want to exit the menu [Yy|Nn): ').lower())
    if _QuaryResult == 'n':
        pass
    else:
        sys.exit('Exited the menu.')

# Check whether the user info file exists
if os.path.exists(_StuInfo) == False:
    os.mknod(_StuInfo)

while True:
    _UCount += 1
    # check the number of username/password attempts
    if _UCount > 3:
        sys.exit('Username or password entered wrongly more than 3 times; exiting.')
    else:
        _InUser = input('Username: '******'Password: '******'Incorrect username or password.')
        else:
            print('Login succeeded.')
            while True:
                # menu selection; guards against logging in repeatedly
                print('\nMenu: \n'
if os.path.exists("/share/.download-failed"):
    print("Previous pod failed to download the data. Exiting with failure...")
    exit(1)

# If there is some data in incoming_dir but "/share/.download-succeeded" doesn't
# exist, it is a failure case. Exit with an error code immediately.
if os.path.exists(incoming_dir) and len(os.listdir(incoming_dir)) > 0 \
        and not os.path.exists('/share/.download-succeeded'):
    print("Some data was downloaded, but '/share/.download-succeeded' file "
          "doesn't exist. Exiting with failure...")
    exit(1)

# Download the data if "/share/.download-succeeded" does not exist
if not os.path.exists('/share/.download-succeeded'):
    try:
        print("Lock acquired. Downloading data from Swift...")
        getData(containerName=os.environ.get('SWIFT_KEY'),
                in_dir=incoming_dir,
                out_dir=os.environ.get('OUTGOING_DIR'))
        os.mknod('/local/.download-pod')
    except Exception as err:
        print("Failed to download the data:", err)
        # Create a failed file, if download failed to complete
        os.mknod("/share/.download-failed")
        exit(1)
    # Create a success file, if download completed successfully
    os.mknod("/share/.download-succeeded")
    print("Data downloaded!")
                valid_acc, valid_roc,
                str(datetime.timedelta(seconds=time.time() - start_time))[:7]))
        scheduler.step(valid_roc)
        early_stopping(valid_roc, model, logger)
        if early_stopping.early_stop:
            logger.info("========== Early stopping...==========")
            break
    logger.info(f"===> The BEST AUC score in fold {fold} is: {early_stopping.best_score:.3f}")
    return valid_preds.cpu()


if __name__ == "__main__":
    FILE = f"../logs/log_imgsize_{IMG_SIZE}_eff_b{EFFICIENT_TYPE}.log"
    if not os.path.exists(FILE):
        os.mknod(FILE)
    logger = get_logger(FILE)

    df = pd.read_csv("../input/train_folds.csv")
    df["oof"] = 0
    for fold_i in range(5):
        df.loc[df.kfold == fold_i, "oof"] = trainer(fold_i)
    oof_auc = roc_auc_score(df.target, df.oof)
    logger.info(f"############## OOF AUC score is {oof_auc:.3f} ##############")
    OOF_NAME = f"oof_imgsize_{IMG_SIZE}_eff_b{EFFICIENT_TYPE}"
    df.to_csv(f"../ensembles/oofs/{OOF_NAME}.csv", index=None)
def main():
    # Set up logging. Show error messages by default, show debugging
    # info if specified.
    log = logger('loadwatcher', args.debug)
    log.debug('loadwatcher - starting execution at %s' %
              datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
    log.debug('Parsed arguments: %s' % args)

    # initialize the cpu_percent calculator
    [(p.pid, p.info['cpu_percent']) for p in psutil.process_iter(attrs=['cpu_percent'])]
    time.sleep(1)

    userutil = {}
    for p in psutil.process_iter(attrs=['name', 'username', 'cpu_percent']):
        if p.info['username'] != 'root' and p.info['cpu_percent'] > minpercent:
            if p.info['username'] in userutil:
                userutil[p.info['username']] += p.info['cpu_percent']
            else:
                userutil[p.info['username']] = p.info['cpu_percent']

    # if a user is idle, delete the stub /tmp/loadwatcher.py_USER.stub
    for fl in glob.glob(tempfile.gettempdir() + '/' +
                        os.path.basename(__file__) + '_*.stub'):
        if not fl[20:-5] in userutil:
            log.info('deleting stub %s ...' % fl)
            os.unlink(fl)

    hostname = socket.gethostname()
    for user, percent in userutil.items():
        log.debug('user:%s, percent:%s' % (user, percent))
        # see if we need to kill anything
        if percent > killpercent:
            try:
                if user != '':
                    # note: os.spawnlp expects the command name again as argv[0],
                    # otherwise the first flag would be swallowed
                    os.spawnlp(os.P_NOWAIT, 'killall', 'killall', '-9', '-v', '-g', '-u', user)
                    log.info('executed killall -9 -v -g -u %s' % user)
                    to = user
                    if args.onlybcc and args.bcc != '':
                        to = args.bcc
                    send_mail([to, ], "%s: Your jobs have been removed!" % (hostname.upper()),
                              "%s, your CPU utilization on %s is currently %s %%!\n\n" \
                              "For short term jobs you can use no more than %s %%\n" \
                              "or %s CPU cores on the Rhino machines.\n" \
                              "We have removed all your processes from this computer.\n" \
                              "Please try again and submit batch jobs\n" \
                              "or use the 'grabnode' command for interactive jobs.\n\n" \
                              "see http://scicomp.fhcrc.org/Gizmo%%20Cluster%%20Quickstart.aspx\n" \
                              "or http://scicomp.fhcrc.org/Grab%%20Commands.aspx\n" \
                              "or http://scicomp.fhcrc.org/SciComp%%20Office%%20Hours.aspx\n" \
                              "\n" % (user, hostname, int(percent), maxpercent, maxpercent/100),
                              bcc=[args.bcc, ])
                    log.info('Sent kill notification email to %s' % user)
                else:
                    log.warning('Nobody to send emails to')
            except:
                e = sys.exc_info()[0]
                sys.stderr.write("Error in send_mail while sending to '%s': %s\n" % (user, e))
                log.error("Error in send_mail while sending to '%s': %s\n" % (user, e))
                if args.erroremail:
                    send_mail([args.erroremail, ], "Error - loadwatcher",
                              "Please debug email notification to user '%s', Error: %s\n" % (user, e))
                else:
                    sys.stderr.write('no option --error-email given, cannot send error status via email\n')
                    log.error('no option --error-email given, cannot send error status via email\n')
            continue

        # see if we need to send a warning
        stub = os.path.join(tempfile.gettempdir(),
                            os.path.basename(__file__) + '_' + user + '.stub')
        if percent > maxpercent:
            if os.path.exists(stub):
                log.info('stub %s already exists, not sending email' % stub)
                continue
            try:
                if user != '':
                    to = user
                    if args.onlybcc and args.bcc != '':
                        to = args.bcc
                    send_mail([to, ], "%s: You are using too many CPU cores!" % (hostname.upper()),
                              "%s, your CPU utilization on %s is currently %s %%!\n\n" \
                              "For short term jobs you can use no more than %s %%\n" \
                              "or %s CPU cores on the Rhino machines.\n" \
                              "Please reduce your load now and submit batch jobs\n" \
                              "or use the 'grabnode' command for interactive jobs.\n\n" \
                              "see http://scicomp.fhcrc.org/Gizmo%%20Cluster%%20Quickstart.aspx\n" \
                              "or http://scicomp.fhcrc.org/Grab%%20Commands.aspx\n" \
                              "or http://scicomp.fhcrc.org/SciComp%%20Office%%20Hours.aspx\n" \
                              "\n" % (user, hostname, int(percent), maxpercent, maxpercent/100),
                              bcc=[args.bcc, ])
                    os.mknod(stub)
                    log.info('Sent warning email to %s' % user)
                else:
                    log.warning('Nobody to send emails to')
            except:
                e = sys.exc_info()[0]
                sys.stderr.write("Error in send_mail while sending to '%s': %s\n" % (user, e))
                log.error("Error in send_mail while sending to '%s': %s\n" % (user, e))
                if args.erroremail:
                    send_mail([args.erroremail, ], "Error - loadwatcher",
                              "Please debug email notification to user '%s', Error: %s\n" % (user, e))
                else:
                    sys.stderr.write('no option --error-email given, cannot send error status via email\n')
                    log.error('no option --error-email given, cannot send error status via email\n')
def mknod(self, path, mode, dev):
    # print "mknod:", path, mode, dev
    return os.mknod(self._full_path(path), mode, dev)
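The passthrough mknod handlers above simply delegate to os.mknod, which raises PermissionError for device nodes when the filesystem daemon lacks CAP_MKNOD. A hedged sketch of a defensive variant, assuming the fusepy bindings (where raising FuseOSError returns the errno to the kernel); this is not from the source:

import errno
import os

from fuse import FuseOSError  # fusepy

def mknod(self, path, mode, dev):
    try:
        return os.mknod(self._full_path(path), mode, dev)
    except PermissionError:
        # Surface a clean EPERM instead of an unhandled exception
        # when the daemon may not create device nodes.
        raise FuseOSError(errno.EPERM)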
def run(test, params, env):
    """
    Test the virsh pool commands

    (1) Define a given type pool
    (2) List pool with '--inactive --type' options
    (3) Dumpxml for the pool
    (4) Undefine the pool
    (5) Define pool by using the XML file in step (3)
    (6) Build the pool (except 'disk' type pool)
        For 'fs' type pool, cover --overwrite and --no-overwrite options
    (7) Start the pool
    (8) List pool with '--persistent --type' options
    (9) Mark pool autostart
    (10) List pool with '--autostart --type' options
    (11) Restart libvirtd and list pool with '--autostart --persistent' options
    (12) Destroy the pool
    (13) Unmark pool autostart
    (14) Repeat step (11)
    (15) Start the pool
    (16) Get pool info
    (17) Get pool uuid by name
    (18) Get pool name by uuid
    (19) Refresh the pool
         For 'dir' type pool, touch a file under target path and refresh again
         to make the new file show in vol-list.
    (20) Check pool 'Capacity', 'Allocation' and 'Available'
         Create an over-size vol in pool (expect fail), then check these values
    (21) Undefine the pool, and this should fail as pool is still active
    (22) Destroy the pool
    (23) Delete pool for 'dir' type pool. After the command, the pool object
         will still exist but the target path will be deleted
    (24) Undefine the pool
    """

    # Initialize the variables
    pool_name = params.get("pool_name", "temp_pool_1")
    pool_type = params.get("pool_type", "dir")
    pool_target = params.get("pool_target", "")
    source_format = params.get("source_format", "")
    source_name = params.get("pool_source_name", "gluster-vol1")
    source_path = params.get("pool_source_path", "/")
    new_pool_name = params.get("new_pool_name", "")
    build_option = params.get("build_option", "")
    source_initiator = params.get("source_initiator", "")
    same_source_test = "yes" == params.get("same_source_test", "no")
    customize_initiator_iqn = "yes" == params.get("customize_initiator_iqn", "no")
    # The file for the dumped pool xml
    poolxml = os.path.join(data_dir.get_tmp_dir(), "pool.xml.tmp")
    if os.path.dirname(pool_target) == "":
        pool_target = os.path.join(data_dir.get_tmp_dir(), pool_target)
    vol_name = params.get("volume_name", "temp_vol_1")
    # Use pool name as VG name
    status_error = "yes" == params.get("status_error", "no")
    vol_path = os.path.join(pool_target, vol_name)
    ip_protocal = params.get('ip_protocal', 'ipv4')
    source_protocol_ver = params.get('source_protocol_ver', "no")

    if not libvirt_version.version_compare(1, 0, 0):
        if pool_type == "gluster":
            test.cancel("Gluster pool is not supported in current"
                        " libvirt version.")
    if not libvirt_version.version_compare(4, 7, 0):
        if pool_type == "iscsi-direct":
            test.cancel("iSCSI-direct pool is not supported in current"
                        " libvirt version.")
    if source_initiator and not libvirt_version.version_compare(6, 10, 0):
        test.cancel("Source_initiator option is not supported in current"
                    " libvirt_version.")
    if source_protocol_ver == "yes" and not libvirt_version.version_compare(4, 5, 0):
        test.cancel("source-protocol-ver is not supported on current version.")

    def check_pool_list(pool_name, option="--all", expect_error=False):
        """
        Check pool by running pool-list command with given option.
        :param pool_name: Name of the pool
        :param option: option for pool-list command
        :param expect_error: Boolean value, expect command success or fail
        """
        found = False
        # Get the list stored in a variable
        result = virsh.pool_list(option, ignore_status=True)
        utlv.check_exit_status(result, False)
        output = re.findall(r"(\S+)\ +(\S+)\ +(\S+)",
                            str(result.stdout.strip()))
        for item in output:
            if pool_name in item[0]:
                found = True
                break
        if found:
            logging.debug("Find pool '%s' in pool list.", pool_name)
        else:
            logging.debug("Not find pool %s in pool list.", pool_name)
        if expect_error and found:
            test.fail("Unexpected pool '%s' exist." % pool_name)
        if not expect_error and not found:
            test.fail("Expect pool '%s' doesn't exist." % pool_name)

    def check_vol_list(vol_name, pool_name):
        """
        Check volume from the list

        :param vol_name: Name of the volume
        :param pool_name: Name of the pool
        """
        found = False
        # Get the volume list stored in a variable
        result = virsh.vol_list(pool_name, ignore_status=True)
        utlv.check_exit_status(result)
        output = re.findall(r"(\S+)\ +(\S+)", str(result.stdout.strip()))
        for item in output:
            if vol_name in item[0]:
                found = True
                break
        if found:
            logging.debug("Find volume '%s' in pool '%s'.", vol_name, pool_name)
        else:
            test.fail("Not find volume '%s' in pool '%s'." %
                      (vol_name, pool_name))

    def is_in_range(actual, expected, error_percent):
        deviation = 100 - (100 * (float(actual) / float(expected)))
        logging.debug("Deviation: %0.2f%%", float(deviation))
        return float(deviation) <= float(error_percent)

    def check_pool_info(pool_info, check_point, value):
        """
        Check the pool name, uuid, etc.

        :param pool_info: A dict with the pool's information
        :param check_point: Key of the pool info dict; available values: Name,
                            UUID, State, Persistent, Autostart, Capacity,
                            Allocation, Available
        :param value: Expected value of pool_info[check_point]
        """
        if pool_info is None:
            test.fail("Pool info dictionary is needed.")
        val_tup = ('Capacity', 'Allocation', 'Available')
        if check_point in val_tup and float(value.split()[0]):
            # As from bytes to GiB, could cause deviation, and it should not
            # exceed 1 percent.
            if is_in_range(float(pool_info[check_point].split()[0]),
                           float(value.split()[0]), 1):
                logging.debug("Pool '%s' is '%s'.", check_point, value)
            else:
                test.fail("Pool '%s' isn't '%s'." % (check_point, value))
        else:
            if pool_info[check_point] == value:
                logging.debug("Pool '%s' is '%s'.", check_point, value)
            else:
                test.fail("Pool '%s' isn't '%s'." % (check_point, value))

    # Stop multipathd to avoid pool start failures (for fs-like pools, the
    # newly added disk may be in use by device-mapper, so starting the pool
    # would report a "disk already mounted" error).
    multipathd = service.Factory.create_service("multipathd")
    multipathd_status = multipathd.status()
    if multipathd_status:
        multipathd.stop()

    # Run Testcase
    pvt = utlv.PoolVolumeTest(test, params)
    kwargs = {
        'image_size': '1G',
        'pre_disk_vol': ['100M'],
        'source_name': source_name,
        'source_path': source_path,
        'source_format': source_format,
        'persistent': True,
        'ip_protocal': ip_protocal,
        'emulated_image': "emulated-image",
        'pool_target': pool_target,
        'source_initiator': source_initiator,
        'source_protocol_ver': source_protocol_ver
    }
    params.update(kwargs)

    try:
        _pool = libvirt_storage.StoragePool()
        # Step (1)
        # Pool define
        pvt.pre_pool(**params)

        # Step (2)
        # Pool list
        option = "--inactive --type %s" % pool_type
        check_pool_list(pool_name, option)

        # Step (3)
        # Pool dumpxml
        xml = virsh.pool_dumpxml(pool_name, to_file=poolxml)
        logging.debug("Pool '%s' XML:\n%s", pool_name, xml)

        # Update pool name
        if new_pool_name:
            if "/" in new_pool_name:
                new_pool_name = new_pool_name.replace("/", "\\/")
                logging.debug(new_pool_name)
            p_xml = pool_xml.PoolXML.new_from_dumpxml(pool_name)
            p_xml.name = new_pool_name
            del p_xml.uuid
            poolxml = p_xml.xml
            logging.debug("XML after update pool name:\n%s" % p_xml)

        # Update host name
        if same_source_test:
            s_xml = p_xml.get_source()
            s_xml.host_name = "192.168.1.1"
            p_xml.set_source(s_xml)
            poolxml = p_xml.xml
            logging.debug("XML after update host name:\n%s" % p_xml)

        if customize_initiator_iqn:
            initiator_iqn = params.get("initiator_iqn",
                                       "iqn.2018-07.com.virttest:pool.target")
            p_xml = pool_xml.PoolXML.new_from_dumpxml(pool_name)
            s_node = p_xml.xmltreefile.find('/source')
            i_node = ET.SubElement(s_node, 'initiator')
            ET.SubElement(i_node, 'iqn', {'name': initiator_iqn})
            p_xml.xmltreefile.write()
            poolxml = p_xml.xml
            logging.debug('XML after add Multi-IQN:\n%s' % p_xml)

        # Step (4)
        # Undefine pool
        if not same_source_test:
            result = virsh.pool_undefine(pool_name)
            utlv.check_exit_status(result)
            check_pool_list(pool_name, "--all", True)

        # Step (5)
        # Define pool from XML file
        result = virsh.pool_define(poolxml, debug=True)
        # Give error msg when exit status is not expected
        if "/" in new_pool_name and not result.exit_status:
            error_msg = "https://bugzilla.redhat.com/show_bug.cgi?id=639923 "
            error_msg += "is helpful for tracing this bug."
            logging.error(error_msg)
        if "." in new_pool_name and result.exit_status:
            error_msg = "https://bugzilla.redhat.com/show_bug.cgi?id=1333248 "
            error_msg += "is helpful for tracing this bug."
            logging.error(error_msg)
        if same_source_test and not result.exit_status:
            error_msg = "https://bugzilla.redhat.com/show_bug.cgi?id=1171984 "
            error_msg += "is helpful for tracing this bug."
            logging.error(error_msg)
        utlv.check_exit_status(result, status_error)
        if not result.exit_status:
            # Step (6)
            # Build pool
            # '--overwrite/--no-overwrite' just for fs/disk/logical type pool
            # disk/fs pool: as the prepare step already makes a label and
            # creates a filesystem for the disk, '--overwrite' is necessary
            # logical pool: build pool will fail if the VG already exists,
            # BZ#1373711
            if new_pool_name:
                pool_name = new_pool_name
            if pool_type != "logical":
                result = virsh.pool_build(pool_name, build_option,
                                          ignore_status=True)
                utlv.check_exit_status(result)

            # Step (7)
            # Pool start
            result = virsh.pool_start(pool_name, debug=True, ignore_status=True)
            utlv.check_exit_status(result)

            # Step (8)
            # Pool list
            option = "--persistent --type %s" % pool_type
            check_pool_list(pool_name, option)

            # Step (9)
            # Pool autostart
            result = virsh.pool_autostart(pool_name, ignore_status=True)
            utlv.check_exit_status(result)

            # Step (10)
            # Pool list
            option = "--autostart --type %s" % pool_type
            check_pool_list(pool_name, option)

            # Step (11)
            # Restart libvirtd and check the autostart pool
            utils_libvirtd.unmark_storage_autostarted()
            utils_libvirtd.libvirtd_restart()
            option = "--autostart --persistent"
            check_pool_list(pool_name, option)

            # Step (12)
            # Pool destroy
            if virsh.pool_destroy(pool_name):
                logging.debug("Pool %s destroyed.", pool_name)
            else:
                test.fail("Destroy pool %s failed." % pool_name)

            # Step (13)
            # Pool autostart disable
            result = virsh.pool_autostart(pool_name, "--disable",
                                          ignore_status=True)
            utlv.check_exit_status(result)

            # Step (14)
            # Repeat step (11)
            utils_libvirtd.libvirtd_restart()
            option = "--autostart"
            check_pool_list(pool_name, option, True)

            # Step (15)
            # Pool start
            # When libvirtd starts up, it'll check to see if any of the storage
            # pools have been activated externally. If so, then it'll mark the
            # pool as active. This is independent of autostart.
            # So a directory based storage pool is thus pretty much always
            # active, and so is the SCSI pool.
            if pool_type not in ["dir", 'scsi']:
                result = virsh.pool_start(pool_name, ignore_status=True)
                utlv.check_exit_status(result)

            # Step (16)
            # Pool info
            pool_info = _pool.pool_info(pool_name)
            logging.debug("Pool '%s' info:\n%s", pool_name, pool_info)

            # Step (17)
            # Pool UUID
            result = virsh.pool_uuid(pool_info["Name"], ignore_status=True)
            utlv.check_exit_status(result)
            check_pool_info(pool_info, "UUID", result.stdout.strip())

            # Step (18)
            # Pool Name
            result = virsh.pool_name(pool_info["UUID"], ignore_status=True)
            utlv.check_exit_status(result)
            check_pool_info(pool_info, "Name", result.stdout.strip())

            # Step (19)
            # Pool refresh for 'dir' type pool
            if pool_type == "dir":
                os.mknod(vol_path)
                result = virsh.pool_refresh(pool_name)
                utlv.check_exit_status(result)
                check_vol_list(vol_name, pool_name)

            # Step (20)
            # Create an over-size vol in pool (expect fail), then check pool:
            # 'Capacity', 'Allocation' and 'Available'
            # For NFS type pool, there's a bug (BZ#1077068) about allocating
            # volumes, and glusterfs pools don't support creating volumes,
            # so don't test them
            if pool_type != "netfs":
                vol_capacity = "10000G"
                vol_allocation = "10000G"
                result = virsh.vol_create_as("oversize_vol", pool_name,
                                             vol_capacity, vol_allocation, "raw")
                utlv.check_exit_status(result, True)
                new_info = _pool.pool_info(pool_name)
                check_items = ["Capacity", "Allocation", "Available"]
                for i in check_items:
                    check_pool_info(pool_info, i, new_info[i])

            # Step (21)
            # Undefine pool, this should fail as the pool is active
            result = virsh.pool_undefine(pool_name, ignore_status=True)
            utlv.check_exit_status(result, expect_error=True)
            check_pool_list(pool_name, "", False)

            # Step (22)
            # Pool destroy
            if virsh.pool_destroy(pool_name):
                logging.debug("Pool %s destroyed.", pool_name)
            else:
                test.fail("Destroy pool %s failed." % pool_name)

            # Step (23)
            # Pool delete for 'dir' type pool
            if pool_type == "dir":
                for f in os.listdir(pool_target):
                    os.remove(os.path.join(pool_target, f))
                result = virsh.pool_delete(pool_name, ignore_status=True)
                utlv.check_exit_status(result)
                option = "--inactive --type %s" % pool_type
                check_pool_list(pool_name, option)
                if os.path.exists(pool_target):
                    test.fail("The target path '%s' still exists." % pool_target)
                result = virsh.pool_start(pool_name, ignore_status=True)
                utlv.check_exit_status(result, True)

            # Step (24)
            # Pool undefine
            result = virsh.pool_undefine(pool_name, ignore_status=True)
            utlv.check_exit_status(result)
            check_pool_list(pool_name, "--all", True)
    finally:
        # Clean up
        try:
            pvt.cleanup_pool(**params)
            utlv.setup_or_cleanup_iscsi(False)
        except exceptions.TestFail as detail:
            logging.error(str(detail))
        if multipathd_status:
            multipathd.start()
        if os.path.exists(poolxml):
            os.remove(poolxml)
myConf['window'] = feedback('How many codes before or after $now should be valid? (1 allows $now +/- 1)', int, '1')
myConf['timer'] = feedback('After how many seconds should the challenge prompt time out?', int, '30')
if feedback('Should code reuse be prevented? [Y/n]', bool, 'yes', '? '):
    myConf['last'] = "0"
if feedback('Should attempts be ratelimited? [Y/n]', bool, 'yes', '? '):
    myConf['ratelimit'] = {
        'window': 1,
        'counter': 0,
        'limit': feedback('How many attempts should be allowed per 30 seconds?', int, '3')
    }
doScratch = feedback('How many scratch codes should be created? (0 for none)', int, '2')
if doScratch:
    myConf['scratch'] = []
    for x in range(0, doScratch):
        myConf['scratch'].append(str(base64.b32encode(os.urandom(6))[0:10], 'ascii'))
myConf['url'] = "https://www.google.com/chart?chs=200x200&chld=M|0&cht=qr&chl=otpauth://totp/{0}@{1}%3Fsecret%3D{2}".format(
    os.environ['USER'], os.uname()[1], myConf['secret'])
if myFile is not None:
    if not os.path.exists(os.path.expanduser(myFile)):
        os.mknod(os.path.expanduser(myFile), 0o600)
    elif os.stat(os.path.expanduser(myFile))[0] != 0o100600:
        print('Warning: The permissions on {0} appear to be overly permissive'.format(myFile))
    # close the handle deterministically so the YAML is flushed to disk
    with open(os.path.expanduser(myFile), 'w') as handle:
        yaml.dump(myConf, handle)
    print(myConf['url'])
    # 'scratch' is only set when scratch codes were requested
    print(myConf.get('scratch', []))
else:
    print(yaml.dump(myConf))
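# A minimal verification sketch (an addition, not part of the original
# script): derive the current code from the stored secret, assuming the
# usual RFC 6238 defaults the otpauth URL above implies (base32 secret,
# SHA-1, 30 s step, 6 digits). The name `totp_now` is hypothetical.
import base64
import hashlib
import hmac
import struct
import time

def totp_now(secret_b32, step=30, digits=6):
    key = base64.b32decode(secret_b32, casefold=True)
    counter = struct.pack('>Q', int(time.time()) // step)  # moving factor
    digest = hmac.new(key, counter, hashlib.sha1).digest()
    offset = digest[-1] & 0x0F                             # dynamic truncation
    code = struct.unpack('>I', digest[offset:offset + 4])[0] & 0x7FFFFFFF
    return str(code % (10 ** digits)).zfill(digits)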
def vcd_parser(self, fw):
    tick = -1          # current tick
    tb_counter = -1    # length of testbench in operation code
    def_state = True   # definition state
    sym2sig = {}       # {symbolic_in_vcd: signal_name}
    pos2val = {}       # {position(bit): signal(1|0|z|x)}
    path = os.path.join(self.path, self.file_list['VCD'])
    write_path = os.path.join(self.path, self.file_list['BIN'])
    regex1 = re.compile(r'\$var .+ \d+ (.) (.+) \$end', re.I)  # match signal name
    regex2 = re.compile(r'#(\d+)')  # match period
    regex3 = re.compile(r'([0|1|x|z])(.)')  # match testbench
    if not os.path.exists(write_path):
        os.mknod(write_path)
    with open(path, "r") as f:
        for line in f.readlines():
            # end of file; readlines() keeps the trailing newline,
            # so a plain '==' comparison would never match here
            if line.startswith('$dumpoff'):
                break
            # definition stage: fill sym2sig = {symbol: signal, ...}
            if def_state:
                m1 = regex1.match(line)
                if m1:
                    sym2sig[m1.group(1)] = m1.group(2)
                else:
                    if re.match(r'\$upscope', line):
                        def_state = False
                continue
            else:
                # match next tick; write last tick to file
                m2 = regex2.match(line)
                if m2:
                    vcd_tick = m2.group(1)
                    while True:
                        write_content(fw, pos2val)  # write testbench to binary file
                        tb_counter += 1
                        tick += 1
                        if tick == int(vcd_tick):
                            break
                    continue
                # match testbench
                m3 = regex3.match(line)
                if m3:
                    value = m3.group(1)
                    key = m3.group(2)
                    pos2val[self.sig2pos.setdefault(sym2sig[key], None)] = value
                    if sym2sig[key] in self.entri_dict:
                        entri = sym2sig[key]
                        if pos2val[self.sig2pos[entri]] == '1':
                            self.sig2pio[self.entri_dict[entri]] = 'output'
                        else:
                            self.sig2pio[self.entri_dict[entri]] = 'input'
    write_length(fw, tb_counter)
    write_mask(fw, self.sig2pos, self.sig2pio)
    write_operator(fw, TESTBENCH_OP, 0)
    tb_counter = 0
    write_length(fw, tb_counter)
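# A quick illustration (not from the original) of what the three regexes
# above match, using typical VCD lines:
import re

regex1 = re.compile(r'\$var .+ \d+ (.) (.+) \$end', re.I)
regex2 = re.compile(r'#(\d+)')
regex3 = re.compile(r'([0|1|x|z])(.)')

print(regex1.match('$var wire 1 ! clk $end').groups())  # ('!', 'clk')
print(regex2.match('#100').group(1))                    # '100'
print(regex3.match('1!').groups())                      # ('1', '!')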
import os, shutil

# create file save directories
ppath = '/var/www/bl/upload/'
cpath = ['s/i', 's/a', 'm/i', 'm/a', 'p/a', 'p/p', 'o']
if os.path.exists(ppath):
    shutil.rmtree(ppath)  # remove the path itself, not the literal string 'ppath'
for p in cpath:
    os.makedirs(ppath + p, mode=0777)

# create log file
os.mkdir('/var/log/django')
os.mknod('/var/log/django/debug.log', mode=0777)

# init memcached
#os.system('killall memcached')
#os.system('memcached -d')
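# A hedged Python 3 equivalent of the setup above (the original is
# Python 2): makedirs(..., exist_ok=True) avoids the remove-then-create
# dance, and pathlib's touch() replaces os.mknod, which needs privileges
# on some platforms. Paths are the same assumptions as above.
from pathlib import Path

upload_root = Path('/var/www/bl/upload')
for p in ('s/i', 's/a', 'm/i', 'm/a', 'p/a', 'p/p', 'o'):
    (upload_root / p).mkdir(parents=True, exist_ok=True)

log = Path('/var/log/django/debug.log')
log.parent.mkdir(exist_ok=True)
log.touch(mode=0o666)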
def extract_item(self, item, restore_attrs=True, dry_run=False):
    if dry_run:
        if b'chunks' in item:
            for _ in self.pipeline.fetch_many([c[0] for c in item[b'chunks']],
                                              is_preloaded=True):
                pass
        return
    dest = self.cwd
    if item[b'path'].startswith('/') or item[b'path'].startswith('..'):
        raise Exception('Path should be relative and local')
    path = os.path.join(dest, item[b'path'])
    # Attempt to remove existing files, ignore errors on failure
    try:
        st = os.lstat(path)
        if stat.S_ISDIR(st.st_mode):
            os.rmdir(path)
        else:
            os.unlink(path)
    except OSError:
        pass
    mode = item[b'mode']
    if stat.S_ISDIR(mode):
        if not os.path.exists(path):
            os.makedirs(path)
        if restore_attrs:
            self.restore_attrs(path, item)
    elif stat.S_ISREG(mode):
        if not os.path.exists(os.path.dirname(path)):
            os.makedirs(os.path.dirname(path))
        # Hard link?
        if b'source' in item:
            source = os.path.join(dest, item[b'source'])
            if os.path.exists(path):
                os.unlink(path)
            os.link(source, path)
        else:
            with open(path, 'wb') as fd:
                ids = [c[0] for c in item[b'chunks']]
                for data in self.pipeline.fetch_many(ids, is_preloaded=True):
                    fd.write(data)
                fd.flush()
                self.restore_attrs(path, item, fd=fd.fileno())
    elif stat.S_ISFIFO(mode):
        if not os.path.exists(os.path.dirname(path)):
            os.makedirs(os.path.dirname(path))
        os.mkfifo(path)
        self.restore_attrs(path, item)
    elif stat.S_ISLNK(mode):
        if not os.path.exists(os.path.dirname(path)):
            os.makedirs(os.path.dirname(path))
        source = item[b'source']
        if os.path.exists(path):
            os.unlink(path)
        os.symlink(source, path)
        self.restore_attrs(path, item, symlink=True)
    elif stat.S_ISCHR(mode) or stat.S_ISBLK(mode):
        os.mknod(path, item[b'mode'], item[b'rdev'])
        self.restore_attrs(path, item)
    else:
        raise Exception('Unknown archive item type %r' % item[b'mode'])
def _mknod(path, name, mode, major, minor):
    # create a character device node with the permission bits of `mode`
    os.mknod(os.path.join(path, name),
             mode=(stat.S_IMODE(mode) | stat.S_IFCHR),
             device=os.makedev(major, minor))
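# A hypothetical usage sketch for the helper above: recreating a node like
# /dev/null (character device major 1, minor 3) under a scratch directory.
# The path is made up for illustration; creating device nodes requires
# root (CAP_MKNOD) on Linux.
import os
import stat

os.makedirs('/tmp/devsandbox', exist_ok=True)
_mknod('/tmp/devsandbox', 'null', 0o666, 1, 3)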
#!/usr/bin/env python3
import os
import shutil

# exercise the filesystem
os.chdir('/tmp')
os.mkdir('example')
os.chdir('example')
os.mknod('test')
with open('test', 'w') as f:
    f.write('foo bar')
os.listdir('/tmp/example')
with open('test') as f:
    print(f.read())
os.remove('test')
os.chdir('/tmp')
shutil.rmtree('example')
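# Portability note (an addition): os.mknod('test') creates a plain file,
# but on some platforms (e.g. macOS) it needs elevated privileges. A
# portable way to create the same empty file:
from pathlib import Path

Path('test').touch()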
def handle(self, *args, **options):
    lockfile = os.path.join(settings.BASE_DIR, 'cron.lock')
    try:
        if not os.path.exists(lockfile):
            time_th = datetime.now() - timedelta(minutes=4)
            hosts_to_check = Host.objects.filter(updated__lt=time_th)
            stats = []
            for host in Host.objects.all():
                stat_all = History.objects.filter(host=host)
                success = History.objects.filter(host=host, status='success')
                try:
                    stats.append((len(success) / len(stat_all)) * 100)
                except:
                    pass
            stat_combined = 0
            for stat in stats:
                stat_combined += stat
            global_stat = stat_combined / len(stats)
            if os.name == 'posix':
                os.mknod(lockfile)
            elif os.name == 'nt':
                open(lockfile, 'w+').close()
            for host in hosts_to_check:
                link = "<a href='"
                url = "http://" + settings.ALLOWED_HOSTS[0]
                url += reverse('monitor:detail', kwargs={'host_id': host.id})
                link += url + "'>" + url + "</a>"
                if os.name == 'nt':
                    proc = subprocess.Popen(
                        ['ping', '-n', '3', host.ip4address],
                        stdout=subprocess.PIPE)
                elif os.name == 'posix':
                    proc = subprocess.Popen(
                        ['ping', '-c', '3', host.ip4address],
                        stdout=subprocess.PIPE)
                try:
                    res = proc.communicate()
                except:
                    pass
                details = res[0].decode('utf-8')
                host.status_detail = details.replace('\r\n', '<br>')
                host.status_detail = host.status_detail.replace('\n', '<br>')
                stat_all = History.objects.filter(host=host)
                success = History.objects.filter(host=host, status='success')
                stat_line = ''
                try:
                    stat = (len(success) / len(stat_all)) * 100
                    stat_line = "Reachable for {}% of checks in the last 24 hours".format(int(stat))
                except:
                    stat_line = ""
                stat_line += "<br> Global Average {}%".format(int(global_stat))
                if '100% packet loss' in details or '100% loss' in details:
                    if host.status == 'UP':
                        host.status = 'WARNING'
                    elif host.status == 'WARNING':
                        message = "Failed to verify status of " + host.name
                        message += "<br><br>" + link
                        message += "<br><br>" + stat_line
                        host.status = 'UNREACHABLE'
                        if settings.EMAIL_NOTIFY:
                            send_mail('WARNING: ' + host.name + ' UNREACHABLE',
                                      '', settings.EMAIL_HOST_USER,
                                      settings.EMAIL_TO, html_message=message)
                        if settings.HIPCHAT_NOTIFY:
                            hipchat.send(message, 'red')
                else:
                    host.last_seen = datetime.now()
                    if host.status == 'WARNING':
                        host.status = 'UP'
                        message = "Successfully connected to " + host.name
                        message += "<br><br>" + link
                        message += "<br><br>" + stat_line
                        if settings.EMAIL_NOTIFY:
                            send_mail('RECOVERY: ' + host.name, '',
                                      settings.EMAIL_HOST_USER,
                                      settings.EMAIL_TO, html_message=message)
                        if settings.HIPCHAT_NOTIFY:
                            hipchat.send(message, 'green')
                    elif host.status == 'UNREACHABLE':
                        host.status = 'WARNING'
                host.save()
                tags = {
                    'UP': 'success',
                    'WARNING': 'warning',
                    'UNREACHABLE': 'danger'
                }
                history = History(host=host, status=tags[host.status])
                history.save()
            os.remove(lockfile)
            time_th = datetime.now() - timedelta(hours=24)
            History.objects.filter(stamp__lt=time_th).delete()
    except Exception as e:
        os.remove(lockfile)
        print(e)
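# A possible hardening (a sketch, not the project's code): the
# exists()-then-mknod() sequence above races against a concurrent run of
# the command. os.open with O_CREAT | O_EXCL creates the lockfile
# atomically and fails if it already exists, on POSIX and Windows alike.
import os

def acquire_lock(lockfile):
    try:
        os.close(os.open(lockfile, os.O_CREAT | os.O_EXCL | os.O_WRONLY))
        return True
    except FileExistsError:
        return False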
def check_urls_for_vuln(filename, filename_rawurl, savesearch, verboseactive):
    """Check if URLs are vulnerable to SQLi."""
    print('\n\n\n' + bc.HEADER)
    print('\t[*] Checking URLs for vuln')
    print('\n' + bc.ENDC)

    # Base input
    if filename_rawurl != '0':
        urlfile = filename_rawurl
        if not os.path.isfile(urlfile):
            print(bc.FAIL + '\t[*] URL file does not exist or no vuln urls.')
            print(bc.FAIL + ' Exiting')
            return None
    if savesearch == 'y':
        if not os.path.isfile(filename):
            os.mknod(filename)
        else:
            print('\t[!] File already exists!')
            print('\t[!] Append to file? Press enter for yes. (y/n)')
            appendtofile = input('\t-> ' + bc.WARN + 'wmd' + bc.ENDC + '@' +
                                 bc.WARN + 'fileExists:' + bc.ENDC + ' ')
            if appendtofile == 'n':
                print('\t[!] User disallowed appending to resultfile')
                print('\t[!] Please try again with another filename')
                print('\t[!] Exiting\n\n')
                return None
    else:
        filename = '0'
    print(bc.ENDC + '\n\t[*]::Reading file\n')
    print('\t[*] Connecting\n')

    # =================================
    # Loop through urls and add a quote
    # =================================
    with open(urlfile) as fileorg:
        for line in fileorg:
            checkMY1 = checkMY2 = checkMY3 = checkMY4 = 0
            checkMS1 = checkMS2 = checkMS3 = 0
            checkOR1 = checkOR2 = checkOR3 = 0
            checkPO1 = checkPO2 = 0
            try:
                # Get data
                url = line + "'"
                print('\t[' + time.strftime('%H:%M:%S') + '] [*] ' + line.strip('\n'))
                # Load a random useragent
                uas = LoadUserAgents()
                ua = random.choice(uas)  # select a random user agent
                headers = {'Connection': 'close', 'User-Agent': ua}
                r = requests.get(url, headers=headers)
                soup = BeautifulSoup(r.text, 'lxml')

                # Check if vuln - the indication strings below may need updating
                # MySQL
                checkMY1 = len(soup.find_all(text=re.compile('check the manual that corresponds to your MySQL')))
                checkMY2 = len(soup.find_all(text=re.compile('SQL syntax')))
                checkMY3 = len(soup.find_all(text=re.compile('server version for the right syntax')))
                checkMY4 = len(soup.find_all(text=re.compile('expects parameter 1 to be')))
                # Microsoft SQL server
                checkMS1 = len(soup.find_all(text=re.compile('Unclosed quotation mark before the character string')))
                checkMS2 = len(soup.find_all(text=re.compile('An unhanded exception occurred during the execution')))
                checkMS3 = len(soup.find_all(text=re.compile('Please review the stack trace for more information')))
                # Oracle errors
                checkOR1 = len(soup.find_all(text=re.compile('java.sql.SQLException: ORA-00933')))
                checkOR2 = len(soup.find_all(text=re.compile('SQLExceptionjava.sql.SQLException')))
                checkOR3 = len(soup.find_all(text=re.compile('quoted string not properly terminated')))
                # PostgreSQL
                checkPO1 = len(soup.find_all(text=re.compile('Query failed:')))
                checkPO2 = len(soup.find_all(text=re.compile('unterminated quoted string at or near')))

                # Verbose level 1
                if verboseactive == '1':
                    print('\t[V] Check1 MySQL found: ' + str(checkMY1))
                    print('\t[V] Check2 MySQL found: ' + str(checkMY2))
                    print('\t[V] Check3 MySQL found: ' + str(checkMY3))
                    print('\t[V] Check4 MySQL found: ' + str(checkMY4))
                    print('\t[V] Check5 MS SQL found: ' + str(checkMS1))
                    print('\t[V] Check6 MS SQL found: ' + str(checkMS2))
                    print('\t[V] Check7 MS SQL found: ' + str(checkMS3))
                    print('\t[V] Check8 Oracle found: ' + str(checkOR1))
                    print('\t[V] Check9 Oracle found: ' + str(checkOR2))
                    print('\t[V] Check10 Oracle found: ' + str(checkOR3))
                    print('\t[V] Check11 Postgre found: ' + str(checkPO1))
                    print('\t[V] Check12 Postgre found: ' + str(checkPO2))

                # Verbose level 2: print the matched text itself; the twelve
                # near-identical find/clean/print blocks are folded into one
                # loop over the same patterns and labels
                if verboseactive == '2':
                    verbose_checks = [
                        ('Check1 MySQL', 'check the manual that corresponds to your MySQL'),
                        ('Check2 MySQL', 'SQL syntax'),
                        ('Check3 MySQL', 'server version for the right syntax'),
                        ('Check4 MySQL', 'expects parameter 1 to be'),
                        ('Check5 MS SQL', 'Unclosed quotation mark before the character string'),
                        ('Check6 MS SQL', 'An unhanded exception occurred during the execution'),
                        ('Check7 MS SQL', 'Please review the stack trace for more information'),
                        ('Check8 Oracle', 'java.sql.SQLException: ORA-00933'),
                        ('Check9 Oracle', 'SQLExceptionjava.sql.SQLException'),
                        ('Check10 Oracle', 'quoted string not properly terminated'),
                        ('Check11 Postgre', 'Query failed:'),
                        ('Check12 Postgre', 'unterminated quoted string at or near'),
                    ]
                    for label, pattern in verbose_checks:
                        match = soup.find(text=re.compile(pattern))
                        cleaned = (str(match).replace('\n', ' ').replace('\r', '')
                                   .replace('\t', '').replace('  ', ''))
                        print('\t[V] ' + label + ' found: ' + cleaned)

                # If X is vuln
                if (checkMY1 > 0 or checkMY2 > 0 or checkMY3 > 0 or checkMY4 > 0
                        or checkMS1 > 0 or checkMS2 > 0 or checkMS3 > 0
                        or checkOR1 > 0 or checkOR2 > 0 or checkOR3 > 0
                        or checkPO1 > 0 or checkPO2 > 0):
                    print(bc.OKGREEN + '\n' + ' Possible vuln url!' + '\n' +
                          '\t[' + time.strftime('%H:%M:%S') + '] [+] ' + line +
                          bc.ENDC + '\n')
                    if savesearch == 'y':
                        with open(filename, 'a') as file:
                            file.write(line)
                else:
                    print(bc.WARNING + '\t[' + time.strftime('%H:%M:%S') + '] [-] ' + line + bc.ENDC)

            # Skip X or/and exit
            except KeyboardInterrupt:
                print(bc.FAIL + '\t[X] ' + line + bc.ENDC)
                print('\t[!] Quit? Press enter to continue, or y to quit (y/n)')
                quitnow = input('\t-> ' + bc.WARN + 'wmd' + bc.ENDC + '@' +
                                bc.WARN + 'quit:' + bc.ENDC + ' ')
                if quitnow == 'y':
                    print(bc.ENDC + '\t[!] Exiting\n\n')
                    return None
                else:
                    print(bc.ENDC + '\t[!] Continuing\n\n')
            # Bad X
            except:
                print(bc.FAIL + '\t[X] ' + line + bc.ENDC)

    # =================================
    # Done - sum it up
    # =================================
    print('\n\t[+] Done scanning urls')
    if savesearch == 'y':
        with open(filename) as f:
            resultsnumber = sum(1 for _ in f)
        print('\t[+] Scraping saved in file: ' + filename)
        print('\t[+] Total saved urls: ' + str(resultsnumber))
        if resultsnumber == 0:
            print('\t[+] No vuln urls, exiting\n\n')
            return None
        print('\t[!] Run vuln urls through SQLmap (y/n)?')
        checkurls = input('\t-> ' + bc.WARN + 'wmd' + bc.ENDC + '@' +
                          bc.WARN + 'runSQLmap:' + bc.ENDC + ' ')
        if checkurls == 'y':
            scan_urls_sqlmap(filename)
        else:
            print(bc.ENDC + '\t[!] Exiting\n\n')
            return None
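# A possible tidy-up (a sketch, not the project's code): the twelve
# checkXX counters above could be driven by one table of error
# signatures, keeping the DBMS fingerprints in a single place.
import re

SQL_ERROR_SIGNATURES = {
    'MySQL': ['check the manual that corresponds to your MySQL',
              'SQL syntax',
              'server version for the right syntax',
              'expects parameter 1 to be'],
    'MS SQL': ['Unclosed quotation mark before the character string',
               'An unhanded exception occurred during the execution',
               'Please review the stack trace for more information'],
    'Oracle': ['java.sql.SQLException: ORA-00933',
               'SQLExceptionjava.sql.SQLException',
               'quoted string not properly terminated'],
    'Postgre': ['Query failed:',
                'unterminated quoted string at or near'],
}

def count_sql_errors(soup):
    """Return {dbms: total signature matches} for a parsed response."""
    return {dbms: sum(len(soup.find_all(text=re.compile(sig))) for sig in sigs)
            for dbms, sigs in SQL_ERROR_SIGNATURES.items()}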
"""Initialize the Figit working directory.""" # TODO: test if src directory exists. For now we assume it does. wd = os.path.abspath(args[2]) src = args[1] if src.count(':') < 1: utils.quit(USAGE) if os.path.abspath(src) == wd: utils.quit("Working directory and source cannot be the same.") if not os.path.exists(join(wd, '.figit')): os.makedirs(join(wd, '.figit')) os.chdir(wd) confile = open(join(".figit", "config"), 'w') confile.write("wd:%s\nsrc:%s\n" % (os.getcwd(), src)) confile.close() for node in ("manifest", "RunBefore", "RunAfter"): os.mknod(join(".figit", node)) VCS = utils.get_vcs(VCSNAME, wd, INSTALLBRANCH) print VCS.initdb() utils.quit() conf = utils.getconf() src = conf['src'] port = conf['port'] wd = conf['wd'] user = conf['user'] hosts = conf['hosts'] os.chdir(wd) if cmmd == 'addhost': """Append a new distribution host's name to the config file."""
print os.getcwd()
print os.curdir
# os.path.getsize('demo180104.py')
a = os.path.isfile('demo180104.py')  # check whether this is a file
print a
print os.listdir('Test')
# directory-related helpers live under os.path
print os.path.isdir('Test')

file_path = 'devon'
create_dir = 'devon'
if os.path.exists(file_path):
    print "The directory already exists and will be removed."
    path_header = os.getcwd()  # directory the file lives in
    shutil.rmtree(file_path)   # removes a non-empty directory; for an empty one use os.rmdir('devon')
    print "Removed directory: %s/%s" % (path_header, file_path)
    logger.debug('Log the error.')
if os.path.isdir('Test'):
    os.mkdir(create_dir)
os.chdir(create_dir)  # change directory
os.system('touch testfile.txt')
os.mknod("test.txt")  # create an empty file
with open('test.txt', 'a+') as f:
    f.write('this is testing contents.')
os.remove('testfile.txt')  # delete a file
print os.getcwd()
print os.listdir('..')
document.write(" " + Description_zh) document.write('\n') document.write("- ### Extended Description") document.write('\n') #document.write(Extended_Description_en) #document.write('\n') document.write(" " + Extended_Description_zh) document.write('<br>\n\n') document.close() if __name__ == '__main__': start_index = 0 cwe_config = configparser.ConfigParser() time_start = time.time() print("正在查询最新版本存档") official_cwe_xmlfile = prepare_cwec_file() if not os.path.exists('./document'): os.mkdir('./document') cwe_config.read('cwe.config', encoding='utf8') start_index = cwe_config.getint("base", "index_ID") w_id = cwe_config.getint("base", "id") print("开始进行翻译并写入文档") if start_index == 0: os.mknod('./document/CWE翻译计划.md') write_header() Analysis_xml(official_cwe_xmlfile, start_index, cwe_config) print("翻译时间: ", str(time.time() - time_start) + "秒")
oFW.write("\necho \"Reconstruct mesh\"") oFW.write("\nrunApplication reconstructParMesh -constant") oFW.write("\n\n#Delete processor folders") oFW.write("\necho \"Deleting Processors\"") oFW.write("\nrm -rf proc*") oFW.write("\n\nmv 0.org 0") oFW.close() # In[ ]: ##Move STL files to constant/triSurface oF = cwd + "/" src = os.listdir(oF) dst = cwd + "/constant/triSurface/" for files in src: if files.endswith('.stl'): shutil.move(os.path.join(oF, files), os.path.join(dst, files)) # In[ ]: os.mkdir("Misc") os.rename("Input.VSGR", "Misc/Input.VSGR") os.rename("MasterSTLList", "Misc/MasterSTLList") try: os.mknod("case.foam") except: oFW = open(cwd + "/case.foam", "w") oFW.close()
while True:
    [command, name, path] = getInput(prog)
    if command == 'quit':
        break
    if command == 'create_dir':
        try:
            os.mkdir(os.path.join(path, name))  # create a new directory
        except OSError:
            print(f"Creation of the directory {name} in {path} failed")
        else:
            print(f"Successfully created the directory {name} in {path}")
    elif command == 'create_file':
        try:
            os.mknod(os.path.join(path, name))  # create a new file
        except OSError:
            print(f"Creation of the file {name} in {path} failed")
        else:
            print(f"Successfully created the file {name} in {path}")
    elif command == 'delete':
        try:
            os.remove(os.path.join(path, name))  # remove an existing file
        except OSError:
            print(f"Deletion of the file {name} in {path} failed")
        else:
            print(f"Successfully deleted the file {name} in {path}")
    elif command == 'find':
        try: