def cleanUselessLck():
    # Remove stale lock files left behind in the lock directory
    for filename in os.listdir(lckdir):
        filepath = os.path.join(lckdir, filename)
        try:
            if os.path.isfile(filepath):
                os.unlink(filepath)
        except Exception as e:
            print(e)

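# Hedged usage sketch: the function above reads a module-level `lckdir` that
# the snippet never defines; the path below is an assumption for illustration.
import os
lckdir = "/tmp/locks"              # assumed directory holding stale .lck files
os.makedirs(lckdir, exist_ok=True)
cleanUselessLck()                  # deletes every regular file under lckdir
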
def RegressionMain(file_path="/home/sap/IdeaProjects/XAI/April/weights", trainStep=3, predStep=3, self=None):
    self.file_path = file_path
    # self.linear = linear
    allFiles = os.listdir(file_path)
    filteredFiles = [i for i in allFiles if i.startswith('clustered')]

def fit(self):
    """Perform horizon segmentation."""
    hor = ReadHor(self.horadress)
    hor.read()  # read the horizon information
    horData = hor.filedata
    # Get the line names shared by the horizon file and the data folder
    LineNameIn = self.action1(hor.LineName, os.listdir(self.foldadress))
    for linename in LineNameIn:
        # Build the path of the file to read on each iteration
        fileadress = self.foldadress + '/' + linename + '.txt'
        Wave = ReadSeisWave(fileadress)
        WaveHead = Wave.getHead()
        WaveData = Wave.getNum()
        CDP_TB = horData.loc[horData['LineName'] == linename, ['CDP', self.Top, self.Bot]]
        WaveHor = pd.DataFrame(self.action2(WaveData, WaveHead, CDP_TB), columns=self.HorName)
        WaveHor.to_excel(
            '/' + '/'.join(self.foldadress.strip().split('/')[:-1]) + self.HorName + '/' + linename + '.xlsx')

def main():
    # parse the arguments
    parser = argparse.ArgumentParser(description='Process some integers.')
    # required parameters
    parser.add_argument("func", nargs='?', default='help', type=str, help="train/test/help")
    parser.add_argument("--data_dir", default="data", type=str, required=False)
    parser.add_argument("--task_name", default=None, type=str, required=False)
    parser.add_argument("--tag", default=None, type=str, required=False)
    parser.add_argument("--input_dir", default=None, type=str, required=False)
    parser.add_argument("--output_dir", default=None, type=str, required=False)
    parser.add_argument("--model_name", default="bert-base-uncased", type=str, required=False)
    parser.add_argument("--no_cuda", action="store_true", required=False)  # consulted below when picking the device
    args = parser.parse_args()

    # do the func
    if args.func == "help":
        print("train to generate model, test to evaluate model")
    else:
        # gather parameters
        tag = args.tag
        if tag is None:
            tag = args.tag = str(uuid.uuid1())
        print("params: {}\ntag: {}".format(str(args), tag))
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        n_gpu = args.n_gpu = torch.cuda.device_count()
        logging.basicConfig(
            format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
            datefmt='%m/%d/%Y %H:%M:%S',
            level=logging.INFO)
        logger.warning("device: %s, n_gpu: %s", device, n_gpu)
        set_seed(args)
        args.task_name = args.task_name.lower()
        # TODO task specific settings
        num_labels = None
        if args.func == "train":
            # train on the task
            # gather parameters
            output_dir = args.output_dir = args.output_dir if args.output_dir else "model"
            if os.path.exists(output_dir) and os.listdir(output_dir):
                raise ValueError("Output dir exists")
            config = BertConfig.from_pretrained(args.model_name, num_labels=num_labels,
                                                finetuning_task=args.task_name)
            tokenizer = BertTokenizer.from_pretrained(args.model_name,
                                                      do_lower_case="uncased" in args.model_name)
            model = BertForSequenceClassification.from_pretrained(
                args.model_name, from_tf=False, config=config)
        elif args.func == "test":
            pass  # test on the task
        else:
            raise NotImplementedError

    # (fragment: the enclosing function definition was cut off above)
    for i in range(len(x)):
        if x[-(i + 1)] > 0:  # scan from the end for the last positive entry
            break
    t = x[-(i + 1)]
    return t


def find_miles(x):
    ori = find_nonzero(x, 0)
    des = find_nonzero(x, 1)
    distance = des - ori
    return distance


'''didi car-hailing PHEV'''
didi_list = os.listdir("data/didi")
sum_didi = []
###################
run1 = []
run2 = []
run3 = []
a = lambda x: sum(x == 1)
b = lambda x: sum(x == 2)
c = lambda x: sum(x == 3)
for didi_id in didi_list:
    one_vehicle = pd.read_csv("data/didi/" + didi_id)
    one_vehicle['time'] = pd.to_datetime(one_vehicle['time']).apply(
        lambda x: x.date())  # year, month, day
    agg = one_vehicle.groupby('time').agg({"vehiclestatus": [a, b, c]})
    agg = agg.apply(lambda x: x / sum(x), axis=1)
    name = agg.columns
    r1 = list(agg[name[0]])

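# Hedged sketch of the truncated helper above: its signature and the meaning
# of the second argument are assumptions inferred only from how find_miles
# calls it (0 = first positive reading, 1 = last positive reading).
import numpy as np

def find_nonzero_sketch(x, flag):
    x = np.asarray(x)
    idx = np.flatnonzero(x > 0)      # indices of all positive readings
    if idx.size == 0:
        return 0                     # assumption: no positive reading -> 0
    return x[idx[0]] if flag == 0 else x[idx[-1]]
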
# coding:utf-8
import re
import time
import datetime
import redis
import sys
import os

arg = sys.argv[-1]  # renamed from `input` to avoid shadowing the builtin
rds = redis.Redis(host='redis-logcount.yg.hunantv.com', port=8889)
log_list = []
if arg == '1':
    file = datetime.date.today()
    log_list.append('/mfs/logs/eru/odan/web-macvlan/' + str(file) + '.log')
elif arg == '2':
    log_list = os.listdir("/mfs/logs/eru/odan/web-macvlan")
else:
    filename = '/mnt/mfs/logs/eru/odan/web-macvlan/' + arg
    with open(filename) as f:
        date = ''
        for line in f:
            t = re.search(r'^\[([\d-]+\s+[\d:]+)', line)
            if t:
                # parse the log timestamp; the name `parsed` avoids shadowing
                # the datetime module
                parsed = time.strptime(t.group(1), '%Y-%m-%d %H:%M:%S')
                timestamp = '%s' % time.mktime(parsed)
                if not date:
                    date = time.strftime('%Y-%m-%d', parsed)
                rds.hincrby(date, timestamp, 1)
    data = rds.hgetall(date)
    for key in sorted(data.keys()):
        print(key, data[key])

import os
import numpy as np
import pandas as pd
import keras
from keras.callbacks import Callback
from keras.datasets import cifar10
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D
from sklearn.metrics import confusion_matrix, f1_score, precision_score, recall_score
from sklearn.model_selection import train_test_split

# In[2]:

os.listdir('/home/zxt/test/iWildCam')

# ## 1. Loading the 32x32 dataset

# In[5]:

# Split data between train and test sets:
x_train = np.load('/home/zxt/test/iWildCam/X_train.npy')
x_test = np.load('/home/zxt/test/iWildCam/X_test.npy')
y_train = np.load('/home/zxt/test/iWildCam/y_train.npy')
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')

    # (fragment: the enclosing function definition was cut off above)
    for i in range(len(x)):
        if x[-(i + 1)] > 0:  # scan from the end for the last positive entry
            break
    t = x[-(i + 1)]
    return t


def find_miles(x):
    ori = find_nonzero(x, 0)
    des = find_nonzero(x, 1)
    distance = des - ori
    return distance


'''didi car-hailing PHEV'''
didi_list = os.listdir("data/didi")
sum_didi = []
###################
run1 = []
run2 = []
run3 = []
a = lambda x: sum(x == 1)
b = lambda x: sum(x == 2)
c = lambda x: sum(x == 3)
for didi_id in didi_list:
    one_vehicle = pd.read_csv("data/didi/" + didi_id)
    one_vehicle['time'] = pd.to_datetime(one_vehicle['time']).apply(
        lambda x: x.date())  # year, month, day
    agg = one_vehicle.groupby('time').agg({"runmodel": [a, b, c]})
    agg = agg.apply(lambda x: x / sum(x), axis=1)
    name = agg.columns
    r1 = list(agg[name[0]])

def list(cls):
    """List the available save files."""
    # Return the save names with their .txt extension stripped, one per line
    return '\n'.join(i.replace('.txt', '') for i in os.listdir(cls._save_dir))

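# Hedged usage sketch: the method above presumably sits on a class whose
# _save_dir attribute names the save directory; SaveManager, 'saves', and
# 'slot1.txt' are illustrative stand-ins, not names from the original.
import os

class SaveManager:
    _save_dir = 'saves'

    @classmethod
    def list(cls):
        return '\n'.join(i.replace('.txt', '') for i in os.listdir(cls._save_dir))

os.makedirs(SaveManager._save_dir, exist_ok=True)
open(os.path.join(SaveManager._save_dir, 'slot1.txt'), 'w').close()
print(SaveManager.list())  # -> slot1
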
def do_dir(self, args):
    try:
        # if using input from a file
        if args[0] == '<':
            try:
                # gets the contents of the given directory
                data = os.listdir(self.from_file(args[1]))
                data = ' '.join(data)
                try:
                    # if using input from file and overwrite
                    if args[2] == '>':
                        try:
                            # outputs contents of the directory to the given file
                            self.overwrite(args[3], data)
                            self.line()
                        except IndexError:
                            # if no filename is given
                            print("Error: No filename given")
                            self.line()
                    # if using input from file and append
                    elif args[2] == '>>':
                        try:
                            # outputs contents of the directory to the given file
                            self.append(args[3], data)
                            self.line()
                        except IndexError:
                            # if no filename is given
                            print("Error: No filename given")
                            self.line()
                # if using input from file and no output
                except IndexError:
                    # print contents of directory joined by 3 spaces
                    print(data)
                    self.line()
            except FileNotFoundError:
                # if the directory doesn't exist
                print(f"Error: '{args[1]}' no such file")
                self.line()
            except IndexError:
                # if no file given for input
                print("Error: No file given")
                self.line()
        # if using overwrite with no directory listed
        elif args[0] == '>':
            # gets the contents of the current directory
            data = os.listdir(self.cwd)
            data = ' '.join(data)
            try:
                # outputs contents of current dir to file
                self.overwrite(args[1], data)
                self.line()
            except IndexError:
                # if no filename given
                print("Error: No filename given")
                self.line()
        # if using append with no directory listed
        elif args[0] == '>>':
            # gets the contents of the current directory
            data = os.listdir(self.cwd)
            data = ' '.join(data)
            try:
                # outputs contents of current dir to file
                self.append(args[1], data)
                self.line()
            except IndexError:
                # if no filename given
                print("Error: No filename given")
        # if using overwrite with a given directory
        elif args[1] == '>':
            try:
                # gets contents of given directory
                data = os.listdir(args[0])
                data = ' '.join(data)
                try:
                    # outputs content of given dir to file
                    self.overwrite(args[2], data)
                    self.line()
                except IndexError:
                    # if no filename given
                    print('Error: No filename given')
            except FileNotFoundError:
                # if the directory doesn't exist
                print(f"Error: '{args[0]}' no such directory")
                self.line()
        # if using append with a given directory
        elif args[1] == '>>':
            try:
                # gets contents of given directory
                data = os.listdir(args[0])
                data = ' '.join(data)
                try:
                    # outputs contents of given dir to file
                    self.append(args[2], data)
                    self.line()
                except IndexError:
                    # if no filename given
                    print("Error: No filename given")
                    self.line()
            except FileNotFoundError:
                # if the directory doesn't exist
                print(f"Error: '{args[0]}' no such directory")
                self.line()
    except IndexError:
        try:
            # if using dir from a given directory without output
            data = os.listdir(args[0])
            data = ' '.join(data)
            # prints contents of the given directory separated by 3 spaces
            print(data)
            self.line()
        except IndexError:
            # if using dir with no arguments
            data = os.listdir(os.getcwd())
            # prints the contents of the current directory separated by 3 spaces
            print(' '.join(data))
            self.line()
    except FileNotFoundError:
        # if the given file doesn't exist
        print('Error: No such directory')
        self.line()

import os

def list_files(directory):
    return os.listdir(directory)

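# Hedged usage sketch: os.listdir returns bare entry names, so join them with
# the directory to get usable paths; 'demo_dir' is illustrative only.
os.makedirs('demo_dir', exist_ok=True)
names = list_files('demo_dir')
full_paths = [os.path.join('demo_dir', n) for n in names]
print(full_paths)
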
import HTSeq
import numpy
import pylab as plt
import os

CH = [str(x) for x in range(1, 20)] + ['X', 'Y']
gtffile = HTSeq.GFF_Reader("Mus_musculus.GRCm38.82.gtf")
tsspos = set()
for feature in gtffile:
    if feature.type == "exon" and feature.attr["exon_number"] == "1":
        if feature.iv.chrom in CH:
            tsspos.add(feature.iv.start_d_as_pos)

Fs = os.listdir('.')
for F in Fs:
    if F[-4:] == '.bam':
        # bamfile = HTSeq.BAM_Reader("Tspan8_negative_MHCII_high_rep1_HQ.bam")  # leftover hard-coded path
        bamfile = HTSeq.BAM_Reader(F)
        halfwinwidth = 1000
        coverage = HTSeq.GenomicArray("auto", stranded=False, typecode="i")
        for almnt in bamfile:
            if almnt.aligned:
                #### method 1
                if almnt.inferred_insert_size > 0:
                    # iv = HTSeq.GenomicInterval(almnt.iv.chrom, almnt.iv.start, almnt.iv.start + almnt.inferred_insert_size, ".")
                    # coverage[almnt.iv] += 1
                    pass

    # (fragment: the enclosing blur function's definition was cut off above)
    image_bl = cv2.blur(image, ksize=(k, k))
    cv2.imshow(str(k), image_bl)
    cv2.waitKey(0)
    return


def resize(fname, width, height):
    image = cv2.imread(fname)
    cv2.imshow('Original Image', image)
    cv2.waitKey(0)
    org_h, org_w = image.shape[0:2]
    if org_w >= org_h:
        new_image = cv2.resize(image, (width, height))
    else:
        new_image = cv2.resize(image, (height, width))
    return fname, new_image


listOfFiles = os.listdir('.')
pattern = "*.jpg"
n = len(sys.argv)
if n == 3:
    width = int(sys.argv[1])
    height = int(sys.argv[2])
else:
    width = 1280
    height = 960
if not os.path.exists('new_folder'):
    os.makedirs('new_folder')
for filename in listOfFiles:
    if fnmatch.fnmatch(filename, pattern):
        filename, new_image = resize(filename, width, height)
        cv2.imwrite(os.path.join("new_folder", filename), new_image)
        # cv2.imshow('resized image', new_image)

import pandas as pd
from pandas import ExcelWriter
from pandas import ExcelFile
from os import listdir  # avoid aliasing listdir to the builtin name `list`

file_list = listdir("C:\\Users\\dia5cob\\Desktop\\NAP\\NAP-Forms")
df = pd.DataFrame(file_list)
writer = ExcelWriter("C:\\Users\\dia5cob\\Desktop\\NAP\\NAP_Forms_list.xlsx")
df.to_excel(writer, 'Sheet1', index=False)
writer.save()
writer.close()

a = open('name').read()
print(a)

# Positioned reads/writes:
# seek's second argument: 0 = start of file, 1 = current position, 2 = end;
# the first argument moves right (+) or left (-) by that many bytes
f = open(filename)
f.seek(2, 0)  # skip two bytes from the start; seek allows re-reading
f.tell()      # current position

## Common file operations
import os
os.rename('oldname.txt', 'filename.txt')  # rename a file
os.remove('name.txt')                     # delete a file
os.mkdir('file')                          # create a directory
os.rmdir('file')                          # remove a directory
os.getcwd()                               # return the current working directory
os.chdir('')                              # change the working directory
os.listdir("./")                          # list the entries in the current directory

## Batch renaming
import os
folder_name = "file"                  # name of the target folder
file_names = os.listdir(folder_name)  # list every file in the folder
for name in file_names:
    print(name)
    old_file_name = folder_name + "/" + name
    new_file_name = folder_name + "/" + "[OK]-" + name  # new name
    os.rename(old_file_name, new_file_name)

# pickle stores native Python objects
import pickle
D = [2, 3, 4]
F = open('name.pkl', 'wb')
pickle.dump(D, F)
F.close()

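# A minimal sketch of the seek/tell behaviour described above; 'demo.txt' is
# an assumed throwaway file. Note that in Python 3, seeking relative to the
# current position or the end requires the file to be opened in binary mode.
with open('demo.txt', 'w') as f:
    f.write('abcdef')
with open('demo.txt', 'rb') as f:
    f.seek(2, 0)      # 2 bytes from the start -> position 2
    print(f.read(2))  # b'cd'
    print(f.tell())   # 4
    f.seek(-2, 2)     # 2 bytes back from the end -> position 4
    print(f.read())   # b'ef'
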
print "hello" print "1+2+3" import os dir() os.list()
for i in os.listdir(test):
    print(dir_path + "\\" + i)
    if ".txt" == i[-4:]:
        os.remove(dir_path + "\\" + i)

# Collect the names of all files created within the last hour in a directory.
# Algorithm: walk the directory to get every entry,
# use stat to get each file's creation time,
# compare it against the current time (difference under 3600 seconds),
# and collect the matches in a list.
import os
import time

result = []
current_timestamp = time.time()  # get the current timestamp
for i in os.listdir('e:\\test'):
    full_path = os.path.join('e:\\test', i)  # isfile/stat need the full path, not just the name
    if os.path.isfile(full_path):
        if current_timestamp - os.stat(full_path).st_ctime <= 3600:
            result.append(full_path)

# Exercise: delete all the .txt files.
# Create an empty subdirectory xxx at some level of the tree, then delete it.
# encoding=utf-8
import os
import os.path

dir_count = 0
file_count = 0

import fnmatch
import os

for file in os.listdir('.'):
    if fnmatch.fnmatch(file, '*.xml'):
        print(file)

import os
from tqdm import tqdm
from random import shuffle

list_images = os.listdir('./dataset')
if not os.path.exists('./data'):
    os.mkdir('./data')
for x in list_images:
    label = x.split('_')[0]
    print(label)
    if not os.path.exists(os.path.join('./data', label)):
        os.mkdir(os.path.join('./data', label))
    os.rename(os.path.join('./dataset', x), os.path.join('./data', label, x))

base_dir = ''
train_dir = os.path.join(base_dir, 'train')
validation_dir = os.path.join(base_dir, 'validation')

# Directory with our training cat pictures
train_cats_dir = os.path.join(train_dir, 'cats')
# Directory with our training dog pictures
train_dogs_dir = os.path.join(train_dir, 'dogs')
# Directory with our validation cat pictures
validation_cats_dir = os.path.join(validation_dir, 'cats')
# Directory with our validation dog pictures
validation_dogs_dir = os.path.join(validation_dir, 'dogs')

train_cat_fnames = os.listdir(train_cats_dir)
print(train_cat_fnames[:10])

train_dog_fnames = os.listdir(train_dogs_dir)
train_dog_fnames.sort()
print(train_dog_fnames[:10])

print('total training cat images', len(os.listdir(train_cats_dir)))
print('total training dog images', len(os.listdir(train_dogs_dir)))
print('total validation cat images', len(os.listdir(validation_cats_dir)))
print('total validation dog images', len(os.listdir(validation_dogs_dir)))

import matplotlib.pyplot as plt
import matplotlib.image as mpimg

# Parameters for our graph