def load_file(self, net, net_weight_file):
    """Load a checkpoint from *net_weight_file* into *net*.

    Restores the state dict (handling the DataParallel 'module.' prefix via
    my_load), optionally resumes the epoch counter, and on a fresh run
    (save_freq == 0) seeds best_score from a previous 'best.pkl' and keeps a
    copy of the starting weights as 'starter.pkl'.

    Returns the loaded net and the (possibly updated) config.
    """
    # address the issue of DataParallel
    contents = torch.load(net_weight_file, map_location='cpu')
    # ('/home/data/dl_processor/net_params.pkl')
    state_dict = contents['state_dict']
    net = my_load(net, state_dict, self.strict)
    # Only override start_epoch when resuming and the caller left it at 1.
    if self.config.net['resume'] and self.config.train['start_epoch'] == 1:
        self.config.train['start_epoch'] = contents['epoch'] + 1
    if self.save_freq == 0:
        if os.path.exists(os.path.join(self.save_dir, 'best.pkl')):
            self.best_score = torch.load(
                os.path.join(self.save_dir, 'best.pkl'))['loss']
        # fix: shutil is a module and is not callable — copy the starting
        # weights into the save directory.
        shutil.copy(net_weight_file, os.path.join(self.save_dir, 'starter.pkl'))
    return net, self.config
def _convert03to05(dbfile):
    """Convert files between 0.3 and 0.5 to 0.5.x

    This is necessary because older versions used a column named date in the
    file table, which is an sqlite function, and that column contained an
    incorrectly formatted date time string. This converter changes the name
    of that column and converts the dates to datetime in the database.

    Returns (True, message) on success, (False, error_string) on failure; on
    failure the backup copy is restored over the original file.
    """
    # Create a backup copy
    p, f = os.path.split(dbfile)
    n, e = os.path.splitext(f)
    bu = os.path.join(p, n + '_backup0.3' + e)
    shutil.copyfile(dbfile, bu)
    try:
        with sqlite3.connect(dbfile) as con:
            ov = con.execute(
                'SELECT AppFileVersion FROM AppData').fetchone()[0]
            # Create the new File table
            con.execute('DROP TABLE IF EXISTS TMP')
            cfq = (
                'CREATE TABLE TMP (FilId INTEGER PRIMARY KEY AUTOINCREMENT, '
                'tagged INTEGER, filename TEXT, directory TEXT, '
                'filedate DATETIME, hash TEXT, thumbnail BLOB, '
                'importTimeUTC DATETIME DEFAULT CURRENT_TIMESTAMP)')
            con.execute(cfq)
            # Copy the data from the old table to the new, converting the date
            mq = ('INSERT INTO TMP '
                  'SELECT FilId, tagged, filename, directory, '
                  'case substr(date, 5, 1) WHEN ":" THEN substr(date, 1, 4) '
                  '|| "-" || substr(date,6,2) || "-" || substr(date, 9,2) '
                  '|| " " || substr(date, 12) ELSE date END filedate, '
                  'hash, thumbnail, importTimeUTC FROM File')
            con.execute(mq)
            # Remove the old table and rename the new
            con.execute('DROP TABLE File')
            con.execute('ALTER TABLE TMP RENAME TO File')
            # Update the AppFileVersion
            u = 'UPDATE AppData SET AppFileVersion = ?'
            con.execute(u, (__release__, ))
    except Exception as err:
        print('There was an error converting {} to 0.5'.format(n + e))
        traceback.print_exc()
        # fix: shutil is a module and is not callable — restore the backup
        # over the (possibly half-converted) database file.
        shutil.copyfile(bu, dbfile)
        return False, str(err)
    return (True, "Converted {} to {}".format(ov, __release__))
def setUp(self):
    """Build a fresh Config / ProfileFactory fixture in a clean working dir."""
    args = argparse.Namespace()
    args.sequence_db = self.find_data("base", "test_1.fasta")
    args.db_type = 'gembase'
    args.models_dir = self.find_data('models')
    args.res_search_dir = tempfile.gettempdir()
    args.log_level = 0
    self.cfg = Config(MacsyDefaults(), args)
    # fix: shutil is a module and is not callable — remove any stale working
    # directory left over from a previous run before recreating it.
    if os.path.exists(self.cfg.working_dir()):
        shutil.rmtree(self.cfg.working_dir())
    os.makedirs(self.cfg.working_dir())
    self.model_name = 'foo'
    self.model_location = ModelLocation(
        path=os.path.join(args.models_dir, self.model_name))
    self.profile_factory = ProfileFactory(self.cfg)
def write_tracks_minimal(self, file):
    """Append newly-found frame/detection pairs to each track line of *file*.

    Each line of *file* is "t_id\\tframe,[...\\tframe,[..."; for every track id
    present in self.tracks_dict_rev, entries for frames not already on the
    line are appended as "frame,det". The file is rewritten atomically via a
    temp file in the same directory.
    """
    # appending lines if new frames have been found, could take
    temp_path = os.path.join(os.path.split(file)[0], "_temp_out.txt")
    with open(file, "r") as f, open(temp_path, "w") as f_temp:
        for line in f:
            l1 = line.split("\t")
            # frame number is everything before the ",[" separator
            frames = [int(x.strip().split(",[")[0]) for x in l1[1:]]
            t_id = int(l1[0])
            if t_id in self.tracks_dict_rev.keys():
                new_l = "\t".join([
                    str(f) + "," + str(det)
                    for f, det in self.tracks_dict_rev[t_id].items()
                    if f not in frames
                ])
                line = line.strip() + "\t" + new_l + "\n" if len(
                    new_l) > 0 else line
            f_temp.write(line)
    # fix: was `shutil(f_temp, file)` — shutil is a module, and f_temp is a
    # (closed) file object; move the temp file's path over the original.
    shutil.move(temp_path, file)
def load_file(self, net, net_weight_file):
    """Load a checkpoint from *net_weight_file* into *net*.

    Restores the state dict (handling the DataParallel 'module.' prefix via
    my_load), optionally resumes the epoch counter, and on a fresh run
    (save_freq == 0) seeds best_score from a previous 'best.pkl' and keeps a
    copy of the starting weights as 'starter.pkl'.

    Returns the loaded net and the (possibly updated) config.
    """
    # address the issue of DataParallel
    contents = torch.load(
        net_weight_file,
        map_location="cpu")  # ('/home/data/dl_processor/net_params.pkl')
    state_dict = contents["state_dict"]
    net = my_load(net, state_dict, self.strict)
    if self.config["net"]["resume"] and self.config["train"][
            "start_epoch"] == 1:
        # NOTE(review): a sibling implementation resumes at
        # contents["epoch"] + 1 — confirm whether the missing "+ 1" here
        # is intentional (this re-runs the checkpointed epoch).
        self.config["train"]["start_epoch"] = contents["epoch"]
    if self.save_freq == 0:
        if os.path.exists(os.path.join(self.save_dir, "best.pkl")):
            self.best_score = torch.load(
                os.path.join(self.save_dir, "best.pkl"))["loss"]
        # fix: shutil is a module and is not callable — copy the starting
        # weights into the save directory.
        shutil.copy(net_weight_file, os.path.join(self.save_dir, "starter.pkl"))
    # net.encoder_state_dict = contents["encoder_state_dict"]
    # net.encoder.load_state_dict(contents["encoder_state_dict"][0])
    return net, self.config
def restart_calculations():
    """Set up a new restart directory for an interrupted calculation.

    Scans the current directory for numbered run dirs ("N-..."), creates a
    new work dir, and copies the .inp input file, the previous orbital file
    and the last geometry into it.
    """
    general_path = pathlib.Path.cwd()
    new_dir_number = 1
    for cur_dir in general_path.iterdir():
        if cur_dir.is_dir():
            if "-" in cur_dir.name:
                number = int(cur_dir.name.split("-")[0])
                if new_dir_number < number:
                    new_dir_number = number + 1
    # NOTE(review): this names the dir after `number` (the last dir seen in
    # iteration order), not `new_dir_number` — confirm which is intended.
    work_dir = general_path / (str(number) + "opt")
    work_dir.mkdir()
    coords = get_last_geom(general_path)
    input_file = ""
    for cur_file in general_path.iterdir():
        if cur_file.is_file():
            if ".inp" in cur_file.name:
                input_file = cur_file
    # fix: shutil is a module and is not callable — copy the input and the
    # previous orbital file into the new work dir.
    shutil.copy(str(input_file), str(work_dir / "opt.inp"))
    shutil.copy(str(general_path / "Addition_output/opt.RasOrb"),
                str(work_dir / "pr_orb.RasOrb"))
    # NOTE(review): writes opt.xyz in the CWD, not in work_dir — confirm.
    with open("opt.xyz", "w") as out_geom_file:
        out_geom_file.writelines(coords)
def parse_args():
    """Parse CLI paths/seed, seed all RNGs, and prepare the output dir.

    Also copies the SpanBERT config file into the output directory.
    Returns the parsed argparse.Namespace.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--spanbert_config_path",
        default="/home/lixiaoya/spanbert_base_cased/config.json",
        type=str)
    parser.add_argument(
        "--bert_tf_ckpt_path",
        default="/home/lixiaoya/cased_L-12_H-768_A-12/bert_model.ckpt",
        type=str)
    parser.add_argument(
        "--spanbert_pytorch_bin_path",
        default="/home/lixiaoya/spanbert_base_cased/pytorch_model.bin",
        type=str)
    parser.add_argument(
        "--output_spanbert_tf_dir",
        default="/home/lixiaoya/tf_spanbert_base_case",
        type=str)
    parser.add_argument("--seed", default=2333, type=int)
    args = parser.parse_args()

    # Seed every RNG source for reproducibility.
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    tf.set_random_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)

    os.makedirs(args.output_spanbert_tf_dir, exist_ok=True)
    try:
        # fix: shutil is a module and is not callable — the bare `except:`
        # silently swallowed the TypeError, so the config was never copied.
        shutil.copy(args.spanbert_config_path, args.output_spanbert_tf_dir)
    except OSError:
        # Best-effort: report the intended copy rather than aborting.
        print("#=#" * 30)
        print("copy spanbert_config from {} to {}".format(
            args.spanbert_config_path, args.output_spanbert_tf_dir))
    return args
elif op.exists(archive) is True and op.isdir(archive) is False: broken = archive os.mkdir(temparc) archive = temparc elif op.exists(archive) is True and op.isdir(archive) is True: pass # arcin = archive+"/data-input" # arcout = archive+"/data-output" arcin = "{0}/{1}_data-input".format(archive, dd.today().isoformat()) arcout = "{0}/{1}_data-output".format(archive, dd.today().isoformat()) if op.exists(arcin) is False: os.mkdir(arcin) elif op.exists(arcin) is True and op.isdir(arcin) is False: shutil(arcin, "arcin-{0}".format(str(time.time()))) os.mkdir(arcin) else: pass if op.exists(arcout) is False: os.mkdir(arcout) elif op.exists(arcout) is True and op.isdir(arcout) is False: shutil(arcout, "arcout-{0}".format(str(time.time()))) os.mkdir(arcout) else: pass infiles = [] outfiles = [] readin = "{0}/README.rst".format(indir)
#!/usr/bin/python
"""Move ASOS meteogram files from each 20200* date dir into an ORIG subdir."""
import os
import shutil

inDirBase = '/home/disk/funnel/impacts-website/archive_ncar/surface/Meteogram'

for date in os.listdir(inDirBase):
    if date.startswith('20200'):
        inDir = inDirBase + '/' + date
        origDir = inDir + '/ORIG'
        if not os.path.isdir(origDir):
            # fix: bare `makedirs(...)` was a NameError — qualify with os.
            os.makedirs(origDir)
        for file in os.listdir(inDir):
            if 'ASOS' in file:
                # fix: shutil is a module and is not callable; moving into
                # ORIG — assumed move (not copy) since ORIG holds originals,
                # TODO confirm.
                shutil.move(inDir + '/' + file, origDir + '/' + file)
# (fragment: `a` and `b` are defined earlier in the original exercise file)
a.append('h')
print(b)
print(a)

# Removing even numbers from a list
l = list(input('enter the elements of the list'))
# fix: removing items from a list while iterating over it skips elements;
# iterate over a copy so every element is examined.
for i in list(l):
    if int(i) % 2 == 0:
        l.remove(i)
print(l)

# Copy the contents of a file to another file
import shutil
path = 'C:\\Users\\naveen\\Documents\\example1.txt'
target = 'C:\\Users\\naveen\\Documents\\example2.txt'
# fix: shutil is a module and is not callable — use shutil.copy.
shutil.copy(path, target)
f = open('example2.txt')
a = f.read()
print(a)

# sum of all the items in a list
l = list(input('enter the list elements'))
sum = 0  # NOTE: shadows the builtin sum() for the rest of the script
for i in l:
    sum += int(i)
print(sum)
"""Reset the Empire database, debug log and download folder."""
import os
import shutil

Empire_home = os.sep.join(os.getcwd().split(os.sep)[0:-1])
Empire_db = os.path.join(Empire_home, 'data' + os.sep + 'empire.db')
Empire_debug = os.path.join(Empire_home, 'empire.debug')
Empire_setup = os.path.join(Empire_home, 'setup' + os.sep + 'setup_database.py')
Empire_download = os.path.join(Empire_home, 'download')

# reset the database
if os.path.exists(Empire_db):
    os.unlink(Empire_db)
os.system('python %s' % (Empire_setup))

# remove the debug file if it exists
if os.path.exists(Empire_debug):
    # fix: was os.unlink('python %s' % (Empire_debug)) — that path with the
    # 'python ' prefix never exists, so the debug file was never removed.
    os.unlink(Empire_debug)

# remove the download folders
if os.path.exists(Empire_download):
    # fix: shutil is a module and is not callable — recursively remove dir.
    shutil.rmtree(Empire_download)
def getCtrlBox(self):
    """Copy the Control Box pdf into the job folder. Only for Remote Jobs."""
    ctrlPdf = ctrlDict[self.ctrl]
    # fix: shutil is a module and is not callable — copy the pdf into the
    # workorder folder.
    shutil.copy(
        r'C:\Users\tfountain\Desktop\Control Box\{}'.format(ctrlPdf),
        r'C:\Users\tfountain\Desktop\{}'.format(self.workorder))
#!/usr/bin/env python3
"""Download the Dfam HMM database, optionally relocating it to --output."""
import argparse

import wget
import shutil

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Download dfam db')
    parser.add_argument(
        '--output',
        help='Give an output destination /home/user/Dfam.hmm.gz')
    args = parser.parse_args()
    url = 'http://dfam.org/web_download/Current_Release/Dfam.hmm.gz'
    if args.output:
        print("We are download file to {}".format(args.output))
        filename = wget.download(url)
        # fix: shutil is a module and is not callable — move the file
        # downloaded into the CWD to the requested destination.
        shutil.move(filename, args.output)
    else:
        print("We are download file to Dfam.hmm.gz")
        filename = wget.download(url)
def parseXmlFiles(xml_path, image_path, new_path_for_save_wrong_xml):
    """Interactively review Pascal-VOC xml annotations against their images.

    Shows each image with its bounding boxes drawn; Enter advances to the
    next file, Space marks the annotation as wrong (moving the xml to
    new_path_for_save_wrong_xml) or lets the user jump to a picture number.
    """
    num_pic = 0
    file_dict = {id: name for id, name in enumerate(os.listdir(xml_path))}
    len_file_dict = len(file_dict)
    num_pic_st_len = True
    while num_pic_st_len:
        if num_pic >= len_file_dict - 1:
            num_pic_st_len = False
        f = file_dict[num_pic]
        num = 0
        # print(f)
        if not f.endswith('.xml'):  # jump off non-xml files
            # NOTE(review): num_pic is not advanced here, so a non-xml entry
            # makes this loop spin forever — confirm intended behavior.
            continue
        labels = []
        xml_file = os.path.join(xml_path, f)
        tree = ET.parse(xml_file)
        root = tree.getroot()
        if root.tag != 'annotation':
            raise Exception(
                'pascal voc xml root element should be annotation, rather than {}'
                .format(root.tag))
        for elem in root:  # root.tag = Annotation
            if elem.tag == "filename":
                pic_name = elem.text[:-4]
                img_name = os.path.join(image_path, pic_name + '.bmp')
                # print(img_name)
                # img = cv2.imread(img_name, cv2.IMREAD_COLOR)
                # imdecode+fromfile handles non-ascii paths on Windows
                img = cv2.imdecode(
                    np.fromfile(u'{}'.format(img_name), dtype=np.uint8), 1)
            if elem.tag == "object":  # elem.tag = frame,object
                is_append = True
                is_end = False
                for subelem in elem:  # subelem.tag = name,bndbox
                    if is_append:
                        # if list was just appended, reintialize the bndbox
                        bndbox = dict()
                        is_append = False
                    if subelem.tag == "name":
                        bndbox["name"] = subelem.text
                    if subelem.tag == "bndbox":
                        # option.tag = xmin,ymin,xmax,ymax
                        for option in subelem:
                            if option.tag == 'xmin':
                                x1 = int(option.text)
                            if option.tag == 'ymin':
                                y1 = int(option.text)
                            if option.tag == 'xmax':
                                x2 = int(option.text)
                            if option.tag == 'ymax':
                                y2 = int(option.text)
                        is_end = True
                    if is_end:
                        # if all location and class of current bndbox have
                        # been read, append current bndbox to list pool
                        num += 1
                        labels.append(bndbox)
                        is_end = False
                        is_append = True
                        draw_caption(img, (x1, y1, x2, y2),
                                     bndbox["name"] + ":" + pic_name)
                        # print(img)
                        # img = cv2.cvtColor(img.astype(np.uint8), cv2.COLOR_BGR2RGB)
                        cv2.rectangle(img, (x1, y1), (x2, y2),
                                      color=(0, 255, 0), thickness=2)
        # cv2.namedWindow("img", 1)
        xy = (0, 0)

        def on_EVENT_LBUTTONDOWN(event, x, y, flags, param):
            if event == cv2.EVENT_LBUTTONDOWN:
                xy = "%d,%d" % (x, y)
                cv2.rectangle(img, (x1, y1), (x2, y2),
                              color=(0, 255, 0), thickness=2)
                cv2.rectangle(img, (x1, y), (x2, y2),
                              color=(255, 0, 0), thickness=2)
                # cv2.imshow('img', img)

        # cv2.imshow('img', img)
        # cv2.setMouseCallback("img", on_EVENT_LBUTTONDOWN)
        # print(xy)
        # xy = (0, 0)
        # cv2.imshow('img', img)
        # cv2.waitKey(0)
        # display new annotated img
        cv2.imshow('Press Enter to pass, or space to select it as wrong: ',
                   img)
        # fix: this assignment had been swallowed into a comment, leaving
        # waitkey_num undefined below.
        waitkey_num = cv2.waitKeyEx()
        if waitkey_num == 32:  # space
            n_tmp = input(
                "Input 'jump' for jump back, or input anything else for select is as wrong:"
            )
            if n_tmp == 'jump':
                print('Current pic number is : ', num_pic)
                n_tmp = input("Enter the numbers you need to jump:")
                num_pic = int(n_tmp)
            else:
                # fix: shutil is a module and is not callable — move the
                # wrong xml aside (assumed move, not copy — TODO confirm).
                shutil.move(xml_file,
                            os.path.join(new_path_for_save_wrong_xml, f))
            cv2.destroyAllWindows()
        if waitkey_num == 13:  # enter
            num_pic += 1
            cv2.destroyAllWindows()
# Dispatch a media/document file by extension: images/video go to Twitter
# upload, text markups are converted to html5 via pandoc.
mfle = mfl.endswith
fp, fe = op.splitext(mediaf)
fn = fp.split("/")[-1]
fm = "Drafts/{0}_orig.html".format(fn)
fo = "Drafts/{0}.html".format(fn)
fi = "InputFiles/{0}{1}".format(fn, fe)
fz = "InputFiles/{0}.{1}".format(fn, fe.lower())
fxp = "InputFiles/{0}.png".format(fn)
fxj = "InputFiles/{0}.jpg".format(fn)
fxg = "InputFiles/{0}.gif".format(fn)
fxm = "InputFiles/{0}.mp4".format(fn)
# fix: the original `a or b or c is True` chains only applied `is True` to
# the last call (a precedence trap); endswith accepts a tuple of suffixes
# and returns the same booleans.
if mfle((".png", ".gif", ".jpg", ".jpeg", ".mp4")):
    if mediaf == fi:
        print("File ready for upload.")
    else:
        # fix: `shutil(mediaf, )` was a broken call with a missing argument;
        # assumed intent is copying the media into InputFiles — TODO confirm
        # destination.
        shutil.copy(mediaf, fi)
    mf = open(mediaf, "rb")
    response = twitter.upload_media(media=mf)
    mfid.append(response["media_id"])
elif mfle((".md", ".txt", ".text")):
    s.call([pandoc, "-f", "markdown", "-t", "html5", "-s", "-o", fo, mediaf])
    # , stdout=pipe).communicate()
elif mfle((".rst", ".rest")):
    s.call([pandoc, "-f", "rst", "-t", "html5", "-s", "-o", fo, mediaf])
elif mfle(".org"):
    s.call([pandoc, "-f", "org", "-t", "html5", "-s", "-o", fo, mediaf])
elif mfle((".htm", ".xhtml", ".htmlx")):
    s.call([pandoc, "-f", "html", "-t", "html5", "-s", "-o", fo, mediaf])
elif mfle(".html"):
    if mediaf == fo:
        shutil.move(mediaf, fm)
file_name = url.split('/')[-1] pic_out = open('content_img_' + file_name + '.jpg', 'wb') pic_out.write(r2) pic_out.close() def shutil(url): import shutil """Use the shutil.copyfileobj the file name is shutil_img.jpg.""" sh = requests.get(url, stream=True) file_name = url.split('/')[-1] pic_out = open('shutil_img_' + file_name + '.jpg', 'wb') shutil.copyfileobj(sh.raw, pic_out) pic_out.close() url = 'https://s1.imgs.cc/img/aaaaaAlIU.jpg?_w=750' content(url) shutil(url) print( 'We can see the shutil which cannot get the picture from this page, but content can get successfully.' ) url = 'https://files.ckcdn.com/attachments/forum/201905/02/013301re4yvl9vvv191hgy.png.thumb.jpg' content(url) shutil(url) print('But if our url has the name of jpg, then both can get successfully.') print('So I will say content method is more flexible on here.')
fp, fe = op.splitext(mediaf) fn = fp.split("/")[-1] fm = "Drafts/{0}_orig.html".format(fn) fo = "Drafts/{0}.html".format(fn) fi = "InputFiles/{0}{1}".format(fn, fe) fz = "InputFiles/{0}.{1}".format(fn, fe.lower()) fxp = "InputFiles/{0}.png".format(fn) fxj = "InputFiles/{0}.jpg".format(fn) fxg = "InputFiles/{0}.gif".format(fn) fxm = "InputFiles/{0}.mp4".format(fn) if mfle(".png") or mfle(".gif") or mfle(".jpg") or mfle( ".jpeg") or mfle(".mp4") is True: if mediaf == fi: print("File ready for upload.") else: shutil(mediaf, ) mf = open(mediaf, "rb") response = twitter.upload_media(media=mf) mfid.append(response["media_id"]) elif mfle(".md") or mfle(".txt") or mfle(".text") is True: s.call( [pandoc, "-f", "markdown", "-t", "html5", "-s", "-o", fo, mediaf]) # , stdout=pipe).communicate() elif mfle(".rst") or mfle(".rest") is True: s.call([pandoc, "-f", "rst", "-t", "html5", "-s", "-o", fo, mediaf]) elif mfle(".org") is True: s.call([pandoc, "-f", "org", "-t", "html5", "-s", "-o", fo, mediaf]) elif mfle(".htm") or mfle(".xhtml") or mfle(".htmlx") is True: s.call([pandoc, "-f", "html", "-t", "html5", "-s", "-o", fo, mediaf]) elif mfle(".html") is True: if mediaf == fo: