def configure_step(self):
    """
    Configure LAPACK for build: copy build_step.inc and set make options
    """
    # copy build_step.inc file from examples
    if self.toolchain.comp_family() == toolchain.GCC:  #@UndefinedVariable
        makeinc = 'gfortran'
    elif self.toolchain.comp_family() == toolchain.INTELCOMP:  #@UndefinedVariable
        makeinc = 'ifort'
    else:
        self.log.error("Don't know which build_step.inc file to pick, unknown compiler being used...")

    src = os.path.join(self.cfg['start_dir'], 'INSTALL', 'make.inc.%s' % makeinc)
    dest = os.path.join(self.cfg['start_dir'], 'make.inc')

    if not os.path.isfile(src):
        self.log.error("Can't find source file %s" % src)
    if os.path.exists(dest):
        self.log.error("Destination file %s exists" % dest)

    try:
        shutil.copy(src, dest)
    except OSError, err:
        self.log.error("Copying %s to %s failed: %s" % (src, dest, err))
def sync(self, args):
    """ Synchronize rtc/repository.yaml file and each rtc repository version hash. """
    options, argv = self.parse_args(args[:], self._print_alternative_rtcs)
    verbose = options.verbose_flag

    sys.stdout.write('# Writing repository.yaml for package distribution\n')
    sys.stdout.write('## Parsing RTC directory\n')
    package = admin.package.get_package_from_path(os.getcwd())
    repos = []
    for rtc in admin.rtc.get_rtcs_from_package(package, verbose=verbose):
        sys.stdout.write('### RTC %s\n' % rtc.rtcprofile.basicInfo.name)
        repo = admin.repository.get_repository_from_path(rtc.path, description=rtc.rtcprofile.basicInfo.description)
        repos.append(repo)

    repo_file = os.path.join(package.get_rtcpath(), 'repository.yaml')
    bak_file = repo_file + wasanbon.timestampstr()
    if os.path.isfile(bak_file):
        os.remove(bak_file)
    import shutil, yaml
    shutil.copy(repo_file, bak_file)
    dic = yaml.load(open(bak_file, 'r'))
    if not dic:
        dic = {}
    for repo in repos:
        if getattr(repo, 'url') != None:
            url = repo.url.strip()
        else:
            url = ''
        dic[repo.name] = {'repo_name': repo.name, 'git': url, 'description': repo.description, 'hash': repo.hash}
    yaml.dump(dic, open(repo_file, 'w'), encoding='utf8', allow_unicode=True, default_flow_style=False)
def initEnKFrun(callback=None, basedir=None, site=None, siteparam=None):
    if not site:
        raise ValueError("site is a required parameter")
    if siteparam:
        param = ast.literal_eval(siteparam)
    else:
        raise ValueError("Parameters are required. Please submit parameters with the task submission")
    if not basedir:
        raise ValueError("basedir is required")

    # Create working directory
    newDir = basedir + "celery_data/" + str(initEnKFrun.request.id)
    check_call(["mkdir", newDir])
    os.chdir(newDir)

    # Copy MATLAB code to the working directory
    codeDir = basedir + 'enfk_matlab/'
    for file in glob.glob(codeDir + '*'):
        shutil.copy(file, newDir)
    #check_call(["cp", "-r", codeDir + '*', newDir])

    # Set initial parameter files
    setup_param(newDir, param)
    if callback:
        result = subtask(callback).delay(task_id=str(initEnKFrun.request.id), wkdir=newDir)
        return {'task_id': result.task_id, 'task_name': result.task_name}
    else:
        return newDir
def prepareSession(self):
    """Session test script is copied into the session directory.
    The test script is prepared as a python module to enable execution."""
    shutil.copy(self.configFile, self.sessionDir)
    # copy the test script into the session directory
    shutil.copy(self.testScript, self.sessionDir)
    # create an __init__ file here for us to import the test script as <session_id>.script.<test_script>
    session_init_file = os.path.join(self.sessionDir, '__init__.py')
    utils.Utils.touch(session_init_file)
    # Extract the name of the test script from the input arguments:
    # first split the input into directory name and filename.extn
    directoryName, fileName = os.path.split(self.testScript)
    # now split filename and extension
    moduleName = os.path.splitext(fileName)[0]
    self.moduleName = moduleName
    # the path would be <sessionDir>.script.testModule
    script_import_path = "'" + str(self.sessionID) + '.script.' + moduleName + "'"
    # make sure that the path is available in sys.path
    sys.path.append(self.sessionDir)
    _testModule = __import__(self.moduleName, globals(), locals(), [], -1)
    self.testModule = _testModule.testData()
def save_tmp_file(fileobj, filename, ext):
    if ext in IMAGES_EXT:
        f = open("/tmp/" + filename + ext, 'wb')
        shutil.copyfileobj(fileobj, f)
        f.close()
        helpers.resize_image(filename, ext)
        if ext != '.jpg':
            os.remove("/tmp/" + filename + ext)
        return '.jpg'
    if ext in ['.txt']:
        f = open("/tmp/" + filename + ext, 'w')
        shutil.copyfileobj(fileobj, f)
        f.close()
        return ext
    if ext in ['.dcm', '.dicom']:
        f = open("/tmp/" + filename + ext, 'w')
        shutil.copyfileobj(fileobj, f)
        f.close()
        try:
            main_helpers.dicom.saveDicomAsImage("/tmp/" + filename + ext,
                                                "/tmp/" + filename + ext + ".thumbnail.jpg")
        except:
            shutil.copy(
                os.path.join(settings.BASE_DIR, 'static/img/files/dicom.png'),
                "/tmp/" + filename + ext + ".thumbnail.png"
            )
        return ext
    f = open("/tmp/" + filename + ext, 'wb')
    shutil.copyfileobj(fileobj, f)
    f.close()
    return ext
def cp_r(src, dest):
    src = expandPath(src)
    dest = expandPath(dest)
    if os.path.isdir(src):
        shutil.copytree(src, dest)
    else:
        shutil.copy(src, dest)
def build_dist(self):
    for sdir in self.staging_dirs:
        if os.path.exists(sdir):
            shutil.rmtree(sdir)
    main_stage, ninja_stage = self.staging_dirs
    modules = [os.path.splitext(os.path.split(x)[1])[0] for x in glob(os.path.join('mesonbuild/modules/*'))]
    modules = ['mesonbuild.modules.' + x for x in modules if not x.startswith('_')]
    modules += ['distutils.version']
    modulestr = ','.join(modules)
    python = shutil.which('python')
    cxfreeze = os.path.join(os.path.dirname(python), "Scripts", "cxfreeze")
    if not os.path.isfile(cxfreeze):
        print("ERROR: This script requires cx_freeze module")
        sys.exit(1)
    subprocess.check_call([python, cxfreeze, '--target-dir', main_stage, '--include-modules', modulestr, 'meson.py'])
    if not os.path.exists(os.path.join(main_stage, 'meson.exe')):
        sys.exit('Meson exe missing from staging dir.')
    os.mkdir(ninja_stage)
    shutil.copy(shutil.which('ninja'), ninja_stage)
    if not os.path.exists(os.path.join(ninja_stage, 'ninja.exe')):
        sys.exit('Ninja exe missing from staging dir.')
def test_submit_with_all_values_has_no_error_messages(self):
    res = self.testapp.get("/")
    form = res.forms["deform"]
    form["serial"] = "ft789"
    form["coefficient_0"] = "100"
    form["coefficient_1"] = "101"
    form["coefficient_2"] = "102"
    form["coefficient_3"] = "103"

    # Submitting via an actual browser strips the directory
    # prefixes. Copy the files to temporary locations to exactly
    # mimic this.
    image0_file = "resources/image0_defined.jpg"
    image1_file = "resources/image1_defined.jpg"
    shutil.copy(image0_file, "localimg0.jpg")
    shutil.copy(image1_file, "localimg1.jpg")

    # From:
    # http://stackoverflow.com/questions/3337736/\
    # how-do-i-use-pylons-paste-webtest-with-multiple-\
    # checkboxes-with-the-same-name
    top_index = 0
    bottom_index = 1
    form.set("upload", Upload("localimg0.jpg"), top_index)
    form.set("upload", Upload("localimg1.jpg"), bottom_index)

    submit_res = form.submit("submit")
    self.assertTrue("was a problem with" not in submit_res.body)
def test_submit_with_images_report_and_thumbnail_matches_size(self):
    res = self.testapp.get("/")
    form = res.forms["deform"]
    form["serial"] = "ft789"
    form["coefficient_0"] = "100"
    form["coefficient_1"] = "101"
    form["coefficient_2"] = "102"
    form["coefficient_3"] = "103"

    # Submitting via an actual browser strips the directory
    # prefixes. Copy the files to temporary locations to exactly
    # mimic this.
    image0_file = "resources/image0_defined.jpg"
    image1_file = "resources/image1_defined.jpg"
    shutil.copy(image0_file, "localimg0.jpg")
    shutil.copy(image1_file, "localimg1.jpg")

    # From:
    # http://stackoverflow.com/questions/3337736/\
    # how-do-i-use-pylons-paste-webtest-with-multiple-\
    # checkboxes-with-the-same-name
    top_index = 0
    bottom_index = 1
    form.set("upload", Upload("localimg0.jpg"), top_index)
    form.set("upload", Upload("localimg1.jpg"), bottom_index)

    submit_res = form.submit("submit")

    res = self.testapp.get("/view_pdf/ft789")
    pdf_size = res.content_length
    self.assertTrue(size_range(pdf_size, 106456, ok_range=40000))

    res = self.testapp.get("/view_thumbnail/ft789")
    png_size = res.content_length
    self.assertTrue(size_range(png_size, 217477, ok_range=40000))
def _handle_files(src, dest, root, files, filter_fun, quiet):
    """ Helper function used by install() """
    installed = list()
    rel_root = os.path.relpath(root, src)
    if rel_root == ".":
        rel_root = ""
    new_root = os.path.join(dest, rel_root)
    for f in files:
        if not filter_fun(os.path.join(rel_root, f)):
            continue
        fsrc = os.path.join(root, f)
        fdest = os.path.join(new_root, f)
        if os.path.islink(fsrc):
            mkdir(new_root, recursive=True)
            _copy_link(fsrc, fdest, quiet)
            rel_path = os.path.join(rel_root, f)
            installed.append(rel_path)
        else:
            if os.path.lexists(fdest) and os.path.isdir(fdest):
                raise Exception("Expecting a file but found a directory: %s" % fdest)
            if sys.stdout.isatty() and not quiet:
                print "-- Installing %s" % fdest
            mkdir(new_root, recursive=True)
            # We do not want to fail if dest exists but is read only
            # (following what `install` does, but not what `cp` does)
            rm(fdest)
            shutil.copy(fsrc, fdest)
    return installed
def gatk_realigner(align_bam, ref_file, config, dbsnp=None, region=None,
                   out_file=None, deep_coverage=False):
    """Realign a BAM file around indels using GATK, returning sorted BAM.
    """
    runner = broad.runner_from_config(config)
    runner.run_fn("picard_index", align_bam)
    runner.run_fn("picard_index_ref", ref_file)
    if not os.path.exists("%s.fai" % ref_file):
        pysam.faidx(ref_file)
    if region:
        align_bam = subset_bam_by_region(align_bam, region, out_file)
        runner.run_fn("picard_index", align_bam)
    if has_aligned_reads(align_bam, region):
        variant_regions = config["algorithm"].get("variant_regions", None)
        realign_target_file = gatk_realigner_targets(runner, align_bam,
                                                     ref_file, dbsnp, region,
                                                     out_file, deep_coverage,
                                                     variant_regions)
        realign_bam = gatk_indel_realignment(runner, align_bam, ref_file,
                                             realign_target_file, region,
                                             out_file, deep_coverage)
        # No longer required in recent GATK (> Feb 2011) -- now done on the fly
        # realign_sort_bam = runner.run_fn("picard_fixmate", realign_bam)
        return realign_bam
    elif out_file:
        shutil.copy(align_bam, out_file)
        return out_file
    else:
        return align_bam
def path_reference_copy(copy_set, report=print):
    """
    Execute copying files of path_reference

    :arg copy_set: set of (from, to) pairs to copy.
    :type copy_set: set
    :arg report: function used for reporting warnings, takes a string argument.
    :type report: function
    """
    if not copy_set:
        return

    import os
    import shutil

    for file_src, file_dst in copy_set:
        if not os.path.exists(file_src):
            report("missing %r, not copying" % file_src)
        elif os.path.exists(file_dst) and os.path.samefile(file_src, file_dst):
            pass
        else:
            dir_to = os.path.dirname(file_dst)
            if not os.path.isdir(dir_to):
                os.makedirs(dir_to)
            shutil.copy(file_src, file_dst)
def upload_files(self, files):
    """ Upload files to Strava """
    # connect to Strava API
    client = Client(self.config.strava["access_token"])

    for fn in files:
        try:
            upload = client.upload_activity(open(self.src_path + fn, "r"), "fit")
            activity = upload.wait(30, 10)
            # if a file has been uploaded, copy it locally, as this ensures
            # we don't attempt to re-upload the same activity in future
            if activity:
                shutil.copy(self.src_path + fn, self.dest_path + fn)
                logging.debug("new file uploaded: {0}, {1} ({2})".format(
                    activity.name, activity.distance, fn))
        except exc.ActivityUploadFailed as error:
            print error
def load_config_file(self, ui_info):
    dialog = FileDialog(action="open", wildcard="*.ini")
    dialog.open()
    if dialog.return_code == OK:
        if dialog.path != ui_info.ui.context["object"].project_info.config_file:
            shutil.copy(dialog.path, ui_info.ui.context["object"].project_info.config_file)
        load_config(self.pipeline, ui_info.ui.context["object"].project_info.config_file)
def speak(voicedata, num):
    print(str((newsnum + 1) - num))
    tts = gTTS(text=voicedata, lang='hi')
    tts.save("E:\SandBox\Python\eradioJockey/temp/" + str(num + 1) + ".mp3")
    shutil.copy('E:\SandBox\Python\eradioJockey\\assets\\alert.wav', 'temp')
    newname = "ren E:\SandBox\Python\eradioJockey\\temp\\alert.wav" + ' ' + str(num) + 'a' + '.wav'
    os.system(newname)
def wrapper(*args, **kwargs):
    import shutil
    import glob

    dst = self.dst
    if "__tmp__" in dst:
        dst = dst.replace('__tmp__', 'tmp/%s/' % util.find_test_name().replace(':', '_'))
    src = self.src
    if isinstance(src, basestring):
        src = [self.src]
    files = []
    for pattern in src:
        files.extend(glob.glob(pattern))
    if len(src) > 1 or set(files) != set(src):
        # Glob expansion, "dst" is a prefix
        pairs = [(i, dst + i) for i in files]
    else:
        # Explicit source and destination names
        pairs = [(self.src, dst)]
    for src, dst in pairs:
        log.trace("Copying %r to %r" % (src, dst))
        shutil.copy(src, dst)
    try:
        return func(*args, **kwargs)
    finally:
        for _, dst in pairs:
            if os.path.exists(dst):
                log.trace("Removing %r" % dst)
                os.remove(dst)
def save_config_file(self, ui_info):
    dialog = FileDialog(action="save as", default_filename="config.ini")
    dialog.open()
    if dialog.return_code == OK:
        save_config(self.pipeline, ui_info.ui.context["object"].project_info.config_file)
        if dialog.path != ui_info.ui.context["object"].project_info.config_file:
            shutil.copy(ui_info.ui.context["object"].project_info.config_file, dialog.path)
def make_epub(self, articles, extra_context={}):
    tmpl = env.get_template('mytemplate.xhtml.j2')

    output_dir = "expanded"
    pathlib.Path(output_dir).mkdir(exist_ok=True)

    print("Rendering template")
    compiled_articles = []
    for index, record in enumerate(articles):
        tmpl_params = {
            'title': "Article %d" % index,
            'body': record.body
        }
        result = tmpl.render(tmpl_params)
        article_basename = "article-%.4d.xhtml" % index
        output_path = os.path.join(output_dir, article_basename)
        with open(output_path, 'w') as f:
            f.write(result)
        compilation_record = {
            'id': uuid.uuid4(),
            'basename': article_basename,
            'article_path': output_path
        }
        compiled_articles.append(compilation_record)
    print("Render done")

    print("Creating epub structure")
    pathlib.Path(self.epub_root_dir).mkdir(exist_ok=True)
    article_subdirectory = 'articles'
    article_target_dir = os.path.join(self.epub_root_dir, article_subdirectory)
    pathlib.Path(article_target_dir).mkdir(exist_ok=True)

    articles_context = {
        'articles': compiled_articles,
        'article_subdirectory': article_subdirectory
    }
    articles_context.update(extra_context)

    self.epub_root_render('main.opf.j2', 'main.opf', articles_context)
    self.epub_root_render('mimetype.j2', 'mimetype')
    self.epub_root_render('nav.xhtml.j2', 'nav.xhtml', articles_context)
    self.epub_root_render('container.xml', 'META-INF/container.xml')

    for article in compiled_articles:
        shutil.copy(article['article_path'], article_target_dir)
    print("Done")
def notlambda(this):
    cond.acquire()
    SQLSupport.tc_session.close()
    try:
        shutil.copy(sql_file, new_file)
    except Exception, e:
        plog("WARN", "Error moving sql file: " + str(e))
def copy_merge_file_to_repo(a_target_file_path):
    # Get the checksum for the new merged file.
    check_sum_name = check_sum("/".join(a_target_file_path))
    # Name of the project tree.
    project_tree_name = g_NAME_OF_PT_PATH.split("/")[-1]
    # Path of the repo, split on "/".
    a_temp_repo_path = g_NAME_OF_REPO.split("/")
    # Flag used to check for the project tree name.
    pt_directory_check = False
    # 0 ... length of a_target_file_path
    for i in range(len(a_target_file_path)):
        # Check whether the element of a_target_file_path at index i equals the project tree name.
        if a_target_file_path[i] == project_tree_name:
            pt_directory_check = True
        # Once the project tree name has been seen, keep appending path elements.
        if pt_directory_check:
            a_temp_repo_path.append(a_target_file_path[i])
    # Append the checksum name to a_temp_repo_path.
    a_temp_repo_path.append(check_sum_name)
    # Copy the merged file into our repo.
    shutil.copy("/".join(a_target_file_path), "/".join(a_temp_repo_path))
def copy_files(repo_file_path, target_file_path, prefix):
    """Copies the files from the repo to our project tree with a given prefix."""
    # Create an array of path elements from the given repo file path.
    a_file_path = repo_file_path.split("/")
    print "a_file_path: ", a_file_path, "\n"
    print
    # Set the file path name to be the path of our project tree with the file name appended.
    file_path_name = append_file_name(find_project_tree_owner(a_file_path))
    # print "file_path_name: ", file_path_name, "\n"
    print
    print "target_file_path: ", target_file_path, "\n"
    print
    # Check whether the prefix is empty.
    if prefix == "":
        # Copy the file from our file path to our project tree.
        shutil.copy(file_path_name, target_file_path)
    else:
        # The prefix is not empty.
        # Create an array of path elements from the split target_file_path.
        a_target_file_path = target_file_path.split("/")
        # Pop the last element since it is the AID.
        a_target_file_path.pop()
        # Set the last element to be the prefix plus the name of the file.
        a_target_file_path[-1] = prefix + a_target_file_path[-1]
        # Create the new conflict file from the repo in the project tree.
        shutil.copy(file_path_name, "/".join(a_target_file_path))
def move_mat_file(self):
    """TODO """
    output_dir = self._paths()[1]
    source = os.path.join(os.getcwd(), 'Model_internal.mat')
    destination = os.path.join(output_dir, 'result.mat')
    copy(source, destination)
def symlink(src, target):
    """ symlink file if possible """
    if 'win' in sys.platform:
        shutil.copy(src, target)
        os.chmod(target, stat.S_IRWXU)
    else:
        os.symlink(src, target)
def __synch_my_emoticons(self):
    self.__reset_progressbar()
    self._action_callback(_("Importing your emoticons..."))
    listing = os.listdir(os.path.join(self.__source_path, "custom_emoticons"))
    percent = e3.common.PercentDone(len(listing))
    actual_emoticons = 0.0
    emoticons_dir = os.path.join(self.__dest_path, self.__myuser, "emoticons")
    # try to create emoticons directory
    try:
        os.mkdir(emoticons_dir)
    except OSError:
        pass
    fconfig = open(os.path.join(emoticons_dir, "emoticons.info"), 'a')
    for infile in listing:
        if infile == "map":
            continue
        shutil.copy(os.path.join(self.__source_path, "custom_emoticons", infile),
                    os.path.join(self.__dest_path, self.__myuser, "emoticons", infile[-44:]))
        fconfig.write("%s %s\n" % (infile[:-45], infile[-44:]))  # write config
        actual_emoticons += 1.0
        if percent.notify(actual_emoticons):
            self._prog_callback(percent.current)
    fconfig.close()
def run(self):
    dst = self.config.get_dst_folder()
    cdv_dst = self.config.get_cordova_dst_folder(self.key)
    if os.path.exists(cdv_dst):
        names = os.listdir(cdv_dst)
        for name in names:
            if not name.startswith('.'):
                name = os.path.join(cdv_dst, name)
                if os.path.isfile(name):
                    os.remove(name)
                else:
                    shutil.rmtree(name)
    names = os.listdir(dst)
    for name in names:
        if not name.startswith('.'):
            src = os.path.join(dst, name)
            copy = os.path.join(cdv_dst, name)
            if os.path.isfile(src):
                shutil.copy(src, copy)
            else:
                shutil.copytree(src, copy, ignore=shutil.ignore_patterns('.*'))
    for r, d, f in os.walk(cdv_dst):
        for files in filter(lambda x: x.endswith('.html'), f):
            p = os.path.join(r, files)
            self.replace_cordova_tag(p)
    self.copy_icons(dst)
    self.copy_splash(dst)
def main():
    my_config = get_config()
    new_synapse_config = generate_configuration(
        my_config, get_zookeeper_topology(), get_all_namespaces()
    )

    with tempfile.NamedTemporaryFile() as tmp_file:
        new_synapse_config_path = tmp_file.name
        with open(new_synapse_config_path, 'w') as fp:
            json.dump(new_synapse_config, fp, sort_keys=True, indent=4, separators=(',', ': '))

        # Match permissions that puppet expects
        os.chmod(new_synapse_config_path, 0644)

        # Restart synapse if the config files differ
        should_restart = not filecmp.cmp(new_synapse_config_path, my_config['config_file'])

        # Always swap new config file into place. Our monitoring system
        # checks the config['config_file'] file age to ensure that it is
        # continually being updated.
        shutil.copy(new_synapse_config_path, my_config['config_file'])

        if should_restart:
            subprocess.check_call(SYNAPSE_RESTART_COMMAND)
def copy_file(input_file, output_dir):
    name, ext = os.path.basename(input_file).rsplit(".", 1)
    output_file = os.path.join(output_dir, os.path.basename(input_file))
    shutil.copy(input_file, output_file)
    return output_file
def build(self):
    """Package up a nuget file based on the default build"""
    if os.name != 'nt':
        print("Skipping Native Nuget package build, as this needs to be run on Windows")
        return

    net45_build_dir = join(self.Build_NugetDir, 'build', 'net45')
    os.makedirs(net45_build_dir, exist_ok=True)

    print('Copying Files')
    shutil.copy('./misc/GtkSharp.Native.targets', join(net45_build_dir, 'GtkSharp.' + self.arch + '.targets'))

    # Copy dlls
    dll_list = []
    dll_list += self.Get_Dlls_Native_GTK()
    dll_list += self.Get_Dlls_Native_GTK_Deps()

    for item in dll_list:
        src = join(self.MingwBinPath, item)
        srclist = iglob(src)
        for fname in srclist:
            f_basename, f_extension = os.path.splitext(ntpath.basename(fname))
            shutil.copy(fname, join(net45_build_dir, f_basename + '.dl_'))
def _add_vba_project(self):
    # Copy in a vbaProject.bin file.
    vba_project = self.workbook.vba_project
    vba_is_stream = self.workbook.vba_is_stream

    if not vba_project:
        return

    xml_vba_name = 'xl/vbaProject.bin'

    if not self.in_memory:
        # In file mode we just write or copy the VBA file.
        os_filename = self._filename(xml_vba_name)

        if vba_is_stream:
            # The data is in a byte stream. Write it to the target.
            os_file = open(os_filename, mode='wb')
            os_file.write(vba_project.getvalue())
            os_file.close()
        else:
            copy(vba_project, os_filename)
    else:
        # For in-memory mode we read the vba into a stream.
        if vba_is_stream:
            # The data is already in a byte stream.
            os_filename = vba_project
        else:
            vba_file = open(vba_project, mode='rb')
            vba_data = vba_file.read()
            os_filename = BytesIO(vba_data)
            vba_file.close()

        self.filenames.append((os_filename, xml_vba_name, True))
def install(self, force=False):
    jujuresources.install(self.resources['oozie'],
                          destination=self.dist_config.path('oozie'),
                          skip_top_level=True)
    self.dist_config.add_users()
    self.dist_config.add_dirs()
    self.dist_config.add_packages()

    # ExtJS v2.2 should go under self.dist_config.path('ext22')
    jujuresources.fetch('ext22')
    src = jujuresources.resource_path('ext22')
    dest = self.dist_config.path('ext22')
    shutil.copy(src, dest)

    # self.dist_config.path('ext22') should also contain all files under
    # self.dist_config.path('oozie') / 'libtools'
    src = self.dist_config.path('oozie') / 'libtools'
    src_files = os.listdir(src)
    for file_name in src_files:
        full_file_name = os.path.join(src, file_name)
        if os.path.isfile(full_file_name):
            shutil.copy(full_file_name, dest)

    self.setup_oozie_config()
    self.configure_oozie_hdfs()
    self.set_oozie_env()
    self.build_oozie_sharelib()
    self.build_oozie_war_file()
    self.build_oozie_db()
def _copy_info_files(self):
    files = ['README', 'copyright.txt', 'LICENSE.txt']
    for fname in files:
        shutil.copy(fname, self.temp_sources_dir)
        uiddict[pidlist[idx]] = [[studyidlist[idx], seriesinstlist[idx]]]
    else:
        uiddict[pidlist[idx]].append([studyidlist[idx], seriesinstlist[idx]])

nCTs = 0
for pid in os.listdir(filepath):
    if nCTs % 500 == 0:
        print pid
    if pid not in uiddict:
        continue
    for studyid in os.listdir(filepath + pid + '/'):
        for seriesid in os.listdir(filepath + pid + '/' + studyid):
            hasseriesid = False
            for sidlist in uiddict[pid]:
                if studyid == sidlist[0] and seriesid == sidlist[1]:
                    hasseriesid = True
            if not os.path.exists(dstpath + pid):
                os.mkdir(dstpath + pid)
            if not os.path.exists(dstpath + pid + '/' + studyid):
                os.mkdir(dstpath + pid + '/' + studyid)
            if not os.path.exists(dstpath + pid + '/' + studyid + '/' + seriesid):
                os.mkdir(dstpath + pid + '/' + studyid + '/' + seriesid)
            for fname in os.listdir(filepath + pid + '/' + studyid + '/' + seriesid):
                shutil.copy(filepath + pid + '/' + studyid + '/' + seriesid + '/' + fname,
                            dstpath + pid + '/' + studyid + '/' + seriesid + '/' + fname)
            nCTs += 1
print nCTs
def make_zips(api_dir, dist_dir):
    parser = OptionParser(description='Indigo Python libraries build script')
    parser.add_option('--suffix', '-s', help='archive suffix', default="")
    (args, left_args) = parser.parse_args()

    # Find indigo version
    from get_indigo_version import getIndigoVersion
    version = getIndigoVersion()

    if not os.path.exists(dist_dir):
        os.mkdir(dist_dir)

    archive_name = "./indigo-python-%s-%s" % (version, args.suffix)
    dest = os.path.join(dist_dir, archive_name)
    if os.path.exists(dest):
        shutil.rmtree(dest)
    os.mkdir(dest)
    os.mkdir(os.path.join(dest, 'indigo'))
    shutil.copy(os.path.join(api_dir, "python", 'indigo.py'), os.path.join(dest, 'indigo', '__init__.py'))
    shutil.copy(os.path.join(api_dir, "plugins", "renderer", "python", "indigo_renderer.py"), dest)
    shutil.copy(os.path.join(api_dir, "plugins", "renderer", "python", "indigo_renderer.py"), os.path.join(dest, 'indigo', 'renderer.py'))
    shutil.copy(os.path.join(api_dir, "plugins", "inchi", "python", "indigo_inchi.py"), dest)
    shutil.copy(os.path.join(api_dir, "plugins", "inchi", "python", "indigo_inchi.py"), os.path.join(dest, 'indigo', 'inchi.py'))
    shutil.copy(os.path.join(api_dir, "plugins", "bingo", "python", "bingo.py"), dest)
    shutil.copy(os.path.join(api_dir, "plugins", "bingo", "python", "bingo.py"), os.path.join(dest, 'indigo', 'bingo.py'))
    shutil.copytree(os.path.join(api_dir, "libs", "shared"), os.path.join(dest, "lib"),
                    ignore=shutil.ignore_patterns("*.lib"))
    shutil.copy(os.path.join(api_dir, "LICENSE"), dest)

    os.chdir(dist_dir)
    if os.path.exists(archive_name + ".zip"):
        os.remove(archive_name + ".zip")
    shutil.make_archive(archive_name, 'zip', os.path.dirname(archive_name), archive_name)
    shutil.rmtree(archive_name)
    full_archive_name = os.path.normpath(os.path.join(dist_dir, archive_name))
    print('Archive {}.zip created'.format(full_archive_name))
def make_wheels(api_dir, dest):
    if os.path.exists(dest):
        shutil.rmtree(dest)
    os.makedirs(dest)
    os.makedirs(os.path.join(dest, 'indigo'))
    shutil.copy(os.path.join(api_dir, "LICENSE"), dest)
    shutil.copy(os.path.join(api_dir, "python", "indigo.py"), os.path.join(dest, 'indigo', '__init__.py'))
    shutil.copy(os.path.join(api_dir, "plugins", "renderer", "python", "indigo_renderer.py"), os.path.join(dest, 'indigo', 'renderer.py'))
    shutil.copy(os.path.join(api_dir, "plugins", "inchi", "python", "indigo_inchi.py"), os.path.join(dest, 'indigo', 'inchi.py'))
    shutil.copy(os.path.join(api_dir, "plugins", "bingo", "python", "bingo.py"), os.path.join(dest, 'indigo', 'bingo.py'))
    shutil.copy(os.path.join(api_dir, "python", "setup.py"), dest)
    shutil.copytree(os.path.join(api_dir, "libs", "shared"), os.path.join(dest, 'indigo', "lib"),
                    ignore=shutil.ignore_patterns("*.lib"))

    cur_dir = os.path.abspath(os.curdir)
    os.chdir(dest)
    subprocess.check_call([sys.executable, 'setup.py', 'bdist_wheel', '--plat-name=win32'])
    subprocess.check_call([sys.executable, 'setup.py', 'bdist_wheel', '--plat-name=win_amd64'])
    subprocess.check_call([sys.executable, 'setup.py', 'bdist_wheel', '--plat-name=manylinux1_x86_64'])
    subprocess.check_call([sys.executable, 'setup.py', 'bdist_wheel', '--plat-name=manylinux1_i686'])
    subprocess.check_call([sys.executable, 'setup.py', 'bdist_wheel', '--plat-name=macosx_10_7_intel'])
    os.chdir(cur_dir)
if _4_DISTANCE == 1:
    print '\n', " Calculating distances to RDP... ", '\n'

    # 4.0 instantiate parallel workers
    pp = multiprocessing.Pool(processes=workers)
    shutil.rmtree(tempdir, ignore_errors=True)
    os.makedirs(tempdir)

    # 4.1 buffers and self-intersections
    selfintersect(db, tempdir, bw, rdp, algo, par1, par2)
    print '\n', " -- Self-Intersections: done! " '\n'

    # 4.2 make concave hulls
    grids = glob.glob(rawgis + 'grid_7*')
    for grid in grids:
        shutil.copy(grid, tempdir)
    concavehull(db, tempdir, sig, rdp, algo, par1, par2)
    print '\n', " -- Concave Hulls: done! " '\n'

    # 4.3 merge buffers & hulls, then push to DB
    merge_n_push(db, tempdir, bw, sig, rdp, algo, par1, par2)
    print '\n', " -- Merge and Push Back: done! " '\n'

    # 4.4 assemble coordinates for hull edges
    coords = comb_coordinates(tempdir).as_matrix()
    print '\n', " -- Assemble hull coordinates: done! " '\n'

    # 4.5 fetch BBLU, rdp, rdp centroids, & non-rdp in/out of hulls
    part_fetch_data = partial(fetch_data, db, tempdir, bw, sig, rdp, algo, par1, par2)
    matrx = pp.map(part_fetch_data, range(8, 0, -1))
    print '\n', " -- Data fetch: done! " '\n'
from buildsite import *
from tools import *

os.chdir(TranslationDirectory)

if os.path.isfile("log.log"):
    os.remove("log.log")
log = open("log.log", "w")

printLog(log, "")
printLog(log, "-------")
printLog(log, "--- Clean phrase diff")
printLog(log, "-------")
printLog(log, time.strftime("%Y-%m-%d %H:%MGMT", time.gmtime(time.time())))
printLog(log, "")

TranslationTools = findTool(log, ToolDirectories, TranslationToolsTool, ToolSuffix)

try:
    subprocess.call([TranslationTools, "clean_phrase_diff"])
except Exception, e:
    printLog(log, "<" + processName + "> " + str(e))
printLog(log, "")

log.close()

if os.path.isfile("e4_clean_phrase_diff.log"):
    os.remove("e4_clean_phrase_diff.log")
shutil.copy("log.log", "e4_clean_phrase_diff_" + time.strftime("%Y-%m-%d-%H-%M-GMT", time.gmtime(time.time())) + ".log")
shutil.move("log.log", "e4_clean_phrase_diff.log")

raw_input("PRESS ANY KEY TO EXIT")
samples.sort()

spk_name = 'ptbr'
lang = 'pt_br'
n_skip = 0
total_dur = 0

fw = open(os.path.join(output_path, 'metadata.csv'), 'w', encoding='utf-8')
i = 0
for l in samples:
    filename = l.split('=')[0]
    script = l[len(filename) + 2:].strip()
    wav_file = os.path.join(in_path, filename)
    if not os.path.exists(wav_file):
        print("Missing", wav_file)
        continue
    dur = librosa.get_duration(filename=wav_file)
    if not 1 <= dur <= 20:
        n_skip += 1
        continue
    total_dur += dur
    shutil.copy(wav_file, os.path.join(wav_output_path, '%s_%010d.wav' % (spk_name, i)))
    fw.write('|'.join(['%s_%010d' % (spk_name, i), script, spk_name, lang]) + '\n')
    i += 1

print("%d samples, %d skipped" % (len(samples) - n_skip, n_skip))
print("Total duration: %.2f h, %.2f min" % (total_dur / 60 / 60, total_dur / 60))
def copy_artifact(tgt, version, typename, suffix='', artifact_ext=''):
    genmap = self.context.products.get(typename)
    for basedir, jars in genmap.get(tgt).items():
        for artifact in jars:
            path = artifact_path(jar, version, suffix=suffix, artifact_ext=artifact_ext)
            shutil.copy(os.path.join(basedir, artifact), path)
def reorg_test(data_dir, test_dir, input_dir):
    mkdir_if_not_exist([data_dir, input_dir, "test", "unknown"])
    for img in os.listdir(os.path.join(data_dir, test_dir)):
        shutil.copy(os.path.join(data_dir, test_dir, img),
                    os.path.join(data_dir, input_dir, "test", "unknown"))
def _copy_snap_to_dest_dir(self):
    for fname in glob.glob(os.path.join(self.facts.temp_dir, '*.snap')):
        shutil.copy(fname, self.facts.build_dir_linux)
def transcode_release(flac_dir, output_dir, output_format, max_threads=None):
    '''
    Transcode a FLAC release into another format.
    '''
    flac_dir = os.path.abspath(flac_dir)
    output_dir = os.path.abspath(output_dir)
    flac_files = locate(flac_dir, ext_matcher('.flac'))

    # check if we need to resample
    resample = needs_resampling(flac_dir)

    # check if we need to encode
    if output_format == 'FLAC' and not resample:
        # XXX: if output_dir is not the same as flac_dir, this may not
        # do what the user expects.
        if output_dir != os.path.dirname(flac_dir):
            print "Warning: no encode necessary, so files won't be placed in", output_dir
        return flac_dir

    # make a new directory for the transcoded files
    #
    # NB: The cleanup code that follows this block assumes that
    # transcode_dir is a new directory created exclusively for this
    # transcode. Do not change this assumption without considering the
    # consequences!
    transcode_dir = get_transcode_dir(flac_dir, output_dir, output_format, resample)
    if not os.path.exists(transcode_dir):
        os.makedirs(transcode_dir)
    else:
        raise TranscodeException('transcode output directory "%s" already exists' % transcode_dir)

    # To ensure that a terminated pool subprocess terminates its
    # children, we make each pool subprocess a process group leader,
    # and handle SIGTERM by killing the process group. This will
    # ensure there are no lingering processes when a transcode fails
    # or is interrupted.
    def pool_initializer():
        os.setsid()

        def sigterm_handler(signum, frame):
            # We're about to SIGTERM the group, including us; ignore
            # it so we can finish this handler.
            signal.signal(signal.SIGTERM, signal.SIG_IGN)
            pgid = os.getpgid(0)
            os.killpg(pgid, signal.SIGTERM)
            sys.exit(-signal.SIGTERM)

        signal.signal(signal.SIGTERM, sigterm_handler)

    try:
        # create transcoding threads
        #
        # Use Pool.map() rather than Pool.apply_async() as it will raise
        # exceptions synchronously. (Don't want to waste any more time
        # when a transcode breaks.)
        #
        # XXX: actually, use Pool.map_async() and then get() the result
        # with a large timeout, as a workaround for a KeyboardInterrupt in
        # Pool.join(). c.f.,
        # http://stackoverflow.com/questions/1408356/keyboard-interrupts-with-pythons-multiprocessing-pool?rq=1
        pool = multiprocessing.Pool(max_threads, initializer=pool_initializer)
        try:
            result = pool.map_async(pool_transcode,
                                    [(filename,
                                      os.path.dirname(filename).replace(flac_dir, transcode_dir),
                                      output_format)
                                     for filename in flac_files])
            result.get(60 * 60 * 12)
            pool.close()
        except:
            pool.terminate()
            raise
        finally:
            pool.join()

        # copy other files
        allowed_extensions = ['.cue', '.gif', '.jpeg', '.jpg', '.log', '.md5', '.nfo', '.pdf', '.png', '.sfv', '.txt']
        allowed_files = locate(flac_dir, ext_matcher(*allowed_extensions))
        for filename in allowed_files:
            new_dir = os.path.dirname(filename).replace(flac_dir, transcode_dir)
            if not os.path.exists(new_dir):
                os.makedirs(new_dir)
            shutil.copy(filename, new_dir)

        return transcode_dir

    except:
        # Cleanup.
        #
        # ASSERT: transcode_dir was created by this function and does
        # not contain anything other than the transcoded files!
        shutil.rmtree(transcode_dir)
        raise
def extract_step(self):
    """Unpacking of files is just copying Maple binary installer to build dir."""
    for f in self.src:
        shutil.copy(f['path'], os.path.join(self.builddir, f['name']))
        f['finalpath'] = self.builddir
def copy_static_file(source, target):
    sourcefile = os.path.join(os.path.dirname(inspect.getfile(netdissect)), source)
    shutil.copy(sourcefile, target)
files_path = os.path.join(src_root, src_folder)
files = os.listdir(files_path)
for file in files:
    if fnmatch.fnmatch(file, '*_color.png'):
        color_images.append(file)

train, test = train_test_split(color_images, train_size=0.6)
test, val = train_test_split(test, test_size=0.5)

""" Move Files """
# train files
dst_path = os.path.join(dst_root, 'train')
for file in train:
    temp_src_root = os.path.join(src_root, file[0:15])

    temp_src = os.path.join(temp_src_root, file)
    temp_dst = os.path.join(dst_path, file)
    shutil.copy(temp_src, temp_dst)  # copy *_color.png file

    temp_src = os.path.join(temp_src_root, file.replace('_color.png', '_depth.npy'))
    temp_dst = os.path.join(dst_path, file.replace('_color.png', '_depth.npy'))
    shutil.copy(temp_src, temp_dst)  # copy *_depth.npy file

    temp_src = os.path.join(
        temp_src_root, file.replace('_color.png', '_depth_visualization.png'))
    temp_dst = os.path.join(
        dst_path, file.replace('_color.png', '_depth_visualization.png'))
    shutil.copy(temp_src, temp_dst)  # copy *_depth_visualization.png file

# val files
dst_path = os.path.join(dst_root, 'val')
for file in val:
        newfn1l = '{:08d}_1l_{}.{}'.format(i, lstr, fext)
        newfn2r = '{:08d}_2r_{}.{}'.format(i, rstr, fext)
        eachdir = 'RLR{:08d}'.format(copied)  # Right/Left/Right
        dstfn = os.path.join(dstdir, eachdir)
        fu.makedir(dstfn)
        if (i + 1) % 10 == 0:
            valtxt.write(eachdir + '\n')
        else:
            traintxt.write(eachdir + '\n')
        dstfn0r = os.path.join(dstfn, newfn0r)
        dstfn1l = os.path.join(dstfn, newfn1l)
        dstfn2r = os.path.join(dstfn, newfn2r)
        shutil.copy(fnr, dstfn0r)
        shutil.copy(fnl, dstfn1l)
        shutil.copy(fnr, dstfn2r)
        TxtFileList = glob.glob(os.path.join(lpath, '*.txt'))
        for fn in TxtFileList:
            shutil.copy(fn, dstfn)
    tfcnt += copied

traintxt.close()
valtxt.close()
print("Total {} files were copied to {}".format(tfcnt, dstdir))
# Make a smaller subset of a bigger dataset.
#
# Made adhoc for Plenary Sessions of the Parliament of Finland, Downloadable Version 1 dataset.

import os
import random
import glob
import shutil

PICKS = 80
DEST_DIR = 'data/edus80/'
DATA_DIR = 'data/eduskunta/'

# Take PICKS samples from each speaker, skipping speakers that don't have enough samples.
for d in os.listdir(DATA_DIR):
    # Each .wav file is accompanied by a .txt file.
    if len(os.listdir(DATA_DIR + d)) < 2 * PICKS:
        print('skipping ' + d)
        continue

    newd = DEST_DIR + os.path.basename(d)
    os.makedirs(newd)
    #print('mkdir ' + newd)

    picks = random.sample(glob.glob(DATA_DIR + d + '/*wav'), k=PICKS)
    for f in picks:
        base, _ = os.path.splitext(f)
        #print('copy ' + DATA_DIR + d + base + ' to ' + newd)
        shutil.copy(base + '.wav', newd)
        shutil.copy(base + '.txt', newd)
import time
import os
import datetime
import random
import glob
#import MySQLdb
import csv
import shutil
from pytz import timezone

a = ((int)(time.time())) - 3600
PATH = "/root/multisensor"
#os.system("cp "+PATH+"/OZW_Log.txt "+PATH+"/Log/OZW_Log_"+str(HOUR)+"_"+str(MIN))
#print 'File Copied'

while True:
    now = datetime.datetime.now(timezone('Asia/Kolkata'))
    DAY = now.day
    MONTH = now.month
    HOUR = now.hour
    MIN = now.minute

    source_f = PATH + "/OZW_Log.txt"
    dest_f = PATH + "/Log/OZW_Log_" + str(DAY) + "_" + str(MONTH) + "_" + str(HOUR) + "_" + str(MIN)
    shutil.copy(source_f, dest_f)
    time.sleep(1)

    os.chdir(PATH)
    os.system(PATH + "/HomeZwave")
    time.sleep(1)
def copy_resource(resource):
    try:
        os.remove(INVTWEAKS_BIN + resource)
    except OSError:
        pass
    shutil.copy(INVTWEAKS_SRC + resource, INVTWEAKS_BIN + resource)
def run(self, dssatexe, crop_threshold=0.1):
    """Runs DSSAT simulation."""
    log = logging.getLogger(__name__)
    exe = dssatexe.split("/")[-1]
    startdt = date(self.startyear, self.startmonth, self.startday)
    self.readVICSoil()
    modelpaths = OrderedDict()
    pwd = os.getcwd()
    geoms = self._readShapefile()
    cropfract = self._calcCroplandFract()
    simstartdt = None
    for geom in geoms:
        gid, lon, lat = geom
        c = np.argmin(np.sqrt((lat - self.lat) ** 2 + (lon - self.lon) ** 2))
        # use the soil depths from the nearest VIC pixel to the centroid
        depths = np.array(self.depths[c])
        if cropfract[gid] >= crop_threshold:
            year, month, day, weather, sm, vlai = self.readVICOutput(gid, depths)
            vicstartdt = date(year[0], month[0], day[0])
            plantdates = self._planting(lat, lon)
            planting = [p for p in plantdates if p >= startdt and
                        p <= date(self.endyear, self.endmonth, self.endday)]
            if not planting:
                planting = [plantdates[np.argmax([(t - startdt).days for t in plantdates
                                                  if (t - startdt).days < 0])]]
            for pi, pdt in enumerate(planting[:1]):
                modelpath = os.path.abspath("{0}/{1}_{2}_{3}".format(self.path, lat, lon, pi))
                modelpaths[(gid, pi)] = modelpath
                os.mkdir(modelpath)
                os.mkdir(modelpath + "/ENKF_Results")
                shutil.copy(dssatexe, modelpath)
                distutils.dir_util.copy_tree("{0}/dssat".format(rpath.data), modelpath)
                if pdt > date(pdt.year, 1, 8):
                    simstartdt = pdt - timedelta(7)
                else:
                    simstartdt = pdt
                dz, smi = self.writeControlFile(modelpath, sm, depths, simstartdt, gid,
                                                self.lat[c], self.lon[c], pdt, None, None)
                if simstartdt < vicstartdt:
                    log.error("No input data for DSSAT corresponding to starting date {0}. Need to run VIC for these dates. Exiting...".format(
                        simstartdt.strftime('%Y-%m-%d')))
                    sys.exit()
                ti0 = [i for i in range(len(year)) if simstartdt == date(year[i], month[i], day[i])][0]
                if pi + 1 < len(planting):
                    ti1 = [i for i in range(len(year)) if (planting[pi + 1] - timedelta(10)) == date(year[i], month[i], day[i])][0]
                else:
                    ti1 = [i for i in range(len(year)) if (planting[pi] + timedelta(min(180, len(year) - (planting[pi] - date(self.startyear - 1, 12, 31)).days))) == date(year[i], month[i], day[i])][0]  # len(year) - 1
                self.writeWeatherFiles(modelpath, self.name, year, month, day, weather,
                                       self.elev[c], self.lat[c], self.lon[c], ti0, ti1)
                self.writeSoilMoist(modelpath, year, month, day, smi, dz)
                self.writeLAI(modelpath, gid, viclai=vlai)
                self.writeConfigFile(modelpath, smi.shape[1], simstartdt,
                                     date(year[ti1], month[ti1], day[ti1]))
                log.info("Wrote DSSAT for planting date {0}".format(pdt.strftime("%Y-%m-%d")))
    os.chdir(pwd)
    p = multiprocessing.Pool(multiprocessing.cpu_count())
    for modelpath in modelpaths.values():
        p.apply_async(_run1, (modelpath, exe, self.assimilate))
    p.close()
    p.join()
    os.chdir(pwd)
    if simstartdt:
        self.save(modelpaths, simstartdt)
    else:
        log.warning("No crop areas found!")
    shutil.rmtree(self.path)
os.makedirs(outputdir)
print "{:<20s}{:<s}".format("outputdir", outputdir)

# parameter file
if namespace.paramfile is None:
    allparams = default_parameters()
    paramfile = "preprocess_images.yml"
    with open(paramfile, 'w') as fout:
        yaml.dump(allparams, fout)
else:
    paramfile = namespace.paramfile.name
    allparams = yaml.load(namespace.paramfile)

dest = os.path.join(outputdir, os.path.basename(paramfile))
if (os.path.realpath(dest) != os.path.realpath(paramfile)):
    shutil.copy(paramfile, dest)
paramfile = dest

# check that tiff files exist
tiff_files = []
if namespace.images is None:
    namespace.images = []
for f in namespace.images:
    test = check_tiff_file(f)
    if test:
        tiff_files.append(f)
ntiffs = len(tiff_files)
if ntiffs == 0:
    raise ValueError("No tiff detected!")

# copy metadata
#
# A function that generates files that match a given filename pattern

import os
import shutil
import fnmatch
import json

# Set config
with open('config.json') as json_data_file:
    data = json.load(json_data_file)

src = data["src"]
dst = data["dst"]

# Uncomment if your directory has an escape character
# src = "source/path"
# dst = "destination/path"

def gen_find(filepat, top):
    for path, dirlist, filelist in os.walk(top):
        for name in fnmatch.filter(filelist, filepat):
            yield os.path.join(path, name)

# You can change *.* to *.xlsx if you are looking for xlsx files.
# This allows you to drill down to what you are looking for.
if __name__ == '__main__':
    filesToMove = gen_find("*.*", src)
    for name in filesToMove:
        shutil.copy(name, dst)
def codegen():
    """ Fetches a schema from tatorapp.com if one does not exist, then use
        openapi-generator to generate openapi code from it.
    """
    # Retrieve schema if it does not exist.
    if not os.path.exists(SCHEMA_FILENAME):
        response = requests.get("https://www.tatorapp.com/schema")
        assert response.status_code == 200
        with open(SCHEMA_FILENAME, 'wb') as f:
            f.write(response.content)

    # Remove any oneOf entries from the schema, as they are not handled
    # well by openapi generator.
    with open(SCHEMA_FILENAME, 'r') as f:
        schema = yaml.load(f)
    schema = remove_oneof(schema)
    with open(SCHEMA_FILENAME, 'w') as f:
        yaml.dump(schema, f, Dumper=NoAliasDumper)

    # Get the git SHA ID.
    cmd = ['git', 'rev-parse', 'HEAD']
    git_rev = subprocess.check_output(cmd).strip().decode('utf-8')

    # Generate code using openapi generator docker image.
    pwd = os.path.dirname(os.path.abspath(__file__))
    cmd = [
        'docker', 'run', '-it', '--rm',
        '-v', f"{pwd}:/pwd",
        '-v', f"{pwd}/out:/out",
        'openapitools/openapi-generator-cli:v4.3.1',
        'generate',
        '-c', f'/pwd/{CONFIG_FILENAME}',
        '-i', f'/pwd/{SCHEMA_FILENAME}',
        '-g', 'python',
        '-o', f'/out/tator-py-{git_rev}',
        '-t', '/pwd/templates',
    ]
    subprocess.run(cmd, check=True)

    # Remove the schema.
    os.remove(SCHEMA_FILENAME)

    # Copy relevant directories into openapi.
    out_dir = os.path.join(pwd, 'tator/openapi')
    os.makedirs(out_dir, exist_ok=True)
    for subpath in ['README.md', 'tator_openapi', 'docs']:
        src = f'{pwd}/out/tator-py-{git_rev}/{subpath}'
        dst = os.path.join(out_dir, f'{subpath}')
        if os.path.isfile(src):
            shutil.copy(src, dst)
        else:
            if os.path.exists(dst):
                shutil.rmtree(dst)
            shutil.copytree(src, dst)

    pwd = os.path.dirname(os.path.abspath(__file__))
    # need to delete from within docker
    cmd = [
        'docker', 'run', '-it', '--rm',
        '-v', f"{pwd}/out:/out",
        'openapitools/openapi-generator-cli:v4.3.1',
        'rm', '-fr', '/out/*'
    ]
    subprocess.run(cmd, check=True)
    print(cmd)
    subprocess.run(cmd, cwd=directory, check=True)


if __name__ == '__main__':
    args = parse_args()
    if not args.cab.startswith("http"):
        local = args.cab
    with tempfile.TemporaryDirectory(prefix='fwupd') as directory:
        if local:
            if not os.path.exists(local):
                error("%s doesn't exist" % local)
            if not os.path.isdir(local):
                shutil.copy(local, directory)
            else:
                for root, dirs, files in os.walk(local):
                    for f in files:
                        shutil.copy(os.path.join(root, f), directory)
        else:
            download_cab_file(directory, args.cab)
        if not args.disable_snap_download:
            download_snap(directory, args.snap_channel)
        if not args.disable_flatpak_download:
            download_flatpak(directory)
        if args.minimum:
            with open(os.path.join(directory, "minimum"), "w") as wfd:
def converter(mooc_folder, content_folder=None):
    """ Do converting job. """
    # Mooc content location
    if content_folder is None:
        content_folder = mooc_folder

    # copying figures
    target = mooc_folder / 'generated'
    figures_path = target / 'html/edx/figures'
    os.makedirs(figures_path, exist_ok=True)
    for figure in content_folder.glob('w*/figures/*'):
        shutil.copy(figure, figures_path)
    html_folder = target / 'html/edx'

    # Temporary locations
    dirpath = Path(tempfile.mkdtemp()) / 'course'
    skeleton = mooc_folder / 'edx_skeleton'
    shutil.copytree(skeleton, dirpath)

    # Loading data from toc
    chapters = YAML().load(Path(mooc_folder / 'toc.yml').read_text())

    # Convert the syllabus and save it in /tabs
    (skeleton / 'tabs' / 'syllabus.html').write_text(
        exportHtml.from_filename(content_folder / 'syllabus.ipynb')[0])

    course_xml_path = dirpath / 'course.xml'
    xml_course = ElementTree.fromstring(course_xml_path.read_text())

    for chapter_number, chapter in enumerate(chapters):
        chapter_xml = SubElement(xml_course, 'chapter', attrib=dict(
            url_name=f"sec_{chapter_number:02}",
            display_name=chapter['title'],
            start=START_DATE,
        ))

        for section_number, section in enumerate(chapter['sections']):
            section_url = f"subsec_{chapter_number:02}_{section_number:02}"
            sequential_xml = SubElement(
                chapter_xml,
                'sequential',
                attrib={
                    'url_name': section_url,
                    'display_name': section['title'],
                    'graded': ('true' if chapter_number else 'false'),
                },
            )

            if section['title'] == 'Assignments':
                sequential_xml.attrib['format'] = "Research"
            elif chapter_number:
                sequential_xml.attrib['format'] = "Self-check"

            units = split_into_units(content_folder / (section['location'] + '.ipynb'))

            for i, unit in enumerate(units):
                vertical_url = section_url + f'_{i:02}'
                # add vertical info to sequential_xml
                vertical = SubElement(sequential_xml, 'vertical', attrib=dict(
                    url_name=vertical_url,
                    display_name=unit.metadata.name,
                ))

                unit_output = convert_unit(unit)
                for (j, out) in enumerate(unit_output):
                    out_url = vertical_url + f"_out_{j:02}"
                    if isinstance(out, str):
                        # adding html subelement
                        SubElement(vertical, 'html', attrib=dict(
                            url_name=out_url,
                            display_name=unit.metadata.name,
                            filename=out_url))

                        html_path = html_folder / (out_url + '.html')
                        html_path.write_text(out)

                        html_path = dirpath / 'html' / (out_url + '.html')
                        html_path.write_text(
                            IFRAME_TEMPLATE.format(id=out_url, url=url, js=js))
                    else:
                        # adding video subelement
                        vertical.append(out)
                        if 'url_name' not in out.attrib:
                            out.attrib['url_name'] = out_url

    course_xml_path.write_text(
        ElementTree.tostring(xml_course, encoding='unicode'))

    # Creating tar
    tar_filepath = target / 'import_to_edx.tar.gz'
    tar = tarfile.open(name=tar_filepath, mode='w:gz')
    tar.add(dirpath, arcname='')
    tar.close()

    # Cleaning
    shutil.rmtree(dirpath)
def copy_to_dst_dir(src_path, dst_dir):
    name_in_obj_rel = os.path.basename(src_path)
    dst_path = os.path.join(dst_dir, name_in_obj_rel)
    shutil.copy(src_path, dst_path)
    dst = sys.argv[4]
    if dst[-1] != '/':
        dst += '/'
else:
    print(
        'Usage: '
        'python3 generate_tx_sm_sat_files.py '
        'count /path/to/seed.tle /path/to/seed.dat /path/to/dst/'
    )
    exit()

# read the seed TLE file
tle_header = ''
tle_line_1 = ''
tle_line_2 = ''
with open(tle, 'r') as infile:
    tle_header = infile.readline()
    tle_line_1 = infile.readline()
    tle_line_2 = infile.readline()

# write out .dat files with IDs
norad_id = int(tle_line_1[2:7])
for i in range(0, cnt):
    # calculate ID
    sat_id = norad_id * 10000 + i
    id_str = str(sat_id).zfill(10)
    # generate file name
    filename = 'tx-sm-sat-' + id_str + '.dat'
    # copy seed file with ID in file name
    shutil.copy(dat, dst + filename)
def copy_files(self, files, source_dir="files/"):
    for file_ in files:
        shutil.copy(os.path.join(source_dir, file_), self.working_dir)
def img_removal_by_embed(root_dir, output_dir, pb_path, node_dict, threshold=0.7,
                         type='copy', GPU_ratio=None, dataset_range=None):
    # ----var
    img_format = {"png", 'jpg', 'bmp'}
    batch_size = 64

    # ----collect all folders
    dirs = [obj.path for obj in os.scandir(root_dir) if obj.is_dir()]
    if len(dirs) == 0:
        print("No sub-dirs in ", root_dir)
    else:
        # ----dataset range
        if dataset_range is not None:
            dirs = dirs[dataset_range[0]:dataset_range[1]]

        # ----model init
        sess, tf_dict = model_restore_from_pb(pb_path, node_dict, GPU_ratio=GPU_ratio)
        tf_input = tf_dict['input']
        tf_phase_train = tf_dict['phase_train']
        tf_embeddings = tf_dict['embeddings']
        model_shape = [None, 160, 160, 3]
        feed_dict = {tf_phase_train: False}

        # ----tf setting for calculating distance
        with tf.Graph().as_default():
            tf_tar = tf.placeholder(dtype=tf.float32, shape=tf_embeddings.shape[-1])
            tf_ref = tf.placeholder(dtype=tf.float32, shape=tf_embeddings.shape)
            tf_dis = tf.sqrt(tf.reduce_sum(tf.square(tf.subtract(tf_ref, tf_tar)), axis=1))
            # ----GPU setting
            config = tf.ConfigProto(
                log_device_placement=True,
                allow_soft_placement=True,  # automatically fall back to a supported device when the requested one is unavailable
            )
            config.gpu_options.allow_growth = True
            sess_cal = tf.Session(config=config)
            sess_cal.run(tf.global_variables_initializer())

        # ----process each folder
        for dir_path in dirs:
            paths = [file.path for file in os.scandir(dir_path) if file.name.split(".")[-1] in img_format]
            len_path = len(paths)
            if len_path == 0:
                print("No images in ", dir_path)
            else:
                # ----create the sub folder in the output folder
                save_dir = os.path.join(output_dir, dir_path.split("\\")[-1])
                if not os.path.exists(save_dir):
                    os.makedirs(save_dir)

                # ----calculate embeddings
                ites = math.ceil(len_path / batch_size)
                embeddings = np.zeros([len_path, tf_embeddings.shape[-1]], dtype=np.float32)
                for idx in range(ites):
                    num_start = idx * batch_size
                    num_end = np.minimum(num_start + batch_size, len_path)
                    # ----read batch data
                    batch_dim = [num_end - num_start]  # [64]
                    batch_dim.extend(model_shape[1:])  # [64, 160, 160, 3]
                    batch_data = np.zeros(batch_dim, dtype=np.float32)
                    for idx_path, path in enumerate(paths[num_start:num_end]):
                        img = cv2.imread(path)
                        if img is None:
                            print("Read failed:", path)
                        else:
                            img = cv2.resize(img, (model_shape[2], model_shape[1]))
                            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
                            batch_data[idx_path] = img
                    batch_data /= 255  # norm
                    feed_dict[tf_input] = batch_data
                    embeddings[num_start:num_end] = sess.run(tf_embeddings, feed_dict=feed_dict)

                # ----calculate ave distance of each image
                feed_dict_2 = {tf_ref: embeddings}
                ave_dis = np.zeros(embeddings.shape[0], dtype=np.float32)
                for idx, embedding in enumerate(embeddings):
                    feed_dict_2[tf_tar] = embedding
                    distance = sess_cal.run(tf_dis, feed_dict=feed_dict_2)
                    ave_dis[idx] = np.sum(distance) / (embeddings.shape[0] - 1)

                # ----remove or copy images
                for idx, path in enumerate(paths):
                    if ave_dis[idx] > threshold:
                        print("path:{}, ave_distance:{}".format(path, ave_dis[idx]))
                        if type == "copy":
                            save_path = os.path.join(save_dir, path.split("\\")[-1])
                            shutil.copy(path, save_path)
                        elif type == "move":
                            save_path = os.path.join(save_dir, path.split("\\")[-1])
                            shutil.move(path, save_path)
##### dumping plots
####################################################################
if args.doPlot:
    fileName = sampleToFit.nominalFit
    fitType = 'nominalFit'
    if args.altSig:
        fileName = sampleToFit.altSigFit
        fitType = 'altSigFit'
    if args.altBkg:
        fileName = sampleToFit.altBkgFit
        fitType = 'altBkgFit'

    plottingDir = '%s/plots/%s/%s' % (outputDirectory, sampleToFit.name, fitType)
    if not os.path.exists(plottingDir):
        os.makedirs(plottingDir)
    shutil.copy('etc/inputs/index.php.listPlots', '%s/index.php' % plottingDir)

    for ib in range(len(tnpBins['bins'])):
        if (args.binNumber >= 0 and ib == args.binNumber) or args.binNumber < 0:
            tnpRoot.histPlotter(fileName, tnpBins['bins'][ib], plottingDir)

    print ' ===> Plots saved in <======='
    # print 'localhost/%s/' % plottingDir

####################################################################
##### dumping egamma txt file
####################################################################
if args.sumUp:
    sampleToFit.dump()
    info = {
elif testCase == 'REST':
    srcpath = 'C:/_RT/rtData/rtQA_REST/RS_Run_1_src'
    dstpath = 'C:/_RT/rtData/rtQA_REST/RS_Run_1'
    pause_in_sec = 1.97
elif testCase == 'TASK':
    srcpath = 'C:/_RT/rtData/rtQA_TASK/TASK_Run_1_src'
    dstpath = 'C:/_RT/rtData/rtQA_TASK/TASK_Run_1'
    pause_in_sec = 1.97

if delete_files:
    files = glob.glob(dstpath + '/*')
    for f in files:
        os.remove(f)

if fns is None:
    filelist = os.listdir(srcpath)
else:
    filelist = []
    for fn in fns:
        fname = "{0}{1:03d}.dcm".format(mask, fn)
        filelist.append(fname)

for filename in filelist:
    src = os.path.join(srcpath, filename)
    if os.path.isfile(src):
        dst = os.path.join(dstpath, filename)
        shutil.copy(src, dst)
        print(filename)
        sleep(pause_in_sec)  # seconds