def formatFailure(self, test, err):
     '''rename the testcase directory to denote the failure'''
     try:
         os.renames(self.screenshots_directory(test), self.screenshots_directory(test, failed=True))
     except OSError as e:
         # happens e.g. when a test case failed without taking any screenshots
         log.warning("got '%s' formating failure: '%s'" % (e, err))
Example 2
 def build(self, platform='windows'):
     log('Building from %s' % self.build_root)
     # prepare "dist" directory
     log('Cleaning old directories...')
     if os.path.isdir(os.path.join(red5_root, 'dist.java5')):
         remove_tree(os.path.join(red5_root, 'dist.java5'))
     if os.path.isdir(os.path.join(red5_root, 'dist.java6')):
         remove_tree(os.path.join(red5_root, 'dist.java6'))
     
     log('Compiling Java 1.5 version...')
     self.compile(self.ant_cmd, os.path.join(red5_root, 'build.xml'), '1.5', 'dist-installer')
     os.renames(os.path.join(red5_root, '.', 'dist'), os.path.join(red5_root, '.', 'dist.java5'))
     
     log('Compiling Java 1.6 version...')
     os.environ['JAVACMD'] = os.path.join(JAVA6_HOME, 'bin', 'java.exe')
     self.compile(self.ant_cmd, os.path.join(red5_root, 'build.xml'), '1.6', 'dist-installer')
     os.renames(os.path.join(red5_root, '.', 'dist'), os.path.join(red5_root, '.', 'dist.java6'))
             
     # build installer
     script = os.path.join(self.build_root, 'red5.nsi')
     cmd = NSIS_CMD
     if ' ' in cmd and not cmd[:1] == '"':
         cmd = '"' + cmd + '"'
     log('Compiling installer, this may take some time...')
     os.system('%s %s' % (cmd, script))
     log('Installer written')
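
The quoting around NSIS_CMD above exists only because os.system takes a single shell string. A list-based subprocess call sidesteps the issue entirely; a sketch assuming the same NSIS_CMD and script values as in the example:

import subprocess

# No manual quoting needed: each list element is passed as one argument,
# so a compiler path containing spaces is handled correctly.
subprocess.call([NSIS_CMD, script])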
Example 3
def main():
	parser = OptionParser( version="%prog 1.0", usage='Usage: %prog [options]' )

	parser.set_defaults( **{
		'verbose': False,
	} )

	parser.add_option( '-v', '--verbose', action='store_true' )

	opts, args = parser.parse_args()

	pat_dot = re.compile( r'\s+\.', re.UNICODE )

	for fn in os.listdir( '.' ):

		tags, rest = path.extract_tags( fn )

		dst = pat_dot.sub( '.', ' '.join( rest ) )

		p = ''.join( map( lambda x: '[' + x + ']', tags ) )

		dst = p and p + ' ' + dst or dst

		if os.path.exists( dst ):
			print( "Existed, skiped:", dst )
		else:
			try:
				os.renames( fn , dst )
			except OSError:
				print( "Failed:", dst )
Example 4
def delFile():
	
	# dictionary to store the hashes and file names from the test files
	testHash = dict()
	# open the file chooser dialogue
	fileName = askopenfilename()
	# reset the hasher to ensure matches for identical files
	hasher = hashlib.md5()

	# get the file, hash it and store both in dictionary
	with open(fileName, 'rb') as afile:
		buf = afile.read()
		hasher.update(buf)
		testHash[fileName]=hasher.hexdigest()
		afile.close()

	if (checkFile(testHash)):
		#remove both the file name/path and the resulting hash from the text file
		result = tkMessageBox.askquestion("Delete", "Are You Sure You Want\nTo Remove This File?", icon='warning')
		if result == 'yes':
			with open("workfile") as f:
			    with open("temp", "w") as f1:
				for line in f:
					for name, sig in testHash.items():
					    if not sig in line:
						f1.write(line)			
			tkMessageBox.showinfo("File Delete", "File Deleted")
			try:
				os.remove("workfile")
				os.renames("temp", "workfile")
			except OSError, e:  ## if failed, report it back to the user ##
				print ("Error: %s - %s." % (e.filename,e.strerror))
Example 5
    def fixlinks(self):
        '''Fix broken links.'''
        if self.tofix:
            print('\nFixing links.')
            for sourcepath, linkpath in self.tofix.iteritems():
                print('FIX %s -> %s' % (linkpath, sourcepath))

                # Instantiate link name.
                linkname = os.path.basename(linkpath)

                # Standardize relative path.
                relative_path = self.standardize_path(sourcepath)

                # Join relative link with unique name.
                relative_link = os.path.join(os.path.dirname(relative_path), linkname)
                # Final link path.
                final_link = os.path.join(self.linked_media, relative_link)

                # Rename file cleaning empty folders.
                os.renames(linkpath, final_link)
                # Remove file to create new updated link.
                os.remove(final_link)
                # Create updated symbolic link.
                os.symlink(sourcepath, final_link)

            if self.check_link(final_link):
                # Needed for the comparison in the add_new function.
                self.healthy_links.append(sourcepath)
            else:
                print('Problems in the new link: %s' % final_link)
        else:
            print('\nNo link to fix...')
Example 6
 def rename_dir(self, src, dstname):
     # get paths
     absolute_src = self.get_dir(src)
     
     if absolute_src:
         absolute_src = absolute_src[0:-1]               # strip trailing slash
         folder, name = os.path.split(absolute_src)      # find folder name
         absolute_dst = folder + '/' + dstname
     
         # move
         os.rename(absolute_src, absolute_dst)
         
         # move cache
         old_cache = self.get_dir(src, cache=True)
         old_cache = old_cache[0:-1]
         folder, name = os.path.split(old_cache)
         new_cache = folder + '/' + dstname
         os.renames(old_cache, new_cache)
         
         # normalize dst folder
         if src[-1:] == '/':
             src = src[0:-1]
         relpath, name = os.path.split(src)
         return relpath + '/' + dstname 
         
     return False
Example 7
def exportBookmarkstoSailfishBrowser(bookmarks, sPath):
    bookmark_list = []

    home = os.environ['HOME']
    path = '/.local/share/'
    browser = 'org.sailfishos/sailfish-browser/'
    timestamp = str(datetime.datetime.now().timestamp())
    backup = sPath + '/bookmarks.json.bak' + timestamp

    try:
        bookmark_obj = json.loads(bookmarks)
        with open(home + path + browser + 'bookmarks.json', 'r') as f:
            exist_bookmarks = json.load(f)
        exist_urls = []
        for ebm in exist_bookmarks:
            bookmark_list.append(ebm)
            exist_urls.append(ebm['url'])

        for bm in bookmark_obj:
            if bm['url'] not in exist_urls:
                bookmark = {
                    'favicon': 'icon-launcher-bookmark',
                    'hasTouchIcon': False,
                    'title': bm['title'],
                    'url': bm['url']
                }
                bookmark_list.append(bookmark)

        os.renames(home + path + browser + 'bookmarks.json', backup)
        with open(home + path + browser + 'bookmarks.json', 'w') as f:
            json.dump(bookmark_list, f)
    except:
        pyotherside.send(traceback.format_exc())
Example 8
	def save_schedule(self):
		rows=self.schedule.rowCount()
		path=os.path.join(self.userconfdir, 'schedule.csv')
		temppath=os.path.join(self.userconfdir, 'schedule_modified.csv')
		f=open(temppath, "w")
		planner = csv.writer(f)
		planner.writerow(["Enabled","Date","Hour","Event Type","Text"])
		for i in range(rows):
			if self.schedule.item(i,0).checkState()==QtCore.Qt.Checked:
				first_column="True"
			else:
				first_column="False"
			second_column=self.schedule.item(i,1).data(QtCore.Qt.UserRole) #need format like this: %m/%d/%Y
			if isinstance(second_column,QtCore.QDate):
				#print second_column
				second_column=second_column.toString("MM/dd/yyyy")
			else:
				second_column=self.schedule.item(i,1).data(QtCore.Qt.EditRole)
			third_column=self.schedule.item(i,2).data(QtCore.Qt.UserRole) #need format like this: %H:%M

			if isinstance(third_column,QtCore.QTime):
				third_column=third_column.toString("HH:mm")
			else:
				third_column=self.schedule.item(i,2).data(QtCore.Qt.EditRole)
			fourth_column=self.schedule.item(i,3).data(QtCore.Qt.EditRole)
			fifth_column=self.schedule.item(i,4).data(QtCore.Qt.EditRole)
			planner.writerow([first_column,second_column,third_column,fourth_column,fifth_column])
		f.close()
		os.remove(path)
		os.renames(temppath, path)
Example 9
 def move_dir(self, src, dst):
     # get paths
     absolute_src = self.get_dir(src)
     absolute_dst = self.get_dir(dst)
     
     if absolute_src and absolute_dst and absolute_src != absolute_dst:
         absolute_src = absolute_src[0:-1]               # strip trailing slash
         _discard, name = os.path.split(absolute_src)    # find folder name
         absolute_dst = absolute_dst + name
     
         # move
         os.rename(absolute_src, absolute_dst)
         
         # move cache
         old_cache = self.get_dir(src, cache=True)
         old_cache = old_cache[0:-1]
         new_cache = self.get_dir(dst, cache=True)
         new_cache = new_cache + name
         os.renames(old_cache, new_cache)
         
         # normalize dst folder
         if dst[-1:] != '/':
             dst = dst + '/'
             
         return dst + name 
         
     return False
Example 10
def share_files(srcdir, dstdir, interpreter, options):
    """Try to move as many files from srcdir to dstdir as possible."""
    for i in os.listdir(srcdir):
        fpath1 = join(srcdir, i)
        if not options.no_ext_rename and splitext(i)[-1] == '.so':
            # try to rename extension here as well (in :meth:`scan` info about
            # Python version is gone)
            version = interpreter.parse_public_dir(srcdir)
            # if version is True it means it's unversioned dist-packages dir
            if version and version is not True:
                # note that if ver is empty, default Python version will be used
                fpath1_orig = fpath1
                new_name = interpreter.check_extname(i, version)
                if new_name:
                    fpath1 = join(srcdir, new_name)
                    if exists(fpath1):
                        log.warn('destination file exists, '
                                 'cannot rename %s to %s', fpath1_orig, fpath1)
                    else:
                        log.info('renaming %s to %s', fpath1_orig, fpath1)
                        os.renames(fpath1_orig, fpath1)
                        i = new_name
        fpath2 = join(dstdir, i)
        if not isdir(fpath1) and not exists(fpath2):
            # do not rename directories here - all .so files have to be renamed first
            os.renames(fpath1, fpath2)
            continue
        if isdir(fpath1):
            share_files(fpath1, fpath2, interpreter, options)
        elif cmpfile(fpath1, fpath2, shallow=False):
            os.remove(fpath1)
        # XXX: check symlinks

    if exists(srcdir) and not os.listdir(srcdir):
        os.rmdir(srcdir)
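
share_files drops a source file when a byte-identical copy already exists at the destination; the cmpfile call matches the signature of filecmp.cmp. The deduplication step in isolation, as a sketch:

import filecmp
import os

def drop_if_duplicate(src, dst):
    # shallow=False forces a byte-by-byte comparison rather than a
    # stat-based one, as in the example above.
    if os.path.exists(dst) and filecmp.cmp(src, dst, shallow=False):
        os.remove(src)
        return True
    return False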
Example 11
File: CSYD.py Project: caijun/PFD
def downloadFile(url, localfile):
    global previous
    # only a real browser can deal with the alert: sorry, please login first
    # chrome browser
    chromeOptions = webdriver.ChromeOptions()
    prefs = {"download.default_directory" : os.getcwd()}
    chromeOptions.add_experimental_option("prefs", prefs)
    # add --test-type argument to disable the "unsupported flag" prompt 
    chromeOptions.add_argument("--test-type")
    browser = webdriver.Chrome(chrome_options = chromeOptions)
    browser.set_page_load_timeout(20)
    browser.get(url)
    try:
        WebDriverWait(browser, 3).until(EC.alert_is_present())
 
        alert = browser.switch_to_alert()
        alert.accept()
#         print("alert accepted")
    except TimeoutException:
        print("no alert")
         
    # login in cnki
    browser.find_element_by_id("username").send_keys("thlib")
    browser.find_element_by_id("password").send_keys("thlib")
    browser.find_element_by_id("ImageButton1").click()
    
    if waituntil(5):
        newest = max(glob.iglob("*.[Xx][Ll][Ss]"), key = os.path.getctime)
        print("download: " + newest)
        os.renames(newest, localfile)
        previous = localfile
        print("======")

    # browser.close()
    browser.quit()
Example 12
 def commit(self, target_prefix):
     if self._dry_run:
         return
     # Start a database transaction to add the files.
     txn = self._db.begin_add(self._volume, self._import_timestamp)
     # Write each new file into the database.
     for au_file in self._all_au_files:
         txn.add(au_file)
     ufid_prefix = ufid.ufid_prefix(self._volume, self._import_timestamp)
     # Strip off trailing "/"
     if ufid_prefix.endswith("/"):
         ufid_prefix = ufid_prefix[:-1]
     tmp_dir = os.path.join(self._tmp_prefix, ufid_prefix)
     real_dir = os.path.join(target_prefix, ufid_prefix)
     cprint("*** Committing %d albums / %d bytes" % (
         self.num_albums, self.total_size_in_bytes))
     cprint("*** tmp_dir=%s" % tmp_dir)
     cprint("*** real_dir=%s" % real_dir)
     sys.stdout.flush()
     os.renames(tmp_dir, real_dir)
     txn.commit()
     # Write out a list of source files that were just committed.
     out = open(os.path.join(real_dir, "_source_files"), "w")
     for path in sorted(af.path for af in self._all_au_files):
         out.write(path)
         out.write("\n")
     out.close()
Example 13
def cleanup_optionals(mod):
    print("")
    try:
        for dir_name in mod:
            #userconfig requires special handling since it is not a PBO source folder.
            if (dir_name == "userconfig"):
                destination = os.path.join(work_drive,dir_name)
            else:
                destination = os.path.join(module_root,dir_name)

            print("Cleaning {}".format(destination))

            try:
                file_name = "meu_{}.pbo".format(dir_name)
                src_file_path = os.path.join(release_dir, "@meu","addons",file_name)
                dst_file_path = os.path.join(release_dir, "@meu","optionals",file_name)

                sigFile_name = file_name +"."+ key_name + ".bisign"
                src_sig_path = os.path.join(release_dir, "@meu","addons",sigFile_name)
                dst_sig_path = os.path.join(release_dir, "@meu","optionals",sigFile_name)

                if (os.path.isfile(src_file_path)):
                    #print("Preserving {}".format(file_name))
                    os.renames(src_file_path,dst_file_path)
                if (os.path.isfile(src_sig_path)):
                    #print("Preserving {}".format(sigFile_name))
                    os.renames(src_sig_path,dst_sig_path)
            except FileExistsError:
                print_error(file_name + " already exists")
                continue
            shutil.rmtree(destination)

    except:
        print_error("Cleaning Optionals Failed")
        raise
Example 14
 def move(self, oldName, newName):
     destination = os.path.join(self.distDir, self.packageDir)        
     oldName = os.path.abspath(os.path.join(destination, oldName))
     newName = os.path.abspath(os.path.join(destination, newName))
     if not os.path.exists(oldName):
         raise FilesNotFound(oldName, self.distDir)
     os.renames(oldName, newName)
Example 15
    def plotHEO(self):
        """Plot predicted HEO satellites"""
        pp2.getPlist(tmpath=self.tmpath)            
        with open(os.path.join(self.tmpath, 'plist.out')) as f:
            heo_list = [line for line in f if line.split()[1][:2].upper() in
                        ['GL', 'CO', 'GI', 'ET', 'GA', 'GP']]
        utcnow = dt.datetime.utcnow()
        epoch = utcnow.second + 60 * (utcnow.minute + 60 * utcnow.hour)
        gazs, gels = [], []
        self.npass = []
        self.sname = []
        for line in heo_list:
            npass = line[1:4]
            self.npass.append(npass)
            self.sname.append(line[5:15])
            function = os.path.join(self.tmpath, 'function.' + npass)
            # Retrieve function unless we have it already and it's not too old
            if os.path.exists(function):
                if (time.time() - os.path.getctime(function) > 5 * 3600):
                    sp.ftpit('function.' + npass, path='pred')
                    os.renames('function.' + npass, function)
            else:
                sp.ftpit('function.' + npass, path='pred')
                os.renames('function.' + npass, function)
            gazelr = fp.azelsat(npass, epoch, fun_path=self.tmpath)
            gazs.append(gazelr[0, 0])
            gels.append(90 - gazelr[0, 1] * 180 / np.pi)

        self.heos_line[0].set_data(gazs, gels)
Example 16
    def replaceWith(self, path):
        app_dir = Env.get('app_dir')

        # Get list of files we want to overwrite
        self.deletePyc(only_excess = False)
        existing_files = []
        for root, subfiles, filenames in os.walk(app_dir):
            for filename in filenames:
                existing_files.append(os.path.join(root, filename))

        for root, subfiles, filenames in os.walk(path):
            for filename in filenames:
                fromfile = os.path.join(root, filename)
                tofile = os.path.join(app_dir, fromfile.replace(path + os.path.sep, ''))

                if not Env.get('dev'):
                    try:
                        os.remove(tofile)
                    except:
                        pass

                    try:
                        os.renames(fromfile, tofile)
                        try:
                            existing_files.remove(tofile)
                        except ValueError:
                            pass
                    except Exception, e:
                        log.error('Failed overwriting file: %s' % e)
Example 17
def update(job):
    paths = []
    
    main_file = job.files[0]
    
    job_full_path = main_file.filepath

        
    path, ext = os.path.splitext(job_full_path)
    
    new_path = path + ".remap" + ext 
    
    # Disable for now. Partial repath should work anyway
    #all = main_file.filepath != main_file.original_path
    all = False 
    
    for rfile in job.files[1:]:
        if all or rfile.original_path != rfile.filepath:
            paths.append(rfile.original_path)
            paths.append(rfile.filepath)
    
    # Only update if needed
    if paths:        
        process = subprocess.Popen([BLENDER_PATH, "-b", "-noaudio", job_full_path, "-P", __file__, "--", new_path] + paths, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        process.wait()
        
        os.renames(job_full_path, job_full_path + ".bak")
        os.renames(new_path, job_full_path)
Example 18
def createArchive(archive_name, src_dir_path):

    archive_name        = os.path.realpath( archive_name )
    src_dir_path        = os.path.realpath( src_dir_path )
    files_to_pack       = []
    archive_name_tmp    = os.path.join(
        os.path.dirname( archive_name ),
        'tmp_{}'.format( os.path.basename( archive_name )),
    )

    for name in os.listdir( src_dir_path ):

        current_path    = os.path.join( src_dir_path, name )
        real_path       = os.path.realpath( current_path )
        rel_path        = os.path.relpath( current_path, src_dir_path )

        files_to_pack.append( ( real_path, rel_path, ))

    if not files_to_pack:
        warning( 'Error: no files to pack, directory [{}] is empty'.format( src_dir_path ) )
        sys.exit( CMAKEPM_ERRORCODE_NO_FILES_TO_PACK )



    with contextlib.closing( tarfile.TarFile.open( archive_name_tmp, 'w:gz' )) as tf:

        for real_path, rel_path in files_to_pack:

            tf.add( real_path, arcname=rel_path )

    os.renames( archive_name_tmp, archive_name )
Example 19
def render_and_rename(renderpath,newpath,rendername,newname):
 RPR_Main_OnCommand(41823,0) #add project to render queue, using the most recent render settings
 RPR_Main_OnCommand(41207,0) #render all queued renders
 if newname not in os.listdir(newpath):
  os.renames(renderpath + os.sep + rendername,newpath + os.sep + newname)
 if newname in os.listdir(newpath):
  RPR_ShowConsoleMsg(newname+" is generated in "+newpath+"\n\n")
Example 20
 def set_output_file(self, produced_file):
     digest = self.get_hash(produced_file)    
     output_filename = self.get_filename(digest)
     os.chdir(self.workspace)
     os.renames(produced_file, output_filename)
     self.store_metadata(digest)
     return digest, os.path.join(self.workspace, output_filename)
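
set_output_file is a small content-addressed store: the file is renamed to a name derived from its own hash. A self-contained sketch of the idea, using SHA-256 and a hypothetical store directory:

import hashlib
import os

def store_by_digest(src_path, store_dir):
    # Hash the file in chunks so large files are not loaded into memory.
    h = hashlib.sha256()
    with open(src_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    dest = os.path.join(store_dir, h.hexdigest())
    os.renames(src_path, dest)  # also creates store_dir if missing
    return dest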
Example 21
def execBench(cmd, repeats, out_dir,
	      client_cmd="", client_terminate_server=False,
	      init_env_cmd=""):
	mkdir_p(out_dir.replace('<port>',''))
	for i in range(int(repeats)):
		print cmd
		sys.stderr.write("        PROGRESS: %5d/%d\r" % (i+1, int(repeats)))
		with open('%s/output.%d' % (out_dir, i), 'w', 102400) as log_file:
			if init_env_cmd:
				os.system(init_env_cmd)
			proc = subprocess.Popen(cmd, stdout=log_file, stderr=subprocess.STDOUT, shell=True, executable=bash_path, bufsize=102400)
			if client_cmd:
				time.sleep(1)
				with open('%s/client.%d' % (out_dir, i), 'w', 102400) as client_log_file:
					client_proc = subprocess.Popen(client_cmd, stdout=client_log_file, stderr=subprocess.STDOUT, shell=True, executable=bash_path, bufsize=102400)
					client_proc.wait()
				if client_terminate_server:
					os.killpg(proc.pid, signal.SIGTERM)
				proc.wait()
				time.sleep(2)
			else:
				try:
					proc.wait()
				except KeyboardInterrupt as k:
					try:
						os.killpg(proc.pid, signal.SIGTERM)
					except:
						pass
					raise k
		try:
			os.renames('out', '%s/out.%d' % (out_dir, i))
		except OSError:
			pass
Example 22
    def title(self, newtitle):
        if self.readonly:
            raise ReadonlyException

        oldtitle = self.title
        oldpath = self.path
        oldsubpath = self.subpath

        if oldtitle == newtitle:
            return

        # Check for a duplicate page title; only the letter case
        # is allowed to change when renaming a page
        if not self.canRename(newtitle):
            raise DuplicateTitle

        newpath = os.path.join(os.path.dirname(oldpath), newtitle)
        os.renames(oldpath, newpath)
        self._title = newtitle

        WikiPage._renamePaths(self, newpath)
        self.root.registry.rename_page_sections(oldsubpath, self.subpath)

        self.root.onPageRename(self, oldsubpath)
        self.root.onPageUpdate(self, change=events.PAGE_UPDATE_TITLE)
Example 23
    def download_stable(self):
        if not os.path.exists(CHARMS_DIR):
            os.makedirs(CHARMS_DIR)

        r = requests.get(CHARM_STABLE_URL, verify=True)
        tarball_name = os.path.join(CHARMS_DIR, 'stable.tar.gz')
        with open(tarball_name, mode='wb') as tarball:
            tarball.write(r.content)

        try:
            subprocess.check_output(['tar', '-C', CHARMS_DIR, '-zxf',
                                     tarball_name], stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as e:
            log.warning("error untarring: rc={} out={}".format(e.returncode,
                                                               e.output))
            raise e

        # filename includes commit hash at end:
        srcpat = os.path.join(CHARMS_DIR,
                              'Ubuntu-Solutions-Engineering-'
                              'glance-simplestreams-sync-charm-*')
        srcs = glob.glob(srcpat)
        if len(srcs) != 1:
            log.warning("error finding downloaded stable charm."
                        " got {}".format(srcs))
            raise Exception("Could not find downloaded stable charm.")

        src = srcs[0]
        dest = os.path.join(CHARMS_DIR, CURRENT_DISTRO,
                            'glance-simplestreams-sync')
        if os.path.exists(dest):
            shutil.rmtree(dest)
        os.renames(src, dest)
Example 24
def fix_file_name(uid, fname, message, ctime):
    from dobackup import get_filename_by_date
    archive_path = get_filename_by_date(uid, ctime)
    if not fname.endswith(archive_path):
        os.renames(fname, archive_path)
        return archive_path
    return fname
Example 25
 def _remove_extra_herschel_directory(self, file_and_directory_name,
                                      directory_path):
     full_directory_path = os.path.abspath(directory_path)
     file_name = file_and_directory_name[file_and_directory_name.index("/") + 1:]
     os.renames(os.path.join(full_directory_path, file_and_directory_name),
                os.path.join(full_directory_path, file_name))
     return file_name
Example 26
 def rename_jda_filename(self,src_o):
     jda_dt_str = src_o.dt.strftime('%Y%m%d')
     jda_fn = jda_fn_prefix + src_o.site + '_' + jda_dt_str + jda_fn_suffix
     jda_f = os.path.join(arc_base_p,jda_fn)
     os.renames(src_o.file,jda_f)
     src_o.file = jda_f
     src_o.filepath,src_o.filename = os.path.split(jda_f)
Example 27
    def write(self,content):
        
        filePath=self.getPath()
        
        if filePath==None or len(filePath)<2:
            print "Robustness.......[",filePath,"] Invalid filepath"
            return

        isFileRenamed=False
        tempFileName=filePath+".part"

        if os.path.exists(tempFileName):
            if os.path.exists(filePath):
                os.remove(filePath)
            os.renames(tempFileName, filePath)
        
        if os.path.exists(filePath):
            try:
                os.rename(filePath,tempFileName)                
                fc=open(filePath,"w")
                fc.write(str(content))                
                fc.close()
                isFileRenamed=True

            except IOError,strErr:
                self.createLog("IO ERROR["+str(strErr.code)+"] reason:"+str(strErr.reason) + "\n")
            except:
Example 28
def manifest_mover(debug_json_file, manifest_json_file):
    if os.path.isfile(debug_json_file):
        add_all_urls(debug_json_file)
        os.remove(manifest_json_file)
        os.renames(debug_json_file, manifest_json_file)
    else:
        add_all_urls(manifest_json_file)
Example 29
def rename_using(path,ftype,pre="",post=""):
	"""
	Rename files according to creation date in meta-data.
	For SLRs which can take multiple pictures per second,
	an extra index is suffixed to prevent overwriting.
	Takes the folder path, the specific filetype to be renamed,
	and optional prefix and postfix strings.
	"""
	filelist = subprocess.Popen("find " + path + " -name \"*."+ftype+"\"" , shell=True,stdout=subprocess.PIPE, cwd=None).stdout.read().strip('\n').split('\n')
	j=1
	for i in filelist:
		try:
			statinfo = os.stat(i)
		except OSError,msg:
			print msg
			show_help()
			sys.exit(2)
		# Added prefix here, tricky
		newname= i.rsplit('/',1)[0] +'/'+ pre + time.strftime("%Y-%m-%d %H.%M.%S", time.localtime(statinfo.st_mtime))
		# Added postfix here
		tempnewname = newname + post +  ".%s"%(ftype)
		print tempnewname
		if(not os.path.exists(tempnewname)):
			os.renames(i,tempnewname)
			j=1
		else:
			tempnewname = newname + str(j) + post + ".%s"%(ftype)
			os.renames(i,tempnewname)
			j+=1
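
rename_using shells out to find and parses its output; the same traversal can be done portably with os.walk. A sketch under the same naming scheme (mtime-based name, index suffix on collisions):

import os
import time

def rename_by_mtime(path, ftype, pre="", post=""):
    for root, _dirs, files in os.walk(path):
        for name in files:
            if not name.endswith("." + ftype):
                continue
            full = os.path.join(root, name)
            stamp = time.strftime("%Y-%m-%d %H.%M.%S",
                                  time.localtime(os.stat(full).st_mtime))
            target = os.path.join(root, pre + stamp + post + "." + ftype)
            j = 1
            while os.path.exists(target):  # suffix an index on collisions
                target = os.path.join(
                    root, "%s%s%d%s.%s" % (pre, stamp, j, post, ftype))
                j += 1
            os.renames(full, target)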
Example 30
    def remove(self):
        """
        Удалить страницу
        """
        if self.readonly:
            raise ReadonlyException

        oldpath = self.path
        tempname = self._getTempName(oldpath)
        oldSelectedPage = self.root.selectedPage

        try:
            os.renames(oldpath, tempname)
            shutil.rmtree(tempname)
        except (OSError, shutil.Error):
            raise IOError

        self.root.onStartTreeUpdate(self.root)
        self._removePageFromTree(self)

        # If the selected page was removed
        if (oldSelectedPage is not None and
                (oldSelectedPage == self or self.isChild(oldSelectedPage))):
            # New selected page to replace the old one
            newselpage = oldSelectedPage
            while newselpage.parent is not None and newselpage.isRemoved:
                newselpage = newselpage.parent

            # If we ended up at the tree root
            if newselpage.parent is None:
                newselpage = None

            self.root.selectedPage = newselpage

        self.root.onEndTreeUpdate(self.root)
Example 31
def rename_dir(old, new):
    try:
        os.renames(old, new)
    except IOError as ex:
        raise IOError("Cannot rename the folder '{}' to '{}'".format(old, new))
Example 32
    def update(self):
        """
        Downloads the latest source tarball from github and installs it over the existing version.
        """

        tar_download_url = 'https://github.com/mozvip/Sick-Beard/tarball/'+version.SICKBEARD_VERSION
        sb_update_dir = os.path.join(sickbeard.PROG_DIR, 'sb-update')
        version_path = os.path.join(sickbeard.PROG_DIR, 'version.txt')

        # retrieve file
        try:
            logger.log(u"Downloading update from "+tar_download_url)
            data = urllib2.urlopen(tar_download_url)
        except (IOError, URLError):
            logger.log(u"Unable to retrieve new version from "+tar_download_url+", can't update", logger.ERROR)
            return False

        download_name = data.geturl().split('/')[-1].split('?')[0]

        tar_download_path = os.path.join(sickbeard.PROG_DIR, download_name)

        # save to disk
        f = open(tar_download_path, 'wb')
        f.write(data.read())
        f.close()

        # extract to temp folder
        logger.log(u"Extracting file "+tar_download_path)
        tar = tarfile.open(tar_download_path)
        tar.extractall(sb_update_dir)
        tar.close()

        # delete .tar.gz
        logger.log(u"Deleting file "+tar_download_path)
        os.remove(tar_download_path)

        # find update dir name
        update_dir_contents = [x for x in os.listdir(sb_update_dir) if os.path.isdir(os.path.join(sb_update_dir, x))]
        if len(update_dir_contents) != 1:
            logger.log(u"Invalid update data, update failed: "+str(update_dir_contents), logger.ERROR)
            return False
        content_dir = os.path.join(sb_update_dir, update_dir_contents[0])

        # walk temp folder and move files to main folder
        for dirname, dirnames, filenames in os.walk(content_dir): #@UnusedVariable
            dirname = dirname[len(content_dir)+1:]
            for curfile in filenames:
                old_path = os.path.join(content_dir, dirname, curfile)
                new_path = os.path.join(sickbeard.PROG_DIR, dirname, curfile)

                if os.path.isfile(new_path):
                    os.remove(new_path)
                os.renames(old_path, new_path)

        # update version.txt with commit hash
        try:
            ver_file = open(version_path, 'w')
            ver_file.write(self._newest_commit_hash)
            ver_file.close()
        except IOError, e:
            logger.log(u"Unable to write version file, update not complete: "+ex(e), logger.ERROR)
            return False
Example 33
 def rename(self, new, parents=False):
     if parents:
         os.renames(self, new)
     else:
         os.rename(self, new)
Example 34
 def renames(self, new):
     os.renames(self, new)
Example 35
    def update(self):
        """
        Downloads the latest source tarball from server and installs it over the existing version.
        """

        tar_download_url = 'https://git.sickrage.ca/SiCKRAGE/sickrage/repository/archive.tar?ref=master'

        try:
            # prepare the update dir
            sr_update_dir = os.path.join(sickrage.PROG_DIR, 'sr-update')

            if os.path.isdir(sr_update_dir):
                sickrage.app.log.info("Clearing out update folder " + sr_update_dir + " before extracting")
                shutil.rmtree(sr_update_dir)

            sickrage.app.log.info("Creating update folder " + sr_update_dir + " before extracting")
            try:
                os.makedirs(sr_update_dir)
            except OSError as e:
                sickrage.app.log.warning("Unable to create update folder " + sr_update_dir + ': ' + str(e))
                return False

            # retrieve file
            sickrage.app.log.info("Downloading update from " + repr(tar_download_url))
            tar_download_path = os.path.join(sr_update_dir, 'sr-update.tar')
            WebSession().download(tar_download_url, tar_download_path)

            if not os.path.isfile(tar_download_path):
                sickrage.app.log.warning(
                    "Unable to retrieve new version from " + tar_download_url + ", can't update")
                return False

            if not tarfile.is_tarfile(tar_download_path):
                sickrage.app.log.warning(
                    "Retrieved version from " + tar_download_url + " is corrupt, can't update")
                return False

            # extract to sr-update dir
            sickrage.app.log.info("Extracting file " + tar_download_path)
            tar = tarfile.open(tar_download_path)
            tar.extractall(sr_update_dir)
            tar.close()

            # delete .tar.gz
            sickrage.app.log.info("Deleting file " + tar_download_path)
            os.remove(tar_download_path)

            # find update dir name
            update_dir_contents = [x for x in os.listdir(sr_update_dir) if
                                   os.path.isdir(os.path.join(sr_update_dir, x))]
            if len(update_dir_contents) != 1:
                sickrage.app.log.warning("Invalid update data, update failed: " + str(update_dir_contents))
                return False
            content_dir = os.path.join(sr_update_dir, update_dir_contents[0])

            # walk temp folder and move files to main folder
            sickrage.app.log.info("Moving files from " + content_dir + " to " + sickrage.PROG_DIR)
            for dirname, __, filenames in os.walk(content_dir):
                dirname = dirname[len(content_dir) + 1:]
                for curfile in filenames:
                    old_path = os.path.join(content_dir, dirname, curfile)
                    new_path = os.path.join(sickrage.PROG_DIR, dirname, curfile)

                    # Avoid DLL access problem on WIN32/64
                    # These files needing to be updated manually
                    # or find a way to kill the access from memory
                    if curfile in ('unrar.dll', 'unrar64.dll'):
                        try:
                            os.chmod(new_path, stat.S_IWRITE)
                            os.remove(new_path)
                            os.renames(old_path, new_path)
                        except Exception as e:
                            sickrage.app.log.debug("Unable to update " + new_path + ': ' + str(e))
                            os.remove(old_path)  # Trash the updated file without moving in new path
                        continue

                    if os.path.isfile(new_path):
                        os.remove(new_path)
                    os.renames(old_path, new_path)

        except Exception as e:
            sickrage.app.log.error("Error while trying to update: {}".format(e))
            return False

        # Notify update successful
        Notifiers.mass_notify_version_update(self.get_newest_version)

        # install requirements
        self.install_requirements()

        return True
Example 36
def ablog_deploy(
    website,
    message=None,
    github_pages=None,
    push_quietly=False,
    push_force=False,
    github_token=None,
    github_is_http=True,
    github_url=None,
    repodir=None,
    **kwargs,
):

    confdir = find_confdir()
    conf = read_conf(confdir)

    github_pages = github_pages or getattr(conf, "github_pages", None)

    github_url = github_url or getattr(conf, "github_url", None)
    github_url += ":"

    website = website or os.path.join(confdir, getattr(conf, "ablog_builddir", "_website"))

    tomove = glob.glob(os.path.join(website, "*"))
    if not tomove:
        print("Nothing to deploy, build first.")
        return

    if github_pages:

        if repodir is None:
            repodir = os.path.join(confdir, f"{github_pages}.github.io")
        if os.path.isdir(repodir):
            os.chdir(repodir)
            run("git pull", echo=True)
        else:
            run(
                "git clone "
                + ("https://github.com/" if github_is_http else github_url)
                + "{0}/{0}.github.io.git {1}".format(github_pages, repodir),
                echo=True,
            )

        git_add = []
        for tm in tomove:
            for root, dirnames, filenames in os.walk(website):
                for filename in filenames:
                    fn = os.path.join(root, filename)
                    fnnew = fn.replace(website, repodir)
                    try:
                        os.renames(fn, fnnew)
                    except OSError:
                        if os.path.isdir(fnnew):
                            shutil.rmtree(fnnew)
                        else:
                            os.remove(fnnew)
                        os.renames(fn, fnnew)

                    git_add.append(fnnew)
        print("Moved {} files to {}.github.io".format(len(git_add), github_pages))

        os.chdir(repodir)

        run("git add -f " + " ".join(['"{}"'.format(os.path.relpath(p)) for p in git_add]), echo=True)
        if not os.path.isfile(".nojekyll"):
            open(".nojekyll", "w")
            run("git add -f .nojekyll")

        # Check to see if anything has actually been committed
        result = run("git diff --cached --name-status HEAD")
        if not result.stdout:
            print("Nothing changed from last deployment")
            return

        commit = 'git commit -m "{}"'.format(message or "Updates.")
        if push_force:
            commit += " --amend"
        run(commit, echo=True)

        if github_token:
            with open(os.path.join(repodir, ".git/credentials"), "w") as out:
                out.write("https://{}:@github.com".format(os.environ[github_token]))
            run('git config credential.helper "store --file=.git/credentials"')
        push = "git push"
        if push_quietly:
            push += " -q"
        if push_force:
            push += " -f"
        push += " origin master"
        run(push, echo=True)

    else:
        print("No place to deploy.")
Example 37
 def renames(self, new):
     os.renames(self, new)
     return self
Example 38
# -*- coding: UTF-8 -*-

# import os
# path = 'ch4_training_images/'  # rename the pics
# # get all files in this directory and store them in a list
# files= os.listdir(path)
# n = 0
# for name in files:
#     print name
#     oldname=path+name
#     print 'oldname is : ' + oldname
#     rename = path+'gt_'+name.split('.')[1] + '.jpg'
#     print 'rename is : ' + rename
#     os.renames(oldname, rename)
#     print(name, '======>', rename)

import os
path = 'ch4_training_localization_transcription_gt/'  # rename the txt files
# get all files in this directory and store them in a list
files = os.listdir(path)
n = 0
for name in files:
    print name
    oldname = path + name
    print 'oldname is : ' + oldname
    rename = path + 'gt_' + name.split('.')[1] + '.txt'
    print 'rename is : ' + rename
    os.renames(oldname, rename)
    print(name, '======>', rename)
Example 39
def main():
    options = _process_args()
    src = options.directory
    local_txt = os.path.join(src, 'local.txt')
    if not os.path.isdir(os.path.join(src, 'cache')):
        raise Exception('%s doesn\'t look like a daemon directory. '
                        'Try --directory.' % src)

    if 'LOCKSS_IPADDR' in os.environ: ipAddr = os.environ['LOCKSS_IPADDR']
    else: ipAddr = '127.0.0.1'

    if 'LOCKSS_UI_PORT' in os.environ:
        port = os.environ['LOCKSS_UI_PORT']
    else:
        if not os.path.isfile(local_txt):
          raise Exception('LOCKSS_UI_PORT is not set but there is no '
                          '%s' % (local_txt,))
        config = ConfigParser.ConfigParser()
        local_config = open(local_txt)
        try:
            config.readfp(_SectionAdder('foo', local_config))
            port = config.get('foo', 'org.lockss.ui.port')
        finally:
            local_config.close()

    fix_auth_failure.fix_auth_failure()
    client = lockss_daemon.Client(ipAddr, port,
                                  options.username, options.password)
    repos = client._getStatusTable( 'RepositoryTable' )[ 1 ]

    no_auid = [r for r in repos if r['status'] == 'No AUID']
    if no_auid:
        print 'Warning: These cache directories have no AUID:'
        for r in no_auid:
            print r['dir']
        print

    deleted = [r for r in repos if r['status'] == 'Deleted']
    for r in deleted:
        r['auid'] = _auid(os.path.join(src, r['dir']))
    deleted.sort(key=lambda r: r['auid'])

    move_all = False
    if options.verbose:
        if deleted:
            print 'These AUs have been deleted on the daemon:'
            for r in deleted:
                print r['auid']
            if options.verify:
                move_all = raw_input('move all [y]? ').startswith('y')
        else:
            print 'No deleted AUs.'

    verify_each = options.verify and not move_all
    dst = os.path.join(options.directory, options.dest)
    for r in deleted:
        dir = r['dir']
        if not verify_each or \
                verify_each and \
                raw_input('move %s [n]? ' % r['auid']).startswith('y'):
            src_r = os.path.join(src, dir)
            if os.path.isabs(dir):
              if not dir.startswith(options.directory): print 'Absolute/relative path mismatch: %s' % (dir,)
              dst_r = os.path.join(dst, dir[len(options.directory)+1:])
            else: dst_r = os.path.join(dst, dir)
            if options.commands:
                print "mv %s %s # %s" % (src_r, dst_r, r['auid'])
            else:
                os.renames(src_r, dst_r)
Example 40
def rename(dir):
    img_names = sorted(os.listdir(dir))
    for i, img_name in enumerate(img_names):
        old_n = ''.join([dir, '/', img_name])
        new_n = ''.join([dir, '/', str(i), '.jpg'])
        os.renames(old_n, new_n)
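
Renaming files to sequential numbers inside the same directory can collide when a target such as 0.jpg already exists among the sources. A collision-safe sketch of the same rename, done in two phases through temporary names:

import os

def rename_sequential(dir_path):
    names = sorted(os.listdir(dir_path))
    # Phase 1: move every file to a unique temporary name.
    tmp_names = []
    for i, name in enumerate(names):
        tmp = os.path.join(dir_path, ".tmp_%d" % i)
        os.rename(os.path.join(dir_path, name), tmp)
        tmp_names.append(tmp)
    # Phase 2: assign the final sequential names.
    for i, tmp in enumerate(tmp_names):
        os.rename(tmp, os.path.join(dir_path, "%d.jpg" % i))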
Example 41
def main():
    parser = argparse.ArgumentParser(
        description=
        'Dataset Manipulator: useful to merge two datasets by concatenating ' +
        'episodes. PS: Deleting sources after merging into the destination ' +
        'folder.')
    parser.add_argument(
        '--continual-learning-labels',
        type=str,
        nargs=2,
        metavar=('label_1', 'label_2'),
        default=argparse.SUPPRESS,
        help='Labels for the continual learning RL distillation task.')
    parser.add_argument(
        '-f',
        '--force',
        action='store_true',
        default=False,
        help='Force the merge, even if it overrides something else,'
        ' including the destination if it exist')
    group = parser.add_mutually_exclusive_group()
    group.add_argument(
        '--merge',
        type=str,
        nargs=3,
        metavar=('source_1', 'source_2', 'destination'),
        default=argparse.SUPPRESS,
        help=
        'Merge two datasets by appending the episodes, deleting sources right after.'
    )

    args = parser.parse_args()

    if 'merge' in args:
        # let make sure everything is in order
        assert os.path.exists(
            args.merge[0]), "Error: dataset '{}' could not be found".format(
                args.merge[0])

        # If the merge destination exists already, delete it for the convenience of updating the student's policy
        if os.path.exists(
                args.merge[2]) or os.path.exists(args.merge[2] + '/'):
            assert args.force, "Error: destination directory '{}' already exists".format(
                args.merge[2])
            shutil.rmtree(args.merge[2])

        if 'continual_learning_labels' in args:
            assert args.continual_learning_labels[0] in CONTINUAL_LEARNING_LABELS \
                   and args.continual_learning_labels[1] in CONTINUAL_LEARNING_LABELS, \
                   "Please specify a valid Continual learning label to each dataset to be used for RL distillation !"

        # create the output
        os.mkdir(args.merge[2])

        # copy files from first source
        os.rename(args.merge[0] + "/dataset_config.json",
                  args.merge[2] + "/dataset_config.json")
        os.rename(args.merge[0] + "/env_globals.json",
                  args.merge[2] + "/env_globals.json")

        for record in sorted(glob.glob(args.merge[0] + "/record_[0-9]*/*")):
            s = args.merge[2] + "/" + record.split(
                "/")[-2] + '/' + record.split("/")[-1]
            os.renames(record, s)

        num_episode_dataset_1 = int(record.split("/")[-2][7:]) + 1

        # copy files from second source
        for record in sorted(glob.glob(args.merge[1] + "/record_[0-9]*/*")):
            episode = str(num_episode_dataset_1 +
                          int(record.split("/")[-2][7:]))
            new_episode = record.split("/")[-2][:-len(episode)] + episode
            s = args.merge[2] + "/" + new_episode + '/' + record.split("/")[-1]
            os.renames(record, s)
        num_episode_dataset_2 = int(record.split("/")[-2][7:]) + 1

        # load and correct ground_truth
        ground_truth = {}
        ground_truth_load = np.load(args.merge[0] + "/ground_truth.npz")
        ground_truth_load_2 = np.load(args.merge[1] + "/ground_truth.npz")
        ground_truth["images_path"] = []
        num_episode_dataset = num_episode_dataset_1

        index_slash = args.merge[2].find("/")
        index_margin_str = len("/record_")
        directory_str = args.merge[2][index_slash + 1:]

        for idx_, gt_load in enumerate(
            [ground_truth_load, ground_truth_load_2], 1):
            for arr in gt_load.files:
                if arr == "images_path":
                    # here, we want to rename just the folder containing the records, hence the black magic

                    for i in tqdm(range(len(gt_load["images_path"])),
                                  desc="Update of paths (Folder " +
                                  str(1 + idx_) + ")"):
                        # find the "record_" position
                        path = gt_load["images_path"][i]
                        end_pos = path.find("/record_")
                        inter_pos = path.find(
                            "/frame")  # pos in the complete path.

                        if idx_ > 1:
                            episode = str(num_episode_dataset_1 + int(
                                path[end_pos + index_margin_str:inter_pos]))
                            episode = episode.zfill(3)
                            new_record_path = "/record_" + episode + path[
                                inter_pos:]
                        else:
                            new_record_path = path[end_pos:]
                        ground_truth["images_path"].append(directory_str +
                                                           new_record_path)
                else:
                    # anything that isn't images_path doesn't need to change
                    gt_arr = gt_load[arr]

                    if idx_ > 1:
                        num_episode_dataset = num_episode_dataset_2

                    # HERE: check before overwriting that the target is random!
                    if gt_load[arr].shape[0] < num_episode_dataset:
                        gt_arr = np.repeat(gt_load[arr],
                                           num_episode_dataset,
                                           axis=0)

                    if idx_ > 1:
                        ground_truth[arr] = np.concatenate(
                            (ground_truth[arr], gt_arr), axis=0)
                    else:
                        ground_truth[arr] = gt_arr

        # save the corrected ground_truth
        np.savez(args.merge[2] + "/ground_truth.npz", **ground_truth)

        # load and correct the preprocessed data (actions, rewards etc)
        preprocessed = {}
        preprocessed_load = np.load(args.merge[0] + "/preprocessed_data.npz")
        preprocessed_load_2 = np.load(args.merge[1] + "/preprocessed_data.npz")

        dataset_1_size = preprocessed_load["actions"].shape[0]
        dataset_2_size = preprocessed_load_2["actions"].shape[0]

        for idx, prepro_load in enumerate(
            [preprocessed_load, preprocessed_load_2]):
            for arr in prepro_load.files:
                pr_arr = prepro_load[arr]

                to_class = None
                if arr == "episode_starts":
                    to_class = bool
                elif arr == "actions_proba":
                    to_class = float
                else:
                    to_class = int
                if preprocessed.get(arr, None) is None:
                    preprocessed[arr] = pr_arr.astype(to_class)
                else:
                    preprocessed[arr] = np.concatenate(
                        (preprocessed[arr].astype(to_class),
                         pr_arr.astype(to_class)),
                        axis=0)
            if 'continual_learning_labels' in args:
                if preprocessed.get(CL_LABEL_KEY, None) is None:
                    preprocessed[CL_LABEL_KEY] = \
                        np.array([args.continual_learning_labels[idx] for _ in range(dataset_1_size)])
                else:
                    preprocessed[CL_LABEL_KEY] = \
                        np.concatenate((preprocessed[CL_LABEL_KEY], np.array([args.continual_learning_labels[idx]
                                                                              for _ in range(dataset_2_size)])), axis=0)

        np.savez(args.merge[2] + "/preprocessed_data.npz", **preprocessed)

        # remove the old folders
        shutil.rmtree(args.merge[0])
        shutil.rmtree(args.merge[1])
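
The merge above renumbers record_NNN directories by adding an offset taken from the first dataset. Renumbering in place is collision-prone; processing indices from highest to lowest avoids overwriting when the offset is positive. A sketch with a hypothetical dataset layout (record_000, record_001, ...):

import glob
import os

def shift_records(dataset_dir, offset):
    records = sorted(glob.glob(os.path.join(dataset_dir, "record_[0-9]*")))
    # Highest indices first, so record_000 -> record_005 never lands on a
    # directory that has not moved out of the way yet (offset > 0 assumed).
    for rec in reversed(records):
        idx = int(os.path.basename(rec)[len("record_"):])
        os.renames(rec, os.path.join(dataset_dir, "record_%03d" % (idx + offset)))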
Example 42
     os.system('git add -A')
     os.system('git commit -am "update"')
     rs = os.system('git push origin gh-pages')
     if (int(rs) != 0):
         rs = os.system('git push origin gh-pages')
     if (int(rs) != 0):
         print "Git submit fail!!! Check it!"
         sys.exit(1)
     os.chdir(workingdir)
 if (os.path.isfile(f)):
     count += 1
     fname = filebasename(f)
     if (' ' in fname):
         print "File name has blank!", f
         os.renames(
             f,
             os.path.split(f)[0] + os.sep + fname.strip() +
             os.path.splitext(f)[1])
         fname = fname.strip()
     dirname = doilink + "/pages/" + decomposeDOI(
         unquotefileDOI(fname), url=False, outdir=True)
     if (not os.path.exists(dirname + fname + '.html')):
         touchcount += 1
         try:
             if (not os.path.exists(dirname)): os.makedirs(dirname)
             os.system('touch ' + dirname + fname + '.html')
         except WindowsError as e:
             print e
         except:
             print "Something error for file:", f
 else:
     for ff in glob.iglob(f + "/10.*.pdf"):
Example 43
def Getprbest(ref_path,
              date_start=None,
              date_end=None,
              df=None,
              thumb_root=None,
              ignoreSLCoff=True,
              debug=False,
              datepaser='%Y-%m-%d',
              copydir='',
              monthlist=None):
    date_start, date_end = datetime.strptime(date_start,
                                             datepaser), datetime.strptime(
                                                 date_end, datepaser)
    # Get candidate PID list for df
    pr = path.splitext(path.basename(ref_path))[0]
    print('processing {}'.format(pr))
    wrs_path, wrs_row = int(pr[:3]), int(pr[3:])
    cach_candi = '{}_{}_{}_{}.csv'.format(
        str(wrs_path).zfill(3),
        str(wrs_row).zfill(3), datetime.strftime(date_start, '%Y%m%d'),
        datetime.strftime(date_end, '%Y%m%d'))
    cach_candi_path = path.join(thumb_root, cach_candi)
    if not path.exists(cach_candi_path):
        candi_df = Get_candi_by_onepr(wrs_path,
                                      wrs_row,
                                      date_start,
                                      date_end,
                                      df,
                                      ignoreSLCoff=ignoreSLCoff,
                                      monthlist=monthlist)
        candi_df.to_csv(cach_candi_path)
    else:
        candi_df = pd.read_csv(cach_candi_path)
    candi_jpg_list = download_c1df_thumbnail(candi_df, thumb_root)
    print('{} has {} candi'.format(pr, len(candi_jpg_list)))
    if len(candi_jpg_list) == 0:
        return None
    imgQ = io.imread(ref_path)
    scores = []
    CCS = []
    # print(candi_jpg_list)
    for id, candi_img in enumerate(candi_jpg_list):
        if not path.exists(candi_img):
            scores.append(0)
            CCS.append(100)
            continue
        imgD = io.imread(candi_img)
        this_score = hist_score(imgQ, imgD)
        if debug:
            newname = add_prefix(candi_img, "{0:.2f}".format(this_score))
            os.renames(candi_img, newname)
            candi_jpg_list[id] = newname
        scores.append(this_score)
        pid = path.splitext(path.basename(candi_img))[0]
        CCS.append(
            list(candi_df.loc[candi_df.PRODUCT_ID == pid].CLOUD_COVER)[0])
    scores = np.array(scores)
    CCS = np.array(CCS)
    # CCS[scores < 0.5] = 100
    # CCS[scores < 0.2] = 100
    if CCS.min() == 100:
        print('{} failed!'.format(pr))
        return None
    best = candi_jpg_list[scores.argmin()]
    if copydir != '':
        if best is not None:
            shutil.copy(best, copydir)
    return best
Example 44
def main():
    parser = argparse.ArgumentParser(
        description='Deteministic dataset generator for SRL training ' +
        '(can be used for environment testing)')
    parser.add_argument('--num-cpu',
                        type=int,
                        default=1,
                        help='number of cpu to run on')
    parser.add_argument('--num-episode',
                        type=int,
                        default=50,
                        help='number of episode to run')
    parser.add_argument('--max_steps_per_epoch',
                        type=int,
                        default=200,
                        help='max num steps per epoch')

    #CUSTOM ARGS. want to update eventually, i.e., specify a specific path for dr
    parser.add_argument(
        '--dr',
        action='store_true',
        default=False,
        help=
        "Include this flag to use the chosen environment with domain randomization"
    )
    parser.add_argument(
        '--alt',
        action='store_true',
        default=False,
        help=
        "Include this flag to use the chosen environment with alternate view")
    parser.add_argument(
        '--special_start',
        action='store_true',
        default=False,
        help=
        "Include this flag to use the chosen environment with the special start"
    )

    parser.add_argument(
        '--save-path',
        type=str,
        default='robotics-rl-srl/data/',
        help='Folder where the environments will save the output')
    parser.add_argument('--name',
                        type=str,
                        default='UNSETNAME',
                        help='Folder name for the output')
    parser.add_argument('--env',
                        type=str,
                        default='push_rotate',
                        help='The environment wanted',
                        choices=list(envs.keys()))

    parser.add_argument('--display', action='store_true', default=False)
    parser.add_argument('--no-record-data', action='store_true', default=False)

    parser.add_argument('--seed', type=int, default=0, help='the seed')
    parser.add_argument(
        '-f',
        '--force',
        action='store_true',
        default=False,
        help='Force the save, even if it overrides something else,' +
        ' including partial parts if they exist')

    #TODO: Change this argument to be for the diff types of tasks
    parser.add_argument('--multi-view',
                        action='store_true',
                        default=False,
                        help='Set a second camera to the scene')

    parser.add_argument(
        '--reward-dist',
        action='store_true',
        default=False,
        help=
        'Prints out the reward distribution when the dataset generation is finished'
    )
    parser.add_argument('--run-ppo2',
                        action='store_true',
                        default=False,
                        help='runs a ppo2 agent instead of a random agent')
    parser.add_argument(
        '--ppo2-timesteps',
        type=int,
        default=1000,
        help='number of timesteps to run PPO2 on before generating the dataset'
    )

    args = parser.parse_args()

    assert (args.num_cpu >
            0), "Error: number of CPUs must be strictly positive"

    assert (args.num_episode >
            0), "Error: number of episodes must be strictly positive"
    # assert not(registered_env[args.env][3] is ThreadingType.NONE and args.num_cpu != 1), \
    # "Error: cannot have more than 1 CPU for the environment {}".format(args.env)

    if args.num_cpu > args.num_episode:
        args.num_cpu = args.num_episode
        printYellow(
            "num_cpu cannot be greater than num_episode, defaulting to {} cpus."
            .format(args.num_cpu))

    # this is done so that seeds 0 and 1 yield different datasets, not offsets of the same one.
    args.seed = np.random.RandomState(args.seed).randint(int(1e10))

    # File exists, need to deal with it
    if not args.no_record_data and os.path.exists(args.save_path + args.name):
        assert args.force, "Error: save directory '{}' already exists (use --force to overwrite)".format(
            args.save_path + args.name)

        shutil.rmtree(args.save_path + args.name)
        for part in glob.glob(args.save_path + args.name + "_part-[0-9]*"):
            shutil.rmtree(part)
    if not args.no_record_data:
        # create the output
        os.makedirs(args.save_path + args.name, exist_ok=True)

    if args.num_cpu == 1:
        env_thread(args, 0, partition=False, use_ppo2=args.run_ppo2)
    else:
        # try to divide the work into multiple processes, one environment each
        try:
            jobs = []
            for i in range(args.num_cpu):
                process = multiprocessing.Process(target=env_thread,
                                                  args=(args, i, True,
                                                        args.run_ppo2))
                jobs.append(process)

            for j in jobs:
                j.start()

            try:
                for j in jobs:
                    j.join()
            except Exception as e:
                printRed("Error: unable to join thread")
                raise e

        except Exception as e:
            printRed("Error: unable to start thread")
            raise e

    if not args.no_record_data and args.num_cpu > 1:
        # sleep 1 second to avoid concurrency issues from multiprocessing (e.g., files still being written)
        time.sleep(1)
        # get all the parts
        file_parts = sorted(glob.glob(args.save_path + args.name +
                                      "_part-[0-9]*"),
                            key=lambda a: int(a.split("-")[-1]))

        # move the config files from the first part; they are identical across parts
        os.rename(file_parts[0] + "/dataset_config.json",
                  args.save_path + args.name + "/dataset_config.json")
        os.rename(file_parts[0] + "/env_globals.json",
                  args.save_path + args.name + "/env_globals.json")

        ground_truth = None
        preprocessed_data = None

        # used to convert the part record_id to the fused record_id
        record_id = 0
        for part in file_parts:
            # sort the record folders numerically by their index suffix
            records = sorted(glob.glob(part + "/record_[0-9]*"),
                             key=lambda a: int(a.split("_")[-1]))

            record_id_start = record_id
            for record in records:
                os.renames(
                    record, args.save_path + args.name +
                    "/record_{:03d}".format(record_id))
                record_id += 1

            # fuse the npz files together, in the right order
            if ground_truth is None:
                # init
                ground_truth = {}
                preprocessed_data = {}
                ground_truth_load = np.load(part + "/ground_truth.npz")
                preprocessed_data_load = np.load(part +
                                                 "/preprocessed_data.npz")

                for arr in ground_truth_load.files:
                    if arr == "images_path":
                        ground_truth[arr] = np.array([
                            convertImagePath(args, path, record_id_start)
                            for path in ground_truth_load[arr]
                        ])
                    else:
                        ground_truth[arr] = ground_truth_load[arr]
                for arr in preprocessed_data_load.files:
                    preprocessed_data[arr] = preprocessed_data_load[arr]

            else:
                ground_truth_load = np.load(part + "/ground_truth.npz")
                preprocessed_data_load = np.load(part +
                                                 "/preprocessed_data.npz")

                for arr in ground_truth_load.files:
                    if arr == "images_path":
                        sanitised_paths = np.array([
                            convertImagePath(args, path, record_id_start)
                            for path in ground_truth_load[arr]
                        ])
                        ground_truth[arr] = np.concatenate(
                            (ground_truth[arr], sanitised_paths))
                    else:
                        ground_truth[arr] = np.concatenate(
                            (ground_truth[arr], ground_truth_load[arr]))
                for arr in preprocessed_data_load.files:
                    preprocessed_data[arr] = np.concatenate(
                        (preprocessed_data[arr], preprocessed_data_load[arr]))

            # remove the current part folder
            shutil.rmtree(part)

        # save the fused outputs
        np.savez(args.save_path + args.name + "/ground_truth.npz",
                 **ground_truth)
        np.savez(args.save_path + args.name + "/preprocessed_data.npz",
                 **preprocessed_data)

    if args.reward_dist:
        rewards, counts = np.unique(
            np.load(args.save_path + args.name +
                    "/preprocessed_data.npz")['rewards'],
            return_counts=True)
        counts = [
            "{:.2f}%".format(val * 100) for val in counts / np.sum(counts)
        ]
        print("reward distribution:")
        for reward, count in zip(rewards, counts):
            print(" ", reward, count)
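# A minimal sketch of the per-part .npz fusion done above, in isolation
# (the glob pattern and output paths are hypothetical; each part is assumed
# to store 1-D arrays under identical keys):
import glob
import numpy as np

fused = {}
for part_file in sorted(glob.glob("data/demo_part-*/preprocessed_data.npz")):
    with np.load(part_file) as part:
        for key in part.files:
            if key not in fused:
                fused[key] = part[key]
            else:
                fused[key] = np.concatenate((fused[key], part[key]))
np.savez("data/demo/preprocessed_data.npz", **fused)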
Esempio n. 45
0
        usage()
        sys.exit()
    elif op == "-o":
        output_path = value
    elif op == "-n":
        output_name = value
uid = str(uuid.uuid1())

# zfile = zipfile.ZipFile(output_file,'w',zipfile.zlib.DEFLATED)
tempdic = os.path.join(os.path.expanduser('~'), '.createaar', uid)
os.makedirs(tempdic)
print('tempdic path', tempdic)
so_buffer_path = os.path.join(tempdic, 'jni', 'armeabi')
os.makedirs(so_buffer_path)
manifest_buffer_path = tempdic
with open(os.path.join(manifest_buffer_path, 'AndroidManifest.xml'), 'w') as mani_file:
    mani_file.write(manifest)
print('so path', so_buffer_path)
jar_buffer_path = os.path.join(tempdic, 'libs')
os.makedirs(jar_buffer_path)
print('jar path', jar_buffer_path)
shutil.copy(so_path, so_buffer_path)
shutil.copy(jar_path, jar_buffer_path)

if not output_name.strip():
    output_name = "aarlib"
shutil.make_archive(os.path.join(output_path, output_name), 'zip', tempdic)
os.renames(os.path.join(output_path, output_name + '.zip'),
           os.path.join(output_path, output_name + '.aar'))
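# The script above leaves its staging tree behind in ~/.createaar; a hedged
# cleanup step (tempdic as defined above):
shutil.rmtree(tempdic, ignore_errors=True)  # drop the staging directory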

Esempio n. 46
0
                # Collect the predictions here
                all_predictions = []

                for x_test_batch in batches:
                    batch_predictions = sess.run(predictions, {
                        input_x: x_test_batch,
                        dropout_keep_prob: 1.0
                    })
                    all_predictions = np.concatenate(
                        [all_predictions, batch_predictions])

        # Save the evaluation to a csv

        df["sentiment"] = np.array(all_predictions)
        df.to_csv(
            '../../../twitter-swisscom/Divided_Predicted/sentiments.tsv.00' +
            str(i))
        print("dataframe number :" + str(i))
    except (RuntimeError, TypeError, NameError, ValueError,
            UnicodeDecodeError) as e:
        print("Error:", e)

#### Renaming files ######
count = 0
path = '../../../twitter-swisscom/Divided_Predicted/'
for filename in os.listdir(path):
    count += 1
    print('the file name is : ', filename)
    os.renames(path + filename,
               path + 'sentiments_renamed.tsv.00' + str(count))
    print(str(count))
Esempio n. 47
0
def LOT():
    name = 13100000000
    path = r"D:\picture\diku\diku" + "\\"
    for file in os.listdir(path):
        os.renames(path + file, path + str(name) + "-" + str(name) + ".jpg")
        name += 1
Esempio n. 48
0
import os

file_path = "/home/chenli/head_counts/head_data/head_crop5/0_background/"

# Identical names overwrite each other and files get lost; when renaming to
# the same scheme, add a unique index.
file_names = os.listdir(file_path)
index = 15000
for i, file_name in enumerate(file_names):
    os.renames(file_path + file_name, file_path + str(i + index) + '.jpg')
    print(file_path + str(i + index) + '.jpg')
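# os.renames silently replaces an existing target on POSIX, which is how
# files get lost here; a hedged variant that refuses to overwrite
# (file_path and the 15000 offset as above):
import os

for i, file_name in enumerate(sorted(os.listdir(file_path))):
    target = os.path.join(file_path, str(i + 15000) + '.jpg')
    if os.path.exists(target):
        raise FileExistsError(target)  # fail loudly instead of dropping files
    os.renames(os.path.join(file_path, file_name), target)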
Esempio n. 49
0
    def unpack_dir(self, target_dir, callback=None):
        rmode = ""
        self.tar = None
        if self.type == 'tar':
            rmode = 'r:'
        elif self.type == 'targz':
            rmode = 'r:gz'
        elif self.type == 'tarbz2':
            rmode = 'r:bz2'
        elif self.type in ('tarlzma', 'tarxz'):
            self.tar = TarFile.lzmaopen(self.file_path, fileobj=self.fileobj)
        else:
            raise UnknownArchiveType

        if self.tar is None:
            self.tar = tarfile.open(self.file_path,
                                    rmode,
                                    fileobj=self.fileobj)

        oldwd = None
        try:
            # Don't fail if CWD doesn't exist (#6748)
            oldwd = os.getcwd()
        except OSError:
            pass
        os.chdir(target_dir)

        uid = os.getuid()
        gid = os.getgid()

        for tarinfo in self.tar:
            if callback:
                callback(tarinfo, extracted=False)

            startservices = []
            if tarinfo.issym() and \
                    os.path.isdir(tarinfo.name) and \
                    not os.path.islink(tarinfo.name):
                # Replacing an existing directory with a symlink; the
                # tarfile module cannot handle this case itself.

                if os.path.isdir(tarinfo.linkname):
                    # Symlink target is a directory. Move old directory's
                    # content to this directory.
                    for filename in os.listdir(tarinfo.name):
                        old_path = util.join_path(tarinfo.name, filename)
                        new_path = util.join_path(tarinfo.linkname, filename)

                        if os.path.lexists(new_path):
                            if not os.path.isdir(new_path):
                                # A file with the same name exists in the
                                # target. Remove the one in the old directory.
                                os.remove(old_path)
                            continue

                        # first, try the straightforward rename
                        try:
                            os.renames(old_path, new_path)
                        except OSError, e:
                            # something went wrong? [Errno 18] Invalid cross-device link?
                            # try another way
                            if e.errno == errno.EXDEV:
                                if tarinfo.linkname.startswith(".."):
                                    new_path = util.join_path(
                                        os.path.normpath(
                                            os.path.join(
                                                os.path.dirname(tarinfo.name),
                                                tarinfo.linkname)), filename)
                                if not old_path.startswith("/"):
                                    old_path = "/" + old_path
                                if not new_path.startswith("/"):
                                    new_path = "/" + new_path
                                print "Moving:", old_path, " -> ", new_path
                                os.system("mv -f %s %s" % (old_path, new_path))
                            else:
                                raise
                    try:
                        os.rmdir(tarinfo.name)
                    except OSError, e:
                        # hmm, directory not empty? try renaming it with a .old extension.
                        if e.errno == errno.ENOTEMPTY:
                            # if directory with dbus/pid file was moved we have to restart dbus
                            for (path, dirs, files) in os.walk(tarinfo.name):
                                if path.endswith("dbus") and "pid" in files:
                                    startservices.append("dbus")
                                    for service in ("NetworkManager",
                                                    "connman", "wicd"):
                                        if os.path.isfile(
                                                "/etc/mudur/services/enabled/%s"
                                                % service):
                                            startservices.append(service)
                                            os.system("service % stop" %
                                                      service)
                                    os.system("service dbus stop")
                                    break
                            os.system("mv -f %s %s.old" %
                                      (tarinfo.name, tarinfo.name))
                        else:
                            raise

                elif not os.path.lexists(tarinfo.linkname):
                    # Symlink target does not exist. Assume the old
                    # directory is moved to another place in package.
                    os.renames(tarinfo.name, tarinfo.linkname)
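# The EXDEV fallback above shells out to mv; shutil.move is a hedged
# in-process alternative, since it falls back to copy-and-delete when the
# rename crosses filesystems (old_path and new_path as in the loop above):
import shutil
shutil.move(old_path, new_path)  # handles EXDEV without an external mv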
Esempio n. 50
0
def update():
    if lazylibrarian.CONFIG['INSTALL_TYPE'] == 'win':
        logger.debug('(update) Windows install - no update available')
        logger.info('(update) Windows .exe updating not supported yet.')
        # pass
    elif lazylibrarian.CONFIG['INSTALL_TYPE'] == 'package':
        logger.debug('(update) Package install - no update available')
        logger.info('(update) Please use your package manager to update')
        # pass
    elif lazylibrarian.CONFIG['INSTALL_TYPE'] == 'git':
        branch = getCurrentGitBranch()

        _, _ = runGit('stash clear')
        output, err = runGit('pull origin ' + branch)

        if not output:
            logger.error('(update) Couldn\'t download latest version')

        for line in output.split('\n'):

            if 'Already up-to-date.' in line:
                logger.info('(update) No update available, not updating')
                logger.info('(update) Output: ' + str(output))
            elif line.endswith('Aborting.'):
                logger.error('(update) Unable to update from git: ' + line)
                logger.info('(update) Output: ' + str(output))

    elif lazylibrarian.CONFIG['INSTALL_TYPE'] == 'source':

        # As this is a non-git install, we assume that the comparison is
        # always to master.

        tar_download_url = 'https://github.com/%s/%s/tarball/%s' % (
            lazylibrarian.CONFIG['GIT_USER'], lazylibrarian.CONFIG['GIT_REPO'],
            lazylibrarian.CONFIG['GIT_BRANCH'])
        update_dir = os.path.join(lazylibrarian.PROG_DIR, 'update')
        # version_path = os.path.join(lazylibrarian.PROG_DIR, 'version.txt')

        try:
            logger.info('(update) Downloading update from: ' +
                        tar_download_url)
            request = urllib2.Request(tar_download_url)
            request.add_header('User-Agent', USER_AGENT)
            data = urllib2.urlopen(request, timeout=30)
        except socket.timeout:
            logger.error("(update) Timeout retrieving new version from " +
                         tar_download_url)
            return
        except (urllib2.HTTPError, urllib2.URLError) as e:
            if hasattr(e, 'reason'):
                errmsg = e.reason
            else:
                errmsg = str(e)
            logger.error("(update) Unable to retrieve new version from " +
                         tar_download_url + ", can't update: %s" % errmsg)
            return

        download_name = data.geturl().split('/')[-1]

        tar_download_path = os.path.join(lazylibrarian.PROG_DIR, download_name)

        # Save tar to disk
        with open(tar_download_path, 'wb') as f:
            f.write(data.read())

        # Extract the tar to update folder
        logger.info('(update) Extracting file ' + tar_download_path)
        tar = tarfile.open(tar_download_path)
        tar.extractall(update_dir)
        tar.close()

        # Delete the tar.gz
        logger.info('(update) Deleting file ' + tar_download_path)
        os.remove(tar_download_path)

        # Find update dir name
        update_dir_contents = [
            x for x in os.listdir(update_dir)
            if os.path.isdir(os.path.join(update_dir, x))
        ]
        if len(update_dir_contents) != 1:
            logger.error(u"(update) Invalid update data, update failed: " +
                         str(update_dir_contents))
            return
        content_dir = os.path.join(update_dir, update_dir_contents[0])

        # walk temp folder and move files to main folder
        for dirname, dirnames, filenames in os.walk(content_dir):
            dirname = dirname[len(content_dir) + 1:]
            for curfile in filenames:
                old_path = os.path.join(content_dir, dirname, curfile)
                new_path = os.path.join(lazylibrarian.PROG_DIR, dirname,
                                        curfile)

                if os.path.isfile(new_path):
                    os.remove(new_path)
                os.renames(old_path, new_path)

        # Update version.txt
        updateVersionFile(lazylibrarian.CONFIG['LATEST_VERSION'])
    else:
        logger.error("(update) Cannot perform update - Install Type not set")
        return
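# On Python 3.8+ the walk-and-move loop above can collapse into a single
# copy; a hedged sketch (content_dir and update_dir as above):
import shutil
shutil.copytree(content_dir, lazylibrarian.PROG_DIR, dirs_exist_ok=True)
shutil.rmtree(update_dir)  # drop the extracted temp folder afterwards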
Esempio n. 51
0
        self.quad = StringVar()  # quadrant letters, C to X
        self.butt = StringVar()
        self.fuso.set("Zone from \"1\" to \"60\"")
        self.quad.set("Quadrant from \"C\" to \"X\"")
        self.fusoEtry = Entry(root, textvariable=self.fuso)
        self.fusoEtry.focus()
        self.fusoEtry.bind("<Key>", self.infFQ)
        self.quadEtry = Entry(root, textvariable=self.quad)
        self.quadEtry.focus()
        self.quadEtry.bind("<Key>", self.infFQ)
        self.execButt = Button(root, textvariable=self.butt, width=40, height=1)
        self.fusoEtry.grid(row=2, column=1, padx=9, pady=9)
        self.quadEtry.grid(row=3, column=1, padx=9, pady=9)
        self.execButt.grid(row=4, column=0, columnspan=20, padx=9, pady=9)
        self.butt.set("Aguardando informações!!!")
        self.execButt.unbind("<Button-1>")
        self.execButt.config(state=DISABLED)

Programa(root)
root.mainloop()


# Allow some time so we don't conflict with the Lisp side
import time
time.sleep(3)
try:
    os.remove(flagDir + 'dwgtokml.asc')
except OSError:
    pass
os.renames(flagDir + 'temporario.kml', flagDir + 'dwgtokml.kml')
print("Ação concluida...")
Esempio n. 52
0
def train(model, train_dataset, train_eval_dataset, val_dataset, opt):
    # Prepare model and device
    if opt.device.type == "cuda":
        logger.info("CUDA enabled.")
        model.to(opt.device)
        if len(opt.device_ids) > 1:
            logger.info("Use multi GPU", opt.device_ids)
            model = torch.nn.DataParallel(model, device_ids=opt.device_ids)  # use multi GPU

    train_loader = DataLoader(train_dataset,
                              collate_fn=start_end_collate,
                              batch_size=opt.bsz,
                              num_workers=opt.num_workers,
                              shuffle=True,
                              pin_memory=opt.pin_memory)

    train_eval_loader = DataLoader(train_eval_dataset,
                                   collate_fn=start_end_collate,
                                   batch_size=opt.bsz,
                                   num_workers=opt.num_workers,
                                   shuffle=False,
                                   pin_memory=opt.pin_memory)

    # Prepare optimizer
    param_optimizer = list(model.named_parameters())
    no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
    optimizer_grouped_parameters = [
        {"params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], "weight_decay": 0.01},
        {"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], "weight_decay": 0.0}
    ]

    num_train_optimization_steps = len(train_loader) * opt.n_epoch
    optimizer = BertAdam(optimizer_grouped_parameters,
                         lr=opt.lr,
                         weight_decay=opt.wd,
                         warmup=opt.lr_warmup_proportion,
                         t_total=num_train_optimization_steps,
                         schedule="warmup_linear")

    prev_best_score = 0.
    es_cnt = 0
    start_epoch = -1 if opt.eval_untrained else 0
    eval_tasks_at_training = opt.eval_tasks_at_training  # VR is computed along with VCMR
    save_submission_filename = \
        "latest_{}_{}_predictions_{}.json".format(opt.dset_name, opt.eval_split_name, "_".join(eval_tasks_at_training))
    for epoch_i in trange(start_epoch, opt.n_epoch, desc="Epoch"):
        if epoch_i > -1:
            train_epoch(model, train_loader, optimizer, opt, epoch_i, training=True)
        # TODO: continue from here.
        global_step = (epoch_i + 1) * len(train_loader)
        if opt.eval_path is not None:
            with torch.no_grad():
                train_epoch(model, train_eval_loader, optimizer, opt, epoch_i, training=False)

                metrics_no_nms, metrics_nms, latest_file_paths = \
                    eval_epoch(model, val_dataset, opt, save_submission_filename,
                               tasks=eval_tasks_at_training, max_after_nms=100)
            to_write = opt.eval_log_txt_formatter.format(
                time_str=time.strftime("%Y_%m_%d_%H_%M_%S"),
                epoch=epoch_i,
                eval_metrics_str=json.dumps(metrics_no_nms))
            with open(opt.eval_log_filepath, "a") as f:
                f.write(to_write)
            logger.info("metrics_no_nms {}".format(pprint.pformat(rm_key_from_odict(metrics_no_nms, rm_suffix="by_type"), indent=4)))
            logger.info("metrics_nms {}".format(pprint.pformat(metrics_nms, indent=4)))

            # metrics = metrics_nms if metrics_nms is not None else metrics_no_nms
            metrics = metrics_no_nms
            # early stop/ log / save model
            for task_type in ["SVMR", "VCMR"]:
                if task_type in metrics:
                    task_metrics = metrics[task_type]
                    for iou_thd in [0.5, 0.7]:
                        opt.writer.add_scalars("Eval/{}-{}".format(task_type, iou_thd),
                                               {k: v for k, v in task_metrics.items() if str(iou_thd) in k},
                                               global_step)

            task_type = "VR"
            if task_type in metrics:
                task_metrics = metrics[task_type]
                opt.writer.add_scalars("Eval/{}".format(task_type),
                                       {k: v for k, v in task_metrics.items()},
                                       global_step)

            # use the most strict metric available
            stop_metric_names = ["r1"] if opt.stop_task == "VR" else ["0.5-r1", "0.7-r1"]
            stop_score = sum([metrics[opt.stop_task][e] for e in stop_metric_names])

            if stop_score > prev_best_score:
                es_cnt = 0
                prev_best_score = stop_score

                checkpoint = {
                    "model": model.state_dict(),
                    "model_cfg": model.config,
                    "epoch": epoch_i}
                torch.save(checkpoint, opt.ckpt_filepath)

                best_file_paths = [e.replace("latest", "best") for e in latest_file_paths]
                for src, tgt in zip(latest_file_paths, best_file_paths):
                    os.renames(src, tgt)
                logger.info("The checkpoint file has been updated.")
            else:
                es_cnt += 1
                if opt.max_es_cnt != -1 and es_cnt > opt.max_es_cnt:  # early stop
                    with open(opt.train_log_filepath, "a") as f:
                        f.write("Early Stop at epoch {}".format(epoch_i))
                    logger.info("Early stop at {} with {} {}"
                                .format(epoch_i, " ".join([opt.stop_task] + stop_metric_names), prev_best_score))
                    break
        else:
            checkpoint = {
                "model": model.state_dict(),
                "model_cfg": model.config,
                "epoch": epoch_i}
            torch.save(checkpoint, opt.ckpt_filepath)

        if opt.debug:
            break

    opt.writer.close()
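# Note that os.renames also prunes any directory left empty by the move; for
# the latest -> best renaming above, where the files stay in the same folder,
# plain os.rename is the safer equivalent (a hedged sketch, paths hypothetical):
import os

src = "results/latest_tvr_val_predictions_VCMR.json"  # hypothetical path
tgt = src.replace("latest", "best")
os.rename(src, tgt)  # moves the file without touching parent directories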
Esempio n. 53
0
mymkdir(dest)
#remove_folder(dest)

print '*** Copying the list of files from:', src, 'to:', dest
#copytree(src,dest)
copyfiles(src, dest)
os.chdir(dest)
print "Current directory is: %s" % os.getcwd()

# listing directories
file_list = os.listdir(os.getcwd())  # avoid shadowing the built-in list

for i in file_list:
    if 'listST_t-channel_top_4f' in i:
        print '*** Renaming file', i
        os.renames(i, "ST_T_tch.txt")

    if 'listST_t-channel_antitop' in i:
        print '*** Renaming file', i
        os.renames(i, "ST_Tbar_tch.txt")

    if 'listST_s-channel_4f' in i:
        print '*** Renaming file', i
        os.renames(i, "ST_T_sch.txt")

    if 'listST_tW_top_5f' in i:
        print '*** Renaming file', i
        os.renames(i, "ST_T_tW.txt")

    if 'listST_tW_antitop_5f' in i:
        print '*** Renaming file', i
        os.renames(i, "ST_Tbar_tW.txt")
Esempio n. 54
0
    make_stm32_module.cleanup(module_serie)
    sys.exit()

# 4) build the module from this previous version
# reset the STM32Cube repo to this previous version
os.chdir(repo_path)
git_cmd = 'git reset --hard ' + previous_version
print git_cmd
os.system(git_cmd)

# build the zephyr module from the stm32cube
previous_module = make_stm32_module.module(module_serie)
print "Building module from STM32Cube_repo " + previous_version

# populate the new repo with this previous_version
os.renames(previous_module, os.path.join(new_repo, 'stm32cube',
                                         module_seriexx))
print "Transfer previous module from " + previous_module + " to " + new_repo
os.chdir(new_repo)

git_cmd = 'git add -A stm32cube/' + module_seriexx + '/*'
print git_cmd
os.system(git_cmd)

git_cmd = 'git commit -am \"module' + previous_version + '\"'
print git_cmd
os.system(git_cmd)

git_cmd = 'git rebase --whitespace=fix HEAD~1'
print "Remove remove trailing spaces: " + git_cmd
os.system(git_cmd)
Esempio n. 55
0
            if get_var is not None:  # check that the record exists
                data_row.extend([get_var.attrib[var_attr[nVar]]
                                 ])  # append the new data to the row
        data_row = [fcst.attrib['from']
                    ] + data_row  # add the time step at the beginning of the row
        if len(data_row) == len(var_names) + 1:  # check that no data is missing
            csvwrite.writerows([data_row
                                ])  # if all data is present, write the row to csv
            rec_cnt = rec_cnt + 1  # increment the record counter

    csvopen.close()  #close csv
    print("Total number of records stored for each variable: " + str(rec_cnt))

    #save csv file in local directory
    save_path = './run/' + run_timestamp + '_' + stn + '.csv'
    os.renames(csvname, save_path)
    print('Saving individual station file %s to %s' %
          (csvname, save_path))

# construct a Wind Hub file from existing station files
# create a storage array for all stations
WND_HUB = np.empty((rec_cnt, len(stations) + 1)) * np.nan
for nStn, stn in enumerate(stations):
    stnfile = './run/' + run_timestamp + '_' + stn + '.csv'  #generate filename
    # if on the first station, extract the forecast horizon
    if nStn == 0:
        fcsthr = np.genfromtxt(
            stnfile, usecols=0, dtype=str, skip_header=1,
            delimiter=',')  #for first run record forecast horizons
        if len(fcsthr) != (rec_cnt):
            print(
                'WARNING: mismatch in number of records between stations. Requires manual verification!!!'
            )
Esempio n. 56
0
season = 0
episode = 0
season_pattern = r"(s|season)[0-9]."
episode_pattern = r"(e|epiosode)[0-9]."

poster_image_path: Path = show_folder / Path("folder.jpg")

if not os.path.exists(poster_image_path):
    urlretrieve(poster_url, poster_image_path)

for file in files:
    season = int(
        str(re.search(season_pattern, file.lower()).group()).replace(
            "season", "").replace("s", "").replace("e", ""))
    episode = int(
        str(re.search(episode_pattern, file.lower()).group()).replace(
            "episode", "").replace("e", "").replace(".", ""))
    print(show_folder.name, file, season, episode)

    episode_name, aired_year, poster_url, episode_summary = search_episode(
        tvmaze_id, season, episode)

    file_name = Path(file)
    season_info = "S{}E{}-".format(season, episode)

    os.renames(
        show_folder / file_name,
        show_folder / Path("Season " + str(season)) / Path(
            get_legal_chars(season_info + episode_name) +
            file_name.suffix))  # Path.suffix already includes the leading dot
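# Unpadded markers like S1E2 sort badly next to S1E10; a hedged variant of
# the season_info format above with zero padding, shown for illustration:
season_info = "S{:02d}E{:02d}-".format(season, episode)  # e.g. 'S01E02-'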
Esempio n. 57
0
def obfuscated_code():
    global cpp_type_list
    global file_replace_name_dic
    global file_replace_name_dic_keys
    global ignore_type_list
    global file_to_much_call_keys
    global file_to_much_call_dic
    global ignore_file_name_list
    top_dir = input("Enter the project path or drag the folder here (e.g. E:\\svn_reposition\\ZheJiang):\n")
    if top_dir == '':
        print("No input given, exiting")
        return
    if not os.path.exists(top_dir):
        print("Folder does not exist")
        return
    if not os.path.isdir(top_dir):
        print("Path is not a directory")
        return
    print("file_name_path正在生成替换列表......")
    for dir_path4, sub_paths, files in os.walk(top_dir, False):
        for s_file in files:
            file_name, file_type = os.path.splitext(s_file)
            if file_type not in cpp_type_list:
                continue
            if ".framework" in str(dir_path4):
                continue
            file_path = os.path.join(dir_path4, s_file)
            is_ignore = is_ignore_path(file_path)
            if is_ignore:
                continue
            if file_name not in file_replace_name_dic.keys():
                random_file_name = get_random_string(
                    get_random_number_10_20()) + get_random_string(
                        get_random_number_10_20())
                file_replace_name_dic[file_name] = random_file_name

    print("file_name_path===========替换列表生成完成===========")

    for i, v in file_replace_name_dic.items():
        file_replace_name_dic_keys.append(i)
    for ignore_name in ignore_file_name_list:
        if ignore_name in file_replace_name_dic_keys:
            del file_replace_name_dic[ignore_name]
            index = file_replace_name_dic_keys.index(ignore_name)
            del file_replace_name_dic_keys[index]

    change_xcode_project(top_dir)
    for dir_path5, sub_paths, files in os.walk(top_dir, False):
        for s_file in files:
            file_path = os.path.join(dir_path5, s_file)
            if ".framework" in str(file_path):
                print("跳过file_name文件名字混淆")
                continue
            file_name, file_type = os.path.splitext(s_file)
            if file_type == ".xcodeproj":
                continue
            if file_type not in cpp_type_with_header:
                continue
            if file_name not in file_replace_name_dic_keys:
                continue
            random_file_name = file_replace_name_dic[file_name]
            new_file_path = file_path.replace(file_name + file_type,
                                              random_file_name + file_type)
            os.renames(file_path, new_file_path)
            print("正在重命名文件:" + file_name + file_type + "->" +
                  random_file_name + file_type)
    print("file_name_path===========正在重命名文件完成===========")

    for dir_path6, sub_paths, files in os.walk(top_dir, False):
        for s_file in files:
            file_path = os.path.join(dir_path6, s_file)
            file_name, file_type = os.path.splitext(file_path)
            if ".framework" in str(file_path):
                print("跳过file_name文件名字混淆")
                continue
            if file_type not in change_file_type_list:
                print("跳过文件:" + file_path)
                continue
            with open(file_path, 'rb') as f:
                data = f.read()
            encode_type = chardet.detect(data)["encoding"]
            obscure_file_name(file_path, encode_type, s_file)
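# A minimal sketch of the encoding-detection step used above (chardet is
# assumed to be installed; the file path is hypothetical):
import chardet

with open('some_source.m', 'rb') as f:
    raw = f.read()
encoding = chardet.detect(raw)['encoding']
text = raw.decode(encoding or 'utf-8')  # fall back if detection fails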
Esempio n. 58
0
        src_files.append(src_path)
    elif 2 == check_flag:
        recursive_dir_list = read_dirs(src_path, True)
        for dir_file in recursive_dir_list:
            print(dir_file)
            if os.path.isfile(dir_file):
                src_files.append(dir_file)

    write_file = open(output_file, "w", encoding="utf-8")
    i = 0
    artists = {}
    for src_file in src_files:
        if src_file.endswith(".MP3") or (src_file.find(" ") > -1):
            to_rename_file = src_file[0:-4] + ".mp3"
            to_rename_file = to_rename_file.replace(" ", "_")
            os.renames(src_file, to_rename_file)
            src_file = to_rename_file

        clean_meta = clean(src_file)
        artist = ""
        try:
            artist = utils.get_v(clean_meta["DATA"], "artist")
            if artist is None:
                artist = ""
        except KeyError:
            print(clean_meta)

        artist = check_chars(artist)

        write_file.write(json.dumps(clean_meta, ensure_ascii=False) + "\n")
        artists[artist] = artist
Esempio n. 59
0
def update_dir_structure_file(book_id, calibrepath, first_author):
    localbook = db.session.query(
        db.Books).filter(db.Books.id == book_id).first()
    path = os.path.join(calibrepath, localbook.path)

    authordir = localbook.path.split('/')[0]
    if first_author:
        new_authordir = get_valid_filename(first_author)
    else:
        new_authordir = get_valid_filename(localbook.authors[0].name)

    titledir = localbook.path.split('/')[1]
    new_titledir = get_valid_filename(
        localbook.title) + " (" + str(book_id) + ")"

    if titledir != new_titledir:
        new_title_path = os.path.join(os.path.dirname(path), new_titledir)
        try:
            if not os.path.exists(new_title_path):
                os.renames(path, new_title_path)
            else:
                log.info("Copying title: %s into existing: %s", path,
                         new_title_path)
                for dir_name, __, file_list in os.walk(path):
                    for file in file_list:
                        os.renames(
                            os.path.join(dir_name, file),
                            os.path.join(new_title_path + dir_name[len(path):],
                                         file))
            path = new_title_path
            localbook.path = localbook.path.split('/')[0] + '/' + new_titledir
        except OSError as ex:
            log.error("Rename title from: %s to %s: %s", path, new_title_path,
                      ex)
            log.debug(ex, exc_info=True)
            return _(
                "Rename title from: '%(src)s' to '%(dest)s' failed with error: %(error)s",
                src=path,
                dest=new_title_path,
                error=str(ex))
    if authordir != new_authordir:
        new_author_path = os.path.join(calibrepath, new_authordir,
                                       os.path.basename(path))
        try:
            os.renames(path, new_author_path)
            localbook.path = new_authordir + '/' + localbook.path.split('/')[1]
        except OSError as ex:
            log.error("Rename author from: %s to %s: %s", path,
                      new_author_path, ex)
            log.debug(ex, exc_info=True)
            return _(
                "Rename author from: '%(src)s' to '%(dest)s' failed with error: %(error)s",
                src=path,
                dest=new_author_path,
                error=str(ex))
    # Rename all files from old names to new names
    if authordir != new_authordir or titledir != new_titledir:
        try:
            new_name = get_valid_filename(
                localbook.title) + ' - ' + get_valid_filename(new_authordir)
            path_name = os.path.join(calibrepath, new_authordir,
                                     os.path.basename(path))
            for file_format in localbook.data:
                os.renames(
                    os.path.join(
                        path_name,
                        file_format.name + '.' + file_format.format.lower()),
                    os.path.join(path_name,
                                 new_name + '.' + file_format.format.lower()))
                file_format.name = new_name
        except OSError as ex:
            log.error("Rename file in path %s to %s: %s", path, new_name, ex)
            log.debug(ex, exc_info=True)
            return _(
                "Rename file in path '%(src)s' to '%(dest)s' failed with error: %(error)s",
                src=path,
                dest=new_name,
                error=str(ex))
    return False
Esempio n. 60
0
# Rename script: images delivered by the UI carry @2x/@3x markers, which are
# not allowed in Android resource folder names. This script strips the
# offending characters and sorts the files into per-scale folders.
import os
import shutil
import sys
multiple1 = "@1x"
multiple1dirName = "1倍_m"
multiple2 = "@2x"
multiple2dirName = "2倍_x"
multiple3 = "@3x"
multiple3dirName = "3倍_xx"

for fileName in os.listdir(sys.path[0]):
    if fileName.find(multiple2) >= 0:
        if not os.path.exists(multiple2dirName):
            os.mkdir(multiple2dirName)
        newFileName = fileName.replace(multiple2, "")
        os.renames(fileName, newFileName)
        shutil.move(newFileName, multiple2dirName + "/" + newFileName)

    elif fileName.find(multiple3) >= 0:
        if not os.path.exists(multiple3dirName):
            os.mkdir(multiple3dirName)
        newFileName = fileName.replace(multiple3, "")
        os.renames(fileName, newFileName)
        shutil.move(newFileName, multiple3dirName + "/" + newFileName)

    else:
        # no scale marker means the 1x version
        if fileName.find("AutoRename") < 0:
            if not os.path.exists(multiple1dirName):
                os.mkdir(multiple1dirName)
            shutil.move(fileName, multiple1dirName + "/" + fileName)
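# The rename-then-move pair above can be a single shutil.move, and building
# absolute paths removes the dependence on the current working directory; a
# hedged sketch of the @2x branch (multiple2 and multiple2dirName as above):
import os
import shutil

base = os.path.dirname(os.path.abspath(__file__))  # the script's own folder
for name in os.listdir(base):
    if multiple2 in name:
        target_dir = os.path.join(base, multiple2dirName)
        os.makedirs(target_dir, exist_ok=True)
        # move and strip the marker in one step
        shutil.move(os.path.join(base, name),
                    os.path.join(target_dir, name.replace(multiple2, "")))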