def get_recipe_names(recipe_dir, search_tags=None):
    """Return the names of all recipe (.txt) files in a directory.

    This list represents the recipes available to the user.  When
    *search_tags* is given, only recipes whose tag set contains every
    search tag are returned; if nothing matches, the full list is
    returned as a fallback (preserving the original behavior).

    :param recipe_dir: which directory to search
    :param search_tags: optional list of tags that must all be present
    :return: a list of strings, each the name of a txt file in recipe_dir
        without the extension
    """
    # NOTE: default changed from a shared mutable ``[]`` to ``None``;
    # callers passing an explicit list (including []) behave the same.
    def _all_names():
        # One scan of the directory; was duplicated three times before.
        names = []
        for path in glob.glob(recipe_dir + "/*.txt"):
            _, fname = ntpath.split(path)
            # splitext instead of re.sub('.txt', ...): the regex form
            # treats '.' as "any character" and could mangle names that
            # merely contain a 'txt'-like sequence.
            names.append(ntpath.splitext(fname)[0])
        return names

    if not search_tags:
        return _all_names()

    recipe_names = []
    for name in _all_names():
        file_tags = get_tags_from_recipe_file(recipe_dir + "/" + name + ".txt")
        if set(search_tags).issubset(file_tags):
            recipe_names.append(name)

    # Fallback: when no recipe carries all requested tags, show everything.
    return recipe_names or _all_names()
    def build_paths(tiff_base, out, extents, work):
        """Provides the processing paths and makes the output directories."""

        base = os.path.join(out, '_'.join(extents), tiff_base[9:13])

        def workpath(suffix):
            # intermediate product in the working directory
            return os.path.join(work, tiff_base + suffix)

        def outpath(sub, suffix):
            # final product location under the output tree
            return os.path.join(base, sub, tiff_base + suffix)

        phs = {'SR': (workpath('.vrt'),
                      workpath('_sr.tif'),
                      outpath('SR', '_sr.tif')),
               'NBR': (workpath('_sr_nbr.tif'),
                       outpath('NBR', '_sr_nbr.tif')),
               'NDVI': (workpath('_sr_ndvi.tif'),
                        outpath('NDVI', '_sr_ndvi.tif')),
               'CFMASK': (workpath('_cfmask.tif'),
                          outpath('CFMASK', '_cfmask.tif')),
               'XML': (workpath('.xml'),
                       outpath('XML', '.xml')),
               'BNB': (outpath('SR', '_sr.tif'),
                       outpath('BNB', '_bnb.tif'))}

        # Create the output directory for each product's final path; the
        # SR entry keeps its final path in slot 2, the others in slot 1.
        for key, paths in phs.items():
            target = paths[2] if key == 'SR' else paths[1]
            base, _ = ntpath.split(target)
            if not os.path.exists(base):
                os.makedirs(base)

        return phs
    def _uploadImages(self, qemu_vm):
        """
        Upload hard drive images to Cloud Files.
        """

        # Show an indeterminate (0..0) progress dialog while the upload runs.
        progress = QtGui.QProgressDialog(
            "Uploading image file(s)", "Cancel", 0, 0, parent=self)
        progress.setWindowModality(QtCore.Qt.WindowModal)
        progress.setWindowTitle("Qemu image upload")
        progress.show()
        self._upload_image_progress_dialog = progress

        try:
            # Collect (source, destination) pairs for both disk images.
            uploads = []
            for key in ("hda_disk_image", "hdb_disk_image"):
                src = qemu_vm.get(key, None)
                if src:
                    _, filename = ntpath.split(src)
                    uploads.append((src, "images/qemu/{}".format(filename)))

            upload_thread = UploadFilesThread(self, MainWindow.instance().cloudSettings(), uploads)
            upload_thread.completed.connect(self._imageUploadComplete)
            upload_thread.start()
        except Exception as e:
            self._upload_image_progress_dialog.reject()
            import logging
            log = logging.getLogger(__name__)
            log.error(e)
            QtGui.QMessageBox.critical(self, "Qemu image upload", "Error uploading Qemu image: {}".format(e))
Example #4
0
def renames(old, new):
    """Super-rename: like rename(), but first creates any intermediate
    directories needed to make the new pathname good, and afterwards
    prunes directories corresponding to rightmost path segments of the
    old name until either the whole path is consumed or a nonempty
    directory is found.

    Note: this function can fail with the new directory structure made
    if you lack permissions needed to unlink the leaf directory or
    file.
    """
    new_head, new_tail = path.split(new)
    # Make sure the destination's parent directories exist first.
    if new_head and new_tail and not path.exists(new_head):
        makedirs(new_head)
    rename(old, new)
    old_head, old_tail = path.split(old)
    if old_head and old_tail:
        try:
            # Prune now-empty ancestors of the old location; stop quietly
            # at the first nonempty directory.
            removedirs(old_head)
        except error:
            pass
Example #5
0
    def run_files(self, files):
        """
        puts a file in the VM and then runs it
        :param files: a single path, or a list/tuple of paths
        :return: output of run_command for the generated shell command
        """
        self.put_files(files)

        if isinstance(files, (list, tuple)):
            filename = ''
            remote_path = ''
            for entry in files:
                head, tail = ntpath.split(entry)
                leaf = tail or ntpath.basename(head)
                filename += leaf + ' '
                remote_path += "~/scripts/" + leaf + "; "
        else:
            head, tail = ntpath.split(files)
            filename = tail or ntpath.basename(head)
            remote_path = "~/scripts/" + filename

        # chmod the uploads, move them into ~/scripts, then execute them
        command = 'chmod +x %s; ' \
                  'mkdir -p scripts;' \
                  'mv %s ~/scripts/ 2>/dev/null;' \
                  '%s' \
                  % (filename, filename, remote_path)
        return self.run_command(command)
Example #6
0
def split_path_name(path):
    """Split *path* into (head, tail); when the path ends with a
    separator (empty tail), split the head once more instead."""
    head, tail = ntpath.split(path)
    if tail:
        return head, tail
    return ntpath.split(head)
Example #7
0
 def do_plotfluxnet(self, mode="standard"):
     """ Plot FluxNet style time series of data."""
     self.do_progress(text='Doing FluxNet plots ...')
     stdname = "controlfiles/standard/fluxnet.txt"
     if mode == "standard" and os.path.exists(stdname):
         # Standard mode with a standard control file present: ask only
         # for the netCDF input file and point the control file at it.
         cf = qcio.get_controlfilecontents(stdname)
         filename = qcio.get_filename_dialog(path='../Sites', title='Choose a netCDF file')
         if len(filename) == 0 or not os.path.exists(filename):
             self.do_progress(text='Waiting for input ...')
             return
         if "Files" not in dir(cf):
             cf["Files"] = {}
         cf["Files"]["file_path"] = ntpath.split(filename)[0] + "/"
         cf["Files"]["in_filename"] = ntpath.split(filename)[1]
     else:
         # Otherwise let the user pick a full control file.
         self.do_progress(text='Loading control file ...')
         cf = qcio.load_controlfile(path='controlfiles')
         if len(cf) == 0:
             self.do_progress(text='Waiting for input ...')
             return
     self.do_progress(text='Plotting FluxNet style plots ...')
     qcplot.plot_fluxnet(cf)
     self.do_progress(text='Finished FluxNet plotting')
     logging.info(' Finished FluxNet plotting')
Example #8
0
 def do_plotfingerprint(self, mode="standard"):
     """ Plot fingerprint"""
     logging.info(' Starting fingerprint plot')
     self.do_progress(text='Doing fingerprint plot ...')
     stdname = "controlfiles/standard/fingerprint.txt"
     if mode == "standard" and os.path.exists(stdname):
         # Standard mode with a standard control file: ask only for the
         # netCDF input file and patch it into the control file.
         cf = qcio.get_controlfilecontents(stdname)
         filename = qcio.get_filename_dialog(path='../Sites', title='Choose a netCDF file')
         if len(filename) == 0 or not os.path.exists(filename):
             self.do_progress(text='Waiting for input ...')
             return
         if "Files" not in dir(cf):
             cf["Files"] = {}
         cf["Files"]["file_path"] = ntpath.split(filename)[0] + "/"
         cf["Files"]["in_filename"] = ntpath.split(filename)[1]
     else:
         # Otherwise let the user pick a full control file.
         self.do_progress(text='Loading control file ...')
         cf = qcio.load_controlfile(path='controlfiles')
         if len(cf) == 0:
             self.do_progress(text='Waiting for input ...')
             return
     # Fingerprint plotting runs interactively regardless of source.
     if "Options" not in cf:
         cf["Options"] = {}
     cf["Options"]["call_mode"] = "interactive"
     self.do_progress(text='Plotting fingerprint ...')
     qcplot.plot_fingerprint(cf)
     self.do_progress(text='Finished plotting fingerprint')
     logging.info(' Finished plotting fingerprint')
     logging.info("")
Example #9
0
 def do_climatology(self, mode="standard"):
     """
     Calls qcclim.climatology
     """
     logging.info(' Starting climatology')
     self.do_progress(text='Doing climatology ...')
     stdname = "controlfiles/standard/climatology.txt"
     if mode == "standard" and os.path.exists(stdname):
         # Standard mode with a standard control file: only the netCDF
         # input file needs choosing.
         cf = qcio.get_controlfilecontents(stdname)
         self.do_progress(text='Opening input file ...')
         filename = qcio.get_filename_dialog(path='../Sites', title='Choose a netCDF file')
         if len(filename) == 0:
             logging.info( " Climatology: no input file chosen")
             self.do_progress(text='Waiting for input ...')
             return
         if "Files" not in dir(cf):
             cf["Files"] = {}
         cf["Files"]["file_path"] = ntpath.split(filename)[0] + "/"
         cf["Files"]["in_filename"] = ntpath.split(filename)[1]
     else:
         # Otherwise let the user pick a full control file.
         self.do_progress(text='Loading control file ...')
         cf = qcio.load_controlfile(path='controlfiles')
         if len(cf) == 0:
             self.do_progress(text='Waiting for input ...')
             return
     self.do_progress(text='Doing the climatology')
     qcclim.climatology(cf)
     self.do_progress(text='Finished climatology')
     logging.info(' Finished climatology')
     logging.info("")
Example #10
0
def list_children_in_folder(path, isFolderSearch, client):
    """List the children of a remote (Dropbox-style) folder.

    :param path: remote folder path to inspect
    :param isFolderSearch: True to collect sub-folder info dicts,
        False to collect ``.pdf`` file names
    :param client: API client exposing ``metadata(path)``
    :return: list of {"folder_name", "folder_path"} dicts, or a list of
        .pdf file names
    """
    item_list = []

    try:
        folder_metadata = client.metadata(path)
    except Exception:
        # narrowed from a bare ``except`` so SystemExit/KeyboardInterrupt
        # are not swallowed
        print ('The Directory does not exist')
        sys.exit(-1)

    # BUG FIX: 'is_dir' holds a boolean (the loop below compares it to
    # True/False), so the old ``== "False"`` string comparison could never
    # be true and non-directories slipped through this guard.
    if not folder_metadata['is_dir']:
        print ('This is not a Directory')
        sys.exit(1)

    for entry in folder_metadata['contents']:
        # (the old code also called ntpath.basename() and discarded the
        # result -- removed as dead code)
        head, tail = ntpath.split(entry['path'])
        if isFolderSearch:
            if entry['is_dir']:
                folder_info = {"folder_name": decode_string(tail),
                               "folder_path": decode_string(entry["path"])
                               }
                item_list.append(folder_info)
        else:
            if not entry['is_dir'] and tail.endswith('.pdf'):
                item_list.append(tail)

    return item_list
Example #11
0
    def update_nis(self):
        """Update the deployed NIS web application under Tomcat.

        Backs up the current deployment to a timestamped zip first, then
        either replaces the whole webapp folder (``self.replace_all``) or
        merges changed files one by one, and finally clears Tomcat's
        ``work`` cache directory.  Progress and errors are reported via
        ``self.info``/``self.error`` (user-facing messages are Chinese).
        """
        # self.oldpath points at the deployed webapp; two ntpath.split
        # calls walk up to webapps/ and then to the Tomcat home directory.
        webapps_dir = ntpath.split(self.oldpath)[0]
        TOMCAT_HOME = ntpath.split(webapps_dir)[0]

        # Backup: zip the current deployment as "nis-backup <timestamp>.zip"
        # directly under TOMCAT_HOME before touching anything.
        self.info("开始备份nis系统...")
        self.zip_dir(self.oldpath, r"%s/nis-backup %s.zip" % (TOMCAT_HOME, time.strftime('%Y-%m-%d %H-%M-%S', time.localtime(time.time()))))
        self.info("nis系统备份完成")
        try:
            if self.replace_all == True:
                # Full-replace mode: wipe the old deployment and copy the
                # new tree over wholesale.
                self.info("更新模式为直接替换整个文件夹...")

                self.info("清空目录%s" % self.oldpath)
                shutil.rmtree(self.oldpath, True, None)
                self.info("目录%s清空完成" % self.oldpath)

                self.info("开始将新文件%s拷贝到%s下" % (self.newpath, self.oldpath))
                shutil.copytree(self.newpath, self.oldpath, True)
                self.info("文件拷贝完成!")

                # Clear Tomcat's compiled-JSP/work cache so stale classes
                # are not served after the update.
                self.info("开始清空work目录...")
                shutil.rmtree(r"%s/work" % TOMCAT_HOME, True, None)
                self.info("work目录清空成功,NIS更新结束!")

                return 
            else:
                # Per-file mode: diff old vs new trees and apply updates
                # individually, then reconcile the two config XML files.
                self.info("更新模式为单文件分析...")
                self.info("开始分析文件目录结构...")
                alllist = walk_list(self.oldpath, self.newpath)

                self.info("开始更新...")
                self.procee_walklist(alllist)
                self.info("常规更新结束")

                self.process_force_delete_update_direcory()

                self.info("开始分析orcus_web.xml...")
                self.merge_orcusweb_xml()
#                 self.info("orcus_web.xml更新结束")
                self.info("orcus_web.xml更新提醒结束")

                self.info("开始分析diagnosis.xml...")
                self.merge_diagnosis_xml()
                self.info("diagnosis.xml更新提醒结束")

                self.info("开始清空work目录...")
                shutil.rmtree(r"%s/work" % TOMCAT_HOME, True, None)
                self.info("work目录清空成功!")

                self.info("NIS系统更新结束")
                return

        except Exception as ex:
            self.error("系统更新出错:%s" % ex)
        finally:
            # Always emit the end-of-run marker, even on failure.
            self.line_finished()
Example #12
0
def canonalize_path(path):
    """Convert a win32 path into its unix (forward-slash) form."""
    head, tail = ntpath.split(path)
    segments = [tail]
    # Peel one component per iteration until the head is exhausted.
    while head and tail:
        head, tail = ntpath.split(head)
        segments.append(tail)
    segments.append(head)
    segments.reverse()
    return posixpath.join(*segments)
def sep_file_and_path(path):
    """Handle paths with ending slash

    :type path: str
    :param path: the path to separate
    :return: filename and path tuple
    :rtype: tuple
    """
    head, tail = ntpath.split(path)
    if tail:
        return head, tail
    # BUG FIX: the old code returned ``(head, ntpath.split(head))`` for a
    # trailing-slash path -- a (str, tuple) pair instead of the documented
    # (path, filename) tuple.  Re-split the head so the shape is uniform
    # (matching split_path_name elsewhere in this file).
    return ntpath.split(head)
Example #14
0
def findPackageName(absolutePathOfCommandScript):
    """Finds the package name of the software package in which the command
    script resides (the directory three levels up from the script).
    Returns the package Name."""
    remaining = absolutePathOfCommandScript
    # Walk up three path components; the third tail is the package name.
    for _ in range(3):
        remaining, leaf = ntpath.split(remaining)
    return leaf
Example #15
0
def get_dbnames():
    """Locate the available ``*.db`` database files.

    Looks in ``../tlogx/data`` first, then falls back to the current
    directory.

    :return: ``(directory, [file names])``, or ``None`` when no database
        file is found (original behavior preserved).
    """
    logger.debug("getting dblist")
    dbfiles = sorted(glob.glob(u'../tlogx/data/*.db'))
    # glob always returns a list, so only emptiness needs checking (the
    # old ``dbfiles == None`` test could never be true).
    if not dbfiles:
        dbfiles = sorted(glob.glob(u'*.db'))
    if not dbfiles:
        logger.critical(u'no databases found')
        return
    path = ntpath.split(dbfiles[0])[0]
    dbnames = [ntpath.split(dbfile)[1] for dbfile in dbfiles]
    return path, dbnames
Example #16
0
def findRelativeFolder(rootDir, file):
    """Return the folder holding *file* within the tree ending in *rootDir*,
    as ``<dir-ending-in-rootDir>\\<component>`` (Windows separators).

    NOTE(review): if no ancestor of *file* ends with *rootDir*, the while
    loop below never terminates (ntpath.split eventually yields ('', '')
    and '' only ends with an empty rootDir) -- confirm callers guarantee
    the file lives under rootDir.
    """
    # directory that directly contains the file
    relativeDir = ntpath.split(file)[0]

    if(relativeDir.endswith(rootDir)):
        # file sits immediately inside rootDir
        return relativeDir+"\\"+ntpath.basename(file)
    else:
        # climb towards rootDir, remembering the last component peeled off
        while not relativeDir.endswith(rootDir):
            relativeDirTuple = ntpath.split(relativeDir)
            relativeDir = relativeDirTuple[0]
            folderName = relativeDirTuple[1]
        try:
            return relativeDir+"\\"+folderName
        except UnboundLocalError:
            # NOTE(review): this branch looks unreachable -- the else arm
            # guarantees at least one loop iteration binds folderName;
            # kept as a defensive fallback.  TODO confirm.
            return relativeDir+"\\"+ntpath.basename(file) #If file is not in any folder
Example #17
0
def get_filename_from_path(path):
    """Cross-platform filename extraction from path.
    @param path: file path.
    @return: filename.
    """
    dirpath, filename = ntpath.split(path)
    if filename:
        return filename
    # trailing separator: the last component lives in the head
    return ntpath.basename(dirpath)
Example #18
0
    def get_pixeldata_head_root(texture_filename):
        r"""Transform NiSourceTexture.file_name into something workable:
        a lower-cased, forward-slash (head, root) pair, e.g.
        r"textures\test.tga" -> ('textures', 'test').
        Falls back to ("", "image") when the name has no stem.
        """
        # note: have to use ntpath here so we can split correctly --
        # nif convention always uses windows style paths
        head, tail = ntpath.split(texture_filename)
        root = ntpath.splitext(tail)[0]
        # for linux: case-insensitive matching via lower case, and
        # backslashes converted to forward slashes
        head = head.lower().replace("\\", "/")
        root = root.lower()
        # XXX following is disabled because not all textures in Bully
        # XXX actually have this form; use "-a textures" for this game
        # make relative path for Bully SE
        #tmp1, tmp2, tmp3 = head.partition("\\bully\\temp\\export\\")
        #if tmp2:
        #    head = tmp3
        if not root:
            return ("", "image")
        return (head, root)
def convertDirMP3ToWav(dirName, Fs, nC, useMp3TagsAsName = False):
	'''
	This function converts the MP3 files stored in a folder to WAV. If required, the output names of the WAV files are based on MP3 tags, otherwise the same names are used.
	ARGUMENTS:
	 - dirName:		the path of the folder where the MP3s are stored
	 - Fs:			the sampling rate of the generated WAV files
	 - nC:			the number of channels of the generated WAV files
	 - useMp3TagsAsName: 	True if the WAV filename is generated on MP3 tags
	NOTE (review): Python 2 code (``print`` statement, str.decode chain)
	that shells out to the external ``avconv`` tool and reads tags with
	the third-party eyeD3 library.
	'''

	types = (dirName+os.sep+'*.mp3',) # the tuple of file types
	filesToProcess = []

	tag = eyeD3.Tag()	

	# collect every file matching the patterns in ``types``
	for files in types:
		filesToProcess.extend(glob.glob(files))		

	for f in filesToProcess:
		tag.link(f)
		if useMp3TagsAsName:
			artist = tag.getArtist()
			title = tag.getTitle()
			if len(title)>0 and len(artist)>0:
				# "<artist> --- <title>.wav" next to the source file;
				# commas are stripped since avconv args are comma-hostile
				wavFileName = ntpath.split(f)[0] + os.sep + artist.replace(","," ") + " --- " + title.replace(","," ") + ".wav"
			else:
				wavFileName = f.replace(".mp3",".wav")	
		else:
			wavFileName = f.replace(".mp3",".wav")		
		command = "avconv -i \"" + f + "\" -ar " +str(Fs) + " -ac " + str(nC) + " \"" + wavFileName + "\"";
		print command
		# the decode/encode dance drops non-ascii characters before the
		# command is handed to the shell
		os.system(command.decode('unicode_escape').encode('ascii','ignore').replace("\0",""))
 def check_filename(data):
     """Return True when the (report, file_obj) pair's file name matches
     the module-level ``source_file_name`` being searched for."""
     report, file_obj = data
     _, candidate = ntpath.split(file_obj.filepath)
     return candidate == source_file_name
Example #21
0
def secure_store(requests_files, user, form_file_name):
    """Safely store an uploaded file from a request.

    :param requests_files: the request's files mapping (e.g. request.files)
    :param user: object providing first_name / last_name / email
    :param form_file_name: form field name (also used as the file-type tag)
    :return: False when no usable file was supplied; a dict describing the
        stored file on success; None for a disallowed extension (the
        original fall-through, preserved for callers that truth-test it).
    """
    # check if the post request has the file part
    if form_file_name not in requests_files:
        # User gave no file.  That's fine.
        return False

    # BUG FIX: look the file up in the ``requests_files`` argument that was
    # just membership-tested, not in the global ``request.files`` the old
    # code read (the parameter was otherwise ignored).
    upload = requests_files[form_file_name]
    # if user does not select file, browser also
    # submit a empty part without filename
    if upload.filename == '':
        # User gave no file.  That's fine.
        return False

    if upload and allowed_file(upload.filename):
        _, extension = os.path.splitext(upload.filename)
        new_filename = "{fname}_{lname}_{email}_{filetype}{ext}".format(
            fname=user.first_name,
            lname=user.last_name,
            email=user.email,
            filetype=form_file_name,
            ext=extension)
        new_filename = ntpath.split(new_filename)[-1]  # in case the user tries to manipulate the path
        new_filename = new_filename.replace(' ', '')  # Replace any unintentional spaces
        full_file_path = os.path.join(app.config['UPLOAD_FOLDER'], new_filename)
        print("Resume uploaded - " + full_file_path)
        upload.save(full_file_path)
        return {
            "action": "uploaded",
            "filename": full_file_path
        }
    # disallowed extension: fall through returning None (original behavior)
Example #22
0
 def getExtBugDb(self,bug_db):
     """Return the external bug database.

     If *bug_db* was already supplied it is returned unchanged.
     Otherwise (Defects4J workflow) the snapshot-sha CSV configured in the
     ``Szz`` section is read and a ``{(project, bugfix_sha): 'Bug'}``
     mapping is built and returned.

     NOTE(review): ``project.strip('"') == project`` compares a CSV row
     field against itself (true whenever the field has no surrounding
     quotes); it looks like it was meant to compare against this
     project's name -- ``project_name`` above is computed but never used.
     Confirm before relying on the filter.
     NOTE(review): ``csvreader.next()`` is Python 2 iterator syntax.
     """
     if not bug_db is None:
         return bug_db

     #Following is for D4j
     _ , project_name = ntpath.split(self.project)
     #print ">>>>>> " , project_name

     sha_list = {}

     config_szz  = self.configInfo.cfg.ConfigSectionMap('Szz')
     d4_sha_file = config_szz['snapshot_sha_file']

     with open(d4_sha_file, 'r') as csvfile:
         csvreader = csv.reader(csvfile, delimiter=',', quotechar='|')
         # skip the CSV header row
         csvreader.next()
         for row in csvreader:
             bug_no,buggy_sha,bugfix_sha,project = row[:]       

             if project.strip("\"") == project:
                 #print bug_no,buggy_sha,bugfix_sha,project
                 # strip the quoting around the sha before keying on it
                 bugfix_sha = bugfix_sha.strip("\"")
                 sha_list[(project,bugfix_sha)] = 'Bug'

     return sha_list
Example #23
0
def create_copy(file_path):
    """Generate a COBOL copybook from a SQL table definition file.

    Reads the CHAR/NUMBER column lines from *file_path* and writes one
    '03'-level PIC clause per column into ``d:\\vssflile\\<TABLE>MAST``
    (the output directory is hard-coded).

    NOTE(review): neither file handle is closed if an exception occurs
    mid-loop, and writing ``str`` to a file opened 'wb' only works on
    Python 2 -- confirm the target interpreter.
    """
    # matches the parenthesised length spec, e.g. " (10,2) "
    length_pattern = re.compile(' \([^)]*\) ')
    sql_file = open(file_path)
    head,tail = ntpath.split(file_path)
    # table name = upper-cased file stem (text before the first '.')
    table_name = tail.upper()[0:tail.find('.')]
    copy = open(r'd:\vssflile\\' + table_name + 'MAST','wb')
    #copy.write(' '*7 + '01 ' + table_name + '-RECORD.\n')
    for line in sql_file.readlines():
        # only column-definition lines are interesting
        if not('CHAR' in line or 'NUMBER' in line):
            continue
        for i in range(1,len(line)):
            if line[i] == ' ':
                # COBOL item name: TABLE-COLUMN with '_' mapped to '-'
                variable = ' '*11 + '03 ' + table_name + '-' + line[1:i].upper().replace('_','-') + ' PIC '
                variable_length =  re.findall(length_pattern,line[1:])[0].replace('(','').replace(')','').strip()
                if 'CHAR' in line:
                    variable += 'X(' + str(variable_length) + ').\n'
                else:
                    # NUMBER(p,s): split precision/scale into 9(i)V9(d) COMP-3
                    comma_index = variable_length.find(',')
                    decimal_lenght = int(variable_length[comma_index+1:]) if comma_index>0 else 0
                    comma_index = len(variable_length) if comma_index < 0 else comma_index
                    integer_length = int(variable_length[0:comma_index]) - decimal_lenght
                    variable += '9(' + str(integer_length) + ')V9(' + str(decimal_lenght) + ') COMP-3.\n'
                copy.write(variable)
                break
    sql_file.close()
    copy.close()
Example #24
0
    def allocate_next_uncategorized(self):
        """Allocate next uncategorized document name and rename self into it

        This function only should be called upon storing a file and being certain in it.
        It allocates a next free code for setting it into filename, also adding self.options variable
        'uncategorized_filename' with original stored filename (e.g.: for indexes storage)"""
        if self.get_docrule().uncategorized:
            # Forcing filesystem name for file
            document_file = self.get_file_obj()
            original_name = document_file.name
            # next free barcode becomes the new file stem
            code = self.get_docrule().allocate_barcode()
            # In case we have a path instead of a filename with extension
            if os.path.sep in original_name:
                head, tail = ntpath.split(original_name)
                original_name = tail or ntpath.basename(head)
            original_code, extension = os.path.splitext(original_name)
            new_name = code + extension
            if document_file:
                # remember the user's original name, e.g. for index storage
                self.update_options({'uncategorized_filename': original_name})
            if not self.mimetype:
                # populate the mimetype lazily before renaming
                self.get_mimetype()
            if 'barcode' in self.options:
                self.set_option('barcode', code)
            self.set_filename(new_name)
            self.set_full_filename(new_name)
        return self
Example #25
0
def prepare_step_paths(datastore, tempdir):
    """Copy the bundled 'steps' directory (shipped next to this module)
    into *tempdir* and return the local and S3 working paths:
    (local steps path, S3 steps path, S3 temp path)."""
    module_dir = ntpath.split(os.path.abspath(__file__))[0]
    shutil.copytree(os.path.join(module_dir, 'steps'), os.path.join(tempdir, 'steps'))
    local_step_path = '%s/steps/' % tempdir
    artifacts = datastore.data.s3_artifacts_path
    return local_step_path, artifacts + '/steps', artifacts + '/_temp'
def kill_path(path):
    """
    :param path: full path, possibly with a trailing separator
    :return: file without path
    """
    head, tail = ntpath.split(path)
    if tail:
        return tail
    # trailing separator: the leaf is the head's last component
    return ntpath.basename(head)
  def _SmbclientRemoteCopy(self, local_path, remote_path,
                           copy_to, network_drive):
    """Copies a file to or from the VM using smbclient.

    Args:
      local_path: Local path to file.
      remote_path: Optional path of where to copy file on remote host.
      copy_to: True to copy to vm, False to copy from vm.
      network_drive: The smb specification for the remote drive
          (//{ip_address}/{share_name}).

    Raises:
      RemoteCommandError: If there was a problem copying the file.
    """
    local_directory, local_file = os.path.split(local_path)
    remote_directory, remote_file = ntpath.split(remote_path)

    # Change into both directories first, then issue one put/get.
    parts = ['cd %s; lcd %s; ' % (remote_directory, local_directory)]
    if copy_to:
      parts.append('put %s %s' % (local_file, remote_file))
    else:
      parts.append('get %s %s' % (remote_file, local_file))
    smb_command = ''.join(parts)

    smb_copy = [
        'smbclient', network_drive,
        '--max-protocol', 'SMB3',
        '--user', '%s%%%s' % (self.user_name, self.password),
        '--port', str(self.smb_port),
        '--command', smb_command
    ]
    stdout, stderr, retcode = vm_util.IssueCommand(smb_copy)
    if retcode:
      raise errors.VirtualMachine.RemoteCommandError(
          'Got non-zero return code (%s) executing %s\n'
          'STDOUT: %sSTDERR: %s' %
          (retcode, smb_copy, stdout, stderr))
Example #28
0
 def load_project(self, filename):
     """Load the MLN project at *filename* and refresh the GUI state.

     Restores the stored query configuration and repopulates the
     MLN / EMLN / DB file selectors; when the file does not exist, logs
     an error and starts a fresh project instead.
     """
     if filename and os.path.exists(filename):
         # project directory doubles as both working dir and project dir
         projdir, _ = ntpath.split(filename)
         self.dir = os.path.abspath(projdir)
         self.project_dir = os.path.abspath(projdir)
         self.project = MLNProject.open(filename)
         self.project.addlistener(self.project_setdirty)
         self.reset_gui()
         self.set_config(self.project.queryconf.config)
         self.mln_container.update_file_choices()
         self.db_container.update_file_choices()
         # default each selector to the configured file, falling back to
         # the first available entry; clear the dirty flag afterwards
         if len(self.project.mlns) > 0:
             self.mln_container.selected_file.set(self.project.queryconf['mln'] or list(self.project.mlns.keys())[0])
         self.mln_container.dirty = False
         if len(self.project.emlns) > 0:
             self.emln_container.selected_file.set(self.project.queryconf['emln'] or list(self.project.emlns.keys())[0])
         self.emln_container.dirty = False
         if len(self.project.dbs) > 0:
             self.db_container.selected_file.set(self.project.queryconf['db'] or list(self.project.dbs.keys())[0])
         self.db_container.dirty = False
         self.write_gconfig(savegeometry=False)
         self.settings_dirty.set(0)
         self.project_setdirty(dirty=False)
         self.changewindowtitle()
     else:
         logger.error('File {} does not exist. Creating new project...'.format(filename))
         self.new_project()
def convertFsDirWavToWav(dirName, Fs, nC):
	'''
	This function converts the WAV files stored in a folder to WAV using a different sampling freq and number of channels.
	ARGUMENTS:
	 - dirName:		the path of the folder where the WAVs are stored
	 - Fs:			the sampling rate of the generated WAV files
	 - nC:			the number of channels of the generated WAV files
	NOTE (review): Python 2 code (``print`` statement) that shells out to
	the external ``avconv`` tool; results go into a Fs<rate>_NC<ch>
	subfolder which is recreated from scratch on every run.
	'''

	types = (dirName+os.sep+'*.wav',) # the tuple of file types
	filesToProcess = []

	# collect every file matching the patterns in ``types``
	for files in types:
		filesToProcess.extend(glob.glob(files))		

	# output folder name encodes the target rate/channels; wipe and recreate
	newDir = dirName + os.sep + "Fs" + str(Fs) + "_" + "NC"+str(nC)
	if os.path.exists(newDir) and newDir!=".":
		shutil.rmtree(newDir)	
	os.makedirs(newDir)	

	for f in filesToProcess:	
		_, wavFileName = ntpath.split(f)	
		command = "avconv -i \"" + f + "\" -ar " +str(Fs) + " -ac " + str(nC) + " \"" + newDir + os.sep + wavFileName + "\"";
		print command
		os.system(command)
Example #30
0
def get_directory(directory, extractFiles):
    """Resolve *directory* to a usable data folder.

    When *directory* is a recognised repository archive (tar-family)
    rather than a folder, optionally extract it next to itself into a
    prefixed extraction folder and return that folder's path; otherwise
    the (stripped) input path is returned unchanged.
    NOTE(review): ``print`` statement implies Python 2.
    """
    directory = directory.strip()

    #~ if directory.endswith('.zip'):
        #~ archive = zipfile.ZipFile(directory)
        #~ for elem in archive.namelist():
            #~ if elem.endswith('.info'):
                #~ (root,elem) = os.path.split(elem)
                #~ filelist = IndexFile(root,elem,archive)
    if not os.path.isdir(directory) and is_recognized_repository_filetype(directory):
        # strip the archive suffix (everything from '.t' onwards) and build
        # the extraction folder's name from the configured prefix
        head, tail = ntpath.split(directory[:directory.find('.t')])
        dirname = head + os.sep + genericsettings.extraction_folder_prefix + tail
        # extract only if extracted folder does not exist yet or if it was
        # extracted earlier than last change of archive:
        if (extractFiles):        
            if ((not os.path.exists(dirname))
                    or (os.path.getmtime(dirname) < os.path.getmtime(directory))): 
                tarfile.TarFile.open(directory).extractall(dirname)
                # TarFile.open handles tar.gz/tgz
                print '    archive extracted to folder', dirname, '...'
        directory = dirname
            # archive = tarfile.TarFile(directory)
            # for elem in archivefile.namelist():
            #    ~ if elem.endswith('.info'):
            #        ~ (root,elem) = os.path.split(elem)
            #        ~ filelist = IndexFile(root,elem,archive)

    return directory
Example #31
0
# Classify images batch by batch and move each confidently-predicted image
# into the output directory of its predicted label.
total = 0
batch_index = 0
for batch in batches:
    print('getting predictions on batch ' + str(batch_index))
    images = []
    for image_file in batch:
        # load, scale to [0, 1] and shape to (image_size, image_size, 3)
        img = image.load_img(image_file, target_size=(image_size, image_size))
        img = image.img_to_array(img)
        img = img.astype('float32')
        img /= 255.0
        img = img.reshape(image_size, image_size, 3)
        images.append(img)
    result = model.predict(np.array(images), batch_size=len(batch))
    total += len(batch)
    batch_index += 1
    print('prediction done on batch ' + str(batch_index) + ', total of ' +
          str(total) + ' images')
    image_index = 0
    for image_result in result:
        for result_index in range(len(image_result)):
            # move the image on the first class scoring >= 0.9; images with
            # no confident class stay where they are
            if image_result[result_index] >= 0.9:
                head, tail = ntpath.split(batch[image_index])
                shutil.move(
                    batch[image_index],
                    os.path.join(args.output_dir, labels[result_index], tail))
                break
        image_index += 1

    # NOTE(review): this prints once per batch, not once at the end --
    # confirm whether it was meant to sit outside the loop.
    print('finished moving images to category destinations')
print(str(total))
def saveColorBar(bar):
    """Write *bar* next to the input image as '<name>-colorbar.jpg'."""
    head, tail = ntpath.split(args["image"])
    stem = tail.split(".")[0]
    output = "{}/{}-colorbar.jpg".format(head, stem)
    print("Color bar stored in %s" % output)
    cv2.imwrite(output, bar)
Example #33
0
 def path_leaf(self, path):
     """Remember *path* and its (head, tail) split on self, then return
     the leaf name (tolerating a trailing separator)."""
     self.path = path
     self.head, self.tail = ntpath.split(self.path)
     if self.tail:
         return self.tail
     return ntpath.basename(self.head)
Example #34
0
def file_from_path(path):
    """Return just the file-name component of *path*."""
    return split(path)[1]
Example #35
0
def get_movie_name(file):
    """Return the movie file's base name without directory or extension."""
    # The original split the already-directory-free name a second time,
    # which is a no-op; one split plus splitext is sufficient.
    name = ntpath.split(file)[1]
    return os.path.splitext(name)[0]
 def _getFileNameFromPath(self, path):
     head, tail = ntpath.split(path)
     return tail or ntpath.basename(head)
Example #37
0
# PARSE FILENAME (TAKES FULL PATH AND CURRENT DIR)
# NOTE(review): ``arch``, ``donut_id`` and ``b64e`` come from earlier in
# the file (not visible here) -- presumably the target architecture label,
# donut's arch id, and base64.b64encode; confirm.

filename = os.fspath(sys.argv[1])
print(f"[!] Generating {arch} shellcode from file:\n    (unknown)")
# files in the current directory; used to decide whether the argument was
# a bare name that should resolve locally
files = [f for f in os.listdir('.') if os.path.isfile(f)]

if filename not in files:
    # not a local file: donut still accepts a full path; ``sc`` is None
    # when the file cannot be found anywhere
    sc = donut.create(file=filename, arch=donut_id)
    if sc is None:
        print(f"\n[-] Cannot find file:\n    (unknown)\n\
    Ensure that file is either in current directory or full path is specified!"
              )
        sys.exit(1)
else:
    sc = donut.create(file=filename, arch=donut_id)

# WRITE SHELLCODE
# output name: <input stem><arch>.bin in the current working directory
head, tail = ntpath.split(filename)
sc_filename = tail.split('.')[0] + '{}'.format(arch) + '.bin'
sc_filepath = os.path.join(os.getcwd(), sc_filename)
filesc = open(sc_filepath, 'wb')
length = filesc.write(sc)
filesc.close()
print(f"[+] Shellcode of {length} bytes written in:\n    {sc_filepath}")

# WRITE BASE64 VERSION
fileb = open(sc_filepath + '.b64', 'w')
fileb.write(b64e(sc).decode())
fileb.close()
print(f"[+] Base64 version is written to:\n    {sc_filepath}.b64")
Example #38
0
def path_leaf(path):
    """Return the last component of *path*, even if it ends with a separator."""
    directory, leaf = ntpath.split(path)
    return leaf if leaf else ntpath.basename(directory)
Example #39
0
    def __init__(self):
        """Set up the training-image directories and segment-size tracking."""
        settings = Settings()
        # BUGFIX: the original line was missing its closing parenthesis
        # (a syntax error); dirname(...) is restored as intended.
        self.trainingImageDir = dirname(settings.app_path + "\\nn_training_images\\")
        self.splitTrainingImageFolderName = 'split_training_images'
        self.splitTrainingImageDir = self.trainingImageDir + self.splitTrainingImageFolderName + "\\"

        self.checkAndMakeDir(self.trainingImageDir)
        self.checkAndMakeDir(self.splitTrainingImageDir)

        # Minimum pixel area for a segment to count (filters commas/periods).
        self.minSegmentArea = 600
        # Largest segment dimensions seen so far (see trackMaxSegmentSize).
        self.maxSegmentWidth = 0
        self.maxSegmentHeight = 0

    def checkAndMakeDir(self, directory):
        """Create *directory* (and parents) unless it already exists."""
        if os.path.exists(directory):
            return
        os.makedirs(directory)

    def getImageList(self):
        """Return full paths (dir prefix + name) of every raw training image,
        excluding the split-output folder itself."""
        #@TODO: store images already converted, and filter them from this list
        imageNames = os.listdir(self.trainingImageDir)
        imageNames.remove(self.splitTrainingImageFolderName)
        return map(lambda imageName: self.trainingImageDir + imageName, imageNames)

    def processImages(self, imageFilePaths, objectColor, threshold):
        processedImages = []
        for i, imageFilePath in zip(range(0, len(imageFilePaths) - 1), imageFilePaths):
            if i%100 == 0.0:
                print 'Chopped', i, 'images so far...'
            processedImages.append(self.processImage(imageFilePath, objectColor, threshold))
        return processedImages

    def processImage(self, imageFilePath, objectColor, threshold):
        """Chop one training image into per-digit segment images on disk.

        Delegates to processImages when handed a list.  The file name is
        split on '_' into (value, rand) -- presumably "<label>_<random>.<ext>",
        so *value* supplies one label character per segment; TODO confirm
        against whatever produces the training images.

        :param imageFilePath: path to the image (or a list of paths).
        :param objectColor: colour forwarded to segmentImage.
        :param threshold: colour-distance tolerance forwarded to segmentImage.
        :return: [value, res, segments]; res is a placeholder (see @TODO).
        """
        # A list argument means "process the whole batch" -- recurse.
        if type(imageFilePath) == list:
            return self.processImages(imageFilePath, objectColor, threshold)
        img = cv2.imread(imageFilePath)
        head, tail = ntpath.split(imageFilePath)
        # "rand" still carries the file extension after this split.
        value, rand = tail.split('_')
        res = [0, 0]  # @TODO: return original source image resolution
        # Normalize to a fixed height so segment areas are comparable.
        imgBig = self.scaleImageToHeight(img, 100)
        segments = self.segmentImage(imgBig, objectColor, threshold)
        # Pair each label character with its segment and write it out.
        for val, segment in zip(value, segments):
            #cv2.imshow('asd', numpy.asarray(segment[0]))
            #cv2.waitKey(0)
            #cv2.destroyAllWindows()

            cv2.imwrite(self.splitTrainingImageDir + val + '_' + rand + '.png', segment[0])
            s = segment[0]
            h = len(s)
            w = len(s[0])
            self.trackMaxSegmentSize(w, h)
        return [value, res, segments]

    def trackMaxSegmentSize(self, width, height):
        """Fold *width*/*height* into the running per-instance maxima."""
        self.maxSegmentWidth = max(self.maxSegmentWidth, width)
        self.maxSegmentHeight = max(self.maxSegmentHeight, height)

    def segmentImage(self, img, objectColor, threshold):
        """Split *img* into character slices: vertical cuts first, then each
        non-empty column is cut horizontally; empty results are dropped."""
        results = []
        for column in self.segmentImageVertical(img, objectColor, threshold):
            if not len(column):
                continue
            rows = self.segmentImageHorizontal(column, objectColor, threshold)
            if len(rows):
                results.append(rows)
        return results

    def scaleImageToHeight(self, img, newHeight):
        """Resize *img* to *newHeight* pixels tall, preserving aspect ratio."""
        oldHeight = float(img.shape[0])
        oldWidth = float(img.shape[1])
        scale = newHeight / oldHeight
        scaledWidth = int(round(oldWidth * scale, 0))
        return cv2.resize(img, (scaledWidth, newHeight))

    def segmentImageHorizontal(self, img, objectColor, threshold):
        """Slice *img* into horizontal bands containing objectColor.

        Scans rows top-to-bottom; a row "matches" when any pixel in it is
        within *threshold* of *objectColor*.  Consecutive matching rows form
        a [top, bottom] band, each cut out as a full-width slice.  Bands
        smaller than minSegmentArea are discarded.

        :return: numpy array of image slices.
        """
        imgBig = img
        height = len(imgBig)
        width = len(imgBig[0])
        horizontalBounds = []
        topSide = -1  # -1 means "not currently inside a matching band"
        matched = False
        for y in range(0, height - 1):
            for x in range(0, width - 1):
                if self.colorMatch(imgBig[y][x], objectColor, threshold):  #we found objectColor
                    matched = True
            if matched:
                if topSide < 0:  #we don't have a value for the topSide yet, so this must be the first horizontal
                    topSide = max(y - 1, 0)  #move one up (if we can) to capture any potential antialiasing we may have missed through threshold

            else:  #we're off objectColor
                if topSide >= 0:  #if we've already found the topSide, then this must be the bottom one
                    horizontalBounds.append([topSide, y])
                    topSide = -1
            matched = False

        #print 'image dimensions: ' + str(width) + 'x' + str(height), len(imgBig[0][0])
        imageSlices = []
        for horizontalBound in horizontalBounds:
            y = horizontalBound[0]
            h = horizontalBound[1] - y
            x = 0
            w = width
            if w * h > self.minSegmentArea:  #remove pesky commas/periods from the images
                imageSlices.append(numpy.array(imgBig[y:y + h, x:x + w]))
                #cv2.rectangle(imgBig, (x, y), (x + w, y + h), (0, 255, 0), 1)
        return numpy.array(imageSlices)

    def segmentImageVertical(self, img, objectColor, threshold):
        """Slice *img* into vertical bands containing objectColor.

        Mirror of segmentImageHorizontal, scanning columns left-to-right.
        NOTE(review): unlike the horizontal version this appends x + 1 as the
        right bound and returns a plain list (not a numpy array) -- preserved
        as-is; confirm the asymmetry is intentional.

        :return: list of full-height image slices.
        """
        imgBig = img
        height = len(imgBig)
        width = len(imgBig[0])
        verticalBounds = []
        leftSide = -1  # -1 means "not currently inside a matching band"
        matched = False

        for x in range(0, width - 1):
            for y in range(0, height - 1):
                if self.colorMatch(imgBig[y][x], objectColor, threshold):  #we found objectColor
                    matched = True
            if matched:
                if leftSide < 0:  #we don't have a value for the left side yet, so this must be the first vertical
                    leftSide = max(x - 1, 0)  #move one left (if we can) to capture any potential antialiasing we may have missed through threshold

            else:  #we are off objectColor
                if leftSide >= 0:  #if we've already found the left side, then this must be the right one
                    verticalBounds.append([leftSide, x + 1])
                    leftSide = -1
            matched = False

        #print 'image dimensions: ' + str(width) + 'x' + str(height), len(imgBig[0][0])
        imageSlices = []
        for verticalBound in verticalBounds:
            x = verticalBound[0]
            w = verticalBound[1] - x
            y = 0
            h = height
            if w * h > self.minSegmentArea:  #remove pesky commas/periods from the images
                imageSlices.append(numpy.array(imgBig[y:y + h, x:x + w]))
                #cv2.rectangle(imgBig, (x, y), (x + w, y + h), (0, 255, 0), 1)
        return imageSlices

    def colorMatch(self, color1, color2, threshold):
        """Return True when the Euclidean distance between the two colour
        tuples is at most *threshold*."""
        squares = [(c1 - c2) ** 2 for c1, c2 in zip(color1, numpy.asarray(color2))]
        return pow(sum(squares), 0.5) <= threshold


# Entry point: chop every raw training image into per-digit segment images.
# Python 2 script (note the print statements).
print 'Conversion of images beginning...'
train = nnTraining()
imgList = train.getImageList()
imgPaths = imgList  #[0:10]
#imgPaths = './nn_training_images/1462_1417536191-38695.png'
print 'Found', len(imgPaths), 'images to chop'
print 'Chopping...'
# Match colour [0, 0, 0] with a distance tolerance of 350.
train.processImage(imgPaths, [0, 0, 0], 350)
print 'Chopping completed.'
print 'Max digit image dimensions:', train.maxSegmentWidth, train.maxSegmentHeight

cv2.waitKey(0)
Example #40
0
def filename_from_path(path):
    """Extract the file name from *path*, tolerating a trailing separator."""
    directory, name = ntpath.split(path)
    if not name:
        return ntpath.basename(directory)
    return name
Example #41
0
# Preview img2, then bail out -- everything after exit() is currently dead
# code, kept for the paired-dataset integrity comparison below.
fig = plt.figure()
plt.imshow(img2, cmap='gray')
plt.show()

exit()

# Which dataset / split to compare.
datasetname = 'triangles_64'
data_type = 'train'
paths_A = glob('../Data_sources/images/%s/%sA/*' % (datasetname, data_type))
paths_B = glob('../Data_sources/images/%s/%sB/*' % (datasetname, data_type))

# check integrity: collect just the file names (no directories) per side
items_A = set([])
items_B = set([])
for i in range(len(paths_A)):
    head, tail = ntpath.split(paths_A[i])
    items_A.add(tail)

for i in range(len(paths_B)):
    head, tail = ntpath.split(paths_B[i])
    items_B.add(tail)

# get difference: file names present in A but missing from B
dif = items_A - items_B

print(len(dif))
#print(items_A.pop())
print('---------------------------------------')
print(paths_A[0])
head, tail = ntpath.split(paths_A[0])
print(tail, ntpath.basename(head))
def path_leaf(path):
  """Return the part of *path* after the last separator ('' if it ends in one)."""
  return ntpath.split(path)[1]
def GetExifData(lists, ignoreCameraModel, fileFullPath):
    """Collect EXIF rows for every JPEG under *fileFullPath*.

    :param lists: list to append one row ([filename, tag values...]) per
        accepted photo; mutated in place and also returned.
    :param ignoreCameraModel: camera ``Model`` strings to exclude; an empty
        list accepts every model.
    :param fileFullPath: root directory handed to FindAllFiles.
    :return: tuple (lists, header) where header is ["FileName", tag names...]
        taken from the first accepted photo (empty if none accepted).
    """
    photoData = PhotoData.Photo()

    exts = ("jpg", "jpeg")
    row = []  # renamed from ``list`` to stop shadowing the builtin
    header = []

    accepted = 0

    print("[" + fileFullPath + "]")

    for file in FindAllFiles(fileFullPath):
        root, ext = os.path.splitext(file)
        # Only JPEG files carry the EXIF we want.
        if ext[1:].lower() not in exts:
            continue
        try:
            taginfo = photoData.GetExif(file)

            fpath, fname = ntpath.split(file)

            # Skip photos with no EXIF or an empty camera model
            # (short-circuit keeps taginfo["Model"] off the "[NO EXIF]" str).
            if taginfo == "[NO EXIF]" or str(taginfo["Model"]) == "":
                continue
            # Skip explicitly ignored camera models.
            if ignoreCameraModel != [] and taginfo["Model"] in ignoreCameraModel:
                continue

            accepted += 1
            if accepted == 1:
                # Header comes from the first accepted photo: "FileName"
                # followed by every tag name.
                header.append("FileName")
                header.extend(taginfo.keys())

            # Row: the file name followed by every tag value.
            row.append(fname)
            row.extend(taginfo.values())
            lists.append(row)
            row = []
        except AttributeError as err:
            print("*EXCEPTION:", err)
            print(" [" + file + "]")

    return (lists, header)
Example #44
0
def path_end(path):
  """Return the last path component (only the end of the split is needed)."""
  return ntpath.split(path)[1]
Example #45
0
	def minify(self):
		"""Minify the current view's file into a sibling ``.min.<ext>`` file.

		Selects an external minifier from the file extension / view syntax
		(uglifyjs for JS, minjson for JSON, clean-css / uglifycss / YUI for
		CSS, html-minifier for HTML, svgo for SVG), assembles its command
		line from plugin settings, and hands it to run_cmd.
		"""
		inpfile = self.view.file_name()
		# Only proceed for a saved file path that actually has an extension.
		if type(inpfile).__name__ in ('str', 'unicode') and re.search(r'\.[^\.]+$', inpfile):
			if self.view.is_dirty() and self.get_setting('save_first'):
				self.view.run_command('save')
				if self.get_setting('auto_minify_on_save'):
					return
			# foo.ext -> foo.min.ext (only the final extension is rewritten).
			outfile = re.sub(r'(\.[^\.]+)$', r'.min\1', inpfile, 1)
			syntax = self.view.settings().get('syntax')
			if self.get_setting('debug_mode'):
				print('Minify: Syntax: ' + str(syntax))
			# --- JavaScript: uglifyjs ---
			if re.search(r'\.js$', inpfile) or re.search(r'/JavaScript\.tmLanguage$', syntax):
				cmd = self.fixStr(self.get_setting('uglifyjs_command') or 'uglifyjs').split()
				cmd.extend([self.quoteChrs(inpfile), '-o', self.quoteChrs(outfile), '-m', '-c'])
				eo = self.get_setting('uglifyjs_options')
				if type(eo).__name__ in ('str', 'unicode'):
					cmd.extend(self.fixStr(eo).split())
				if self.get_setting('source_map'):
					head, tail = ntpath.split(outfile)
					mapfile = tail or ntpath.basename(head)
					cmd.extend(['--source-map', self.quoteChrs(outfile) + '.map', '--source-map-url', self.quoteChrs(mapfile) + '.map', '--source-map-root', './', '-p', 'relative'])
				if self.get_setting('keep_comments'):
					cmd.extend(['--comments'])
					eo = self.get_setting('comments_to_keep')
					if type(eo).__name__ in ('str', 'unicode'):
						cmd.extend([eo])
			# --- JSON: minjson ---
			elif re.search(r'\.json$', inpfile) or re.search(r'/JSON\.tmLanguage$', syntax):
				cmd = self.fixStr(self.get_setting('minjson_command') or 'minjson').split()
				cmd.extend([self.quoteChrs(inpfile), '-o', self.quoteChrs(outfile)])
			# --- CSS: uglifycss / YUI compressor / clean-css (default) ---
			elif re.search(r'\.css$', inpfile) or re.search(r'/CSS\.tmLanguage$', syntax):
				minifier = self.get_setting('cssminifier') or 'clean-css'
				if minifier == 'uglifycss':
					cmd = self.fixStr(self.get_setting('uglifycss_command') or 'uglifycss').split()
					eo = self.get_setting('uglifycss_options')
					if type(eo).__name__ in ('str', 'unicode'):
						cmd.extend(self.fixStr(eo).split())
					cmd.extend([self.quoteChrs(inpfile), '>', self.quoteChrs(outfile)])
				elif minifier == 'yui':
					cmd = self.fixStr(self.get_setting('java_command') or 'java').split()
					yui_compressor = self.get_setting('yui_compressor') or 'yuicompressor-2.4.7.jar'
					cmd.extend(['-jar', PLUGIN_DIR + '/bin/' + str(yui_compressor), self.quoteChrs(inpfile), '-o', self.quoteChrs(outfile)])
					eo = self.get_setting('yui_charset')
					if type(eo).__name__ in ('str', 'unicode'):
						cmd.extend(['--charset', eo])
					eo = self.get_setting('yui_line_break')
					if type(eo).__name__ in ('int', 'str', 'unicode'):
						cmd.extend(['--line-break', str(eo)])
				else:
					cmd = self.fixStr(self.get_setting('cleancss_command') or 'cleancss').split()
					eo = self.get_setting('cleancss_options') or '--s0 -s --skip-rebase'
					if type(eo).__name__ in ('str', 'unicode'):
						cmd.extend(self.fixStr(eo).split())
					if self.get_setting('css_source_map'):
						cmd.extend(['--source-map'])
					cmd.extend(['-o', self.quoteChrs(outfile), self.quoteChrs(inpfile)])
			# --- HTML: html-minifier ---
			elif re.search(r'\.html?$', inpfile) or re.search(r'/HTML\.tmLanguage$', syntax):
				cmd = self.fixStr(self.get_setting('html-minifier_command') or 'html-minifier').split()
				eo = self.get_setting('html-minifier_options') or '--collapse-boolean-attributes --collapse-whitespace --html5 --minify-css --minify-js --preserve-line-breaks --process-conditional-comments --remove-comments --remove-empty-attributes --remove-redundant-attributes --remove-script-type-attributes --remove-style-link-type-attributes'
				if type(eo).__name__ in ('str', 'unicode'):
					cmd.extend(self.fixStr(eo).split())
				cmd.extend(['-o', self.quoteChrs(outfile), self.quoteChrs(inpfile)])
			# --- SVG: svgo ---
			elif re.search(r'\.svg$', inpfile):
				cmd = self.fixStr(self.get_setting('svgo_command') or 'svgo').split()
				eo = self.get_setting('svgo_min_options')
				if type(eo).__name__ in ('str', 'unicode'):
					cmd.extend(self.fixStr(eo).split())
				cmd.extend([self.quoteChrs(inpfile), self.quoteChrs(outfile)])
			else:
				# Unsupported file type -- nothing to run.
				cmd = False
			if cmd:
				print('Minify: Minifying file:' + str(inpfile))
				self.run_cmd(cmd, outfile)
Example #46
0
 def extract_filename(self, path):
     """Return the file-name part of *path*; robust to trailing separators."""
     import ntpath
     directory, name = ntpath.split(path)
     return name if name else ntpath.basename(directory)
Example #47
0
def check(check_data):
    """
    Invoke clang with an action which called by processes.
    Different analyzer object belongs to for each build action.

    skiplist handler is None if no skip file was configured.

    :param check_data: tuple of (args, action, context, analyzer_config_map,
        skp_handler, report_output_dir, use_db) describing one build action.
    :return: (return_codes, skipped, analyzer_type) -- return_codes is 0 on
        success, the failing analyzer's return code otherwise, or 1 when an
        unexpected exception is caught.
    """
    args, action, context, analyzer_config_map, skp_handler, \
        report_output_dir, use_db = check_data

    skipped = False
    try:
        # If one analysis fails the check fails.
        return_codes = 0
        skipped = False
        for source in action.sources:

            # If there is no skiplist handler there was no skip list file
            # in the command line.
            # C++ file skipping is handled here.
            _, source_file_name = ntpath.split(source)

            if skp_handler and skp_handler.should_skip(source):
                LOG.debug_analyzer(source_file_name + ' is skipped')
                skipped = True
                continue

            # Construct analyzer env.
            analyzer_environment = analyzer_env.get_check_env(
                context.path_env_extra, context.ld_lib_path_extra)
            run_id = context.run_id

            # NOTE(review): progress_lock / progress_checked_num /
            # progress_actions are presumably module-level shared state set
            # up by the process pool -- confirm in the enclosing module.
            rh = analyzer_types.construct_result_handler(
                args, action, run_id, report_output_dir, context.severity_map,
                skp_handler, progress_lock, use_db)

            # Create a source analyzer.
            source_analyzer = \
                analyzer_types.construct_analyzer(action,
                                                  analyzer_config_map)

            # Source is the currently analyzed source file
            # there can be more in one buildaction.
            source_analyzer.source_file = source

            # Fills up the result handler with the analyzer information.
            source_analyzer.analyze(rh, analyzer_environment)

            if rh.analyzer_returncode == 0:
                # Analysis was successful processing results.
                if rh.analyzer_stdout != '':
                    LOG.debug_analyzer('\n' + rh.analyzer_stdout)
                if rh.analyzer_stderr != '':
                    LOG.debug_analyzer('\n' + rh.analyzer_stderr)
                rh.postprocess_result()
                rh.handle_results()

                LOG.info("[%d/%d] %s analyzed %s successfully." %
                         (progress_checked_num.value, progress_actions.value,
                          action.analyzer_type, source_file_name))
            else:
                # Analysis failed.
                LOG.error('Analyzing ' + source_file_name + ' with ' +
                          action.analyzer_type + ' failed.')
                if rh.analyzer_stdout != '':
                    LOG.error(rh.analyzer_stdout)
                if rh.analyzer_stderr != '':
                    LOG.error(rh.analyzer_stderr)
                return_codes = rh.analyzer_returncode

            # Remove temporary result files unless the user asked to keep them.
            if not args.keep_tmp:
                rh.clean_results()

        progress_checked_num.value += 1

        return return_codes, skipped, action.analyzer_type

    except Exception as e:
        LOG.debug_analyzer(str(e))
        traceback.print_exc(file=sys.stdout)
        return 1, skipped, action.analyzer_type
Example #48
0
def path_leaf(path):
    """Return the final component of *path* regardless of OS path format.

    See http://stackoverflow.com/questions/8384737/extract-file-name-
    from-path-no-matter-what-the-os-path-format
    """
    directory, name = ntpath.split(path)
    return name or ntpath.basename(directory)
Example #49
0
 def split_path_last(self, path):
     """Split *path* at its last separator; the tail may be a file or dir."""
     init, last = ntpath.split(path)
     if not last:
         last = ntpath.basename(init)
     return (init, last)
    def plot_data_from_file(csv_file_path, rti_config):
        """
        Generate plots from the CSV file selected.

        Reads the averaged CSV data, sorts it chronologically, then starts
        one PlotDataThread per plot type (wave height, earth east/north
        velocity, magnitude, direction), each writing an HTML file named
        after the CSV file into the configured output directory, and feeds
        the dataframe to every thread.  Does nothing if the file is missing.

        :param csv_file_path: CSV file to generate the plots.
        :param rti_config: RTI Config to get the settings.
        :return:
        """
        if os.path.exists(csv_file_path):
            # Read in the CSV data of the average data
            avg_df = pd.read_csv(csv_file_path)

            # Set the datetime column values as datetime values
            avg_df['datetime'] = pd.to_datetime(avg_df['datetime'])

            # Sort the data by date and time
            avg_df.sort_values(by=['datetime'], inplace=True)

            # Get the CSV file name without the extension and root dir
            head, tail = ntpath.split(csv_file_path)
            file_name_w_ext = tail or ntpath.basename(head)
            csv_file_name = os.path.splitext(file_name_w_ext)[0]

            # Output HTML paths: <output_dir>/<PlotType>_<csv name>.html
            wave_height_html_file = rti_config.config['AWC'][
                'output_dir'] + os.sep + "WaveHeight_" + csv_file_name + ".html"
            earth_east_vel_html_file = rti_config.config['AWC'][
                'output_dir'] + os.sep + "EarthVel_East_" + csv_file_name + ".html"
            earth_north_vel_html_file = rti_config.config['AWC'][
                'output_dir'] + os.sep + "EarthVel_North_" + csv_file_name + ".html"
            mag_html_file = rti_config.config['AWC'][
                'output_dir'] + os.sep + "Magnitude_" + csv_file_name + ".html"
            dir_html_file = rti_config.config['AWC'][
                'output_dir'] + os.sep + "Direction_" + csv_file_name + ".html"

            # Display the data
            thread_wave_height_display = PlotDataThread(
                rti_config, PlotDataThread.PLOT_TYPE_WAVE_HEIGHT,
                wave_height_html_file)
            thread_wave_height_display.start()

            thread_earth_east_display = PlotDataThread(
                rti_config, PlotDataThread.PLOT_TYPE_EARTH_EAST,
                earth_east_vel_html_file)
            thread_earth_east_display.start()

            thread_earth_north_display = PlotDataThread(
                rti_config, PlotDataThread.PLOT_TYPE_EARTH_NORTH,
                earth_north_vel_html_file)
            thread_earth_north_display.start()

            thread_mag_display = PlotDataThread(rti_config,
                                                PlotDataThread.PLOT_TYPE_MAG,
                                                mag_html_file)
            thread_mag_display.start()

            thread_dir_display = PlotDataThread(rti_config,
                                                PlotDataThread.PLOT_TYPE_DIR,
                                                dir_html_file)
            thread_dir_display.start()

            # Add the data to the plot threads
            thread_wave_height_display.add(avg_df)
            thread_earth_east_display.add(avg_df)
            thread_earth_north_display.add(avg_df)
            thread_mag_display.add(avg_df)
            thread_dir_display.add(avg_df)
Example #51
0
def get_filename(path):
    """Cross-platform file name from *path* (handles trailing separators).

    source: http://stackoverflow.com/questions/8384737/python-extract-file-name-from-path-no-matter-what-the-os-path-format
    """
    import ntpath
    directory, name = ntpath.split(path)
    return name or ntpath.basename(directory)
Example #52
0
def strip_path(path):
    """Return only the trailing component of *path* ('' for a trailing sep)."""
    return ntpath.split(path)[1]
Example #53
0
 def get_program_name(self):
     """Return just the executable name from the configured miner location."""
     full_path = self.config.get("Miner", "program_location")
     directory, name = ntpath.split(full_path)
     return name if name else ntpath.basename(directory)
Example #54
0
def parse_path(path):
    """Return the file-name component of *path*, handling trailing slashes."""
    file_path, file_name = ntpath.split(path)
    if file_name:
        return file_name
    return ntpath.basename(file_path)
Example #55
0
def split_path(name):
    head, tail = ntpath.split(name)
    if not tail:
        tail = ntpath.basename(head)
    return head, tail
Example #56
0
def get_filename(path):
    """Return the leaf of *path*; works for paths ending in a separator."""
    head, tail = ntpath.split(path)
    if tail:
        return tail
    return ntpath.basename(head)
Example #57
0
def save_built_asset_descriptor(asset_path, descriptor):
    """Write *descriptor* as a ``.toml`` file alongside the built asset.

    NOTE(review): the name is derived from ``asset_path.split('.')[0]``, so a
    dot anywhere in the path (not only the extension) truncates it -- confirm
    callers never pass dotted directory names.
    """
    directory = ntpath.split(asset_path)[0]
    stem = path_leaf(asset_path.split('.')[0])
    descriptor_file = os.path.join(directory, stem + '.toml')
    save_asset_descriptor(descriptor_file, descriptor)
Example #58
0
def files(ctx):
    """Display Terraform Project File Lists.

    Prints a summary of the .tf and .tfvar files discovered in the target
    directory, then lists each file (directory prefix in red, name in cyan).
    Any failure is logged and the command exits.
    """
    try:
        # Assign context objects
        log = ctx.obj.log
        files = ctx.obj.tf.files

        if not ctx.obj.verbose:
            click.clear()
        log.header("MagicDoc TF File Summary:")

        log.info("Invoking command magicdoc tf files.")
        log.info("Working with returned file object:")
        log.debug(json.dumps(files, indent=4, sort_keys=True))
        log.debug(' ')

        click.secho(
            "Terraform file search target directory location: {}".format(
                ctx.obj.workdir),
            fg='blue')
        click.secho(
            "  - {} terraform file(s) found in target directory.".format(
                len(files.get('list_tf_files', []))),
            fg='bright_blue')
        click.secho("  - {} tfvar file(s) found in target directory.".format(
            len(files.get('list_tfvar_files', []))),
                    fg='bright_blue')
        click.echo()

        # List TF Files:
        click.secho("Terraform .tf files:", fg='green')
        click.secho("====================", fg='green')
        _echo_file_list(log, files.get('list_tf_files', []))
        log.debug("Listing .tf file results completed!")
        click.echo()

        # List TFVar Files:
        click.secho("Terraform .tfvar files:", fg='green')
        click.secho("=======================", fg='green')
        _echo_file_list(log, files.get('list_tfvar_files', []))
        # BUGFIX: this debug line previously said ".tf" -- a copy-paste
        # remnant from the block above.
        log.debug("Listing .tfvar file results completed!")
        click.echo()
    except Exception as e:
        log.error(
            "MagicDoc failed to parse the terraform project files output! Check your syntax, and retry. If you feel this is a bug please submit an issue on the project repository."
        )
        log.error("Exception: {}".format(str(e)))
        click.echo()
        sys.exit()


def _echo_file_list(log, file_list):
    """Echo each file in *file_list*: directory prefix in red, name in cyan."""
    for filename in file_list:
        file_path, file_name = ntpath.split(filename)
        # NOTE(review): replace("/", "") flattens nested dirs ("a/b" -> "ab");
        # preserved as-is since the printed format may be relied upon.
        file_path = file_path.replace("/", "")
        log.debug("Using file path: {}".format(str(file_path)))
        log.debug("Using file name: {}".format(str(file_name)))
        if file_path != "":
            click.secho("{}/".format(file_path), fg='bright_red', nl=False)
        click.secho(file_name, fg='cyan')
def get_module_name(module_path):
    """
    Return the module name of the module path
    (the file name truncated at its first dot).
    """
    tail = ntpath.split(module_path)[1]
    return tail.split(".")[0]
Example #60
0
def path_leaf(path):
    # type: (str) -> str
    """Return the last path component; http://stackoverflow.com/a/8384788/2482744"""
    head, tail = ntpath.split(path)
    return tail if tail else ntpath.basename(head)