Example #1
def classesToFolders(files_abs_path, partition_abs_path, pos_class_0_abs_path, pos_class_1_abs_path, neg_class_0_abs_path, neg_class_1_abs_path):

    '''separate classes into different folders'''

    pos_partition_file = "positive-partitions-test.tab"
    neg_partition_file = "negative-partitions-test.tab"

    for tab_file in os.listdir(partition_abs_path):

        for folder_name in os.listdir(files_abs_path):
            folder_path = os.path.join(files_abs_path, folder_name)
            
            if tab_file == pos_partition_file and folder_name == 'positives-sto':
                with open(os.path.join(partition_abs_path, tab_file), 'r') as read_pos_tab:
                    for line in read_pos_tab:
                        line_parts = line.split()
                        file_name = line_parts[0]
                        file_class_num = line_parts[1]

                        # copy file to the pos_class_0 folder
                        if file_class_num == '0':
                            for file in os.listdir(folder_path):
                                file_path = os.path.join(folder_path, file)
                                stem = ntpath.splitext(ntpath.basename(file_path))[0]
                                if fnmatch.fnmatch(stem, file_name):
                                    shutil.copy(file_path, pos_class_0_abs_path)

                        # copy file to the pos_class_1 folder
                        if file_class_num == '1':
                            for file in os.listdir(folder_path):
                                file_path = os.path.join(folder_path, file)
                                stem = ntpath.splitext(ntpath.basename(file_path))[0]
                                if fnmatch.fnmatch(stem, file_name):
                                    shutil.copy(file_path, pos_class_1_abs_path)
                                
            if tab_file == neg_partition_file and folder_name == 'negatives-sto':
                with open(os.path.join(partition_abs_path, tab_file), 'r') as read_neg_tab:
                    for line in read_neg_tab:
                        line_parts = line.split()
                        file_name = line_parts[0]
                        file_class_num = line_parts[1]

                        # copy file to the neg_class_0 folder
                        if file_class_num == '0':
                            for file in os.listdir(folder_path):
                                file_path = os.path.join(folder_path, file)
                                stem = ntpath.splitext(ntpath.basename(file_path))[0]
                                if fnmatch.fnmatch(stem, file_name):
                                    shutil.copy(file_path, neg_class_0_abs_path)

                        # copy file to the neg_class_1 folder
                        if file_class_num == '1':
                            for file in os.listdir(folder_path):
                                file_path = os.path.join(folder_path, file)
                                stem = ntpath.splitext(ntpath.basename(file_path))[0]
                                if fnmatch.fnmatch(stem, file_name):
                                    shutil.copy(file_path, neg_class_1_abs_path)
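
A note on the matching above: each candidate file is reduced to its basename without extension, and that stem is compared against the name from the .tab file with fnmatch, so plain names match exactly while shell-style wildcards are also honoured. A minimal sketch (the .sto path is hypothetical):

import fnmatch
import ntpath

stem = ntpath.splitext(ntpath.basename('/data/positives-sto/RF00005.sto'))[0]
print(stem)                               # RF00005
print(fnmatch.fnmatch(stem, 'RF00005'))   # True  (exact name)
print(fnmatch.fnmatch(stem, 'RF000*'))    # True  (wildcards work too)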
Example #2
def ssis_proof(inputfn, outputfn=None):
    inputfolder = ntpath.dirname(inputfn)
    with open(inputfn, 'r') as f:
        contents = f.read()
        # Make sure all SET CATALOG statements are properly commented out for
        # the SSIS loading framework
        contents = re.sub(r'(?<!--)(-+\s+)?(SET CATALOG )', '--SET CATALOG ', contents, flags=re.IGNORECASE)
        # Make sure all double quotes are escaped
        contents = re.sub(r'(?<!\\)"', r'\"', contents)
        # Substitute any quoted string
        contents = re.sub(r"([\"'])((?:\\\1|\1\1|(?P<quote>;)|(?!\1).)*)\1", semicolonrepl, contents)
        # Put tokens in single quotes
        contents = re.sub(r"\((.*)\|\|''\)", r"'\1'", contents)
        # Escape backslashes
        contents = contents.replace(r"\\", r"\\\\")
        # Remove trailing and leading whitespace
        contents = contents.strip()
        # Append start and end quote to contents
        contents = '"' + contents + '"'
        name = ntpath.splitext(ntpath.basename(inputfn))
        if outputfn is None:
            outputfn = name[0]+'_SSIS_READY'+name[1]
        if ntpath.dirname(outputfn) == '':
            outputfn = join(inputfolder, outputfn)
    with open(outputfn,'w') as o:
        o.write(contents)
Example #3
 def move_files(self, file_map):
     fh = open(self.path, 'rb')
     zip_files = zipfile.ZipFile(fh)
     for name in zip_files.namelist():
         filename = ntpath.basename(name)
         extension = ntpath.splitext(filename)[-1]
         source_file = zip_files.open(name)
         if extension == '.txt':
             target_file = open(file_map['error.txt'], 'wb')
         elif filename != 'SnvGet Feature Description.xls' and extension != '.xls':
             target_file = open(file_map[filename], 'wb')
         else:
             target_file = None
         if target_file:
             with source_file, target_file:
                 shutil.copyfileobj(source_file, target_file)
         if filename == 'SnvGet Feature Description.xls':
             # xlrd expects a filename or raw bytes, not a zip stream object
             with xlrd.open_workbook(file_contents=source_file.read()) as wb:
                 sheet_names = wb.sheet_names()
                 for sheet_name in sheet_names:
                     sh = wb.sheet_by_name(sheet_name)
                     name_shortened = sheet_name.replace(' ', '').strip() + '.csv'
                     with open(name_shortened, 'wb') as f:
                         c = csv.writer(f)
                         for r in range(sh.nrows):
                             c.writerow(sh.row_values(r))
     shutil.rmtree(self.tmp_dir)
     fh.close()
Example #4
File: dump.py Project: Dexesttp/pyffi
    def get_pixeldata_head_root(texture_filename):
        r"""Transform NiSourceTexture.file_name into something workable.

        >>> SpellExportPixelData.get_pixeldata_head_root("test.tga")
        ('', 'test')
        >>> SpellExportPixelData.get_pixeldata_head_root(r"textures\test.tga")
        ('textures', 'test')
        >>> SpellExportPixelData.get_pixeldata_head_root(
        ...     r"Z:\Bully\Temp\Export\Textures\Clothing\P_Pants1\P_Pants1_d.tga")
        ('z:/bully/temp/export/textures/clothing/p_pants1', 'p_pants1_d')
        """
        # note: have to use ntpath here so we can split correctly
        # nif convention always uses windows style paths
        head, tail = ntpath.split(texture_filename)
        root, ext = ntpath.splitext(tail)
        # for linux: make paths case insensitive by converting to lower case
        head = head.lower()
        root = root.lower()
        # XXX following is disabled because not all textures in Bully
        # XXX actually have this form; use "-a textures" for this game
        # make relative path for Bully SE
        #tmp1, tmp2, tmp3 = head.partition("\\bully\\temp\\export\\")
        #if tmp2:
        #    head = tmp3
        # for linux: convert backslash to forward slash
        head = head.replace("\\", "/")
        return (head, root) if root else ("", "image")
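
The docstring comment is the heart of this example: ntpath parses Windows-style separators on any host OS, while the POSIX flavour treats a backslash as an ordinary character. A minimal sketch of the difference (standard-library behaviour, runnable anywhere):

import ntpath
import posixpath

win_name = r"textures\test.tga"
print(ntpath.split(win_name))       # ('textures', 'test.tga')
print(ntpath.splitext("test.tga"))  # ('test', '.tga')
print(posixpath.split(win_name))    # ('', 'textures\\test.tga') -- backslash is not a separator here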
Example #5
def main(args):
	# cov_report = pd.read_excel(args.infile) #for an earlier version of Excel, you may need to use the file extension of 'xls'
	cov_report = pd.read_csv(args.infile, sep="\t")  # at this stage the file is really a tab-separated table
	# cov_report.head

	#add the 20xCov perc column
	cov_report['cov20x_perc'] = (cov_report['cov20x'] / (cov_report['contig_end'] - cov_report['contig_srt'] + 1))*100

	#add the cov100x_perc perc column
	# cov_report['cov100x_perc'] = (cov_report['cov100x'] / (cov_report['contig_end'] - cov_report['contig_srt'] + 1))*100
	
	#add the cov500x_perc perc column
	# cov_report['cov500x_perc'] = (cov_report['cov500x'] / (cov_report['contig_end'] - cov_report['contig_srt'] + 1))*100

	#internally extract the gene ID information in a new column
	# cov_report['gene_ID'] = cov_report['attributes'].str.split(";", expand=True)[0].str.split("=", expand=True)[1]
	#internally extract the pool information in a new column
	# cov_report['pool_nr'] = cov_report['attributes'].str.split(";", expand=True)[1].str.split("=", expand=True)[1]
	
	#now we need to generate the new file
	cov_report.to_csv(args.outfile,sep="\t", index=False)

	#now we need to do the maths to get the warnings/lists of amplicons with a value below the cov_thr threshold AND overlapping or within a maximum distance of bp_dist
	c_thr = args.cov_thr
	bp_thr = args.bp_dist

	#1) first filter by cov_thr
	cov_rep_c_thr = cov_report[cov_report['cov20x_perc'] <= float(c_thr)]

	# 1-bis) write the file with only the c_thr passing data, sorted by position
	cov_thr_pass_name = ntpath.splitext(args.outfile)[0] + "_cov20x_lt" + str(c_thr) + ".xls"
	cov_rep_c_thr.sort_values(by=['contig_srt', 'contig_end']).to_csv(cov_thr_pass_name, sep="\t", index=False)
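
The percentage above divides the count of bases covered at 20x by the amplicon length, where the "+ 1" makes the start/end coordinates inclusive. A worked sketch with hypothetical numbers:

import pandas as pd

df = pd.DataFrame({'contig_srt': [100], 'contig_end': [199], 'cov20x': [80]})
# length = 199 - 100 + 1 = 100 bases, of which 80 reach 20x coverage
df['cov20x_perc'] = (df['cov20x'] / (df['contig_end'] - df['contig_srt'] + 1)) * 100
print(df['cov20x_perc'].iloc[0])  # 80.0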
Example #6
def combine(iMovieTitle, pMovieTitle):
    # open the streams in binary mode; AVI data is not text
    f = open(iMovieTitle, 'rb')
    g = open(pMovieTitle, 'rb')

    firstVid = ntpath.splitext(ntpath.basename(iMovieTitle))[0]
    secondVid = ntpath.splitext(ntpath.basename(pMovieTitle))[0]
    outTitle = firstVid + "-" + secondVid + ".avi"
    outFile = open(outTitle, 'wb')

    iFrameList = f.read().split('00dc')
    pFrameList = g.read().split('00dc')

    iframe = '0001b0'.decode('hex')
    iframes = []
    pframes = []

    firstPassed = False
    for index, frame in enumerate(iFrameList):
        if firstPassed == False:
            outFile.write(frame + '00dc')
            if frame[5:8] == iframe:
                firstPassed = True
        else: 
            if frame[5:8] == iframe:
                iframes.append(frame)

    firstPassed = False
    for index, frame in enumerate(pFrameList):
        if firstPassed == False:
            if frame[5:8] == iframe:
                firstPassed = True
        else:
            if frame[5:8] != iframe:
                pframes.append(frame)

    for index in range(min(len(iframes), len(pframes))):
        outFile.write(iframes[index] + '00dc')

        if len(pframes) > len(iframes):
            spacing = len(pframes)/len(iframes)
            for i in range(spacing):
                outFile.write(pframes[index*spacing+i] + '00dc')
        else:
            for i in range(15):
                outFile.write(pframes[index] + '00dc')

    f.close()
    g.close()
    outFile.close()
Example #7
File: mspdb.py Project: queer1/rekall
    def render(self, renderer):
        # The filename is an executable
        if self.guid is None:
            self.pe = pe_vtypes.PE(filename=self.filename,
                                   session=self.session)
            data_directory = self.pe.nt_header.OptionalHeader.DataDirectory[
                "IMAGE_DIRECTORY_ENTRY_DEBUG"].VirtualAddress.dereference_as(
                "_IMAGE_DEBUG_DIRECTORY")

            # We only support the more recent RSDS format.
            debug = data_directory.AddressOfRawData.dereference_as(
                "CV_RSDS_HEADER")

            if debug.Signature != "RSDS":
                logging.error("PDB stream %s not supported.", debug.Signature)
                return

            self.pdb_filename = ntpath.basename(str(debug.Filename))
            self.guid = self.pe.RSDS.GUID_AGE

        elif self.filename is None:
            raise RuntimeError(
                "Filename must be provided when GUID is specified.")

        else:
            self.pdb_filename = self.filename
            self.guid = self.guid.upper()

        for url in self.SYM_URLS:
            try:
                basename = ntpath.splitext(self.pdb_filename)[0]
                url += "/%s/%s/%s.pd_" % (self.pdb_filename,
                                          self.guid, basename)

                renderer.format("Trying to fetch {0}\n", url)
                request = urllib2.Request(url, None, headers={
                        'User-Agent': self.USER_AGENT})

                data = urllib2.urlopen(request).read()
                renderer.format("Received {0} bytes\n", len(data))

                output_file = os.path.join(self.dump_dir, "%s.pd_" % basename)
                with open(output_file, "wb") as fd:
                    fd.write(data)

                try:
                    subprocess.check_call(["cabextract",
                                           os.path.basename(output_file)],
                                          cwd=self.dump_dir)
                except subprocess.CalledProcessError:
                    renderer.format(
                        "Failed to decompress output file {0}. "
                        "Ensure cabextract is installed.\n", output_file)

                break

            except IOError as e:
                logging.error(e)
                continue
Example #8
File: mspdb.py Project: atwong589/rekall
    def FetchPDBFile(self, pdb_filename, guid):
        # Ensure the pdb filename has the correct extension.
        if not pdb_filename.endswith(".pdb"):
            pdb_filename += ".pdb"

        for url in self.SYM_URLS:
            basename = ntpath.splitext(pdb_filename)[0]
            url += "/%s/%s/%s.pd_" % (pdb_filename, guid, basename)

            self.session.report_progress("Trying to fetch %s\n", url)
            request = urllib2.Request(url, None, headers={
                'User-Agent': self.USER_AGENT})

            url_handler = urllib2.urlopen(request)
            with utils.TempDirectory() as temp_dir:
                compressed_output_file = os.path.join(
                    temp_dir, "%s.pd_" % basename)

                output_file = os.path.join(
                    temp_dir, "%s.pdb" % basename)

                # Download the compressed file to a temp file.
                with open(compressed_output_file, "wb") as outfd:
                    while True:
                        data = url_handler.read(8192)
                        if not data:
                            break

                        outfd.write(data)
                        self.session.report_progress(
                            "%s: Downloaded %s bytes", basename, outfd.tell())

                # Now try to decompress it with system tools. This might fail.
                try:
                    if platform.system() == "Windows":
                        # This should already be installed on windows systems.
                        subprocess.check_call(
                            ["expand", compressed_output_file, output_file],
                            cwd=temp_dir)
                    else:
                        # In Linux we just hope the cabextract program was
                        # installed.
                        subprocess.check_call(
                            ["cabextract", compressed_output_file],
                            cwd=temp_dir,
                            stdout=sys.stderr)

                except (subprocess.CalledProcessError, OSError):
                    raise RuntimeError(
                        "Failed to decompress output file %s. "
                        "Ensure cabextract is installed.\n" % output_file)

                # We read the entire file into memory here - it should not be
                # larger than approximately 10mb.
                with open(output_file, "rb") as fd:
                    return fd.read(50 * 1024 * 1024)
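
The rekall examples build the same symbol-server layout, <server>/<pdb name>/<GUID+age>/<basename>.pd_, where the trailing underscore marks a cab-compressed payload. A sketch of the URL composition; the server URL and GUID below are illustrative, not taken from the code above:

import ntpath

pdb_filename = "ntkrnlmp.pdb"               # hypothetical PDB name
guid = "3844DBB920174967BE7AA4A2C20430FA2"  # hypothetical GUID+age string
basename = ntpath.splitext(pdb_filename)[0]
url = "https://msdl.microsoft.com/download/symbols" + \
      "/%s/%s/%s.pd_" % (pdb_filename, guid, basename)
print(url)
# https://msdl.microsoft.com/download/symbols/ntkrnlmp.pdb/3844DBB920174967BE7AA4A2C20430FA2/ntkrnlmp.pd_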
Example #9
File: recode.py Project: darious/media
def FileNameCalc(VidFileIn, fileProcess, format):
# work out what to do with the files and calculate the names to use
	
	if fileProcess == 'backup':
	# then backup the file
		VidFileBackup = BackupLocation + ntpath.basename(VidFileIn)
		try:
			os.remove(VidFileBackup)
		except OSError:
			pass
			
		print bcolors.OKBLUE + "backing up file" + bcolors.ENDC
		copy_large_file(VidFileIn, VidFileBackup)
		os.remove(VidFileIn)
		print bcolors.OKBLUE + "backup complete" + bcolors.ENDC
		# work out the in and out filenames
		VidFileOutName, VidFileOutExt = ntpath.splitext(VidFileIn)
		VidFileOut = VidFileOutName + '.' + format
		if os.name in ("posix"):
		# if in cygwin then convert the filepath format
			VidFileIn = posix2win(VidFileBackup)
		else:
			VidFileIn = VidFileBackup
		
	if fileProcess == 'new':
		VidFileIn = VidFileIn
		VidFileOutName, VidFileOutExt = ntpath.splitext(VidFileIn)
		VidFileOut = VidFileOutName + '_new' + '.' + format
		
	if fileProcess == 'replace':
		VidFileStart = VidFileIn
		tmpRandom = base64.b64encode(os.urandom(12), '__')
		VidFileOutName, VidFileOutExt = ntpath.splitext(VidFileIn)
		VidFileOut = VidFileOutName + '.' + format
		VidFileIn = VidFileOutName + '_' + tmpRandom + VidFileOutExt
		# rename the file
		print VidFileOut
		print VidFileIn
		os.rename(VidFileStart, VidFileIn)
		print bcolors.OKBLUE + "File renamed from %s to %s" %(VidFileStart, VidFileIn) + bcolors.ENDC
	
	return (VidFileIn, VidFileOut)
Example #10
def initializePoc(folders=None):
    # avoid a mutable default argument: a shared default list would grow across calls
    if folders is None:
        folders = []
    if not os.path.isdir(paths.POC_PATH):
        os.makedirs(paths.POC_PATH)

    folders.append(paths.POC_PATH)
    for folder in folders:
        files = os.listdir(folder)
        for file_i in files:
            if file_i.endswith(".py") and "__init__" not in file_i:
                PAYLOADS.update({ntpath.splitext(file_i)[0]: os.path.join(folder, file_i)})
Example #11
File: common.py Project: 0x24bin/Pocsuite
def changeToPyImportType(path):
    """
    >>> changeToPyImportType('/path/to/module.py')
    'path.to.module'
    >>> changeToPyImportType('path/to/module.py')
    'path.to.module'
    >>> changeToPyImportType('path/to')
    'path.to'
    """

    return ntpath.splitext(path)[0].strip("/").replace("/", ".")
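
One caveat worth noting: only forward slashes are converted, so a Windows-style path keeps its backslashes. A minimal sketch (inputs are hypothetical):

changeToPyImportType('path/to/module.py')    # -> 'path.to.module'
changeToPyImportType('path\\to\\module.py')  # -> 'path\\to\\module' (backslashes untouched)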
Example #12
def splitFile(value, justWindows):
    result = {}
    #	value = value.encode('utf-8')
    #	value = value.lower().replace('"', '&quot;')
    if justWindows == 1:
        (result['path'], ffile) = split(value)
        (result['filename'], extension) = splitext(ffile)
    else:
        (result['path'], ffile) = ntpath.split(value)
        (result['filename'], extension) = ntpath.splitext(ffile)
    result['extension'] = extension[1:]  # drop the leading dot
    return result
Example #13
def _export_textures_and_materials(blender_objects, saved_mod):
    textures = get_textures_from_blender_objects(blender_objects)
    blender_materials = get_materials_from_blender_objects(blender_objects)
    textures_array = ((ctypes.c_char * 64) * len(textures))()
    materials_data_array = (MaterialData * len(blender_materials))()

    for i, texture in enumerate(textures):
        file_name = os.path.basename(texture.image.filepath)
        try:
            file_path = ntpath.join(texture.albam_imported_texture_folder, file_name)
        except AttributeError:
            raise ExportError('Texture {0} was not imported from an Arc file'.format(texture.name))
        try:
            file_path, _ = ntpath.splitext(file_path)
            textures_array[i] = (ctypes.c_char * 64)(*file_path.encode('ascii'))
        except UnicodeEncodeError:
            raise ExportError('Texture path {} is not in ascii'.format(file_path))
        if len(file_path) > 64:
            # TODO: what if relative path are used?
            raise ExportError('File path to texture {} is longer than 64 characters'
                              .format(file_path))

    for i, mat in enumerate(blender_materials):
        material_data = MaterialData()
        try:
            # TODO: Should use data from actual blender material
            saved_mat = saved_mod.materials_data_array[i]
        except IndexError:
            raise ExportError('Exporting models with more materials than the original not supported yet')
        material_data.unk_01 = saved_mat.unk_01
        material_data.unk_02 = saved_mat.unk_02
        material_data.unk_03 = saved_mat.unk_03
        material_data.unk_04 = saved_mat.unk_04
        material_data.unk_05 = saved_mat.unk_05
        material_data.unk_06 = saved_mat.unk_06
        material_data.unk_07 = saved_mat.unk_07
        for texture_slot in mat.texture_slots:
            if not texture_slot:
                continue
            texture = texture_slot.texture
            if not texture:
                # ?
                continue
            # texture_indices expects index-1 based
            try:
                texture_index = textures.index(texture) + 1
            except ValueError:
                # TODO: logging
                print('error in textures')
                continue  # texture_index would be unbound below
            material_data.texture_indices[texture.albam_imported_texture_type] = texture_index
        materials_data_array[i] = material_data

    return textures_array, materials_data_array
Example #14
File: helpers.py Project: rprabu/sdal0.5.2
def get_directory_filename_extension(filename):    
    d, f, e = "", "", ""
    if "\\" in filename:
        #this is windows style
        d = ntpath.split(filename)[0]
        (f, e) = ntpath.splitext(ntpath.basename(filename))
    else:
        #linux style
        d = os.path.split(filename)[0]
        (f, e) = os.path.splitext(os.path.basename(filename))
    if e:
        #convert .ext to ext
        e = e[1:]    
    return (d.strip(), f.strip(), e.strip())
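
A minimal usage sketch of the helper above (paths are hypothetical; the second call assumes a POSIX host, where os.path handles the forward slashes):

print(get_directory_filename_extension(r"C:\logs\app.log"))  # ('C:\\logs', 'app', 'log')
print(get_directory_filename_extension("/var/log/app.log"))  # ('/var/log', 'app', 'log')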
Example #15
 def file_to_graph(self, file_path=None):
     
     '''Read one file
        -------------
        Read one STO file, extract the desired info, and then build the graph.
        Takes the STO file path and returns its graph.'''
     
     head = ntpath.splitext(ntpath.basename(file_path))[0]
     zip_head_seqs = self._read_sto_file(file_path)
     #print zip_head_seqs
     info_type = self._identify_information_type(head, zip_head_seqs)
     sequence, structure, conservation_strength, covariation, entropy_3 = self._filter_info(info_type)
     graph = self._build_graph(head, sequence, structure, conservation_strength, covariation, entropy_3)
     #graph = self._remodel_graph(head, sequence, structure, conservation_strength, covariation, entropy_3)
     return graph
Example #16
File: mspdb.py Project: google/rekall
    def FetchPDBFile(self):
        # Ensure the pdb filename has the correct extension.
        pdb_filename = self.plugin_args.pdb_filename
        guid = self.plugin_args.guid

        if not pdb_filename.endswith(".pdb"):
            pdb_filename += ".pdb"

        for url in self.SYM_URLS:
            basename = ntpath.splitext(pdb_filename)[0]
            try:
                return self.DownloadUncompressedPDBFile(
                    url, pdb_filename, guid, basename)
            except urllib.error.HTTPError:
                return self.DownloadCompressedPDBFile(
                    url, pdb_filename, guid, basename)
Example #17
 def run(self, fRadius, fwhm_min, fwhm_max, getHead):
     
     alipy.pysex.run(image=self.filepath, imageref='', params=['X_IMAGE', 'Y_IMAGE', 'MAG_APER', 'MAG_AUTO', 'MU_MAX', 'FLUX_RADIUS', 'FWHM_IMAGE', 'ELLIPTICITY'], conf_file=None, conf_args={}, keepcat=True, rerun=True, catdir="/tmp/")
     fileName = ntpath.splitext(self.filepath)
     filepath = ntpath.basename(fileName[0])
     os.popen("cat /tmp/%s.pysexcat| grep -v '#'| awk '{if ($3 != '99.0000' && $4 != '99.0000' && $6 > %s && $7 > %s && $7 <%s) print $1, $2, $3, $4, $5, $6, $7, $8}'| sort -nk5| head -%s > /tmp/sexCoor" %(filepath, fRadius, fwhm_min, fwhm_max, getHead))
     
     ins = open("/tmp/sexCoor", "rb")
     array = []
     
     for line in ins:
         array.append(line.replace("\n","").split(" "))
     ins.close()
     
     os.remove("/tmp/%s.pysexcat" %(filepath))
     
     return array
Example #18
def inputFromFile(filePath,err,label):
    # get current time
    timeStamp = datetime.datetime.now().strftime("%d.%m.%Y %H:%M:%S")
    # Check if file was chosen
    if(filePath == None):
        tmp = timeStamp
        tmp += ". Error in function 'inputFromFile'. Empty file path."
        # write error message to error file
        err.write(tmp)
    # get file name and file extension. 
    # I use here module 'ntpath' to get file name, 
    # because it is more system independent than module 'os'.
    fileName, fileExtension = ntpath.splitext(filePath)
    if(fileName == ""):
        tmp = timeStamp
        tmp += ". Error in function 'inputFromFile'. No file was chosen."
        # write error message to error file
        err.write(tmp)
    # check if 'fileName' has the correct extension.
    if(fileExtension == ""):
        tmp = timeStamp + ". "
        tmp += "Error in function 'inputFromFile'. File extension is missing."
        # write error message to error file
        err.write(tmp)
    # Allowed are: '.txt', '.fa', '.fasta'
    fileExtensions = ['.txt', '.fa', '.fasta']
    if(fileExtension not in fileExtensions):
        tmp = timeStamp
        tmp += ". Error in function 'inputFromFile'. Wrong file selected. "
        tmp += "Allowed file extensions are: '.txt', '.fa', '.fasta'."
        # write error message to error file
        err.write(tmp)
    # check if input file is empty
    if(ntpath.getsize(filePath) == 0):
        tmp = timeStamp
        tmp += ". Error in function 'inputFromFile'. Input file is empty."
        # write error message to error file
        err.write(tmp)
    # try to open the file
    try:
        handle = open(filePath, "rU")
    except IOError, e:  # open() raises IOError, not ValueError
        tmp = timeStamp + ". "
        tmp += "Error in function 'inputFromFile'. " + str(e)
        # write error message to error file
        err.write(tmp)
Example #19
def main( argv ):

    # The log file name will be based on the target executable file name.
    logfile = basename( argv[ 0 ] )
    logfile = splitext( logfile )[ 0 ] + ".log"

    # Instance a global Logger object.
    global logger
    logger = Logger( logfile )

    # Launch the debugger.
    try:
        simple_debugger( argv )

    # On error log the exception and quit.
    except:
        logger.log_exc()
Example #20
def parse_Compound_File(file_to_parse):


    file_object = open(file_to_parse, "rb")
    olecf_file = pyolecf.file()
    olecf_file.open_file_object(file_object)
    SQLitedb.CreateTempTable(table_name + '_temp', table_columns)

    root_item = olecf_file.get_root_item()

    #print ("Root Item Name ==> " + root_item.get_name())
    #print ("Number of Sub_Items ==> " + str(root_item.get_number_of_sub_items()))
    Base_Name = ntpath.basename(file_to_parse)
    (File_Name, Extension) = ntpath.splitext(Base_Name)

    if (App_Id.CheckAppId(File_Name)):
        App_Id_Desc = App_Id.SelectAppId(File_Name)[0]
        #print ("App id => " + App_Id.SelectAppId(File_Name)[0])
    else:
        App_Id_Desc = File_Name
        #print ("File Name => " + File_Name)

    for i in range (0, root_item.get_number_of_sub_items()):
        jl_record = []
        jl_record.append(File_Name)
        jl_record.append(App_Id_Desc)
        new_item = root_item.get_sub_item(i)
        jl_record.append(new_item.get_name())
        #print ("   Sub Item Name ==> " + new_item.get_name())
        #print ("   Sub Item Sub Items ==> " + str(new_item.get_number_of_sub_items()))
        if new_item.get_name() == u'DestList':
            continue
        new_link_item = pylnk.file()
        new_link_item.open_file_object(new_item)
        jl_record = Create_Bind_Values(jl_record, new_link_item)
        try:
            SQLitedb.InsertBindValues(table_name + '_temp', sql_ins_columns, sql_bind, jl_record)
        except Exception:
            print ("Error in Link Item ==> " + str(jl_record[1]) + " <==> " + str(jl_record[2]))
    if (SQLitedb.TableExists(table_name)):
        SQLitedb.AppendTempToPermanentTable(table_name)
    else:
        SQLitedb.CreatePermanentTable(table_name)
    SQLitedb.DropTable(table_name + '_temp')
Example #21
File: views.py Project: nikolas/dmt
    def get(self, request):
        AWS_ACCESS_KEY = settings.AWS_ACCESS_KEY
        AWS_SECRET_KEY = settings.AWS_SECRET_KEY
        S3_BUCKET = settings.AWS_S3_UPLOAD_BUCKET

        object_name = safe_basename(
            request.GET.get('s3_object_name', 'unknown.obj'))
        # default to '' so the substring checks below don't fail on None
        mime_type = request.GET.get('s3_object_type', '')
        (basename, extension) = ntpath.splitext(object_name)
        # force the extension for some known cases
        if 'jpeg' in mime_type:
            extension = ".jpg"
        elif 'png' in mime_type:
            extension = ".png"
        elif 'gif' in mime_type:
            extension = ".gif"

        now = datetime.now()
        uid = str(uuid.uuid4())
        object_name = "%04d/%02d/%02d/%02d/%s-%s%s" % (
            now.year, now.month, now.day,
            now.hour, basename, uid, extension)

        expires = int(time.time()+10)
        amz_headers = "x-amz-acl:public-read"

        put_request = "PUT\n\n%s\n%d\n%s\n/%s/%s" % (
            mime_type, expires, amz_headers, S3_BUCKET, object_name)

        signature = base64.encodestring(
            hmac.new(AWS_SECRET_KEY, put_request, sha1).digest())
        signature = urllib.quote_plus(signature.strip())

        url = 'https://s3.amazonaws.com/%s/%s' % (S3_BUCKET, object_name)
        signed_request = '%s?AWSAccessKeyId=%s&Expires=%d&Signature=%s' % (
            url, AWS_ACCESS_KEY, expires, signature)

        return HttpResponse(
            json.dumps({
                'signed_request': signed_request,
                'url': url
            }), content_type="application/json")
Example #22
def make_fname(title, relpath):
        """creates a new filename for the movie from its title

        Uses the movie title saved in database along with the original
        file extension to create a new and correct filename

        Args:
            title(str): title of the movie
            relpath(str): path stored in database

        Returns:
            str: new filename.ext

        Raises:
            None
        """
        # TODO: validate relpath contains filename.ext
        extension = splitext(relpath)[1]
        # TODO: validate that this is a valid/legal filename
        return title + extension
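
A minimal usage sketch (the title and stored path are hypothetical):

make_fname("Blade Runner", "movies/old_name.mkv")  # -> 'Blade Runner.mkv'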
Example #23
	def on_load (self, view):
		if ReaSyntax.detectJs and ntpath.splitext(view.file_name())[1] == "":

			jsFile = False
			descPos  = view.find("^\\s*desc:", 0)

			# Found "desc:" line, check if it's the first line in the file (omitting comments and empty lines)
			if not descPos.empty():
				if descPos.begin() == 0:
					jsFile = True
				else:
					lastComment = view.find("(^\\s*/\\*([^*]|[\r\n]|(\\*+([^*/]|[\r\n])))*\\*+/)|(^\\s*//.*)|(^\\s*$)", 0)
					if not lastComment.empty() and lastComment.begin() == 0:

						while lastComment.end() < descPos.begin():

							if (lastComment.end() + 1 == descPos.begin()):
								jsFile = True
								break

							comment = view.find("(^\\s*/\\*([^*]|[\r\n]|(\\*+([^*/]|[\r\n])))*\\*+/)|(^\\s*//.*)|(^\\s*$)", lastComment.end())
							if lastComment.end() + 1 != comment.begin(): # There's something between comments, abort
								break
							if (comment.empty()):                        # Reached last comment before "desc:"
								break
							lastComment = comment

			# No match yet, try to find at least 2 code sections
			if not jsFile:
				codeSections = list()
				view.find_all("^@(init|slider|block|sample|serialize|gfx)", 0, "\\1", codeSections)

				# Make sure there isn't more than one code section per type
				if len(codeSections) == len(set(codeSections)) and len(codeSections) >= 2:
					jsFile = True

			if jsFile:
				view.set_syntax_file(JS_SYNTAX)
Example #24
    def __execute(self, param):

        if not self.client:
            return errno.EINVAL

        rpcData = dict(param)
        rpcData['cmd'] = rpcData.get('method')

        if not rpcData.has_key('method'):
            logging.error('Invalid RPC data, did you forget to call addParameters?')
            return errno.EINVAL

        if 'syncdb' in sys.argv:
            import syncscripts
            import __main__
            base = ntpath.basename(__main__.__file__)
            filename = ntpath.splitext(base)[0]
            ret = syncscripts.sync_script(filename, rpcData)
            self.rpcResult['result'] = 'SUCCESS'
            return

        ipctm = rpcData.get('timeout', default_cmd_timeout)
        if ipctm > socket_timeout:
            self.client.settimeout(ipctm)
        elif ipctm == 0.0: # NONBLOCK
            self.client.settimeout(0.0)
            logging.info("execute NONBLOCK")

        ret = self.nestor.sendCommand(rpcData)
        if ret:
            logging.error("sendCommand error {0}".format(ret))
            return ret

        return self.__requestResp()
Example #25
File: znb.py Project: luksan/RSSscpi
    def save_screenshot(self, filename, diagram=None):
        """
        Take a screenshot containing only this diagram. The file type is inferred from the filename extension,
        valid options are BMP, EMF, EWMF, JPG, PDF, PNG, SVG, WMF.

        :param str filename: The filename under which the screenshot will be saved on the instrument.
        :param Diagram diagram: The diagram to be captured. The whole screen will be captured if None.
        :return: a File object representing the captured screenshot
        :rtype: File
        """
        _, filetype = ntpath.splitext(filename)
        filetype = filetype[1:].upper()
        if filetype not in self.HCOPy.DEVice.LANGuage.args:
            raise ValueError("Invalid file extension for screenshot: " + filetype)
        self.MMEMory.NAME().w(filename)  # Define the filename
        self.HCOPy.DESTination().w("MMEM")  # Print to mass storage
        self.HCOPy.DEVice.LANGuage().w(filetype)  # Define the file type
        if diagram is not None:
            diagram.select_diagram()
            self.HCOPy.PAGE.WINDow().w("ACTive")  # Print only the active diagram
        else:
            self.HCOPy.PAGE.WINDow().w("HARDcopy")
        self.HCOPy.IMMediate().w()  # Perform the screen capture
        return self.filesystem.file(filename)
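
A hedged usage sketch, assuming znb is an already-connected instrument object exposing this method and some_diagram is one of its Diagram instances (both names are illustrative, so the calls are left commented):

# shot = znb.save_screenshot("capture.png")                     # whole screen; PNG inferred from extension
# shot = znb.save_screenshot("diag1.pdf", diagram=some_diagram)  # one diagram, saved as PDF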
Example #26
File: proc.py Project: Logan-lu/nrs
    def out(self):
        """ Output instruction in textform. """
        buf = idaapi.init_output_buffer(1024)

        if self.cmd.auxpref & self.FLo_PluginCall:
            lib,_ = self.get_string(self.cmd[0].addr)
            fn,_ = self.get_string(self.cmd[1].addr)
            lib = ntpath.splitext(ntpath.basename(lib))[0]
            out_line('{}::{}'.format(lib, fn), COLOR_INSN)
            OutChar(' ')
            out_one_operand(2)
        else:
            OutMnem(12)
            for i, op in ((i, self.cmd[i]) for i in range(6)):
                if op.type == o_void:
                    break
                if i > 0:
                    out_symbol(',')
                    OutChar(' ')
                out_one_operand(i)

        term_output_buffer()
        cvar.gl_comm = 1
        MakeLine(buf)
Example #27
def main():
    from optparse import OptionParser
    parser = OptionParser()
    parser.add_option(
            "-o", "", dest="objsuffix",
            action="store", default=".o",
            metavar="SUFFIX",
            help="Specify object file suffix [default: %default]")
    parser.add_option(
            "-S", "", dest="sysincludes",
            action="append",
            metavar="INCL_DIR",
            help="Specify system include directory to omit from output")
    options, args_list = parser.parse_args()

    objsuffix = options.objsuffix
    global SYSTEM_INCLUDES
    SYSTEM_INCLUDES = [makePath([x]) for x in options.sysincludes]

    out = sys.stdout

    found_double_hyphen = False
    dirs = []
    includes = []
    cmd = ['cl', '-nologo', '-TP', '-showIncludes', '-Zs', '-w']
    for i, arg in enumerate(args_list):
        # simply note whether we've seen '--' yet
        if arg == '--':
            found_double_hyphen = True
            continue
        # always add the arg to cmd to be passed to cl
        cmd.append(arg)
        if found_double_hyphen:
            dirs.append(ospath.dirname(arg))
            continue
        # only do -I parsing before we've found '--'
        if arg == '-I':
            includes.append(makePath([args_list[i+1]]))
    #print "cmd:", cmd
    #print "includes:", includes
    #print "dirs:", dirs

    i = 0
    target = None
    deps = []
    files = []
    output = subprocess.Popen(cmd, stdout=subprocess.PIPE).communicate()[0]
    for line in output.splitlines():
        words = line.split()
        if len(words) < 1:
            continue
        if words[0] != 'Note:':
            # flush rule
            if target and len(deps) > 0:
                files.extend(writeRule(out, target, deps))
                deps = []
            # create target and make the source file the first dep
            src = makePath(words, base=dirs[i])
            target = ospath.splitext(ospath.basename(src))[0] + objsuffix
            deps = [src]
            continue
        # record dependency
        deps.append(makePath(words[3:]))
    # flush rule
    if target and len(deps) > 0:
        files.extend(writeRule(out, target, deps))
    # output list of deps as targets to handle when they get deleted
    files = list(set(files))  # remove duplicates
    for path in files:
        out.write(path.replace(' ', '\\\\ ') + ':\n')
Example #28
File: common.py Project: 0x24bin/Pocsuite
def choosePocType(filepath):
    """
    @function choose '.py' or '.json' extension to load the poc file
    """
    return ntpath.splitext(filepath)[1][1:]
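
A minimal sketch of the return value, which selects the loader by extension (paths are hypothetical):

choosePocType('/pocs/example_poc.py')    # -> 'py'
choosePocType('/pocs/example_poc.json')  # -> 'json'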
Example #29
def get_files(filename):
    """Converts the data to Shapefiles or Geotiffs and returns
       a dictionary with all the required files
    """
    files = {}

    # Verify if the filename is in ascii format.
    try:
        filename.decode('ascii')
    except UnicodeError:  # decode() raises UnicodeDecodeError, not UnicodeEncodeError
        msg = "Please use only characters from the english alphabet for the filename. '%s' is not yet supported." \
            % os.path.basename(filename).encode('UTF-8')
        raise GeoNodeException(msg)

    # Let's unzip the filename in case it is a ZIP file
    import tempfile
    import zipfile
    from geonode.utils import unzip_file
    if zipfile.is_zipfile(filename):
        tempdir = tempfile.mkdtemp()
        filename = unzip_file(filename, '.shp', tempdir=tempdir)
        if not filename:
            # We need to iterate files as filename could be the zipfile
            import ntpath
            from geonode.upload.utils import _SUPPORTED_EXT
            file_basename, file_ext = ntpath.splitext(filename)
            for item in os.listdir(tempdir):
                item_basename, item_ext = ntpath.splitext(item)
                if ntpath.basename(item_basename) == ntpath.basename(
                        file_basename) and (item_ext.lower()
                                            in _SUPPORTED_EXT):
                    filename = os.path.join(tempdir, item)
                    break

    # Make sure the file exists.
    if not os.path.exists(filename):
        msg = ('Could not open %s. Make sure you are using a '
               'valid file' % filename)
        logger.warn(msg)
        raise GeoNodeException(msg)

    base_name, extension = os.path.splitext(filename)
    # Replace special characters in filenames - []{}()
    glob_name = re.sub(r'([\[\]\(\)\{\}])', r'[\g<1>]', base_name)

    if extension.lower() == '.shp':
        required_extensions = dict(shp='.[sS][hH][pP]',
                                   dbf='.[dD][bB][fF]',
                                   shx='.[sS][hH][xX]')
        for ext, pattern in required_extensions.iteritems():
            matches = glob.glob(glob_name + pattern)
            if len(matches) == 0:
                msg = ('Expected helper file %s does not exist; a Shapefile '
                       'requires helper files with the following extensions: '
                       '%s') % (base_name + "." + ext,
                                required_extensions.keys())
                raise GeoNodeException(msg)
            elif len(matches) > 1:
                msg = ('Multiple helper files for %s exist; they need to be '
                       'distinct by spelling and not just case.') % filename
                raise GeoNodeException(msg)
            else:
                files[ext] = matches[0]

        matches = glob.glob(glob_name + ".[pP][rR][jJ]")
        if len(matches) == 1:
            files['prj'] = matches[0]
        elif len(matches) > 1:
            msg = ('Multiple helper files for %s exist; they need to be '
                   'distinct by spelling and not just case.') % filename
            raise GeoNodeException(msg)

    elif extension.lower() in cov_exts:
        files[extension.lower().replace('.', '')] = filename

    # Only for GeoServer
    if check_ogc_backend(geoserver.BACKEND_PACKAGE):
        matches = glob.glob(glob_name + ".[sS][lL][dD]")
        if len(matches) == 1:
            files['sld'] = matches[0]
        elif len(matches) > 1:
            msg = ('Multiple style files (sld) for %s exist; they need to be '
                   'distinct by spelling and not just case.') % filename
            raise GeoNodeException(msg)

    matches = glob.glob(glob_name + ".[xX][mM][lL]")

    # shapefile XML metadata is sometimes named base_name.shp.xml
    # try looking for filename.xml if base_name.xml does not exist
    if len(matches) == 0:
        matches = glob.glob(filename + ".[xX][mM][lL]")

    if len(matches) == 1:
        files['xml'] = matches[0]
    elif len(matches) > 1:
        msg = ('Multiple XML files for %s exist; they need to be '
               'distinct by spelling and not just case.') % filename
        raise GeoNodeException(msg)

    # Only for QGIS Server
    if check_ogc_backend(qgis_server.BACKEND_PACKAGE):
        matches = glob.glob(glob_name + ".[qQ][mM][lL]")
        logger.debug('Checking QML file')
        logger.debug('Number of matches QML file : %s' % len(matches))
        logger.debug('glob name: %s' % glob_name)
        if len(matches) == 1:
            files['qml'] = matches[0]
        elif len(matches) > 1:
            msg = ('Multiple style files (qml) for %s exist; they need to be '
                   'distinct by spelling and not just case.') % filename
            raise GeoNodeException(msg)

        # Provides json files for additional extra data
        matches = glob.glob(glob_name + ".[jJ][sS][oO][nN]")
        logger.debug('Checking JSON File')
        logger.debug('Number of matches JSON file : %s' % len(matches))
        logger.debug('glob name: %s' % glob_name)

        if len(matches) == 1:
            files['json'] = matches[0]
        elif len(matches) > 1:
            msg = ('Multiple json files (json) for %s exist; they need to be '
                   'distinct by spelling and not just case.') % filename
            raise GeoNodeException(msg)

    return files
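
The special-character substitution near the top of get_files wraps each bracket or parenthesis in a character class so glob treats it literally instead of as pattern syntax. A minimal sketch (the base name is hypothetical):

import re

base_name = "data/road(s)"
glob_name = re.sub(r'([\[\]\(\)\{\}])', r'[\g<1>]', base_name)
print(glob_name)  # data/road[(]s[)]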
Example #30
File: common.py Project: 0x24bin/Pocsuite
def filepathParser(path):
    return ntpath.split(ntpath.splitext(path)[0])
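
A minimal sketch of what this one-liner returns (the path is hypothetical):

filepathParser('pocs/module.py')  # -> ('pocs', 'module')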
Example #31
 def _checkSearch(res, name, filetype):
     assert isinstance(filetype, int)
     assert isinstance(name, basestring)
     self.assertEquals(1, len(res), "I searched for filetype %r and got %r" % (filetype, res,))
     self.assertEquals(name, ntpath.splitext(ntpath.basename(res[0][0]))[0])
Example #32
def get_files(filename):
    """Converts the data to Shapefiles or Geotiffs and returns
       a dictionary with all the required files
    """
    files = {}

    # Verify if the filename is in ascii format.
    try:
        filename.encode('ascii')
    except UnicodeEncodeError:
        msg = f"Please use only characters from the english alphabet for the filename. '{os.path.basename(filename).encode('UTF-8', 'strict')}' is not yet supported."
        raise GeoNodeException(msg)

    # Let's unzip the filename in case it is a ZIP file
    import tempfile
    from geonode.utils import unzip_file
    if is_zipfile(filename):
        tempdir = tempfile.mkdtemp(dir=settings.STATIC_ROOT)
        _filename = unzip_file(filename, '.shp', tempdir=tempdir)
        if not _filename:
            # We need to iterate files as filename could be the zipfile
            import ntpath
            from geonode.upload.utils import _SUPPORTED_EXT
            file_basename, file_ext = ntpath.splitext(filename)
            for item in os.listdir(tempdir):
                item_basename, item_ext = ntpath.splitext(item)
                if ntpath.basename(item_basename) == ntpath.basename(
                        file_basename) and (item_ext.lower()
                                            in _SUPPORTED_EXT):
                    filename = os.path.join(tempdir, item)
                    break
        else:
            filename = _filename

    # Make sure the file exists.
    if not os.path.exists(filename):
        msg = f'Could not open {filename}. Make sure you are using a valid file'
        logger.debug(msg)
        raise GeoNodeException(msg)

    base_name, extension = os.path.splitext(filename)
    # Replace special characters in filenames - []{}()
    glob_name = re.sub(r'([\[\]\(\)\{\}])', r'[\g<1>]', base_name)

    if extension.lower() == '.shp':
        required_extensions = dict(shp='.[sS][hH][pP]',
                                   dbf='.[dD][bB][fF]',
                                   shx='.[sS][hH][xX]')
        for ext, pattern in required_extensions.items():
            matches = glob.glob(glob_name + pattern)
            if len(matches) == 0:
                msg = (
                    f'Expected helper file {base_name}.{ext} does not exist; a Shapefile '
                    'requires helper files with the following extensions: '
                    f'{list(required_extensions.keys())}')
                raise GeoNodeException(msg)
            elif len(matches) > 1:
                msg = ('Multiple helper files for %s exist; they need to be '
                       'distinct by spelling and not just case.') % filename
                raise GeoNodeException(msg)
            else:
                files[ext] = matches[0]

        matches = glob.glob(f"{glob_name}.[pP][rR][jJ]")
        if len(matches) == 1:
            files['prj'] = matches[0]
        elif len(matches) > 1:
            msg = ('Multiple helper files for %s exist; they need to be '
                   'distinct by spelling and not just case.') % filename
            raise GeoNodeException(msg)

    elif extension.lower() in cov_exts:
        files[extension.lower().replace('.', '')] = filename

    # Only for GeoServer
    if check_ogc_backend(geoserver.BACKEND_PACKAGE):
        matches = glob.glob(f"{os.path.dirname(glob_name)}.[sS][lL][dD]")
        if len(matches) == 1:
            files['sld'] = matches[0]
        else:
            matches = glob.glob(f"{glob_name}.[sS][lL][dD]")
            if len(matches) == 1:
                files['sld'] = matches[0]
            elif len(matches) > 1:
                msg = (
                    'Multiple style files (sld) for %s exist; they need to be '
                    'distinct by spelling and not just case.') % filename
                raise GeoNodeException(msg)

    matches = glob.glob(f"{glob_name}.[xX][mM][lL]")

    # shapefile XML metadata is sometimes named base_name.shp.xml
    # try looking for filename.xml if base_name.xml does not exist
    if len(matches) == 0:
        matches = glob.glob(f"{filename}.[xX][mM][lL]")

    if len(matches) == 1:
        files['xml'] = matches[0]
    elif len(matches) > 1:
        msg = ('Multiple XML files for %s exist; they need to be '
               'distinct by spelling and not just case.') % filename
        raise GeoNodeException(msg)

    return files