Example #1
def handwritingClassTest():
    hwLabels=[]
    trainingFileList=listdir('trainingDigits')
    m=len(trainingFileList)
    trainingMat=np.zeros((m,1024))
    
    for i in range(m):
        fileNameStr=trainingFileList[i]
        fileStr=fileNameStr.split('.')[0]
        classNumStr=int(fileStr.split('_')[0])
        hwLabels.append(classNumStr)
        trainingMat[i,:]=img2vector('trainingDigits/%s' % fileNameStr)
    
    testFileList=listdir('testDigits')
    errorCount=0.0
    mTest=len(testFileList)
    
    for i in range(mTest):
        fileNameStr=testFileList[i]
        fileStr=fileNameStr.split('.')[0]
        classNumStr=int(fileStr.split('_')[0])
        vectorUnderTest=img2vector('testDigits/%s' % fileNameStr)
        classifierResult=classify0(vectorUnderTest,
                                   trainingMat,
                                   hwLabels,3)
#         print 'the classifier came back with: %d, the real answer is: %d' % (classifierResult,classNumStr)
        if classifierResult!=classNumStr:
            errorCount+=1.0
    print 'the total number of errors is: %d' % errorCount
    print 'the total error rate is: %f' % (errorCount/float(mTest))
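This test, like the variants in Examples #9 and #10 below, assumes two helpers from the classic kNN digit-recognition exercise that are not shown here: img2vector, which flattens a 32x32 text-encoded digit image into a 1x1024 row vector, and classify0, a plain kNN vote. A minimal sketch of both, assuming NumPy is imported as np as in the snippet above:

def img2vector(filename):
    # flatten a 32x32 text image of '0'/'1' characters into a 1x1024 vector
    returnVect = np.zeros((1, 1024))
    fr = open(filename)
    for i in range(32):
        lineStr = fr.readline()
        for j in range(32):
            returnVect[0, 32*i + j] = int(lineStr[j])
    fr.close()
    return returnVect

def classify0(inX, dataSet, labels, k):
    # Euclidean distance from inX to every training row
    diffMat = np.tile(inX, (dataSet.shape[0], 1)) - dataSet
    distances = ((diffMat**2).sum(axis=1))**0.5
    # majority vote among the k nearest neighbours
    classCount = {}
    for idx in distances.argsort()[:k]:
        voteLabel = labels[idx]
        classCount[voteLabel] = classCount.get(voteLabel, 0) + 1
    return max(classCount.items(), key=lambda kv: kv[1])[0]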
Example #2
File: video.py Project: hbradlow/ClothSim
def createVideo(kb):
        list = dircache.listdir('.')
        i = 0
        while str('tmp'+str(i)) in list:
            i+=1
        dir = 'tmp' + str(i)
        os.system('mkdir ' + dir)
        print list

        p = Popen(['./light','-outputDir',dir,'-kb',str(kb)])
        time.sleep(120)
        p.terminate()

        list = dircache.listdir(dir)
        list =  sorted(list, key=lambda file: int(file[10:(file.index('.'))]))

        convert_string = "time convert -verbose -quality 100 -delay 0 "
        i = 0
        for file in list:
            i+=1
            if i%2 == 0:
                convert_string += dir + "/" + file + " "
        convert_string += " " + dir + "/outvideo.mpeg"

        os.system(convert_string)
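The tmpN probing above is racy: another process could claim the name between the listdir and the mkdir. A sketch of the same setup on the stdlib's atomic primitive, keeping the ./light invocation from the snippet:

import subprocess
import tempfile
import time

def createVideo(kb):
    # mkdtemp atomically creates and returns a fresh directory, e.g. ./tmpa1b2c3
    outdir = tempfile.mkdtemp(prefix='tmp', dir='.')
    p = subprocess.Popen(['./light', '-outputDir', outdir, '-kb', str(kb)])
    time.sleep(120)
    p.terminate()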
Example #3
    def get_file_list(self, model, iter, dir):
        """ Get the file list from a given directory """

        ls = dircache.listdir(dir)
        #ls = self.get_music_library()
        ls.sort(key=str.lower)
        for i in ls:
            path = ospath.join(dir,i)
            if ospath.isdir(path) or not self.show_only_dirs :
                if i[0] != '.' or (self.show_hidden and i[0] == '.'):
                    newiter = model.append(iter)
                    if ospath.isdir(path): icon = self.get_folder_closed_icon()
                    else: icon = self.get_file_icon()
                    model.set_value(newiter, 0, icon)
                    model.set_value(newiter, 1, i)
                    model.set_value(newiter, 2, path)
                    if ospath.isdir(path):
                        try: subdir = dircache.listdir(path)
                        except: subdir = []
                        if subdir != []:
                            for i in subdir:
                                if ospath.isdir(ospath.join(path,i)) or not self.show_only_dirs:
                                    if i[0] != '.' or (self.show_hidden and i[0] == '.'):
                                        self.add_empty_child(model, newiter)
                                        break
Example #4
    def GetIds(self, kind, directory = '', scan_subdirs = 1):

        dirname = os.path.join(self.srcdir, directory)
        if not os.path.isdir(dirname):
            raise NoSuchSuiteError, directory

        if kind == Database.TEST:
            ids = [self.JoinLabels(directory, f)
                   for f in dircache.listdir(dirname)
                   if (os.path.isfile(os.path.join(dirname, f)) and
                       os.path.splitext(f)[1] in self.test_extensions)]

        elif kind == Database.RESOURCE:
            ids = []
            
        else: # SUITE
            ids = [self.JoinLabels(directory, d)
                   for d in self.GetSubdirectories(directory)
                   if d not in self.excluded_subdirs]

        if scan_subdirs:
            for subdir in dircache.listdir(dirname):
                if (subdir not in self.excluded_subdirs
                    and os.path.isdir(os.path.join(dirname, subdir))):
                    dir = self.JoinLabels(directory, subdir)
                    ids.extend(self.GetIds(kind, dir, True))

        return ids
Example #5
def dircmp(a, b):  # Compare whether two directories are the same
    # To make this as fast as possible, it uses the statcache
    print "  dircmp", a, b
    a_list = dircache.listdir(a)
    b_list = dircache.listdir(b)
    for x in a_list:
        if skipthis(x):
            pass
        elif x not in b_list:
            return 0
        else:
            ax = os.path.join(a, x)
            bx = os.path.join(b, x)
            if statcache.isdir(ax) and statcache.isdir(bx):
                if not dircmp(ax, bx):
                    return 0
            else:
                try:
                    if not cmpcache.cmp(ax, bx):
                        return 0
                except (RuntimeError, os.error):
                    return 0
    for x in b_list:
        if skipthis(x):
            pass
        elif x not in a_list:
            return 0
    return 1
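statcache and cmpcache were dropped from the stdlib even earlier than dircache. A sketch of the same recursive comparison on their modern stand-ins, os.path and filecmp, with skipthis kept as the same (not shown) filter the original uses:

import os
import filecmp

def dircmp2(a, b):
    # return 1 if a and b agree on all non-skipped entries, else 0
    a_list = os.listdir(a)
    b_list = os.listdir(b)
    for x in a_list:
        if skipthis(x):
            continue
        if x not in b_list:
            return 0
        ax = os.path.join(a, x)
        bx = os.path.join(b, x)
        if os.path.isdir(ax) and os.path.isdir(bx):
            if not dircmp2(ax, bx):
                return 0
        else:
            try:
                if not filecmp.cmp(ax, bx):
                    return 0
            except OSError:
                return 0
    for x in b_list:
        if not skipthis(x) and x not in a_list:
            return 0
    return 1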
Example #6
    def GetIds(self, kind, directory = "", scan_subdirs = 1):

        components = self.GetLabelComponents(directory)
        path = os.path.join(self.GetRoot(), *components)

        if kind == database.Database.TEST:

            if not components:
                return []

            ids = [self.JoinLabels(directory, t)
                   for t in dircache.listdir(path)
                   if self.is_a_test[components[0]](path, t)]

        elif kind == Database.RESOURCE:
            return [] # no resources yet

        else: # SUITE

            if directory:
                ids = [self.JoinLabels(directory, d)
                       for d in dircache.listdir(path)
                       if os.path.isdir(os.path.join(path, d))]
            else:
                ids = list(self.is_a_test.keys())

        if scan_subdirs:
            for d in dircache.listdir(path):
                if os.path.isdir(os.path.join(path, d)):
                    ids.extend(self.GetIds(kind,
                                           self.JoinLabels(directory, d),
                                           True))

        return ids
Example #7
File: database.py Project: bambang/vsipl
    def GetIds(self, kind, directory = '', scan_subdirs = 1):
        # The CompilationTestDatabase maps all source files to tests.
        # Here, we need to filter out main.cpp, which gets linked to everything else.

        dirname = os.path.join(self.srcdir, directory)
        if not os.path.isdir(dirname):
            raise NoSuchSuiteError, directory

        if kind in (Database.TEST, Database.RESOURCE):
            ids = [self.JoinLabels(directory, f)
                   for f in dircache.listdir(dirname)
                   if (os.path.isfile(os.path.join(dirname, f)) and
                       os.path.splitext(f)[1] in self.test_extensions and
                       f != 'main.cpp')]
            # Ids with extensions stripped off are tests.
            if kind == Database.TEST:
                ids = [os.path.splitext(i)[0] for i in ids]
            
        else: # SUITE
            ids = [self.JoinLabels(directory, d)
                   for d in self.GetSubdirectories(directory)
                   if d not in self.excluded_subdirs]

        if scan_subdirs:
            for subdir in dircache.listdir(dirname):
                if (subdir not in self.excluded_subdirs
                    and os.path.isdir(os.path.join(dirname, subdir))):
                    dir = self.JoinLabels(directory, subdir)
                    ids.extend(self.GetIds(kind, dir, True))

        return ids
Example #8
    def test_listdir(self):
        ## SUCCESSFUL CASES
        entries = dircache.listdir(self.tempdir)
        self.assertEquals(entries, [])

        # Check that cache is actually caching, not just passing through.
        self.assert_(dircache.listdir(self.tempdir) is entries)

        # Directories aren't "files" on Windows, and directory mtime has
        # nothing to do with when files under a directory get created.
        # That is, this test can't possibly work under Windows -- dircache
        # is only good for capturing a one-shot snapshot there.

        if sys.platform[:3] not in ('win', 'os2', 'ms-'):
            # Sadly, dircache has the same granularity as stat.mtime, and so
            # can't notice any changes that occurred within 1 sec of the last
            # time it examined a directory.
            time.sleep(1)
            self.writeTemp("test1")
            entries = dircache.listdir(self.tempdir)
            self.assertEquals(entries, ['test1'])
            self.assert_(dircache.listdir(self.tempdir) is entries)

        ## UNSUCCESSFUL CASES
        self.assertRaises(OSError, dircache.listdir, self.tempdir+"_nonexistent")
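dircache itself was deprecated in Python 2.6 and removed in Python 3; the test above is effectively its whole contract. A minimal work-alike that honours that contract (re-list only when the directory mtime changes, hand back the identical cached list object otherwise):

import os

_cache = {}  # path -> [mtime, sorted entries]

def listdir(path):
    mtime = os.stat(path).st_mtime  # raises OSError for missing paths, as tested above
    cached = _cache.get(path)
    if cached is not None and cached[0] == mtime:
        return cached[1]  # the very same list object, as the test asserts
    entries = os.listdir(path)
    entries.sort()
    _cache[path] = [mtime, entries]
    return entries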
Example #9
def handWritingClassTest():
    hwLabels=[]
    training_file_list=listdir('%s\\trainingDigits'%dir)
    m=len(training_file_list)
    training_mat=zeros((m,1024))
    for i in range(m):
        file_name_string=training_file_list[i]
        file_str=file_name_string.split('.')[0]
        class_num_str=int(file_str.split('_')[0])
        hwLabels.append(class_num_str)
        training_mat[i,:]=image2vector('%s\\trainingDigits\\%s'%(dir,file_name_string))
    test_file_list=listdir('%s\\testDigits'%dir)
    error_count=0.0
    mTest=len(test_file_list)
    for i in range(mTest):
        file_name_string=test_file_list[i]
        file_str=file_name_string.split('.')[0]
        class_num_str=int(file_str.split('_')[0])
        vector_under_test=image2vector('%s\\testDigits\\%s'%(dir,file_name_string))
        classifier_result=classify0(vector_under_test, training_mat, hwLabels, 3)
        print "the classifier came back with: %d, the real answer is: %d"% (classifier_result, class_num_str)
        if(classifier_result!=class_num_str):
            error_count+=1.0
    print "\nthe total number of errors is: %d" %error_count
    print "\nthe total error rate is: %f" % (error_count/float(mTest))
Example #10
def handwritingClassTest():
    """
    knn on digits
    """
    hwLabels = []
    trainingFileList = listdir('trainingDigits')
    m = len(trainingFileList)
    trainMat = np.zeros((m, 1024))
    for i in range(m):
        fileNameStr = trainingFileList[i]
        fileStr = fileNameStr.split('.')[0]
        classNumStr = int(fileStr.split('_')[0])
        hwLabels.append(classNumStr)
        trainMat[i, :] = img2vector('trainingDigits/%s' % fileNameStr)
    testFileList = listdir('testDigits')
    errorCount = 0.0
    mTest = len(testFileList)
    for i in range(mTest):
        fileNameStr = testFileList[i]
        fileStr = fileNameStr.split('.')[0]
        classNumStr = int(fileStr.split('_')[0])
        vectorUnderTest = img2vector('testDigits/%s' % fileNameStr)
        classifierResult = classify0(vectorUnderTest, trainMat, hwLabels, 3)
        print "the classifier came back with: %d, the real answer is: %d"\
            % (classifierResult, classNumStr)
        if classifierResult != classNumStr:
            errorCount += 1.0
    print "the total error rate is: %f" % (errorCount / float(mTest))
    print '#' * 30
Example #11
    def all(cls, precheck=False):
        """ Return a generator of all of this type """
        logger.debug("Attempting to find all %r...", cls.__name__)
        type_dir = os.path.join(cf.LOCAL_REPOSITORY_PATH, cls.required_leader.lower())
        kim_codes = (
            subpath for subpath in dircache.listdir(type_dir) if (
                os.path.isdir(os.path.join(type_dir, subpath)) and
                database.iskimcode(subpath)
            )
        )

        # If this is being used for a precheck, also search the 'precheck' local repository
        if precheck:
            type_dir_precheck = os.path.join(os.path.join(cf.LOCAL_REPOSITORY_PATH, 'precheck'),
                    cls.required_leader.lower())
            kim_codes_precheck = (
                subpath for subpath in dircache.listdir(type_dir_precheck) if (
                    os.path.isdir(os.path.join(type_dir_precheck, subpath)) and
                    database.iskimcode(subpath)
                )
            )
            kim_codes_final = itertools.chain(kim_codes, kim_codes_precheck)
        else:
            kim_codes_final = kim_codes

        for x in kim_codes_final:
            try:
                yield cls(x)
            except Exception as e:
                logger.exception("Exception on formation of kim_code (%s)", x)
Example #12
	def run(self): # Compare everything except common subdirectories
		self.a_list = filter(dircache.listdir(self.a), self.hide)
		self.b_list = filter(dircache.listdir(self.b), self.hide)
		self.a_list.sort()
		self.b_list.sort()
		self.phase1()
		self.phase2()
		self.phase3()
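The filter in this run (and in the dd variant of Example #13) is not the builtin: in the old dircmp.py module these snippets come from, filter(flist, skip) is a module-level helper that drops the names listed in hide. Presumably something close to:

def filter(flist, skip):
    # keep only the names that are not in the skip list (typically ['.', '..'])
    return [name for name in flist if name not in skip]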
Example #13
 def run(dd): # Compare everything except common subdirectories
         dd.a_list = filter(dircache.listdir(dd.a), dd.hide)
         dd.b_list = filter(dircache.listdir(dd.b), dd.hide)
         dd.a_list.sort()
         dd.b_list.sort()
         dd.phase1()
         dd.phase2()
         dd.phase3()
Example #14
def clean():
  for dirname in dircache.listdir("src/examples/"):
    if dirname[0:8] == "example_" and dirname != "example_prototype":
      print("clean " + dirname)
      for dirname2 in dircache.listdir(target_dir + dirname):
        if ".sln" in dirname2 or ".vcxproj" in dirname2:
          os.remove(target_dir + dirname + "/" + dirname2)
        elif ".xcodeproj" in dirname2:
          shutil.rmtree(target_dir + dirname + "/" + dirname2)
Example #15
File: panels.py Project: rpedroso/gweb2py
    def extendTree(self, parentID):
        '''extendTree is a semi-lazy directory tree builder. It takes
        the ID of a tree entry and fills in the tree with its child
        subdirectories and their children - updating 2 layers of the
        tree. This function is called by buildTree and onExpand methods'''


        # retrieve the associated absolute path of the parent
        parentDir = self.tree.GetPyData(parentID)


        subdirs = dircache.listdir(parentDir)
        #subdirs.sort()
        for child in subdirs:
            child_path = opj(parentDir, child)
            if not os.path.isdir(child_path) and parentID == self.rootID:
                continue
            if child.endswith('.pyc'):
                continue
            if not child.startswith('.') and not os.path.islink(child):
                to_include = False
                if  self.includeDirs and os.path.isdir(child_path):
                    #[child_path p for p in self.includeDirs]
                    for c in self.includeDirs:
                        n = len(c)
                        if c[:n] in child_path:
                            to_include = True
                    if not to_include:
                        continue
                if (self.excludeDirs
                        and os.path.isdir(child_path)
                        and child_path in self.excludeDirs):
                    continue
                # add the child to the parent
                childID = self.tree.AppendItem(parentID, child)
                # associate the full child path with its tree entry
                self.tree.SetPyData(childID, child_path)

                # Now the child entry will show up, but it currently has no
                # known children of its own and will not have a '+' showing
                # that it can be expanded to step further down the tree.
                # Solution is to go ahead and register the child's children,
                # meaning the grandchildren of the original parent
                newParent = child
                newParentID = childID
                newParentPath = child_path
                newsubdirs = (dircache.listdir(newParentPath)
                                if os.path.isdir(child_path) else [])
                for grandchild in newsubdirs:
                    grandchild_path = opj(newParentPath, grandchild)
                    if (not child.startswith('.') and not
                            os.path.islink(grandchild_path)):
                        grandchildID = self.tree.AppendItem(newParentID,
                                grandchild)
                        self.tree.SetPyData(grandchildID, grandchild_path)
Example #16
File: paths.py Project: DDMAL/Gamera
def get_toolkit_names(dir):
    toolkits = []
    listing = dircache.listdir(dir)
    dircache.annotate(dir, listing)
    for toolkit in listing:
        if toolkit.endswith(".py") and toolkit != "__init__.py":
            toolkits.append(toolkit[:-3])
        elif toolkit.endswith("module.so"):
            toolkits.append(toolkit[:-9])
        elif (toolkit.endswith("/") and
              "__init__.py" in dircache.listdir(os.path.join(dir, toolkit))):
            toolkits.append(toolkit[:-1])
    return toolkits
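dircache.annotate(dir, listing) mutates listing in place, appending '/' to every name that is a directory; that is why the endswith('/') branch above works. Since dircache.listdir hands back its internal cached list, annotating a copy is the safer habit. A small usage sketch with a hypothetical toolkit directory:

import dircache

listing = dircache.listdir('/usr/lib/gamera/toolkits')[:]  # copy, don't annotate the cache
dircache.annotate('/usr/lib/gamera/toolkits', listing)
# e.g. ['custom/', 'skeleton.py', 'fastbitsmodule.so']; directories now end in '/'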
Example #17
File: qaction.py Project: AnatomicJC/mmc
def qa_list_files():
    path = MscConfig().qactionspath
    if not os.path.exists(path):
        return [False, "Quick action path don't exists", path]

    result = {}
    d = dircache.listdir(path)
    d = d[:]

    for filename in dircache.listdir(path):
        if filename != '..' and filename != '.' and os.path.exists(os.path.join(path, filename)) and re.compile(r'\.msc$').search(filename):
            result[filename] = Qaction(filename).read()

    return [True, result]
Example #18
    def create_model(self):
        """Populate the list of available Profiles."""
        store = gtk.ListStore(str)
        fileList = dircache.listdir(self.profileDirectory)
        store.clear()
        for cur in fileList:
            absPath = os.path.join(self.profileDirectory, cur)
            if os.path.isdir(absPath):

                if "Default" in dircache.listdir(absPath):
                    store.append([cur])
                elif len(dircache.listdir(absPath)) == 0:
                    store.append([cur])
                    
        return store
Example #19
def loaddb():
	for each in dircache.listdir('db/'):
		reader = csv.reader(open('db/'+each, 'rb'))
		templist = []
		for record in reader:
			templist.append(record)
		exec("global "+str(each)+";"+str(each)+" = templist")
Example #20
File: DataAccess.py Project: ceshine/QSTK
 def get_all_symbols (self):
     '''
     @summary: Returns a list of all the symbols located at any of the paths for this source. @see: {__init__}
     @attention: This will discard all files that are not of type pkl. ie. Only the files with an extension pkl will be reported.
     '''
 
     listOfStocks=list()
     #Path does not exist
     
     if (len(self.folderList) == 0):
         raise ValueError ("DataAccess source not set")   
 
     for path in self.folderList:
         stocksAtThisPath=list ()
         #print str(path)
         stocksAtThisPath= dircache.listdir(str(path))
         #Next, throw away everything that is not a .pkl And these are our stocks!
         stocksAtThisPath = filter (lambda x:(str(x).find(str(self.fileExtensionToRemove)) > -1), stocksAtThisPath)
         #Now, we remove the .pkl to get the name of the stock
         stocksAtThisPath = map(lambda x:(x.partition(str(self.fileExtensionToRemove))[0]),stocksAtThisPath)
         
         listOfStocks.extend(stocksAtThisPath)
         #for stock in stocksAtThisPath:
             #listOfStocks.append(stock)
     return listOfStocks    
Example #21
def ddc_chooser():
    alle = dircache.listdir('animations/ddc')
    ddc_list = []
    for i in alle:
        if i[0:3] == 'ddc' and i[-1] == 'y':
            ddc_list.append('ddc/'+i)
    return random.choice(ddc_list)
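The same pick can be phrased with glob, which filters and joins in one step; a sketch assuming the same animations/ddc layout as above:

import glob
import os
import random

def ddc_chooser():
    # 'ddc*y' matches the same names as the manual prefix/suffix test above
    candidates = ['ddc/' + os.path.basename(p)
                  for p in glob.glob('animations/ddc/ddc*y')]
    return random.choice(candidates)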
Example #22
File: schema.py Project: nyov/netsa-python
def analyze_paths(paths):
    """
    Analyze the schema files available in the given paths (list of
    directories) to determine what versions are available for install
    or upgrade.
    """
    if not isinstance(paths, basestring):
        for path in paths:
            analyze_paths(path)
        return
    for p in dircache.listdir(paths):
        p = os.path.join(paths, p)
        if os.path.isdir(p):
            analyze_paths(p)
        elif os.path.isfile(p):
            m = file_re.match(p)
            if m:
                m_mode = m.group('mode')
                m_schema_name = m.group('schema_name')
                m_old_version = m.group('old_version')
                m_version = m.group('version')
                if m_schema_name not in schema_upgrades:
                    schema_upgrades[m_schema_name] = {}
                if m_old_version not in schema_upgrades[m_schema_name]:
                    schema_upgrades[m_schema_name][m_old_version] = {}
                schema_upgrades[m_schema_name][m_old_version][m_version] = p
                if m_schema_name not in schema_latest:
                    schema_latest[m_schema_name] = m_version
                else:
                    if compare_versions(
                        parse_version(m_version),
                        parse_version(schema_latest[m_schema_name])) > 0:
                        schema_latest[m_schema_name] = m_version
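analyze_paths() leans on module-level state that sits outside the snippet: a file_re with named groups mode, schema_name, old_version and version, the schema_upgrades and schema_latest dictionaries it fills, and netsa's parse_version/compare_versions helpers. A purely hypothetical sketch of those globals, only to make the control flow readable; the real netsa-python definitions may differ:

import re

# Hypothetical filename convention: 'install-myschema-1.2.sql' or
# 'upgrade-myschema-1.1-1.2.sql' (old_version is absent for installs).
file_re = re.compile(
    r'.*/(?P<mode>install|upgrade)-(?P<schema_name>\w+)'
    r'(?:-(?P<old_version>[\d.]+))?-(?P<version>[\d.]+)\.sql$')

schema_upgrades = {}  # schema_name -> old_version -> version -> path
schema_latest = {}    # schema_name -> latest version seen so far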
Example #23
 def _indir (p):
     if _HIDE_RE.match(p) and not _HIDE_IN:
         carp("%s is hidden. skip" %p)
         return (0, [])
     
     selfSize = 0
     buff     = []
     list     = []
 
     try:
         list = dircache.listdir(p)
     except :
         pass
     for f in list:
         if _HIDE_IN and _HIDE_RE.match(f):
             pass
         else:
             if os.path.isdir(p+'/'+f):
                 n = _indir(p+'/'+f)
                 buff += [[n[0], p+'/'+f]]
                 buff += n[1]
                 selfSize += n[0]
             else:
                 selfSize += os.path.getsize(p+'/'+f)
                 if not _DIR_SORT :
                     buff += [[ os.path.getsize(p+'/'+f), p+'/'+f]]
     return (selfSize, buff)
Example #24
File: DataAccess.py Project: ceshine/QSTK
 def getPathOfFile(self, symbol_name, bDelisted=False):
     '''
     @summary: Since a given pkl file can exist in any of the folders- we need to look for it in each one until we find it. Thats what this function does.
     @return: Complete path to the pkl file including the file name and extension
     '''
     
     if not bDelisted:
         for path1 in self.folderList:
             if (os.path.exists(str(path1)+str(symbol_name+".pkl"))):
                 # Yay! We found it!
                 return (str(str(path1)+str(symbol_name)+".pkl"))
                 #if ends
             elif (os.path.exists(str(path1)+str(symbol_name+".csv"))):
                 # Yay! We found it!
                 return (str(str(path1)+str(symbol_name)+".csv"))
             #for ends
             
     else:
         ''' Special case for delisted securities '''
         lsPaths = []
         for sPath in self.folderList:
             if re.search( 'Delisted Securities', sPath ) == None:
                 continue
             
             for sFile in dircache.listdir(sPath):
                 if not re.match( '%s-\d*.pkl'%symbol_name, sFile ) == None:
                     lsPaths.append( sPath + sFile )
                     
         lsPaths.sort()
         return lsPaths
          
     print "Did not find path to " + str (symbol_name)+". Looks like this file is missing"  
Example #25
	def _getListOfComputeFolders(self):
		computeFolders=[]
		devcodeDir=self._getDevcodeDir()
		computeFolderEntries=dircache.listdir(devcodeDir)
		for entry in computeFolderEntries:
			computeFolders.append(devcodeDir+"/"+entry)
		return computeFolders
Example #26
def main():
    archivos_html = []
    archivos = listdir('.')
    for archivo in archivos:
        if archivo.endswith('.html'):
            archivos_html.append(archivo)
    print archivos_html
    
    master_index = {}
    doc_index = {}
    
    for doc,archivo in enumerate(archivos_html):
        HTML = lee_archivo(archivo)        
        sopa = BeautifulSoup(HTML,'html.parser')
        texto = get_texto(sopa)        
        words = get_words (texto)
        word_index = {}
        make_index(word_index, words, doc)
        doc_index[doc] = archivo
        try:
            for key,value in word_index.items():
                if key not in master_index:
                    master_index[key] = [value]
                else:
                    valor = master_index[key]
                    valor.append([value])
                    master_index[key] = valor
        except:
            print key
                
    print master_index
    guarda_indice("indice.pickle", master_index)
    print doc_index
    guarda_indice("indice_doc.pickle", doc_index)
Example #27
    def glob_impl( root_dir_path ):
        child_dirs = [root_dir_path]
        while child_dirs:
            dir_path = child_dirs.pop()
            for entry in listdir( dir_path ):
                full_path = os.path.join( dir_path, entry )
##                print 'Testing:', full_path,
                is_dir = os.path.isdir( full_path )
                if is_dir and not is_pruned_dir( entry ): # explore child directory ?
##                    print '===> marked for recursion',
                    child_dirs.append( full_path )
                included = apply_filter( full_path, include_filter )
                rejected = apply_filter( full_path, exclude_filter )
                if not included or rejected: # do not include entry ?
##                    print '=> not included or rejected'
                    continue
                link = os.path.islink( full_path )
                is_file = os.path.isfile( full_path )
                if not is_file and not is_dir:
##                    print '=> unknown entry type'
                    continue
                if link:
                    entry_type = is_file and FILE_LINK or DIR_LINK
                else:
                    entry_type = is_file and FILE or DIR
##                print '=> type: %d' % entry_type, 
                if (entry_type & entry_type_filter) != 0:
##                    print ' => KEEP'
                    yield os.path.join( dir_path, entry )
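glob_impl() tests (entry_type & entry_type_filter) against bit flags defined elsewhere in its module. A hedged sketch of those constants with assumed values; the snippet only requires the four entry types to be distinct bits:

# Assumed values; the original module may number these differently.
DIR       = 1   # real directory
FILE      = 2   # real file
DIR_LINK  = 4   # symlink pointing at a directory
FILE_LINK = 8   # symlink pointing at a file
ALL_ENTRIES = DIR | FILE | DIR_LINK | FILE_LINK

# usage: keep only regular files and file symlinks
# for path in glob_impl(root_dir): ...  with entry_type_filter = FILE | FILE_LINK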
Example #28
    def _convertHS2MyFaceDB(self,facedatapath):
        filepath = join( facedatapath, 'HandySolution' )
        filenames = listdir(filepath)

        return [[join(filepath,x),
                [30.0,30.0],[70.0,30.0],[50.0,55.0],
                [35.0,75.0],[50.0,75.0],[65.0,75.0]] for x in filenames]
Example #29
File: ms.py Project: provegard/pyupnp
    def make_browse_response(self, req, environ):
        serviceType = environ['upnp.soap.serviceType']
        action = environ['upnp.soap.action']

        # inargs
        id = req.get_arg('ObjectID')
        flag = req.get_arg('BrowseFlag')
        start = int(req.get_arg('StartingIndex'))
        rc = req.get_arg('RequestedCount')
        count = 0 if rc == "" else int(rc)
        order = req.get_arg('SortCriteria')
    
        parent = id
        if id == '0':
            id = self.content_dir
        else:
            id = os.path.normpath(os.path.join(self.content_dir, id))

        if not os.path.exists(id) or not id.startswith(self.content_dir):
            return upnp.SoapError(701, 'No such object')

        # out args
        objs = []
        matched = 0

        if flag == 'BrowseMetadata':
            ext = os.path.splitext(id)[1]
            objs.append((id, ext))
            matched = 1
            parent = os.path.dirname(parent)[len(self.content_dir):]
            if parent == '':
                parent = '0'
        elif flag == 'BrowseDirectChildren':
            if count == 0:
                count = 1e6
            for name in dircache.listdir(id):
                if name.startswith('.'):
                    continue
                ext = os.path.splitext(name)[1]
                name = os.path.normpath(os.path.join(id, name))
                if os.path.isfile(name) and ext in formats:
                    pass
                elif os.path.isdir(name):
                    pass
                else:
                    continue
                matched += 1
                if matched <= start:
                    continue
                if len(objs) < count:
                    objs.append((name, ext))
        else:
            return upnp.SoapError(402, 'Invalid args')

        resp = upnp.SoapMessage(serviceType, action + 'Response')
        resp.set_arg('Result', self.toresult(objs, parent, environ))
        resp.set_arg('NumberReturned', str(len(objs)))
        resp.set_arg('TotalMatches', str(matched))
        resp.set_arg('UpdateID', '0')
        return resp
Example #30
def doSyncDir( report, dfrom, dto, files, recursive = 0 ):

	# files == file extensions : ex) .c,.h,.script
	#
	# copy files
	#

	fileList = files.split(",")	
	
	filesInSrc = dircache.listdir(dfrom)
	for f in filesInSrc:
		spath = dfrom + "/" + f
		dpath = dto + "/" + f
		
		if ( os.path.isfile( spath ) ):
			for c in fileList:
				c = c.replace(" ","") # remove spaces
				if fnmatch.fnmatch( f, c ):
					# marker is filled or updated when necessary
					copyIfDiffAndNew( report, spath, dpath)
		
		elif ( os.path.isdir( spath ) ):
			if ( recursive ):
				doSyncDir( report, spath, dpath, files, recursive = 1)

	print "# %s --> %s" %(dfrom, dto)
Example #31
                      action='store',
                      default='',
                      help='Directory to scan for Doxygen and asciidoc files')
    parser.add_option('-o',
                      '--output',
                      dest='output',
                      action='store',
                      default='',
                      help='Directory to output files to')
    (options, args) = parser.parse_args()

    if not options.dir or not options.output:
        parser.print_help()
        sys.exit(1)

    print "Building documentation to %s" % (options.dir)
    files = dircache.listdir(options.dir)
    asciidocs = [
        build_asciidoc(os.path.join(options.dir, asciidoc_file),
                       options.output)
        for asciidoc_file in filter(
            lambda x: os.path.splitext(x)[-1] == ".asciidoc", files)
    ]
    doxyfile = [
        build_doxygen(os.path.join(options.dir, doxyfile),
                      options.output) for doxyfile in filter(
                          lambda x: os.path.split(x)[-1] == "Doxyfile", files)
    ]
    build_doc_index(options.output, asciidocs, doxyfile)
    sys.exit(0)
Example #32
#!/usr/bin/python
from __future__ import division
import dircache
import time
import os
import inspect

trainingPath = os.path.dirname(
    os.path.abspath(inspect.getfile(inspect.currentframe()))) + "/training/"
testPath = os.path.dirname(
    os.path.abspath(inspect.getfile(inspect.currentframe()))) + "/attack/"

#for each file in training trace directory
fileList = dircache.listdir(trainingPath)
training = []
for fileName in fileList:
    #open the file, split it into system calls, generate clusters
    f = open(trainingPath + fileName, 'r')
    systemCalls = f.readline().split()
    temp = []
    callList = list(set(systemCalls))
    for item in callList:
        callCount = systemCalls.count(item)
        temp.append((item, callCount))
    training.append(temp)
    f.close()

folderList = dircache.listdir(testPath)
for folderName in folderList:
    # each directory is a different attack
    validate = []
Example #33
	def IndexFilesInFolder( self, src_dirname, target_dirname = None, download = None, copy_file = True, overwrite_mode = False ):
		if not os.path.isdir( src_dirname ):
			return 

		for file in dircache.listdir( src_dirname ):
			current_path = os.path.join( src_dirname, file )
			if os.path.isdir( current_path ):
				try:
					self.IndexFilesInFolder( os.path.join( src_dirname, file ), target_dirname, download, copy_file = copy_file, overwrite_mode = overwrite_mode )
				except:
					import traceback
					traceback.print_exc()
					continue

			elif self.IsExecutable( current_path ):
				#Check MZ at the start of the file
				if self.DebugLevel > 2:
					print current_path

				filename = os.path.basename( current_path )
				
				if not filename in self.NotInterestedFiles:
					version_info = self.QueryFile( current_path )
					
					try:
						statinfo = os.stat( current_path )
						if self.DebugLevel > 2:
							print "%s=%s,%s" % ( file, time.ctime(statinfo.st_ctime), time.ctime(statinfo.st_mtime) )

						ctime = time.localtime( statinfo.st_ctime )
						ctime_dt = datetime.datetime( ctime.tm_year, ctime.tm_mon, ctime.tm_mday, ctime.tm_hour, ctime.tm_min, ctime.tm_sec )

						mtime = time.localtime( statinfo.st_mtime )
						mtime_dt = datetime.datetime( mtime.tm_year, mtime.tm_mon, mtime.tm_mday, mtime.tm_hour, mtime.tm_min, mtime.tm_sec )

						added_time = time.localtime( time.time() )
						added_time_dt = datetime.datetime( added_time.tm_year, added_time.tm_mon, added_time.tm_mday, added_time.tm_hour, added_time.tm_min, added_time.tm_sec )

						fd = open( current_path, "rb" )
						data = fd.read()
						fd.close()
						md5 = self.GetMD5( data )
						sha1 = self.GetSHA1( data )

						if self.DebugLevel > 2:
							print version_info

						if not sha1:
							continue

						#Put to the Index Database
						operating_system = 'Windows XP'
						patch_identifier = ''
						service_pack = ''
						company_name = ''
						file_version = ''
						if version_info.has_key( 'CompanyName' ) and version_info.has_key( 'FileVersion' ):
							company_name = version_info['CompanyName']
							file_version = version_info['FileVersion']
							target_relative_directory = os.path.join( self.SanitizeForFilename( company_name ), self.SanitizeForFilename( filename ) , self.SanitizeForFilename( file_version ) )
						else:
							target_relative_directory = "etc"

						if not target_dirname:
							target_dirname = os.getcwd()

						target_relative_filename = os.path.join( target_relative_directory, os.path.basename( current_path ) )
						files = self.Database.GetFileBySHA1( sha1, None,None,None,None,None )

						if not files or len(files) == 0 or overwrite_mode:
							if self.DebugLevel > 2:
								print 'New', download, current_path, version_info, 'filename=',filename,sha1

							target_relative_filename = os.path.join( target_relative_directory, os.path.basename( current_path ) )
							target_full_directory = os.path.join( target_dirname, target_relative_directory )
							target_full_filename = os.path.join( target_dirname, target_relative_filename )
							
							if self.DebugLevel > 2:
								print 'target_relative_directory', target_relative_directory
								print 'target_relative_filename', target_relative_filename
								print 'target_full_filename',target_full_filename

							if not os.path.isdir( target_full_directory ):
								try:
									os.makedirs( target_full_directory )
								except:
									print 'Failed to make',target_full_directory
									print 'target_full_filename=',target_full_filename

							if current_path.lower() != target_full_filename.lower():
								if self.DebugLevel > 1:
									print "Different src and target:",current_path, target_full_filename

								if os.path.exists( target_full_filename ):
									target_relative_directory = os.path.join( target_relative_directory, sha1 )
									target_relative_filename = os.path.join( target_relative_directory, os.path.basename( current_path ) )
									target_full_directory = os.path.join( target_dirname, target_relative_directory )
									target_full_filename = os.path.join( target_dirname, target_relative_filename )

									if self.DebugLevel > 2:
										print 'target_relative_directory', target_relative_directory
										print 'target_relative_filename', target_relative_filename
										print 'target_full_filename',target_full_filename

									if not os.path.isdir( target_full_directory ):
										os.makedirs( target_full_directory )

								if not os.path.exists( target_full_filename ):
									try:
										if copy_file:
											if self.DebugLevel > 1:
												print 'Copy from', current_path ,'to',target_full_filename
											shutil.copyfile( current_path, target_full_filename )
										else:
											if self.DebugLevel > 1:
												print 'Move to',target_full_filename
											shutil.move( current_path, target_full_filename )
									except:
										import traceback
										traceback.print_exc()

						if files and len(files)>0:
							#Update
							if self.DebugLevel > 2:
								print 'Already there:', current_path, version_info,sha1,files
							for file in files:
								# timestamp comparison and update
								if file.mtime < mtime_dt or overwrite_mode:
									if self.DebugLevel > 2:
										print 'Updating with older data:', current_path, version_info
								
									self.Database.UpdateFileByObject(
										file,
										download,
										operating_system, 
										service_pack, 
										filename, 
										company_name, 
										file_version, 
										patch_identifier,
										current_path,
										target_relative_filename,
										ctime = ctime_dt,
										mtime = mtime_dt,
										added_time = added_time_dt,
										md5 = md5,
										sha1 = sha1
									)
						else:
							#New
							self.Database.AddFile( 
								download,
								operating_system, 
								service_pack, 
								filename, 
								company_name, 
								file_version, 
								patch_identifier,
								current_path,
								target_relative_filename,
								ctime = ctime_dt,
								mtime = mtime_dt,
								added_time = added_time_dt,
								md5 = md5,
								sha1 = sha1
							)
					except:
						import traceback
						traceback.print_exc()

		self.Database.Commit()
Example #34
def read_data():
    print "reading data..."
    intensities = []
    spotwritelist = []
    cellsperfile = []
    lout = listdir(mskpath)
    lin = listdir(locpath)

    # read in spots data:
    print "reading spots data..."
    for infilename in lout:
        if maskfilename_token in infilename:
            #print infilename
            mask = Image.open(join(mskpath, infilename)).convert("RGB")
            maskpixels = mask.load()
            #mask.show()
            colorlist = sorted([color[1] for color in mask.getcolors()
                                ])  # sorted from dark to bright
            colordict = dict(enumerate(colorlist))
            inverse_colordict = dict((v, k) for k, v in colordict.items())
            for locfilename in lin:
                if locfilename.endswith(locfilename_token):
                    #print "locfilename =", locfilename
                    if extract_loc_id(locfilename) == extract_msk_id(
                            infilename):  # for matching image IDs
                        print "found mask file for .loc file:", locfilename
                        spots = loc_spots(join(locpath, locfilename))
                        print "found", len(
                            spots), "spots. some might be outside of cells."
                        cellsperfile.append(
                            len(colorlist) - 1
                        )  # this is the number of cells in the image infilename

                        for spot in spots:
                            x = spot[0]
                            y = spot[1]
                            intensity = spot[2]
                            frame_ID = spot[3]
                            cell_ID = inverse_colordict[maskpixels[
                                spot[0], spot[1]]]  # cell_ID but also color_ID
                            spot_ID = 0  # move this line up to create a global spot_ID
                            #file_ID = extract_loc_id(locfilename)
                            file_ID = locfilename.replace(".loc", "")
                            if cell_ID != 0:  # excluding black (= outside of cells)
                                spot_ID += 1
                                intensities.append(
                                    intensity
                                )  # this is the "global" intensities list
                                spotwritelist.append([
                                    str(i) for i in [
                                        x, y, intensity, frame_ID, cell_ID,
                                        spot_ID, file_ID
                                    ]
                                ])
    #print "intensities =", intensities
    RNAs = calculate_RNA(intensities)
    for i, sublist in enumerate(spotwritelist):
        spotwritelist[i].append(str(RNAs[i]))
        #print spotwritelist[i]

    # create cells data structure (including spotless cells):
    cellsperfileiter = iter(cellsperfile)
    celldict = {}
    filedict = {}
    folderlist = []

    for locfilename in lin:
        if locfilename.endswith(
                locfilename_token) and not locfilename == spotoutfile:
            # only do this for matchable loc files:
            for infilename in lout:
                if maskfilename_token in infilename:
                    if extract_loc_id(locfilename) == extract_msk_id(
                            infilename):  # for matching image IDs
                        file_ID = locfilename.replace(".loc", "")
                        #print "file_ID =", file_ID
                        filedict[file_ID] = [0, 0, 0]  # cells, spots, RNAs
                        for cellnumber in range(1,
                                                cellsperfileiter.next() + 1):
                            ID = "_".join(file_ID.split("_")[:-1]) + "_" + str(
                                cellnumber)
                            #print "ID oben =", ID
                            # celldict[ID] will be for each cell [filename, sum(intensities_token1), sum(intensities_token2), count(spots_token1), count(spots_token2), sum(RNAs_token1), sum(RNAs_token2)] (as strings)
                            celldict[ID] = [
                                str("_".join(file_ID.split("_")[:-1])), 0.0,
                                0.0, 0, 0, 0, 0
                            ]  # file_ID, intensity_NG, intensity_Qusar, spots_NG, spots_Qusar, RNAs_NG, RNAs_Qusar

    # read in cell level data:
    for sublist in spotwritelist:
        #TODO: this is inefficient since we would only have to loop over cells not spots
        #print "================================================================="
        #print "x, y, intensity, frame_ID, cell_ID, spot_ID, file_ID =", sublist
        cell_ID_prefix = "_".join(
            sublist[6].split("_")
            [:-1])  # we skip the NG, Qusar token to aggregate across NG, Qusar
        ID = cell_ID_prefix + "_" + sublist[4]  # cell_ID
        #print "ID =", ID
        comparetoken = sublist[6].split("_")[-1]
        #print comparetoken
        # 1: 1, 3, 5
        if token_1 in comparetoken:
            celldict[ID][1] = str(
                sum(
                    float(linedata[2]) for linedata in spotwritelist
                    if token_1 in linedata[6].split("_")[-1]
                    and str("_".join(sublist[6].split("_")[:-1])) + "_" +
                    str(linedata[4]) == ID))  # intensities_NG
            celldict[ID][3] = str(
                sum(
                    int(1) for linedata in spotwritelist
                    if token_1 in linedata[6].split("_")[-1]
                    and str("_".join(sublist[6].split("_")[:-1])) + "_" +
                    str(linedata[4]) == ID)
            )  # spots, each line contributes one
            celldict[ID][5] = str(
                sum(
                    int(linedata[7]) for linedata in spotwritelist
                    if token_1 in linedata[6].split("_")[-1]
                    and str("_".join(sublist[6].split("_")[:-1])) + "_" +
                    str(linedata[4]) == ID))  # RNAs
        # 2: 2, 4, 6
        if token_2 in comparetoken:
            celldict[ID][2] = str(
                sum(
                    float(linedata[2]) for linedata in spotwritelist
                    if token_2 in linedata[6].split("_")[-1]
                    and str("_".join(sublist[6].split("_")[:-1])) + "_" +
                    str(linedata[4]) == ID))  # intensities_Qusar
            celldict[ID][4] = str(
                sum(
                    int(1) for linedata in spotwritelist
                    if token_2 in linedata[6].split("_")[-1]
                    and str("_".join(sublist[6].split("_")[:-1])) + "_" +
                    str(linedata[4]) == ID)
            )  # spots, each line contributes one
            celldict[ID][6] = str(
                sum(
                    int(linedata[7]) for linedata in spotwritelist
                    if token_2 in linedata[6].split("_")[-1]
                    and str("_".join(sublist[6].split("_")[:-1])) + "_" +
                    str(linedata[4]) == ID))  # RNAs
        #print celldict[ID]

    # create spot counts per cell:
    # spotfrequencies[token] = [count_for_0, count_for_1, count_for_2, ...]
    spotfrequencies = dict((token, {}) for token in tokens)
    for ID in celldict:  # loop over cells
        #print "celldict[ID] =", celldict[ID]
        spotcount_1 = int(celldict[ID][3])  # NG
        spotcount_2 = int(celldict[ID][4])  # Qusar
        if spotcount_1 in spotfrequencies[token_1]:
            spotfrequencies[token_1][spotcount_1] += 1
        else:
            spotfrequencies[token_1][spotcount_1] = 1
        if spotcount_2 in spotfrequencies[token_2]:
            spotfrequencies[token_2][spotcount_2] += 1
        else:
            spotfrequencies[token_2][spotcount_2] = 1
    #print spotfrequencies

    # create mRNA counts per cell:
    # mRNAfrequencies[token] = [count_for_0, count_for_1, count_for_2, ...]
    mRNAfrequencies = dict((token, {}) for token in tokens)
    for ID in celldict:  # loop over cells
        #print "celldict[ID] =", celldict[ID]
        mRNAcount_1 = int(celldict[ID][5])  # NG
        mRNAcount_2 = int(celldict[ID][6])  # Qusar
        if mRNAcount_1 in mRNAfrequencies[token_1]:
            mRNAfrequencies[token_1][mRNAcount_1] += 1
        else:
            mRNAfrequencies[token_1][mRNAcount_1] = 1
        if mRNAcount_2 in mRNAfrequencies[token_2]:
            mRNAfrequencies[token_2][mRNAcount_2] += 1
        else:
            mRNAfrequencies[token_2][mRNAcount_2] = 1
    #print mRNAfrequencies

    # read in file level data:
    for sublist in spotwritelist:
        file_ID = sublist[6]
        #TODO: aggregate cells into file level file
        #print "======================================================="
        #print celldict.keys()
        #print "======================================================="
        #print file_ID
        #print "======================================================="
        filedict[file_ID][0] = "bla"
        filedict[file_ID][1] = str(
            sum(
                int(1) for linedata in spotwritelist if str(linedata[6]) ==
                file_ID))  # spots, each line contributes one
        filedict[file_ID][2] = str(
            sum(
                int(linedata[7]) for linedata in spotwritelist
                if str(linedata[6]) == file_ID))  # RNAs

    # read in folder level data:
    folderlist.append(str(
        len(spotwritelist)))  # spots, each line contributes one
    folderlist.append(str(sum(int(linedata[7])
                              for linedata in spotwritelist)))  # RNAs
    if intensities:
        folderlist.append(str(median(intensities)))  # median intensity
    else:
        folderlist.append("")  # median intensity
    #print folderlist

    print "dumping results...",
    cPickle.dump(spotwritelist, file("spotlist.pkl", "w"))
    cPickle.dump(celldict, file("celldict.pkl", "w"))
    cPickle.dump(filedict, file("filedict.pkl", "w"))
    cPickle.dump(folderlist, file("folderlist.pkl", "w"))
    cPickle.dump(spotfrequencies, file("spotfrequencies.pkl", "w"))
    cPickle.dump(mRNAfrequencies, file("mRNAfrequencies.pkl", "w"))
    print "done."
Example #35
def genData():

    op_folderpath = os.environ['QS'] + 'Tools/Visualizer/Data/Norway'
    ip_folderpath = os.environ['QS'] + 'Tools/Visualizer/Data/Norway/Raw/'

    if not os.path.exists(op_folderpath):
        os.mkdir(op_folderpath)
        print("Data was missing")
        return
    op_folderpath = op_folderpath + '/'

    files_at_this_path = dircache.listdir(ip_folderpath)
    ip_folderpath = ip_folderpath + '/'

    stationnames = []
    startyears = []
    endyears = []

    for file1 in files_at_this_path:
        file = open(ip_folderpath + file1, 'r')
        for f in file.readlines():
            if string.find(f, 'Name') != -1:
                n = string.lstrip(f, 'Name= ')
                stationnames.append(string.rstrip(n))
            if string.find(f, 'Start year') != -1:
                n = string.lstrip(f, 'Start year= ')
                startyears.append(int(string.rstrip(n)))
            if string.find(f, 'End year') != -1:
                n = string.lstrip(f, 'End year= ')
                endyears.append(int(string.rstrip(n)))
        file.close()

    timestamps = [
        dt.datetime(year, 1, 1) for year in range(min(startyears),
                                                  max(endyears) + 1)
    ]

    months = [
        'January', 'February', 'March', 'April', 'May', 'June', 'July',
        'August', 'September', 'October', 'November', 'December'
    ]

    numpyarray = np.empty([len(months), len(timestamps), len(stationnames)])
    numpyarray[:] = np.NAN

    PandasObject = Panel(numpyarray,
                         items=months,
                         major_axis=timestamps,
                         minor_axis=stationnames)

    for i, file1 in enumerate(files_at_this_path):
        flag = 0
        station = stationnames[i]
        file = open(ip_folderpath + file1, 'r')
        for f in file.readlines():
            if flag == 1:
                data = string.split(f)
                year = int(data.pop(0))
                time = dt.datetime(year, 1, 1)
                for month, val in zip(months, data):
                    PandasObject[month][station][time] = float(val)
            if string.find(f, 'Obs') != -1:
                flag = 1
        file.close()

    #Creating a txt file of timestamps
    file = open(op_folderpath + 'TimeStamps.txt', 'w')
    for onedate in timestamps:
        stringdate = dt.date.isoformat(onedate)
        file.write(stringdate + '\n')
    file.close()

    #Creating a txt file of symbols
    file = open(op_folderpath + 'Symbols.txt', 'w')
    for sym in stationnames:
        file.write(str(sym) + '\n')
    file.close()

    #Creating a txt file of Features
    file = open(op_folderpath + 'Features.txt', 'w')
    for f in months:
        file.write(f + '\n')
    file.close()

    Numpyarray_Final = PandasObject.values
    for i, month in enumerate(months):
        for j, station in enumerate(stationnames):
            for k in range(len(timestamps) - 1):
                if np.isnan(Numpyarray_Final[i][k + 1][j]):
                    Numpyarray_Final[i][k + 1][j] = Numpyarray_Final[i][k][j]

    for i, month in enumerate(months):
        for j, station in enumerate(stationnames):
            for z in range(1, len(timestamps)):
                k = len(timestamps) - z
                if np.isnan(Numpyarray_Final[i][k - 1][j]):
                    Numpyarray_Final[i][k - 1][j] = Numpyarray_Final[i][k][j]

    pickle.dump(Numpyarray_Final, open(op_folderpath + 'ALLDATA.pkl', 'wb'),
                -1)
Example #36
        '@executable_path/../Frameworks/' + l[0] + '.framework/Versions/' +
        l[1] + '/' + l[0] for l in framework_names_and_versions
    ]

    rlinks_fw_line = ('--rlinks_framework=[' + ' '.join(libs_to_convert) +
                      ']:[' + ' '.join(framework_names_with_path) + ']')

    for lib, name, version in zip(libs_to_convert, framework_names,
                                  framework_versions):
        #execute rtool a crapton of times
        header_path = '/'.join(lib.split('/')[0:-1]) + '/include/' + name
        if version != '' and version != 'A':
            header_path += '-' + version
        try:
            header_path = ' '.join(
                [header_path + '/' + h for h in dircache.listdir(header_path)])
        except OSError, e:
            # the directory didn't exist, we don't care.
            pass
        args = [
            'rtool',
            '--framework_root=@executable_path/../Frameworks',
            '--framework_name=' + name,
            '--framework_version=' + version,
            '--library=' + lib,
            '--builddir=' + output_dir,
            '--headers=' + header_path,
            '--headers_no_root',
            rlinks_fw_line,
        ]
        status = os.spawnvp(os.P_WAIT, 'rtool', args)
Example #37
                    if profarray[x] == unsmoothed_max
                ][0]  # hack since there might be more than one pos with the max
                #print unsmoothed_pos, unsmoothed_max
                maxpositions[
                    unsmoothed_pos] = unsmoothed_max  # hack because the list can have > 1 element
    #print "maxpositions =", maxpositions
    topvalues = sorted(maxpositions.values())[-2:]  # top 2 intensities
    maxpositions = dict((k, v) for (k, v) in maxpositions.items()
                        if v in topvalues)  # dictionary with only <=2 largest
    return maxpositions.keys()


if __name__ == "__main__":
    imagepath = "C:/Users/MJS/git/Berufspraktikum/for_Anja"

    filelist = listdir(imagepath)
    for imagename in filelist:
        if imagename.endswith(".tif"):
            print "opening image:", join(imagepath, imagename)
            imp = open_image(join(
                imagepath, imagename))  # type 'ij.ImagePlus', current image
            ip = imp.getProcessor().convertToFloat(
            )  # type 'ij.ImageProcessor'
            x, y = brightest_pixels(imp)
            width = imp.width

            length = 400 / 2  # length of intensity profile
            angular_accuracy = 20  # degrees
            tubewidth = width  # to initialize, a tube cannot be wider than the whole image
            filename = join(imagepath, imagename + "_profiles.shl")
            profiles = shelve.open(filename, 'n')
Example #38
def get_file_name(id_to_file_name, id):
    return id_to_file_name[int(id)]


ANNOTATED_SENTI = "../data/as1/annotations/sentiment/sentimentAnnotations_rev_v03.csv"
VISUAL_FEATURE = "../data/as1/features/VisualFeatures/SHORE/"
id_to_file_name = {}

FEATURE = VISUAL_FEATURE

data = np.genfromtxt(ANNOTATED_SENTI,
                     delimiter=",",
                     skip_header=1,
                     usecols=[0, 1, 2, 3, 4, 5, 9])

for fn in listdir(FEATURE):
    id = fn[5:7]
    if id[1] == "(":
        id = str(id[0])
    id_to_file_name[int(str(id))] = fn

# All feature mappings
feature_mapping = {
    0: "Age",
    2: "Angry",
    3: "Happy",
    4: "MouthOpen",
    5: "Surprised"
}
# feature_mapping = {134	:"right brow raise",135	:"left brow raise",136	:"brow squint"}
Example #39
def getRandomfile(): # returns a random file 
	dircache.reset()
	thisFile = ourFolder  + "\\" + random.choice(dircache.listdir(ourFolder))
	return thisFile
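dircache.reset() above empties the whole cache so files added since the last call show up. Plain os.listdir has no cache to reset; a portable sketch that also drops the hard-coded Windows separator:

import os
import random

def getRandomfile(folder):
    # os.listdir always reflects the directory's current contents
    return os.path.join(folder, random.choice(os.listdir(folder)))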
Example #40
def update():
  for dirname in dircache.listdir("src/examples/"):
    if dirname[0:8] == "example_" and dirname != "example_prototype":
      print("updating " + dirname)
      make_example(proto_dir, target_dir + dirname, dirname, "")
Example #41
import codecs, dircache, os, re
import datetime

current_date = datetime.date.today()
dir_in = '/release/cbeta-xml-2006-02-18'
dir_out = '/release/cbeta-p4'

if not os.path.exists(dir_out): os.makedirs(dir_out)

l = dircache.listdir(dir_in)
for s in l:
    if s == 'dtd' or s == 'CVS':
        continue
    d = dir_in + '/' + s
    if not os.path.isdir(d):
        continue
    os.chdir(d)
    cmd = 'add_id ' + s
    print cmd
    os.system(cmd)
    cmd = 'copy *.ent %s/%s/' % (dir_out, s)
    cmd = cmd.replace("/", "\\")
    print cmd
    os.system(cmd)
Example #42
def main():
    # init app config
    global app
    app['conf'] = ConfigParser.SafeConfigParser()
    app['path'] = {}
    app['path']['app'] = os.path.dirname(os.path.realpath(__file__)) + os.sep

    # add import path
    sys.path.append(app['path']['app'] + 'lib')
    sys.path.append(app['path']['app'] + 'plugin')
    sys.path.append(app['path']['app'] + 'service')

    # add conf path
    import platformDep
    app['path']['conf'] = os.path.join(platformDep.getNmcontrolDir(),
                                       'conf') + os.sep

    import common
    common.app = app

    import console
    (cWidth, cHeight) = console.getTerminalSize()
    fmt = optparse.IndentedHelpFormatter(indent_increment=4,
                                         max_help_position=40,
                                         width=cWidth - 3,
                                         short_first=1)
    app['parser'] = optparse.OptionParser(formatter=fmt,
                                          description='nmcontrol %s' %
                                          __version__)
    app['debug'] = False

    # debug mode
    for argv in sys.argv:
        if argv in ['--debug=1', '--main.debug=1']:
            app['debug'] = True

    # init modules
    import re
    import dircache

    # init vars and main plugin
    app['services'] = {}
    app['plugins'] = {}
    import pluginMain
    app['plugins']['main'] = pluginMain.pluginMain('plugin')

    # init service & plugins
    for modType in ['service', 'plugin']:
        modules = dircache.listdir(modType)
        if modType == 'plugin': modules.remove('pluginMain.py')
        for module in modules:
            if re.match("^" + modType + r".*\.py$", module):
                module = re.sub(r'\.py$', '', module)
                modulename = re.sub(r'^' + modType, '', module).lower()
                try:
                    importedModule = __import__(module)
                    importedClass = getattr(importedModule, module)
                    app[modType +
                        's'][importedClass.name] = importedClass(modType)
                    importedClass.app = app
                except Exception as e:
                    print "Exception when loading " + modType, module, ":", e

    # parse command line options
    (options, app['args']) = app['parser'].parse_args()
    if app['debug']: print "Cmdline args:", app['args']
    if app['debug']: print "Cmdline options:", options
    for option, value in vars(options).items():
        if value is not None:
            tmp = option.split('.')
            if len(tmp) == 1:
                app['plugins']['main'].conf[tmp[0]] = value
            else:
                module = tmp[0]
                tmp.remove(module)
                if module in app['plugins']:
                    app['plugins'][module].conf['.'.join(tmp)] = value
                elif module in app['services']:
                    app['services'][module].conf['.'.join(tmp)] = value

    ###### Act as client : send rpc request ######
    if len(app['args']) > 0 and app['args'][0] != 'start':
        error, data = app['plugins']['rpc'].pSend(app['args'][:])
        if error is True or data['error'] is True:
            print "ERROR:", data
        else:
            if data['result']['reply'] in [None, True]:
                print 'ok'
            else:
                print data['result']['reply']
            if app['debug'] and data['result']['prints']:
                print "LOG:", data['result']['prints']
        if app['args'][0] != 'restart':
            return

    # daemon mode
    if os.name == "nt":  # MS Windows
        print "Daemon mode not possible on MS Windows."
    elif int(app['plugins']['main'].conf['daemon']) == 1:
        print "Entering background mode"
        import daemonize
        retCode = daemonize.createDaemon()

    ###### Act as server : start plugins ######
    plugins_started = []
    for plugin in app['plugins']:
        if int(app['plugins'][plugin].conf['start']) == 1 and plugin not in [
                'rpc', 'main'
        ]:
            # exit immediately when main is stopped, unless in debug mode
            app['plugins'][plugin].daemon = True
            if app['plugins'][plugin].running is False:
                app['plugins'][plugin].start()
                plugins_started.append(app['plugins'][plugin].name)
    print "Plugins started :", ', '.join(plugins_started)

    #services_started = []
    #for service in app['services']:
    #    if app['services'][service].running:
    #        services_started.append(app['services'][service].name)
    #print "Services started :", ', '.join(services_started)

    # stay there to catch CTRL + C and not exit when in daemon mode
    try:
        app['plugins']['main'].start2()
    except (KeyboardInterrupt, SystemExit):
        print '\n! Received keyboard interrupt, quitting threads.\n'

    # stop main program
    app['plugins']['main'].stop()
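The module-loading loop above relies on the convention that a file such as pluginFoo.py defines a class also named pluginFoo, and imports it with __import__/getattr. For comparison, a minimal sketch of the same lookup using importlib (available since Python 2.7); this helper is not part of nmcontrol:

import importlib

def load_module_class(module_name):
    # e.g. load_module_class('pluginDns') imports pluginDns.py and
    # returns the pluginDns class defined inside it.
    mod = importlib.import_module(module_name)
    return getattr(mod, module_name)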
Example #43
def check_install_mcl(debug, log_path=log_fname, only_warn=False):
    if which('mcl'):
        return 'mcl', 0

    mcl_bin_path = join(mcl_dir, 'bin', 'bin', 'mcl')
    if exists(mcl_bin_path):
        return mcl_bin_path, 0
    else:
        if exists(mcl_dir):
            shutil.rmtree(mcl_dir)

    tar_gz = join(src_dir, 'mcl.tar.gz')
    if not isfile(tar_gz):
        if not only_warn:
            log.error('Error: no file %s' % tar_gz)
            return None, 1
        else:
            log.warning('Warning: no file %s' % tar_gz)

    with tarfile.TarFile.open(tar_gz, 'r:gz') as mcl_tar_gz:
        mcl_tar_gz.extractall(src_dir)

    for fname in listdir(src_dir):
        if fname.startswith('mcl'):
            shutil.move(join(src_dir, fname), mcl_dir)
            break

    log.debug(log_path)
    log.debug(getcwd())

    def exec_cmdline(command):
        log.info('   ' + command)
        if debug:
            res = cmdline(command.split())()
        else:
            res = cmdline(command.split(), stdout=None, stderr=None)()

        if res != 0:
            log.debug('Running ' + command)
            if only_warn:
                log.warning(
                    'WARNING: Cannot find or install mcl. '
                    'It is required for some steps. '
                    'Try to install it manually: http://micans.org/mcl/src')
            else:
                log.error(
                    'ERROR: Cannot find or install mcl. '
                    'Try to install it manually: http://micans.org/mcl/src')
            return None, res

    log.info('Compiling MCL...')
    cur_dir = getcwd()
    log.info('Changing to ' + mcl_dir)
    chdir(mcl_dir)
    mcl_path = join(mcl_dir, 'bin')

    exec_cmdline('./configure' + ' -q --prefix=' + mcl_path)
    exec_cmdline('make')
    exec_cmdline('make check')
    exec_cmdline('make install')
    chdir(cur_dir)

    return mcl_bin_path, 0
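The which() helper called at the top of check_install_mcl is not shown in this excerpt. A minimal Python 2 sketch of the PATH lookup it presumably performs (the real helper may differ):

import os

def which(program):
    # Return the full path of `program` if it is on PATH and
    # executable, else None -- a rough analogue of the shell's which.
    for d in os.environ.get('PATH', '').split(os.pathsep):
        candidate = os.path.join(d, program)
        if os.path.isfile(candidate) and os.access(candidate, os.X_OK):
            return candidate
    return None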
Example #44
def printListOfBenchmarks():
    benchmarksEntries = dircache.listdir(Environment.getBenchmarksPath())
    for index, benchmarkEntry in enumerate(benchmarksEntries):
        print str(index) + "-> " + benchmarkEntry
Example #45
import dircache
import os

path = '/tmp'
newfile = os.path.join(path, 'file1.txt')

# initial directory contents
list1 = dircache.listdir(path)

# list again: dircache returns the same cached object
list2 = dircache.listdir(path)

print 'Identical :', list1 is list2
print 'Equal     :', list1 == list2
print 'Difference:', list(set(list2) - set(list1))

# create the new file
open(newfile, 'wt').close()

# list again; the new file changed the directory mtime, forcing a
# rescan (mtime granularity permitting)
list3 = dircache.listdir(path)

# remove new file
os.unlink(newfile)

print 'Identical :', list1 is list3
print 'Equal     :', list1 == list3
print 'Difference:', list(set(list3) - set(list1))
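dircache was removed in Python 3.0, so this caching demonstration has no direct successor. The closest plain-os equivalent rescans on every call, which is why the identity check below can never be True; a Python 3 sketch:

import os

path = '/tmp'
list1 = os.listdir(path)
list2 = os.listdir(path)

# os.listdir builds a fresh list on every call, so there is no
# shared cached object to compare, unlike dircache.listdir.
print('Identical:', list1 is list2)   # always False
print('Equal    :', list1 == list2)   # True unless the directory changed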

Example #46
import os
import re
import dircache

UIModules = []
for filename in dircache.listdir("."):
	if filename.endswith('.ui'):
		UIModules.append(filename[:-3])

pyuic4_bat=''
for filename in [r'c:\python25\PyQt4\bin\pyuic4.bat',r'C:\Python25\Lib\site-packages\PyQt4\pyuic4.bat']:
	if os.path.exists(filename):
		pyuic4_bat=filename
		break

PrefixLines = {}
for UIModule in UIModules:
	print "Converting " + UIModule + "..."
	if UIModule in PrefixLines:
		fd=open(UIModule+".py","w+")
		fd.write(PrefixLines[UIModule])
		fd.close()
		os.system(pyuic4_bat+" \""+UIModule+".ui\" >> \""+UIModule+".py\"")
	else:
		os.system(pyuic4_bat+" \""+UIModule+".ui\" > \""+UIModule+".py\"")
Example #47
def main():
    data = {}
    dir_path = "D:\KCBLA-Data"
    xml_files = [f for f in listdir(dir_path) if isfile(join(dir_path, f))]
    for index, fname in enumerate(xml_files):
        if index % 5000 == 0:
            print "%d/%d\r" % (index, len(xml_files)),
#             pass
        if not fname.endswith('xml'):
            continue

        # Extra conditions
        if '2-2-4' not in fname and '2-2-6' not in fname:
            continue

        fpath = dir_path + "\\" + fname
        #         stage = fpath.split(' ')[1][0:3]
        doc = ElementTree.parse(fpath)
        try:
            battle = Battle(doc)
            if len(battle.bbPhase.attacks) > 0:
                attack = battle.bbPhase.attacks[0]
                if attack.target == u'輸送ワ級':
                    if attack.damage == 0:
                        continue
                    pow = int(doc.find('.//Value').text)
                    eq = ''
                    legal = True
                    c = 0
                    for v in doc.findall('.//ShipKF/Ship/SlotItem/Name'):
                        c += 1
                        if c == 4:
                            break
                        eq += v.text + ' '
                        if v.text in outs:
                            legal = False
                            break
                        elif v.text in pow1s:
                            pow += 1
                        elif v.text in pow2s:
                            pow += 2
                        elif v.text in pow3s:
                            pow += 3
                        elif v.text in pow4s:
                            pow += 4
                        elif v.text in pow0s or v.text is None:
                            pow += 0
                        else:
                            print v.text
                    if pow < 70 and legal:
                        apow = pow + 5
                        cri = ''
                        if battle.encount == u'反航戦':
                            apow = apow * 4 / 5
                        elif battle.encount == u'T有利':
                            apow = apow * 6 / 5
                        elif battle.encount == u'T不利':
                            apow = apow * 3 / 5
                        if apow < attack.damage:
                            apow = apow * 3 / 2
                            cri = 'Cri'
                        arm = apow - attack.damage
                        print '%d - %s %d->%d %d %s %s %s %s' % \
                            (arm, attack.attacker, pow, apow, attack.damage, battle.encount, eq, cri, fname)
                        if arm not in data:
                            data[arm] = 0
                        data[arm] += 1
        except AttributeError:
            continue


#         try:
#             air = Air(doc)
#         except AttributeError:
#             continue
#         if hasattr(air, 'stage1'):
#             if not air.stage1.seiku in data:
#                 data[air.stage1.seiku] = []
#             data[air.stage1.seiku].append(air.stage1.elost)

#             print '%s,%d,%d,(%s)' % (air.stage1.seiku, air.stage1.ecount, air.stage1.elost, fname)
#         for a in battle.stage3.attacks:
#             if a.flags & Attack.OUTLET == Attack.OUTLET:
#                 print fname
#         battles.append(battle)

#         ec = battle.encount
#         if not ec in data:
#             data[ec] = {}
#         for at in battle.opening.attacks + battle.tpdPhase.attacks:
#             if at.flags == 0:
#                 continue
#             if not u'駆逐' in at.target:
#                 continue
#             if not at.target in data[ec]:
#                 data[ec][at.target] = {}
#                 data[ec][at.target][1] = { 'min': 9999, 'max': -9999, 'count': 0 }
#                 data[ec][at.target][3] = { 'min': 9999, 'max': -9999, 'count': 0 }
#             sto = data[ec][at.target][at.flags]
#             if at.dmg > sto['max']:
#                 sto['max'] = at.dmg
#             if at.dmg < sto['min']:
#                 sto['min'] = at.dmg
#             sto['count'] += 1
#
#     for enc in [u'同航戦', u'反航戦', u'T有利', u'T不利' ]:
#         print '--- %s ---' % enc
#         for target in data[enc]:
#             d = data[enc][target]
#             print target
#             print " 通常 %d 回 %d~%d\n 急所 %d 回 %d~%d" % (d[1]['count'], d[1]['min'], d[1]['max'],
#                                                    d[3]['count'], d[3]['min'], d[3]['max'])
#     for k, v in sorted(total.items(), key=lambda x:x[1]):
#         for stage in res[k]:
#             print '%s %d %s' % (k, res[k][stage], stage)
#     for k, v in sorted(total.items(), key=lambda x:x[1]):

#     for k in data:
#         min = 999
#         max = -1
#         total = 0
#         size = len(data[k])
#         for v in data[k]:
#             if v < min:
#                 min = v
#             if v > max:
#                 max = v
#             total += v
# #             print v
#         mean = total / size
#         variancettl = 0
#         for v in data[k]:
#             variancettl += (v - mean) * (v - mean)
#         variance = variancettl / size
#
#         print '%s' % k
#         print 'Total: %d' % size
#         print 'Min: %d' % min
#         print 'Max: %d' % max
#         print 'Mean: %d' % mean
#         print 'SD: %d' % math.sqrt(variance)
#         print

#     out = codecs.open("..\output.html", "w", "utf-8-sig")
#     out.write('''
#     <html><head><style type="text/css">
#         table { border: solid 1px; }
#         th { padding: 2px; border: solid 1px; }
#         td { padding: 2px; border: solid 1px; text-align: center; }
#         .hit { background-color: #E0FFE0; }
#         .miss { background-color: #FFE0E0; }
#         .blue { background-color: #E0E0FF; }
#     </style></head><body>
#     ''')
#     output(out)
#     out.write('</body></html>')
#     out.close()
#

    for k in data:
        print '%d\t%d' % (k, data[k])

    print "Done."
Example #48
    raise ValueError, ('Library ' + library_path + ' with name ' +
                       library_name +
                       ' did not match any known format, please update the'
                       ' script.')


if __name__ == '__main__':
    if len(sys.argv) < 3:
        print 'Usage:', sys.argv[0], '/paths/to/libraries', 'output_plugin_dir'
        sys.exit(1)

    plugins_dir = sys.argv[1]
    output_dir = sys.argv[2]

    known_frameworks = [
        d[:-len(".subproj")] for d in dircache.listdir("Frameworks/")
        if d.endswith(".subproj")
    ]

    plugins = [
        plugins_dir + '/' + d for d in dircache.listdir(plugins_dir)
        if d.endswith('.so')
    ]
    for library in plugins:
        libs_to_convert = discover_all_dependencies(library)
        libs_to_convert.sort()

        new_paths = []
        for l in libs_to_convert:
            is_known_framework = 0
            for known_framework in known_frameworks:
Example #49
File: route.py Project: urakagi/KCJA
# -*- coding: utf-8 -*-

'''
Created on 2014/2/6

@author: romulus
'''
from dircache import listdir
from genericpath import isfile
from ntpath import join
from xml.etree import ElementTree


dir_path = "D:/KCBLA-Data/"
xml_files = [ f for f in listdir(dir_path) if isfile(join(dir_path, f)) ]

def calc54ss(xml_files):
    sortie = 0
    boss = 0
    evasion = { '4':0, '6':0, '10':0 }
    result = { 'S':0, 'A':0, 'B':0, 'C':0, 'D':0, 'E':0 }
    casualities = [0, 0, 0, 0, 0]
    lastcell = 0
    lastxml = ''
#     debug = ''
    for fname in sorted(xml_files):
        if not '5-4-' in fname: continue
        ss = fname.split(' ')
        stage = ss[1]
        cell = stage[4:]
        if cell == '4':
Example #50
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import dircache

path = '/tmp'
first = dircache.listdir(path)
dircache.reset()
second = dircache.listdir(path)

print 'Identical :', first is second
print 'Equal     :', first == second
print 'Difference:', list(set(second) - set(first))
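If Python 3 code still wants the "skip the rescan while the directory is unchanged" behaviour shown here, a small mtime-keyed cache reproduces the core idea. A simplified sketch (the real dircache also kept annotation support and per-path state):

import os

_cache = {}  # path -> (mtime, sorted listing)

def cached_listdir(path):
    # Re-read the directory only when its modification time changes.
    # Same caveat as dircache: coarse mtime granularity can hide
    # rapid back-to-back changes.
    mtime = os.path.getmtime(path)
    entry = _cache.get(path)
    if entry is not None and entry[0] == mtime:
        return entry[1]
    listing = sorted(os.listdir(path))
    _cache[path] = (mtime, listing)
    return listing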
Example #51
        verbose = True
    elif o in ("-l", "--list"):
        listlibrary = True
        librarydir = a
    elif o in ("-p", "--part"):
        printpart = True
        librarydir = a
        symbolname = args[0]
    else:
        assert False, "unhandled option"

if verbose:
    log(sys.argv[0] + ": opts:" + str(opts) + " args: " + str(args))

if listlibrary:
    parts = [f[:-4] for f in dircache.listdir(librarydir) if f.endswith(".sym")]
    sys.stdout.write("\n".join(parts))
    sys.exit()

elif printpart:
    filename = os.path.join(librarydir, symbolname + ".sym")
    sys.stderr.write(filename +"\n")

    ## run gsymcheck over the symbol
    command = "gsymcheck -vv " + filename
    pop = popen2.Popen3(command, capturestderr=True)
    message = pop.fromchild.read()
    err = pop.childerr.read()
    mlines = message.split("\n")
    if len(mlines) > 9:
        message = "\n".join(mlines[7:-1]) + "\n"
Example #52
    def getDirsMtimesUnicode(self, dirs, yieldcall=None):

        list = {}

        for directory in dirs:

            directory = os.path.expanduser(directory.replace("//", "/"))

            u_directory = u"%s" % directory
            str_directory = str(directory)

            if self.hiddenCheck({'dir': directory}):
                continue

            try:
                contents = dircache.listdir(u_directory)
                mtime = os.path.getmtime(u_directory)
            except OSError, errtuple:
                message = _(
                    "Scanning Directory Error: %(error)s Path: %(path)s") % {
                        'error': errtuple,
                        'path': u_directory
                    }
                print str(message)
                self.logMessage(message)
                displayTraceback(sys.exc_info()[2])
                continue

            contents.sort()

            list[str_directory] = mtime

            for filename in contents:

                path = os.path.join(directory, filename)

                # force Unicode for reading from disk in win32
                u_path = u"%s" % path
                s_path = str(path)

                try:
                    isdir = os.path.isdir(u_path)
                except OSError, errtuple:
                    message = _("Scanning Error: %(error)s Path: %(path)s") % {
                        'error': errtuple,
                        'path': u_path
                    }
                    print str(message)
                    self.logMessage(message)
                    continue

                try:
                    mtime = os.path.getmtime(u_path)
                except OSError, errtuple:
                    try:
                        mtime = os.path.getmtime(s_path)
                    except OSError, errtuple:
                        message = _(
                            "Scanning Error: %(error)s Path: %(path)s") % {
                                'error': errtuple,
                                'path': u_path
                            }
                        print str(message)
                        self.logMessage(message)
                        continue
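The method above hand-rolls the recursion with dircache plus per-entry stat calls (and is truncated here). For comparison, a compact sketch of the same directory-to-mtime map built with os.walk, leaving out the hidden-file filtering and the win32 Unicode juggling:

import os

def get_dirs_mtimes(roots):
    # Map every readable directory under the given roots to its mtime.
    mtimes = {}
    for root in roots:
        root = os.path.expanduser(root)
        for dirpath, dirnames, filenames in os.walk(root):
            try:
                mtimes[dirpath] = os.path.getmtime(dirpath)
            except OSError:
                dirnames[:] = []  # unreadable; do not descend further
    return mtimes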
Example #53
count = 10
mix_length = 11000
#mixed = AudioSegment.silent(duration=10000)

try:
	while count < 50:
		for x in range(count):
			if x == 1:
				""" 
				Here we declare our final mixed clip duration.
				To avoid confusion, overlay function in pydub uses first sound clip
				to determine the permitted length for all following sound clips
				"""
				sounds.append(AudioSegment.silent(duration=mix_length))
			else:
				filename = random.choice(dircache.listdir(dir))
				print "file is ",filename 
				path = os.path.join(dir, filename)

				clip_length = len(AudioSegment.from_wav(path))
				cutoff = mix_length - clip_length
				pos_x.append(random.randint(0, cutoff))
				sounds.append(AudioSegment.from_wav(path))

		mixed = sounds[1].overlay(sounds[2])

		pos_count = 0
		for sound in sounds:
			try:
				x_int = pos_x[pos_count]
			except IndexError: 
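The snippet above is cut off mid-loop, and sounds, pos_x and dir come from elided context (note also that count is never incremented, so the outer while would not terminate as written). A self-contained sketch of the same idea -- overlaying randomly chosen clips at random offsets on a silent base -- where the directory name and clip count are assumptions:

import os
import random

from pydub import AudioSegment

CLIP_DIR = 'clips'        # assumed directory of .wav files
MIX_LENGTH_MS = 11000
N_CLIPS = 5

mixed = AudioSegment.silent(duration=MIX_LENGTH_MS)
wavs = [f for f in os.listdir(CLIP_DIR) if f.endswith('.wav')]

for _ in range(N_CLIPS):
    clip = AudioSegment.from_wav(os.path.join(CLIP_DIR, random.choice(wavs)))
    # Pick an offset that keeps the whole clip inside the mix.
    cutoff = max(0, MIX_LENGTH_MS - len(clip))
    mixed = mixed.overlay(clip, position=random.randint(0, cutoff))

mixed.export('mixed.wav', format='wav')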
Example #54
    def GetExtension(self, id):

        if not id:
            return DirectorySuite(self, id)

        elif id == 'compiler_table':
            return CompilerTable({}, qmtest_id=id, qmtest_database=self)

        elif id == 'parallel_service':
            return ParallelService({}, qmtest_id=id, qmtest_database=self)

        resources = ['compiler_table', 'parallel_service']

        id_components = self.GetLabelComponents(id)
        # 'data' subdirectories have special meaning, and so
        # are not allowed as label components.
        if 'data' in id_components:
            return None

        dirname = os.path.join(self.srcdir, *id_components[:-1])
        basename = id_components[-1]

        file_ext = os.path.splitext(basename)[1]

        # If <dirname>/data is an existing directory...
        if os.path.isdir(os.path.join(dirname, 'data')):

            if file_ext in self.test_extensions:

                executable = os.path.splitext(os.path.basename(id))[0]
                if sys.platform == 'win32':
                    executable += '.exe'

                # ...<dirname>/<basename> is a resource.
                src = os.path.abspath(os.path.join(self.srcdir, id))
                return self._MakeTest(id,
                                      CompiledResource,
                                      language=self.test_extensions[file_ext],
                                      source_files=[src],
                                      executable=executable,
                                      resources=resources)
            else:
                # ...<dirname>/<basename> is a test.
                path = os.path.join(dirname, 'data', basename)
                if not os.path.isfile(path):
                    return None

                src = [
                    f for f in dircache.listdir(dirname)
                    if os.path.splitext(f)[1] in self.test_extensions
                ]
                # There must be exactly one source file, which
                # is our resource.
                if len(src) > 1:
                    raise DatabaseError('multiple source files found in %s' %
                                        dirname)

                resources.append(self.JoinLabels(*(id_components[:-1] + src)))
                return self._MakeTest(id,
                                      ExecutableTest,
                                      resources=resources,
                                      args=[path])

        src = os.path.join(self.srcdir, id)
        if file_ext in self.test_extensions and os.path.isfile(src):
            if file_ext == '.py':
                return self._MakePythonTest(id, src)
            else:
                executable = os.path.splitext(os.path.basename(id))[0]
                if sys.platform == 'win32':
                    executable += '.exe'

                return self._MakeTest(id,
                                      CompilationTest,
                                      language=self.test_extensions[file_ext],
                                      source_files=[src],
                                      executable=executable,
                                      resources=resources)

        elif os.path.isfile(src + '.qms'):
            qms = src + '.qms'
            # Expose the flags to the suite file so it can exclude ids
            # the same way the database itself does in the constructor.
            context = dict(flags=self.flags,
                           excluded_subdirs=self.excluded_subdirs)
            try:
                content = open(qms).read()
                exec content in context
            except:
                print 'Error parsing', qms
            test_ids = context.get('test_ids', [])
            suite_ids = context.get('suite_ids', [])
            return ExplicitSuite(is_implicit=False,
                                 test_ids=test_ids,
                                 suite_ids=suite_ids,
                                 qmtest_id=id,
                                 qmtest_database=self)

        elif os.path.isdir(src):
            if basename not in self.excluded_subdirs:
                return DirectorySuite(self, id)

        else:
            return None
Example #55
import xbmc, xbmcgui, dircache, random, math
# Python script to play an intro before the main movie
# Written by Maverick214 for Plexaeon

movie_title = xbmc.getInfoLabel( "ListItem.Title" )
movie_file = xbmc.getInfoLabel( "ListItem.Filenameandpath" )

path_to_files = xbmc.getInfoLabel( "Skin.String(CinemaPath)" )
fn = dircache.listdir( path_to_files )
r_num = int(math.floor( random.random() * len(fn) ))
IntroFile = path_to_files + fn[r_num]

pl=xbmc.PlayList(1)
pl.clear()
listitem = movie_file
trailitem = 'Movie Intro'
#IntroFile = xbmc.getInfoLabel( "Skin.String(CinemaPath)" )

xbmc.executebuiltin("ActivateWindow(10006)")
pl.add(IntroFile, trailitem)
pl.add(movie_file, movie_title)
xbmc.Player().play(pl)
Example #56
File: crawler.py Project: asdbaihu/Arachne
def getFilelist(self, directory="/home/" + User() + "/test/small/"):
    # Note: the default path is computed once, when the def executes.
    self.base_dir = directory
    return dircache.listdir(directory)
Example #57
#!/usr/bin/python

import dircache
import getpass
import time

logfile = open("spam.txt", "w+")

localtime = time.asctime( time.localtime(time.time()) )
print >> logfile, 'local current time :', localtime

usr = getpass.getuser()
print >> logfile, 'current user :', usr

lst = dircache.listdir('/')
print >> logfile, lst

logfile.close()
Example #58
def caseOk(filename):
    # True if the on-disk spelling of the basename matches `filename`
    # exactly (catches case mismatches on case-insensitive filesystems).
    files = dircache.listdir(os.path.dirname(filename))
    return os.path.basename(filename) in files
Example #59
baseFileName = 'MarlinSteeringFileTemplate.xml'

currentDirectory = os.getcwd()
oneUpDirectory = os.path.dirname(currentDirectory)

marlinPath = os.path.join(oneUpDirectory, 'MarlinXml')
rootFilePath = os.path.join(oneUpDirectory, 'RootFiles')

jobList = ''

base = open(baseFileName, 'r')
baseContent = base.read()
base.close()

fileDirectory = slcioPath
allFilesInDirectory = dircache.listdir(fileDirectory)
inputFileExt = 'slcio'

allFiles = []
allFiles.extend(allFilesInDirectory)
allFiles[:] = [
    item for item in allFiles
    if re.match('.*\.' + inputFileExt + '$', item.lower())
]
allFiles.sort()

if allFiles:
    array_size = len(allFiles)

    for nfiles in range(array_size):
        newContent = baseContent
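The loop body is truncated right after newContent = baseContent; presumably it substitutes each input file into the template and writes one steering file per slcio file. A sketch under that assumption -- the placeholder token and output naming are hypothetical:

# Hypothetical continuation: swap a placeholder token in the template
# for each input file and write one steering file per slcio file.
for nfiles in range(array_size):
    inputFile = os.path.join(fileDirectory, allFiles[nfiles])
    newContent = baseContent.replace('INPUT_FILE_PLACEHOLDER', inputFile)
    outName = os.path.join(marlinPath, 'MarlinSteeringFile_%d.xml' % nfiles)
    with open(outName, 'w') as out:
        out.write(newContent)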
Example #60
# Imports inferred from usage below; `points` is defined in the part of
# the original script elided here.
import sys
import dircache
import pyhc
from pylab import array, append, ion, plot, title, show


def getMax(fname):
    g = pyhc.PGrid()
    g.open(fname)
    if not g.isok():
        print 'Error: could not open hcfile'
        sys.exit(0)
    B = array([])
    for x in points:
        B = append(B, g.zintpol(x, 0., 0., 'Bz'))
    return max(abs(B))


if len(sys.argv) != 2:
    print 'usage: ', sys.argv[0], 'hcfile_prefix'
    sys.exit(-1)
prefix = sys.argv[1]

ion()
maxBs = []
files = dircache.listdir('./')
for file in files:
    if file.endswith('.hc') and file.startswith(prefix):
        m = getMax(file)
        maxBs.append(m)
        print m

plot(range(len(maxBs)), maxBs, 'ro-')
title('Max B on line depending on timestep')
show()