import os

using_java = (os.name == 'java')  # True when running under Jython

def file_exists(filename):
    if hasattr(os, 'access'):
        return os.access(filename, os.F_OK)
    elif using_java:
        # Jython builds without os.access: fall back to java.io.File
        from java.io import File
        return File(filename).exists()
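
A quick usage sketch (the file name is hypothetical):

print file_exists('/tmp/example.txt')   # -> True or False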
Example #2
def saveGitBranch(branch):
    RELOAD_LOCK.lock()
    try:
        FileUtils.writeStringToFile(File(getGitBranchFile()), branch)
    finally:
        RELOAD_LOCK.unlock()
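
A matching reader under the same lock, as a hypothetical sketch mirroring the write path above (FileUtils is Apache Commons IO, File is java.io.File):

def loadGitBranch():
    # hypothetical counterpart to saveGitBranch, guarded by the same lock
    RELOAD_LOCK.lock()
    try:
        return FileUtils.readFileToString(File(getGitBranchFile()))
    finally:
        RELOAD_LOCK.unlock()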
Example #3
import java.io.File
import scala.collection.mutable.HashMap
import scala.collection.mutable.ListBuffer
import scala.collection.mutable.HashSet
import scala.xml.XML
import graph.STNode
import graph.TEdge
import models.{EdgePathEvent, MotionPathEvent}
import timeseries.TimeSeries
import ent.Weight
import ent.Point

case class EdgePair(srcId: String, dstId: String)

object JavaSea {
  def loadGraph: (Set[STNode], Set[TEdge], TimeSeries) = {
    val directory = new File("C:\\Users\\cmichael\\IdeaProjects\\sst\\data\\JavaSea")
    val files = directory.listFiles.sortWith(_.getName < _.getName)

    val nodesHash = new HashMap[String, ListBuffer[Point]]()
    val edgesHash = new HashMap[EdgePair, ListBuffer[Weight]]()

    val stNodes = new HashSet[STNode]()
    val tEdges = new HashSet[TEdge]()

    val timeSeries = new TimeSeries
    var timeStep = 0

    var prevTouchedNodes = new HashSet[String]()
    var prevTouchedEdges = new HashSet[EdgePair]()

    def newEdgePathEvent(edge: EdgePair) = {
Example #4
    def test_listdir(self):
        # It is hard to avoid Unicode paths on systems like OS X. Use
        # relative paths from a temp CWD to work around this; the assertions
        # below pin down how bytes and unicode paths each behave.
        with test_support.temp_cwd() as new_cwd:

            basedir = os.path.join(".", "unicode")
            self.assertIs(type(basedir), bytes)
            chinese_path = os.path.join(basedir, u"中文")
            self.assertIs(type(chinese_path), unicode)
            home_path = os.path.join(chinese_path, u"首页")
            os.makedirs(home_path)

            FS = sys.getfilesystemencoding()

            with open(os.path.join(home_path, "test.txt"), "w") as test_file:
                test_file.write("42\n")

            # listdir(bytes) includes encoded form of 中文
            entries = os.listdir(basedir)
            self.assertIn(u"中文".encode(FS), entries)
            for entry in entries:
                self.assertIs(type(entry), bytes)

            # listdir(unicode) includes unicode form of 首页
            entries = os.listdir(chinese_path)
            self.assertIn(u"首页", entries)
            for entry in entries:
                self.assertIs(type(entry), unicode)

            # glob.glob builds on os.listdir; note that we don't use
            # Unicode paths in the arg to glob so the result is bytes
            self.assertEqual(glob.glob(os.path.join("unicode", "*")),
                             [os.path.join(u"unicode", u"中文").encode(FS)])
            self.assertEqual(
                glob.glob(os.path.join("unicode", "*", "*")),
                [os.path.join(u"unicode", u"中文", u"首页").encode(FS)])
            self.assertEqual(glob.glob(os.path.join("unicode", "*", "*", "*")),
                             [
                                 os.path.join(u"unicode", u"中文", u"首页",
                                              "test.txt").encode(FS)
                             ])

            # Now use a Unicode path as well as in the glob arg
            self.assertEqual(glob.glob(os.path.join(u"unicode", "*")),
                             [os.path.join(u"unicode", u"中文")])
            self.assertEqual(glob.glob(os.path.join(u"unicode", "*", "*")),
                             [os.path.join(u"unicode", u"中文", u"首页")])
            self.assertEqual(
                glob.glob(os.path.join(u"unicode", "*", "*", "*")),
                [os.path.join(u"unicode", u"中文", u"首页", "test.txt")])

            # Verify Java integration. But we will need to construct
            # an absolute path since chdir doesn't work with Java
            # (except for subprocesses, like below in test_env)
            # new_cwd is bytes while chinese_path is unicode. But new_cwd is
            # not guaranteed to be just ascii, so decode it once, before the loop.
            new_cwd = new_cwd.decode(FS)
            for entry in entries:  # list(unicode)
                entry_path = os.path.join(new_cwd, chinese_path, entry)
                f = File(entry_path)
                self.assertTrue(
                    f.exists(),
                    "File %r (%r) should be testable for existence" %
                    (f, entry_path))
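
In brief, the contract the test above pins down (a summary, not part of the original test):

# Python 2 / Jython semantics exercised by test_listdir:
#   os.listdir(bytes_path)   -> entries as byte strings
#   os.listdir(unicode_path) -> entries as unicode strings
# glob.glob inherits the result type from the type of its pattern argument.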
Example #5

def validateServerProperty(domainProperties):
    error = 0
    
    machines = domainProperties.getProperty('wls.domain.machines')
    clusters = domainProperties.getProperty('wls.clusters')
    servers = domainProperties.getProperty('wls.servers')
    if not servers is None and len(servers)>0:
        serverList = servers.split(',')
        for server in serverList:
            helper.printHeader('[VALIDATING] server ' + str(server) + ' properties')
    
            serverName = domainProperties.getProperty('wls.server.' + str(server) + '.name')
            if serverName is None or len(serverName)==0:
                error = 1
                log.error('Please verify wls.server.' + str(server) + '.name property if it exists in configuration.')
            else:
                log.debug('Server [' + str(server) + '] name property [' + str(serverName) + '] is valid.')

            targetCluster = domainProperties.getProperty('wls.server.' + str(server) + '.cluster')
            if targetCluster:
                if not clusters is None and len(clusters)>0:
                    clusterList = clusters.split(',')
                    exist = 0
                    for cluster in clusterList:
                        clusterName = domainProperties.getProperty('wls.cluster.' + str(cluster) + '.name')
                        if cluster==targetCluster:
                            exist = 1
                            break
                    if not exist:
                        error = 1
                        log.error('wls.server.' + str(server) + '.cluster property refers to a cluster [' + targetCluster + '] that does not exist within wls.clusters property.')
                    else:
                        log.debug('Server [' + str(server) + '] cluster property [' + str(clusterName) + '] is valid.')

            serverHost = domainProperties.getProperty('wls.server.' + str(server) + '.listener.address')
            if serverHost is None or len(serverHost)==0:
                serverHost = 'localhost'
            
            serverPort = domainProperties.getProperty('wls.server.' + str(server) + '.listener.port')
            if not serverPort is None and len(serverPort)>0:
                try:
                    int(serverPort)
                except ValueError:
                    log.error('Please verify wls.server.' + str(server) + '.listener.port [' + str(serverPort) + '] property.')
                else:
                    if int(serverPort)<0 or int(serverPort)>65535:
                        log.error('Please verify wls.server.' + str(server) + '.listener.port property, port number is not in valid range [0-65535].')
                    else:
                        log.debug('Server [' + str(server) + '] server port property [' + str(serverPort) + '] is valid.')
        
            enableSSL = domainProperties.getProperty('wls.server.' + str(server) + '.listener.enableSSL')
            if not enableSSL is None and len(enableSSL)>0:
                if not enableSSL.upper()=='TRUE' and not enableSSL.upper()=='FALSE':
                    error = 1
                    log.error('The wls.server.' + str(server) + '.listener.enableSSL property supports only [true,false].')
                else:
                    log.debug('Server [' + str(server) + '] ssl enable property [' + str(enableSSL) + '] is valid.')
                    
                    if enableSSL.upper()=='TRUE':
                        sslPort = domainProperties.getProperty('wls.server.' + str(server) + '.listener.sslPort')
                        if not sslPort is None and len(sslPort)>0:
                            try:
                                int(sslPort)
                            except ValueError:
                                log.error('Please verify wls.server.' + str(server) + '.listener.sslPort [' + str(sslPort) + '] property.')
                            else:
                                if int(sslPort)<0 or int(sslPort)>65535:
                                    log.error('Please verify wls.server.' + str(server) + '.listener.sslPort property, port number is not in valid range [0-65535].')
                                else:
                                    log.debug('Server [' + str(server) + '] ssl port property [' + str(sslPort) + '] is valid.')

            customvars = domainProperties.getProperty('wls.server.' + str(server) + '.customenvvars')
            if not customvars is None and len(customvars)>0:
                customvarList = customvars.split(',')
                for customvar in customvarList:
                    helper.printHeader('[VALIDATING] Custom environment variable ' + str(customvar) + ' properties')
                    
                    customvarText = domainProperties.getProperty('wls.server.' + str(server) + '.customenvvar.' + str(customvar) + '.text')
                    if customvarText is None or len(customvarText)==0:
                        error = 1
                        log.error('Please verify wls.server.' + str(server) + '.customenvvar.' + str(customvar) + '.text property if it exists in configuration.')
                    else:
                        if customvarText.find('=')!=-1:
                            log.debug('Custom environment variable [' + str(customvar) + '] text property [' + str(customvarText) + '] is valid.')
                        else:
                            error = 1
                            log.error('Please verify wls.server.' + str(server) + '.customenvvar.' + str(customvar) + '.text property, this is applicable only for key-value pairs format [<name>=<value>].')

            serverChannelName = domainProperties.getProperty('wls.server.' + str(server) + '.channel.name')
            if not serverChannelName is None and len(serverChannelName)>0:
            
                serverChannelProtocol = domainProperties.getProperty('wls.server.' + str(server) + '.channel.protocol')
                if serverChannelProtocol not in ['t3', 't3s', 'http', 'https', 'iiop', 'iiops', 'ldap', 'ldaps', 'admin']:
                    error = 1
                    log.error('The wls.server.' + str(server) + '.channel.protocol property supports only [t3,t3s,http,https,iiop,iiops,ldap,ldaps,admin].')
                else:
                    log.debug('Server [' + str(server) + '] channel protocol property [' + str(serverChannelProtocol) + '] is valid.')
                    
            serverChannelPort = domainProperties.getProperty('wls.server.' + str(server) + '.channel.listener.port')
            if not serverChannelPort is None and len(serverChannelPort)>0:
                try:
                    int(serverChannelPort)
                except ValueError:
                    log.error('Please verify wls.server.' + str(server) + '.channel.listener.port [' + str(serverChannelPort) + '] property.')
                else:
                    if int(serverChannelPort)<0 or int(serverChannelPort)>65535:
                        log.error('Please verify wls.server.' + str(server) + '.channel.listener.port property, port number is not in valid range [0-65535].')
                    else:
                        log.debug('Server [' + str(server) + '] channel port [' + str(serverChannelPort) + '] is valid.')
        
            serverChannelPublicPort = domainProperties.getProperty('wls.server.' + str(server) + '.channel.listener.publicPort')
            if not serverChannelPublicPort is None and len(serverChannelPublicPort)>0:
                try:
                    int(serverChannelPublicPort)
                except ValueError:
                    log.error('Please verify wls.server.' + str(server) + '.channel.listener.publicPort [' + str(serverChannelPublicPort) + '] property.')
                else:
                    if int(serverChannelPublicPort)<0 or int(serverChannelPublicPort)>65535:
                        log.error('Please verify wls.server.' + str(server) + '.channel.listener.publicPort property, port number is not in valid range [0-65535].')
                    else:
                        log.debug('Server [' + str(server) + '] channel public port [' + str(serverChannelPublicPort) + '] is valid.')
        
            httpEnable = domainProperties.getProperty('wls.server.' + str(server) + '.channel.httpEnable')
            if not httpEnable is None and len(httpEnable)>0:
                if not httpEnable.upper()=='TRUE' and not httpEnable.upper()=='FALSE':
                    error = 1
                    log.error('The wls.server.' + str(server) + '.channel.httpEnable property supports only [true,false].')
                else:
                    log.debug('Server [' + str(server) + '] http channel enable property [' + str(httpEnable) + '] is valid.')
        
            enableTunneling = domainProperties.getProperty('wls.server.' + str(server) + '.enableTunneling')
            if not enableTunneling is None and len(enableTunneling)>0:
                if not enableTunneling.upper()=='TRUE' and not enableTunneling.upper()=='FALSE':
                    error = 1
                    log.error('The wls.server.' + str(server) + '.enableTunneling property supports only [true,false].')
                else:
                    log.debug('Server [' + str(server) + '] tunnelling enable property [' + str(enableTunneling) + '] is valid.')
            
            targetMachine = domainProperties.getProperty('wls.server.' + str(server) + '.machine')
            if not targetMachine is None and len(targetMachine)>0:

                if not machines is None and len(machines)>0:
                    machineList = machines.split(',')
                    exist = 0
                    for machine in machineList:
                        machineName = domainProperties.getProperty('wls.domain.machine.' + str(machine) + '.name')
                        if machine==targetMachine:
                            exist = 1
                            break
                    if not exist:
                        error = 1
                        log.error('wls.server.' + str(server) + '.machine property refers to a machine that does not exist within the wls.domain.machines property list.')
                    else:
                        log.debug('Server [' + str(server) + '] machine property [' + str(targetMachine) + '] is valid.')
                        
            servercustomlog = domainProperties.getProperty('wls.server.' + str(server) + '.log.custom')
            if not servercustomlog is None and len(servercustomlog)>0:
               
                if not servercustomlog.upper()=='TRUE' and not servercustomlog.upper()=='FALSE':
                    error = 1
                    log.error('The wls.server.' + str(server) + '.log.custom property supports only [true,false].')
                else:
                    log.debug('Server [' + str(server) + '] custom log enable property [' + str(servercustomlog) + '] is valid.')
                    if servercustomlog.upper()=='TRUE':
                        filename = domainProperties.getProperty('wls.server.' + str(server) + '.log.filename')
                        if not filename is None and len(filename)>0:
                            file = File(filename)
                            if file.isAbsolute():
                                if not file.exists():
                                    log.debug('[NOTE] Please make sure the user running this script has permission to create directory and file [' + str(filename) + '] on host [' + str(serverHost) + '].')

                        limitNumberOfFile = domainProperties.getProperty('wls.server.' + str(server) + '.log.limitNumOfFile')
                        if not limitNumberOfFile is None and len(limitNumberOfFile)>0:
                            if not limitNumberOfFile.upper()=='TRUE' and not limitNumberOfFile.upper()=='FALSE':
                                error = 1
                                log.error('The wls.server.' + str(server) + '.log.limitNumOfFile property supports only [true,false].')
                            else:
                                log.debug('Server [' + str(server) + '] log limit number of file property [' + str(limitNumberOfFile) + '] is valid.')
        
                        fileToRetain = domainProperties.getProperty('wls.server.' + str(server) + '.log.fileToRetain')
                        if not fileToRetain is None and len(fileToRetain)>0:
                            try:
                                int(fileToRetain)
                            except ValueError:
                                log.error('Please verify wls.server.' + str(server) + '.log.fileToRetain [' + str(fileToRetain) + '] property.')
                            else:
                                if int(fileToRetain)<1 or int(fileToRetain)>99999:
                                    log.error('Please verify wls.server.' + str(server) + '.log.fileToRetain property, number is not in valid range [1-99999].')
                                else:
                                    log.debug('Server [' + str(server) + '] log file to retain [' + str(fileToRetain) + '] is valid.')
        
                        logRotateOnStartup = domainProperties.getProperty('wls.server.' + str(server) + '.log.rotateLogOnStartup')
                        if not logRotateOnStartup is None and len(logRotateOnStartup)>0:
                            if not logRotateOnStartup.upper()=='TRUE' and not logRotateOnStartup.upper()=='FALSE':
                                error = 1
                                log.error('The wls.server.' + str(server) + '.log.rotateLogOnStartup property supports only [true,false].')
                            else:
                                log.debug('Server [' + str(server) + '] log rotate on startup property [' + str(logRotateOnStartup) + '] is valid.')

                        rotationType = domainProperties.getProperty('wls.server.' + str(server) + '.log.rotationType')
                        if not rotationType is None and len(rotationType)>0:
                            if not rotationType == 'bySize' and not rotationType == 'byTime':
                                error = 1
                                log.error('The wls.server.' + str(server) + '.log.rotationType property supports only [bySize,byTime].')
                            else:
                                log.debug('Server [' + str(server) + '] log rotation type property [' + str(rotationType) + '] is valid.')

                            if rotationType == 'bySize':
                                fileMinSize = domainProperties.getProperty('wls.server.' + str(server) + '.log.fileMinSize')
                                if not fileMinSize is None and len(fileMinSize)>0:
                                    try:
                                        int(fileMinSize)
                                    except ValueError:
                                        log.error('Please verify wls.server.' + str(server) + '.log.fileMinSize [' + str(fileMinSize) + '] property.')
                                    else:
                                        if int(fileMinSize)<0 or int(fileMinSize)>65535:
                                            log.error('Please verify wls.server.' + str(server) + '.log.fileMinSize [' + str(fileMinSize) + '] property, number is not in valid range [0-65535].')
                                        else:
                                            log.debug('Server [' + str(server) + '] log file min size [' + str(fileMinSize) + '] is valid.')
                                
                            if rotationType == 'byTime':
                                rotationTime = domainProperties.getProperty('wls.server.' + str(server) + '.log.rotationTime')
                                if not rotationTime is None and len(rotationTime)>0:
                                    if rotationTime.find(':')==-1:
                                        error = 1
                                        log.error('Please verify wls.server.' + str(server) + '.log.rotationTime [' + str(rotationTime) + '] property, the property supports time format [HH:MM].')
                                    else:
                                        if len(rotationTime)<4 or len(rotationTime)>5:
                                            error = 1
                                            log.error('The wls.server.' + str(server) + '.log.rotationTime [' + str(rotationTime) + '] property, the property supports time format [HH:MM].')
                                        else:
                                            log.debug('Server [' + str(server) + '] log rotation time [' + str(rotationTime) + '] is valid.')
                                
                                fileTimespan = domainProperties.getProperty('wls.server.' + str(server) + '.log.fileTimeSpan')
                                if not fileTimespan is None and len(fileTimespan)>0:
                                    try:
                                        int(fileTimespan)
                                    except ValueError:
                                        log.error('Please verify wls.server.' + str(server) + '.log.fileTimeSpan [' + str(fileTimespan) + '] property.')
                                    else:
                                        if int(fileTimespan)<1:
                                            log.error('Please verify wls.server.' + str(server) + '.log.fileTimeSpan [' + str(fileTimespan) + '] property, number is not in valid range [>=1].')
                                        else:
                                            log.debug('Server [' + str(server) + '] log file timespan [' + str(fileTimespan) + '] is valid.')
         
                        rotationDir = domainProperties.getProperty('wls.server.' + str(server) + '.log.rotationDir')
                        if not rotationDir is None and len(rotationDir)>0:
                            file = File(rotationDir)
                            if file.isAbsolute():
                                if not file.exists():
                                    log.debug('[NOTE] Please make sure the user running this script has permission to create directory and file [' + str(rotationDir) + '] on host [' + str(serverHost) + '].')

                        fileSeverity = domainProperties.getProperty('wls.server.' + str(server) + '.log.logFileSeverity')
                        if not fileSeverity is None and len(fileSeverity)>0:
                            if fileSeverity not in ['Debug', 'Info', 'Warning']:
                                error = 1
                                log.error('The wls.server.' + str(server) + '.log.logFileSeverity property supports only [Debug,Info,Warning].')
                            else:
                                log.debug('Server [' + str(server) + '] log file severity property [' + str(fileSeverity) + '] is valid.')
                                
                        broadcastSeverity = domainProperties.getProperty('wls.server.' + str(server) + '.log.broadcastSeverity')
                        if not broadcastSeverity is None and len(broadcastSeverity)>0:
                            if broadcastSeverity not in ['Trace', 'Debug', 'Info', 'Notice', 'Warning', 'Error', 'Critical', 'Alert', 'Emergency', 'Off']:
                                error = 1
                                log.error('The wls.server.' + str(server) + '.log.broadcastSeverity property supports only [Trace,Debug,Info,Notice,Warning,Error,Critical,Alert,Emergency,Off].')
                            else:
                                log.debug('Server [' + str(server) + '] broadcast severity log property [' + str(broadcastSeverity) + '] is valid.')
                                
                        memoryBufferSeverity = domainProperties.getProperty('wls.server.' + str(server) + '.log.memoryBufferSeverity')
                        if not memoryBufferSeverity is None and len(memoryBufferSeverity)>0:
                            if memoryBufferSeverity not in ['Trace', 'Debug', 'Info', 'Notice', 'Warning', 'Error', 'Critical', 'Alert', 'Emergency', 'Off']:
                                error = 1
                                log.error('The wls.server.' + str(server) + '.log.memoryBufferSeverity property supports only [Trace,Debug,Info,Notice,Warning,Error,Critical,Alert,Emergency,Off].')
                            else:
                                log.debug('Server [' + str(server) + '] memory buffer severity log property [' + str(memoryBufferSeverity) + '] is valid.')
    
            serverhttpcustomlog = domainProperties.getProperty('wls.server.' + str(server) + '.httplog.enable')
            if not serverhttpcustomlog is None and len(serverhttpcustomlog)>0:
                if not serverhttpcustomlog.upper()=='TRUE' and not serverhttpcustomlog.upper()=='FALSE':
                    error = 1
                    log.error('The wls.server.' + str(server) + '.httplog.enable property supports only [true,false].')
                else:
                    log.debug('Server [' + str(server) + '] http custom log enable property [' + str(serverhttpcustomlog) + '] is valid.')
                    
                    if serverhttpcustomlog.upper()=='TRUE':
                        filename = domainProperties.getProperty('wls.server.' + str(server) + '.httplog.filename')
                        if not filename is None and len(filename)>0:
                            file = File(filename)
                            if file.isAbsolute():
                                if not file.exists():
                                    log.debug('[NOTE] Please make sure the user running this script has permission to create the directory and file [' + str(filename) + '] on host [' + str(serverHost) + '].')

                        rotationType = domainProperties.getProperty('wls.server.' + str(server) + '.httplog.rotationType')
                        if not rotationType is None and len(rotationType)>0:
                            if not rotationType == 'bySize' and not rotationType == 'byTime':
                                error = 1
                                log.error('The wls.server.' + str(server) + '.httplog.rotationType property supports only [bySize,byTime].')
                            else:
                                log.debug('Server [' + str(server) + '] http log rotation type property [' + str(rotationType) + '] is valid.')

                            if rotationType == 'bySize':
                                fileMinSize = domainProperties.getProperty('wls.server.' + str(server) + '.httplog.fileMinSize')
                                if not fileMinSize is None and len(fileMinSize)>0:
                                    try:
                                        int(fileMinSize)
                                    except ValueError:
                                        log.error('Please verify wls.server.' + str(server) + '.httplog.fileMinSize [' + str(fileMinSize) + '] property.')
                                    else:
                                        if int(fileMinSize)<0 or int(fileMinSize)>65535:
                                            log.error('Please verify wls.server.' + str(server) + '.httplog.fileMinSize [' + str(fileMinSize) + '] property, number is not in valid range [0-65535].')
                                        else:
                                            log.debug('Server [' + str(server) + '] http log file min size [' + str(fileMinSize) + '] is valid.')
                                
                            if rotationType == 'byTime':
                                rotationTime = domainProperties.getProperty('wls.server.' + str(server) + '.httplog.rotationTime')
                                if not rotationTime is None and len(rotationTime)>0:
                                    if rotationTime.find(':')==-1:
                                        error = 1
                                        log.error('Please verify wls.server.' + str(server) + '.httplog.rotationTime [' + str(rotationTime) + '] property, the property supports time format [HH:MM].')
                                    else:
                                        if len(rotationTime)<4 or len(rotationTime)>5:
                                            error = 1
                                            log.error('The wls.server.' + str(server) + '.httplog.rotationTime [' + str(rotationTime) + '] property, the property supports time format [HH:MM].')
                                        else:
                                            log.debug('Server [' + str(server) + '] http log rotation time [' + str(rotationTime) + '] is valid.')
                                
                                fileTimespan = domainProperties.getProperty('wls.server.' + str(server) + '.httplog.fileTimeSpan')
                                if not fileTimespan is None and len(fileTimespan)>0:
                                    try:
                                        int(fileTimespan)
                                    except ValueError:
                                        log.error('Please verify wls.server.' + str(server) + '.httplog.fileTimeSpan [' + str(fileTimespan) + '] property.')
                                    else:
                                        if int(fileTimespan)<1:
                                            log.error('Please verify wls.server.' + str(server) + '.httplog.fileTimeSpan [' + str(fileTimespan) + '] property, number is not in valid range [>=1].')
                                        else:
                                            log.debug('Server [' + str(server) + '] log file timespan [' + str(fileTimespan) + '] is valid.')
        
                        rotationDir = domainProperties.getProperty('wls.server.' + str(server) + '.httplog.rotationDir')
                        if not rotationDir is None and len(rotationDir)>0:
                            file = File(rotationDir)
                            if file.isAbsolute():
                                if not file.exists():
                                    log.debug('[NOTE] Please make sure the user running this script has permission to create directory and file [' + str(rotationDir) + '] on host [' + str(serverHost) + '].')

    return error
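
A hedged sketch of driving this validator from a WLST/Jython entry point; the properties file name is hypothetical, and domainProperties is a plain java.util.Properties, matching the getProperty calls above:

from java.io import FileInputStream
from java.util import Properties

domainProperties = Properties()
domainProperties.load(FileInputStream('wls-domain.properties'))  # hypothetical file name

if validateServerProperty(domainProperties):
    print 'Server property validation failed; check the error log for details.'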
Example #6
import sys
import os

try:
    from java.io import File
except ImportError:
    print "Note: this file should be run using ..\\..\\..\\nC.bat -python XXX.py' or '../../../nC.sh -python XXX.py'"
    print "See http://www.neuroconstruct.org/docs/python.html for more details"
    quit()

sys.path.append(os.environ["NC_HOME"] + "/pythonNeuroML/nCUtils")

import ncutils as nc  # Many useful functions such as SimManager.runMultipleSims found here
from ucl.physiol.neuroconstruct.hpc.mpi import MpiSettings

projFile = File(os.getcwd(), "../Thalamocortical.ncx")

##############  Main settings  ##################

# mpiConfig = MpiSettings.MATLEM_1PROC   # alternative MPI configuration
mpiConfig = MpiSettings.LOCAL_SERIAL

simConfigs = []

simConfigs.append("Default Simulation Configuration")

##########################################################################
#
#          Note: any of the sim configs below will need a small dt and
#          a fine spatial discretisation (maxElecLens) to have a close
#          match between NEURON, MOOSE & GENESIS
Example #7
from sys import *
from time import *
from os import environ  # environ lives in os, not sys

from java.io import File

from ucl.physiol.neuroconstruct.project import ProjectManager
from ucl.physiol.neuroconstruct.utils import NumberGenerator
from ucl.physiol.neuroconstruct.hpc.mpi import MpiSettings
from ucl.physiol.neuroconstruct.simulation import SimulationsInfo
from ucl.physiol.neuroconstruct.cell.utils import CellTopologyHelper
from ucl.physiol.neuroconstruct.project import SimPlot

path.append(environ["NC_HOME"]+"/pythonNeuroML/nCUtils")
import ncutils as nc

projFile = File("../../Thalamocortical.ncx")


###########  Main settings  ###########

simConfig=              "TempSimConfig"
simDuration =           20 # ms                                ##
simDt =                 0.025 # ms
neuroConstructSeed =    12443                                   ##
simulatorSeed =         234434                                   ##

simulators =             ["NEURON"]

simRefPrefix =          "N_"                               ##
suggestedRemoteRunTime = 1620                                     ##
Example #8
def main(args):
    vcf_reader = hgsc_vcf.Reader(args.INPUTVCF)
    vcf_container_cosmic = VCFContainer(hgsc_vcf.Reader(args.COSMICVCF),
                                        args.buffer)
    # read in the dbsnp data
    # connect to the reference file
    ifasta = IndexedFastaSequenceFile(File(args.reference))
    add_command_to_reader(
        vcf_reader, '##COMMAND=<ID=annotate_vcf_cosmic.py,Params="%s">' %
        " ".join(sys.argv))
    # add the COSMIC header info
    add_info_to_reader(
        vcf_reader, '##INFO=<ID=COSMIC,Number=.,Type=String,Description="' +
        'COSMIC info, can be one of NONE, BUFFER, CODON, SITE.  ' +
        'All but NONE are accompanied by AA|CDS|CNT BUFFER indicates the COSMIC site is within %(buffer)sbp of the position.  example: '
        +
        'SITE|p.P228fs*227|c.682_683insT|3 or NONE.  VCF file used was %(cosmicvcf)s.">\n'
        % {
            'buffer': str(args.buffer),
            'cosmicvcf': args.COSMICVCF
        })

    # add the context
    add_info_to_reader(
        vcf_reader,
        "##INFO=<ID=CONTEXT,Number=1,Type=String,Description=\"Base context around variant. [POS - 5, POS + len(REF) + 4]\">\n"
    )
    # add the validation status info
    add_info_to_reader(
        vcf_reader,
        "##INFO=<ID=DBVS,Number=1,Type=String,Description=\"dbSNP validation status, | separated\">\n"
    )
    # get the format for the vep annotations
    _vep_format = get_csq_format([
        h for h in vcf_reader.header.get_headers('INFO', 'CSQ')
    ][0].fields['Description'])

    vcf_writer = hgsc_vcf.Writer(args.OUTPUTVCF, vcf_reader.header)
    vcf_writer.write_header()
    for record in vcf_reader:
        try:
            ## check that the position is annotated with CSQ, if not then this is a write through
            if 'CSQ' in record['INFO']:
                # matches are intersecting hits in the VCF

                _matches = vcf_container_cosmic.intersect(record)
                _csq_l = [
                    dict(zip(_vep_format, _csq.split('|')))
                    for _csq in record['INFO'].get('CSQ')
                ]
                _info = generate_cosmic_info(_matches, _csq_l, record)
                record['INFO']['COSMIC'] = _info
                # extract the dbsnp validation rsids
                _existing_ids = [
                    _id for _csq in _csq_l
                    for _id in _csq['Existing_variation'].split('&')
                ]
                record['INFO']['DBVS'] = [
                    generate_valstatus_info(_existing_ids, args.DBSNPVAL)
                ]
            record['INFO']['CONTEXT'] = [
                str(
                    String(
                        ifasta.getSubsequenceAt(
                            record['CHROM'], record['POS'] - 5, record['POS'] +
                            len(record['REF']) + 4).getBases()))
            ]

        except:
            logger.exception("Error in record modification")
        vcf_writer.write_record(record)
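
To make the CONTEXT window arithmetic above concrete, a small worked example with hypothetical values:

pos, ref = 100, 'AT'                      # hypothetical POS and REF
start, end = pos - 5, pos + len(ref) + 4  # the [POS - 5, POS + len(REF) + 4] window
print start, end, end - start + 1         # -> 95 106 12: 5bp flanking each side of REF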
Example #9
    def handleImpl(self, exchange, ibuff):
        r = ""
        try:
            uri = exchange.getRequestURI()
            command = uri.getPath().split("/")
            if not command[1] == "ajax":
                raise

            query = {}
            try:
                for x in uri.getRawQuery().split("&"):
                    x2 = URLDecoder().decode(x, 'UTF-8')
                    k, v = x2.split("=", 1)
                    query[k] = v
            except:
                pass

            if command[2] == "webadmin":

                if command[3] == "stop":
                    q = QuestManager.getInstance().getQuest(WebAdmin.qn)
                    try:
                        q.cancelQuestTimers("webadmin_stop")
                        q.startQuestTimer("webadmin_stop", 1000, None, None)
                    except:
                        pass
                    r = "webAdmin stop"
                elif command[3] == "restart":
                    q = QuestManager.getInstance().getQuest(WebAdmin.qn)
                    try:
                        q.cancelQuestTimers("webadmin_restart")
                        q.startQuestTimer("webadmin_restart", 1000, None, None)
                    except:
                        pass
                    r = "webAdmin restart"
                else:
                    exchange.sendResponseHeaders(501, 0)
                    return

            elif command[2] == "quest":

                if command[3] == "list":
                    r2 = {}
                    for quest in QuestManager.getInstance(
                    ).getAllManagedScripts():
                        qn = quest.getName()
                        r2[qn] = {}
                        if 'req' in query:
                            for c in query['req'].split(","):
                                r2[qn][c] = self.getQuestInfo(c, quest)
                    r2 = r2.items()
                    r2.sort()
                    r = JSON().toJSON(r2)
                    rh = exchange.getResponseHeaders()
                    rh.set("Content-Type", "application/json")

                elif command[3] == "unload":
                    if not self.checkQuery(query, ['name']):
                        raise
                    QuestManager.getInstance().getQuest(query['name']).unload()

                elif command[3] == "reload":
                    if not self.checkQuery(query, ['name']):
                        raise
                    QuestManager.getInstance().getQuest(query['name']).reload()

                elif command[3] == "get_source":
                    if not self.checkQuery(query, ['name']):
                        raise
                    file = QuestManager.getInstance().getQuest(
                        query['name']).getScriptFile()
                    try:
                        i = open(file.toString(), "r")
                    except:
                        exchange.sendResponseHeaders(404, 0)
                        return
                    r = i.read()
                    i.close()
                    rh = exchange.getResponseHeaders()
                    rh.set("Content-Type", "text/plain; charset=utf-8")

                else:
                    exchange.sendResponseHeaders(501, 0)
                    return

            elif command[2] == "script":
                if command[3] == "writefile_exec":
                    if not self.checkQuery(query, ['file']):
                        raise
                    query['file'] = query['file'].replace("\\", "/")
                    query['file'] = query['file'].split("/")[-1]
                    path = sourcepath.replace(
                        "\\",
                        "/") + "/custom/WebAdmin/WebRoot/temp/" + query['file']
                    o = open(path, "w")
                    o.write(ibuff)
                    o.close()
                    file = File(path)
                    try:
                        L2ScriptEngineManager.getInstance().executeScript(file)
                    except ScriptException, e:
                        L2ScriptEngineManager.getInstance(
                        ).reportScriptFileError(file, e)

                elif command[3] == "execjy":
                    pre_script = """
import sys
sys.stdout = out_writer
sys.stderr = out_writer
"""
                    post_script = """
import sys
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
"""
                    r = self.exec_script("jython", pre_script + ibuff,
                                         post_script)

                elif command[3] == "execbsh":
                    r = self.exec_script("bsh", ibuff)

                elif command[3] == "execjs":
                    r = self.exec_script("js", ibuff)

                elif command[3] == "execjava":
                    r = self.exec_script("java", ibuff)

                else:
                    exchange.sendResponseHeaders(501, 0)
                    return

            elif command[2] == "player":

                if command[3] == "list":
                    r2 = {}
                    for player in L2World.getInstance().getAllPlayersArray():
                        objid = self.getPlayerInfo("objid", player)
                        r2[objid] = {}
                        if 'req' in query:
                            for c in query['req'].split(","):
                                r2[objid][c] = self.getPlayerInfo(c, player)
                    r = JSON().toJSON(r2)
                    rh = exchange.getResponseHeaders()
                    rh.set("Content-Type", "application/json")

                elif command[3] == "info":
                    if not self.checkQuery(query, ['objid', 'req']):
                        raise
                    player = L2World.getInstance().getPlayer(
                        int(query['objid']))
                    if not player:
                        raise
                    r2 = {}
                    for c in query['req'].split(","):
                        r2[c] = self.getPlayerInfo(c, player)
                    r = JSON().toJSON(r2)
                    rh = exchange.getResponseHeaders()
                    rh.set("Content-Type", "application/json")

                elif command[3] == "edit":
                    if not self.checkQuery(query, ['objid']):
                        raise
                    player = L2World.getInstance().getPlayer(
                        int(query['objid']))
                    if not player:
                        raise
                    del query['objid']
                    for c in query:
                        self.setPlayerInfo(c, query[c], player=player)

                elif command[3] == "teleport":
                    if not self.checkQuery(query, ['objid', 'x', 'y', 'z']):
                        raise
                    x = int("%d" % float(query['x']))
                    y = int("%d" % float(query['y']))
                    z = int("%d" % float(query['z']))
                    player = L2World.getInstance().getPlayer(
                        int(query['objid']))
                    if not player:
                        raise
                    player.teleToLocation(x, y, z, 0, False)
                else:
                    exchange.sendResponseHeaders(501, 0)
                    return
Example #10
    def process(self, dataSource, progressBar):

        # we don't know how much work there is yet
        progressBar.switchToIndeterminate()

        skCase = Case.getCurrentCase().getSleuthkitCase()
        fileManager = Case.getCurrentCase().getServices().getFileManager()

        # Create iTunes directory in temp directory, if it exists then continue on processing
        tempDir = os.path.join(Case.getCurrentCase().getTempDirectory(),
                               "iTunes")
        self.log(Level.INFO, "create Directory " + tempDir)
        try:
            os.mkdir(tempDir)
        except:
            self.log(Level.INFO, "iTunes Directory already exists " + tempDir)

        # Create iTunes directory in modules directory, if it exists then continue on processing
        modDir = os.path.join(Case.getCurrentCase().getModuleDirectory(),
                              "iTunes")
        self.log(Level.INFO, "create Directory " + modDir)
        try:
            os.mkdir(modDir)
        except:
            self.log(Level.INFO, "iTunes Directory already exists " + modDir)

        files = fileManager.findFiles(dataSource, "Manifest.db",
                                      "Apple Computer/MobileSync/Backup/")
        numFiles = len(files)
        self.log(Level.INFO,
                 "Number of Manifestdb Files found ==> " + str(numFiles))

        for file in files:

            # Check if the user pressed cancel while we were busy
            if self.context.isJobCancelled():
                return IngestModule.ProcessResult.OK

            #self.log(Level.INFO, "Parent Path ==> " + str(file.getParentPath()))
            if "Apple Computer/MobileSync/Backup/" in file.getParentPath():
                #self.log(Level.INFO, str(file))
                # This is to get the base directory of the iTunes backup in case there is more than one backup
                (head, tail) = os.path.split(file.getParentPath())
                (head2, backupDir) = os.path.split(head)
                self.log(Level.INFO, "Backup Dir is ==> " + backupDir)
                try:
                    os.mkdir(os.path.join(modDir, backupDir))
                except:
                    self.log(
                        Level.INFO, "Failed to create directory " +
                        os.path.join(modDir, backupDir))

                # Save the DB locally in the temp folder. use file id as name to reduce collisions
                lclDbPath = os.path.join(
                    tempDir,
                    str(file.getId()) + "-" + file.getName())
                ContentUtils.writeToFile(file, File(lclDbPath))
                # Process the Manifest.db database that was just copied out
                dbConnection = self.connnectToManifestDb(lclDbPath)
                fileInfo = self.processManifestDb(
                    dbConnection, os.path.join(modDir, backupDir))
                dbConnection.close()
                #                self.log(Level.INFO, str(fileInfo))
                self.writeBackupFiles(fileInfo,
                                      os.path.join(modDir, backupDir),
                                      file.getParentPath(), fileManager,
                                      dataSource)
            else:
                self.log(
                    Level.INFO, "Skipping File " + file.getName() +
                    " In Path " + file.getParentPath())

        # Add Backup Files back into Autopsy as their own Data Source
        self.addBackupFilesToDataSource(dataSource, modDir)

        # After all databases, post a message to the ingest messages in box.
        message = IngestMessage.createMessage(
            IngestMessage.MessageType.DATA, "ItunesBackup",
            " Itunes Backup has been analyzed ")
        IngestServices.getInstance().postMessage(message)

        return IngestModule.ProcessResult.OK
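
The connnectToManifestDb helper is not part of this excerpt; a minimal sketch of how Autopsy Jython modules commonly open a local SQLite database over JDBC (an assumption, not necessarily this module's actual code):

from java.lang import Class
from java.sql import DriverManager

def connnectToManifestDb(self, lclDbPath):
    # load the SQLite JDBC driver, then connect to the copied Manifest.db
    # (method name kept as the module spells it)
    Class.forName("org.sqlite.JDBC").newInstance()
    return DriverManager.getConnection("jdbc:sqlite:%s" % lclDbPath)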
Example #11
#! /usr/bin/env jython

###
# mainLoop.py
# Wrapper to enable running outside of JAS3
# 03-AUG-2005 Jan Strube
###
from java.io import File
from org.lcsim.util.aida import AIDA
from org.lcsim.util.loop import LCSimLoop
## importing the Java analysis module
import Analysis101
## if Analysis102 cannot be found, please uncomment and modify
## the following two lines to tell Jython where to find it
# import sys
# sys.path.append('full path to Python module')
# importing the Analysis102 class in the Jython module Analysis102
from Analysis102 import Analysis102

loop = LCSimLoop()
input = File("psiMuMu.slcio")
loop.setLCIORecordSource(input)
loop.add(Analysis101())
loop.add(Analysis102())
# loop over all events with -1 or over any other positive number
loop.loop(-1)
loop.dispose()
Example #12
    def process(self, dataSource, progressBar):

        # we don't know how much work there is yet
        progressBar.switchToIndeterminate()

        # case insensitive SQL LIKE clause is used to query the case database
        # FileManager API: http://sleuthkit.org/autopsy/docs/api-docs/4.4.1/classorg_1_1sleuthkit_1_1autopsy_1_1casemodule_1_1services_1_1_file_manager.html
        fileManager = Case.getCurrentCase().getServices().getFileManager()

        files = []
        for extension in self.extensions:
            try:
                files.extend(fileManager.findFiles(dataSource,
                                                   "%" + extension))
            except TskCoreException:
                self.log(Level.INFO,
                         "Error getting files from: '" + extension + "'")

        numFiles = len(files)
        if not numFiles:
            self.log(Level.WARNING, "Didn't find any usable files!")
            return IngestModule.ProcessResult.OK

        # Check if the user pressed cancel while we were busy
        if self.context.isJobCancelled():
            return IngestModule.ProcessResult.OK

        self.log(Level.INFO, "Got " + str(numFiles) + " images!")

        tempDir = Case.getCurrentCase().getTempDirectory()
        #
        # Copying the files to temp directory
        #
        try:
            os.mkdir(tempDir + "\\" + dataSource.getName())
            i = 0
            for file in files:
                # Skip zero-length (corrupted) files
                if file.getSize() > 0:
                    filename, file_extension = os.path.splitext(file.getName())
                    ContentUtils.writeToFile(
                        file,
                        File(tempDir + "\\" + dataSource.getName() + "\\" +
                             str(i) + file_extension))
                i += 1
        except:
            self.log(
                Level.INFO,
                "Directory already exists for this data source skipping file copy"
            )

        # Location of data to search
        source = tempDir + "\\" + dataSource.getName()
        # Location where the output of executable will appear
        outFile = source + "\\facesFound.txt"

        if os.path.exists(outFile):
            os.remove(outFile)

        returnCode = 0
        try:
            #
            # Blocking call, we will wait until it finishes which will take a while
            #
            returnCode = subprocess.call(
                [self.pathToExe, source, outFile, self.detector])
        except OSError:
            self.log(Level.SEVERE, "Couldn't run Facial_Detection.exe!")
            return IngestModule.ProcessResult.OK

        if returnCode:
            if returnCode <= len(self.errorListDetection):
                self.log(Level.SEVERE, self.errorListDetection[returnCode])
            else:
                self.log(
                    Level.SEVERE,
                    "unknown error ocurred in Facial_Detection.exe! it returned: "
                    + str(returnCode))
            if self.deleteAfter:
                self.deleteFiles(tempDir + "\\" + dataSource.getName())
            return IngestModule.ProcessResult.ERROR

        self.log(Level.INFO, "Face detection terminated with no problems")

        # Checking if cancel was pressed before starting another job
        if self.context.isJobCancelled():
            return IngestModule.ProcessResult.OK

        outRec = source + "\\ImagesWithEspecificFace.txt"

        # Use blackboard class to index blackboard artifacts for keyword search
        blackboard = Case.getCurrentCase().getServices().getBlackboard()
        artifactType = BlackboardArtifact.ARTIFACT_TYPE.TSK_INTERESTING_FILE_HIT

        if self.localSettings.getFace():
            self.log(Level.INFO,
                     "Looking for person in: " + self.localSettings.getFace())

            if os.path.exists(outRec):
                os.remove(outRec)
            try:
                #
                # Blocking call, we will wait until it finishes
                #
                returnCode = subprocess.call([
                    self.pathToExeRec, source,
                    self.localSettings.getFace(), self.shape, self.rec, outRec
                ])
            except OSError:
                self.log(Level.SEVERE, "Couldn't run Facial_Recognition.exe!")
                return IngestModule.ProcessResult.OK

            if returnCode:
                if returnCode <= len(self.errorListRecognition):
                    self.log(Level.SEVERE,
                             self.errorListRecognition[returnCode])
                else:
                    self.log(
                        Level.SEVERE,
                        "unknown error ocurred in Facial_Recognition.exe! it returned: "
                        + str(returnCode))
                if self.deleteAfter:
                    self.deleteFiles(tempDir + "\\" + dataSource.getName())
                return IngestModule.ProcessResult.ERROR

            self.log(Level.INFO,
                     "Face recognition terminated with no problems")

            with open(outRec, "r") as out:

                for line in out:

                    data = line.split('.')
                    pos = int(data[0])

                    interestingFile = files[pos]

                    artifactList = interestingFile.getArtifacts(artifactType)

                    if artifactList:
                        self.log(Level.INFO,
                                 "Artifact already exists! ignoring")
                    else:
                        # Make an artifact on the blackboard.  TSK_INTERESTING_FILE_HIT is a generic type of
                        # artifact.  Refer to the developer docs for other examples.
                        art = interestingFile.newArtifact(artifactType)

                        att = BlackboardAttribute(
                            BlackboardAttribute.ATTRIBUTE_TYPE.TSK_SET_NAME,
                            FaceModuleFactory.moduleName,
                            "Wanted face founded in")
                        art.addAttribute(att)

                        try:
                            # index the artifact for keyword search
                            blackboard.indexArtifact(art)
                        except Blackboard.BlackboardException as e:
                            self.log(
                                Level.SEVERE, "Error indexing artifact " +
                                art.getDisplayName())

        else:
            self.log(Level.INFO,
                     "No Positive folder given, will only do detection")

        # Parse output file for files with faces and mark them as interesting
        count = 0
        with open(outFile, "r") as out:

            for line in out:
                count += 1

                data = line.split('.')
                pos = int(data[0])

                interestingFile = files[pos]

                artifactList = interestingFile.getArtifacts(artifactType)
                if artifactList:
                    self.log(Level.INFO, "Artifact already exists! ignoring")
                else:
                    # Make an artifact on the blackboard.  TSK_INTERESTING_FILE_HIT is a generic type of
                    # artifact.  Refer to the developer docs for other examples.
                    art = interestingFile.newArtifact(artifactType)
                    att = BlackboardAttribute(
                        BlackboardAttribute.ATTRIBUTE_TYPE.TSK_SET_NAME,
                        FaceModuleFactory.moduleName, "Image with faces")
                    art.addAttribute(att)

                    try:
                        # index the artifact for keyword search
                        blackboard.indexArtifact(art)
                    except Blackboard.BlackboardException as e:
                        self.log(
                            Level.SEVERE,
                            "Error indexing artifact " + art.getDisplayName())

        if self.deleteAfter:
            self.deleteFiles(tempDir + "\\" + dataSource.getName())

        message = IngestMessage.createMessage(
            IngestMessage.MessageType.DATA,
            "Face Detector Data Source Ingest Module",
            "Found %d images with faces" % count)
        IngestServices.getInstance().postMessage(message)

        return IngestModule.ProcessResult.OK
예제 #13
# Make sure we have site-packages in our classpath
from java.io import File
from os import path
import sys
from org.python.core.util import FileUtil as _dummy

# To avoid hardcoding myapp.jar...
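# (A class's ProtectionDomain -> CodeSource -> Location is the URL of the jar
# or directory it was loaded from, so its file name is the jar's name.)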
_jar_file = File(_dummy().__class__.getProtectionDomain() \
  .getCodeSource() \
  .getLocation() \
  .getPath()) \
  .getName()

sys.path.append(path.join(path.dirname(path.abspath(__file__)) \
  .replace('__pyclasspath__', ''), _jar_file, 'Lib', 'site-packages'))
예제 #14
    def execute(self):
        cache = __jitar__.cacheProvider.getCache('main')
        cache.clear()
        # Defensive: remove any keys that survived the clear() above
        cache_key_list = cache.getAllKeys()
        if cache_key_list is not None:
            for key in cache_key_list:
                cache.remove(key)

        cacheService = __spring__.getBean("cacheService")

        cacheService.remove("new_user_list")
        cacheService.remove("rcmd_wr_list")
        cacheService.remove("hot_wr_list")
        cacheService.remove("rcmd_group_list")
        cacheService.remove("new_group_list")
        cacheService.remove("best_group_list")
        cacheService.remove("famous_teachers")
        cacheService.remove("expert_list")
        cacheService.remove("teacher_star")
        cacheService.remove("instructor_list")
        cacheService.remove("new_video_list")
        cacheService.remove("hot_video_list")
        cacheService.remove("school_list")
        cacheService.remove("course_list")
        cacheService.remove("special_subject_list")
        cacheService.remove("hot_photo_list")
        cacheService.remove("new_photo_list")
        cacheService.remove("site_stat")
        cacheService.remove("teacher_show")
        cacheService.remove("jitar_actions")
        cacheService.remove("famous_article_list")
        cacheService.remove("hot_article_list")
        cacheService.remove("newest_article_list")
        cacheService.remove("hot_resource_list")
        cacheService.remove("new_resource_list")
        cacheService.remove("site_placard_list")
        cacheService.remove("jitar_news")
        cacheService.remove("pic_news")
        cacheService.remove("show_custorm_part")

        cacheService.remove("all_subject")
        cacheService.remove("all_grade")
        cacheService.remove("all_meta_subject")

        __jitar__.cacheProvider.getCache('siteTheme').remove("siteTheme")

        for cache_name in ('user', 'group', 'page', 'category', 'subject',
                           'unit', 'defaultCache'):
            __jitar__.cacheProvider.getCache(cache_name).clear()

        __jitar__.cacheProvider.getCache('rootUnit').remove("rootUnit")

        subjectService = __jitar__.subjectService
        subjectService.clearCacheData()

        servlet_ctxt = request.getServletContext()
        servlet_ctxt.removeAttribute("metaGrade")
        servlet_ctxt.removeAttribute("meta_Grade")
        servlet_ctxt.removeAttribute("SubjectNav")
        servlet_ctxt.removeAttribute(ConfigService.CONFIG_KEY_NAME)

        siteNavigationService = __spring__.getBean("siteNavigationService")
        siteNavigationService.renderSiteNavition()

        self.params = ParamUtil(request)

        cachetype = self.params.safeGetStringParam("cachetype")
        if cachetype == "index":
            strFile = request.getServletContext().getRealPath("/")
            file = File(strFile + "index.html")
            if file.exists() and file.isFile():
                file.delete()
            file = None
        elif cachetype in ("user", "unit"):
            # "user" and "unit" differ only in the folder that is purged
            strFile = request.getServletContext().getRealPath("/")
            strFile = strFile + "html" + File.separator + cachetype + File.separator
            file = File(strFile)
            if file.exists():
                self.deleteDirectory(strFile)
            file = None
        response.contentType = "text/html; charset=UTF-8"
        return "/WEB-INF/ftl/admin/clear_cache.ftl"
예제 #15
        maxAmplitude = 1.0 / 3.0
    if network.getName() == "adapting":
        maxAmplitude = 1.0
    if network.getName() == "depression":
        maxAmplitude = 1.0

    exporter = MatlabExporter()
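    # Everything added to the exporter below is written out as a single
    # MATLAB .mat file after the frequency sweep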
    exporter.add("frequencies", [frequencies])
    for i in range(len(frequencies)):
        frequency = frequencies[i]
        print "Network: ", network.getName(), " Frequency: ", frequency, "Hz"

        angularFrequency = 2 * math.pi * frequency
        amplification = angularFrequency
        if network.getName() == "depression":
            amplification = min(amplification, 10.0)
        # normalize so that input, output, and state magnitudes are <= 1
        amplitude = min(maxAmplitude, 1.0 / amplification)

        network.setInputFunction(SineFunction(angularFrequency, amplitude))

        network.run(0, 1.0 + 5.0 / frequency)
        exporter.add("in%i" % i, network.getInputData())
        exporter.add("out%i" % i, network.getOutputData())

    #export simulation results to a Matlab file
    exporter.write(File(network.getName() + "_frequency_parisien.mat"))

    network.setStepSize(.001)
    network.disableParisien()
예제 #16
import sys
import os

try:
    from java.io import File
except ImportError:
    print "Note: this file should be run using nC.bat -python XXX.py' or 'nC.sh -python XXX.py'"
    print "See http://www.neuroconstruct.org/docs/python.html for more details"
    quit()

sys.path.append(os.environ["NC_HOME"] + "/pythonNeuroML/nCUtils")

import ncutils as nc

projFile = File(os.getcwd(), "../Ex10_NeuroML2.ncx")

print "Project file for this test: " + projFile.getAbsolutePath()

##############  Main settings  ##################

simConfigs = []

simConfigs.append("AbstractCells")

simDt = 0.001

#simulators =            ["NEURON"]
simulators = ["NEURON", "LEMS"]

numConcurrentSims = 4
예제 #17
        for k, v in command_dict.iteritems():
            if k == 'site':
                t = Term('url', '*' + v.strip() + '*')
                query = WildcardQuery(t)
            else:
                query = QueryParser(Version.LUCENE_CURRENT, k, analyzer).parse(v)
            querys.add(query, BooleanClause.Occur.MUST)

        scoreDocs = searcher.search(querys, 50).scoreDocs
        print "%s total matching documents." % len(scoreDocs)

        for scoreDoc in scoreDocs:
            doc = searcher.doc(scoreDoc.doc)
            print '------------------------------------------'
            #print 'path:', doc.get("path"), 'name:', doc.get("name"),'site:', doc.get('site')
            print 'title:',doc.get('title'),
            print 'url:',doc.get('url')

if __name__ == '__main__':
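    # Assumes the usual PyLucene imports above this fragment: initVM, VERSION,
    # SimpleFSDirectory, IndexSearcher, DirectoryReader, WhitespaceAnalyzer,
    # Version and File, plus Term, WildcardQuery, QueryParser and
    # BooleanClause used in run()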
    STORE_DIR = "index_lucene"
    initVM()
    print 'lucene', VERSION
    if VERSION[0] < '4':
        print 'Please read the pdf. This program only supports Lucene 4.x'
        sys.exit(1)
    directory = SimpleFSDirectory(File(STORE_DIR))
    searcher = IndexSearcher(DirectoryReader.open(directory))
    analyzer = WhitespaceAnalyzer(Version.LUCENE_CURRENT)
    run(searcher, analyzer)
    del searcher
예제 #18
from java.io import File, FileInputStream
from org.openstreetmap.josm.data.osm import DataSet
from org.openstreetmap.josm.gui.layer import OsmDataLayer
from org.openstreetmap.josm.io import OsmReader, OsmExporter
from org.openstreetmap.josm.gui.progress import NullProgressMonitor
from org.openstreetmap.josm.plugins.utilsplugin2.selection import NodeWayUtils

fis = FileInputStream("cuadricula.osm")
cuadricula = OsmReader.parseDataSet(fis, NullProgressMonitor.INSTANCE)
fis = FileInputStream("infraestructuras.osm")
infraestructuras = OsmReader.parseDataSet(fis, NullProgressMonitor.INSTANCE)

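# For each grid sheet ("hoja"), select every primitive of the infrastructure
# data set that falls inside it, move those primitives into a new DataSet,
# and export that DataSet as a per-sheet task file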
for hoja in cuadricula.getWays():
    dentro = NodeWayUtils.selectAllInside([hoja], infraestructuras)
    print "hoja: %s (%d)" % (hoja.get('hoja'), len(dentro))
    task = DataSet()
    for v in dentro:
        infraestructuras.removePrimitive(v)
        task.addPrimitive(v)
    if len(dentro) > 0:
        name = 'task%s.osm' % hoja.get('hoja')
        layer = OsmDataLayer(task, name, File(name))
        OsmExporter().exportData(layer.getAssociatedFile(), layer)

예제 #19
def createEmpty(dir):
    if os.path.exists(dir):
        FileUtil.deleteContents(File(dir))
    else:
        os.mkdir(dir)
예제 #20
stats = False
for o, a in options:
    if o == "--format":
        format = a
    elif o == "--index":
        indexDir = a
    elif o == "--stats":
        stats = True


class CustomTemplate(Template):
    delimiter = '#'

template = CustomTemplate(format)

fsDir = SimpleFSDirectory(File(indexDir))
searcher = IndexSearcher(DirectoryReader.open(fsDir))

analyzer = StandardAnalyzer(Version.LUCENE_CURRENT)
parser = QueryParser(Version.LUCENE_CURRENT, "keywords", analyzer)
parser.setDefaultOperator(QueryParser.Operator.AND)
query = parser.parse(' '.join(args))
start = datetime.now()
scoreDocs = searcher.search(query, 50).scoreDocs
duration = datetime.now() - start
if stats:
    print >>sys.stderr, "Found %d document(s) (in %s) that matched query '%s':" %(len(scoreDocs), duration, query)

for scoreDoc in scoreDocs:
    doc = searcher.doc(scoreDoc.doc)
    table = dict((field.name(), field.stringValue())
                 for field in doc.getFields())
예제 #21
    def process(self):
        """
        run DMR, creating an output file divided by time
        """

        if self.named_args is not None:
            self.tfidf = self.named_args['tfidf']
            self.min_df = int(self.named_args['min_df'])
            self.stemming = self.named_args['stemming']
            self.topics = int(self.named_args['topics'])
            self.lang = self.named_args['lang']
        else:
            self.tfidf = True
            self.min_df = 5
            self.topics = 50
            self.stemming = True
            self.lang = 'en'

        self._setup_mallet_instances(tfidf=self.tfidf, stemming=self.stemming)

        os.chdir(self.mallet_out_dir)

        # from cc.mallet.topics.DMRTopicModel import main as DMRTopicModel
        from cc.mallet.topics import DMRTopicModel
        process_args = [self.instance_file, str(self.topics)]
        logging.info('begin DMR')

        start_time = time.time()
        self.parameter_file = os.path.join(self.mallet_out_dir,
                                           'dmr.parameters')
        self.state_file = os.path.join(self.mallet_out_dir, 'dmr.state.gz')
        if not self.dry_run:
            # DMRTopicModel(process_args)
            from java.io import File, PrintStream, FileOutputStream
            from java.lang import System

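            # Redirect the JVM's stdout into the progress file so MALLET's
            # console output is captured; topic alphas are parsed from it below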
            self.progress_file.close()
            progress_file = File(self.progress_filename)
            System.setOut(PrintStream(FileOutputStream(progress_file)))

            from cc.mallet.types import InstanceList
            training = InstanceList.load(File(self.instance_file))
            numTopics = int(self.topics)
            lda = DMRTopicModel(numTopics)
            lda.setOptimizeInterval(100)
            lda.setTopicDisplay(100, 10)
            lda.addInstances(training)
            lda.estimate()
            lda.writeParameters(File(self.parameter_file))
            lda.printState(File(self.state_file))

        logging.info('DMR complete in ' + str(time.time() - start_time) +
                     ' seconds')

        self.topic_features = {}
        with codecs.open(self.parameter_file, 'r', encoding='utf-8') as f:
            topic = 0
            for line in f:
                new_topic = re.match('FEATURES FOR CLASS topic([0-9]+)', line)
                if new_topic is not None:
                    topic = int(new_topic.group(1))
                else:
                    if topic not in self.topic_features:
                        self.topic_features[topic] = {}
                    this_line = line.split(' ')
                    feature = this_line[1]
                    self.topic_features[topic][feature] = \
                        float(this_line[2])

        self.progress_file = file(self.progress_filename, 'r')
        self.progress_file.seek(0, os.SEEK_SET)
        self.alphas = {}
        for line in self.progress_file:
            if re.match('[0-9]+\t[0-9.]+', line) is not None:
                this_line = line.split('\t')
                topic = int(this_line[0])
                alpha = float(this_line[1])
                tokens = int(this_line[2])

                self.alphas[topic] = alpha

        self.alpha_sum = sum(self.alphas.values())

        self.topic_words = {}
        self.doc_topics = {}

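        # Each data line of the gzipped MALLET state file has the columns
        # "doc source pos typeindex type topic"; tally per-document topic
        # counts and per-topic word counts from it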
        with gzip.open(self.state_file, 'rb') as state_file:
            state_file.next()
            for line in state_file:
                this_line = line.split(' ')
                topic = int(this_line[5])
                word = this_line[4]
                doc = int(this_line[0])
                position = int(this_line[2])

                if doc not in self.doc_topics:
                    self.doc_topics[doc] = {}
                if topic not in self.doc_topics[doc]:
                    self.doc_topics[doc][topic] = 0
                self.doc_topics[doc][topic] += 1

                if topic not in self.topic_words:
                    self.topic_words[topic] = {}
                if word not in self.topic_words[topic]:
                    self.topic_words[topic][word] = 0
                self.topic_words[topic][word] += 1

        # total_tokens = float(sum([sum(y.values()) for x, y in self.topic_words.iteritems()]))

        for topic in self.topic_words.keys():
            total = float(sum(self.topic_words[topic].values()))
            for k in self.topic_words[topic].keys():
                self.topic_words[topic][k] /= total

        top_N = 20
        top_topic_words = dict(
            (x,
             dict((word, y[word])
                  for word in self.argsort(y, reverse=True)[:top_N]))
            for (x, y) in self.topic_words.iteritems())
        wordProbs = [[{
            'text': word,
            'prob': prob
        } for (word, prob) in y.iteritems()]
                     for (x, y) in top_topic_words.iteritems()]

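        # For each topic, count the documents whose smoothed topic proportion
        # reaches each threshold below; the allocation ratio computed later is
        # docs-at-0.5 divided by docs-at-0.02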
        DEFAULT_DOC_PROPORTIONS = [
            0.01,
            0.02,
            0.05,
            0.1,
            0.2,
            0.3,
            0.5,
        ]
        numDocumentsAtProportions = dict(
            (topic, dict((k, 0.0) for k in DEFAULT_DOC_PROPORTIONS))
            for topic in self.topic_words.keys())
        for (doc, topics) in self.doc_topics.iteritems():
            doc_length = sum(topics.values())
            for (topic, count) in topics.iteritems():
                proportion = (self.alphas[topic] + count) \
                    / (self.alpha_sum + doc_length)
                for min_proportion in DEFAULT_DOC_PROPORTIONS:
                    if proportion < min_proportion:
                        break
                    numDocumentsAtProportions[topic][min_proportion] += 1

        allocationRatios = dict(
            (topic, proportions[0.5] / proportions[0.02])
            for (topic, proportions) in numDocumentsAtProportions.iteritems()
            if proportions[0.02] > 0.0)

        labels = dict((topic, {
            'label': self.argsort(words, reverse=True)[:3],
            'fulltopic': wordProbs[topic],
            'allocation_ratio': allocationRatios.get(topic, 0)
        }) for (topic, words) in top_topic_words.iteritems())

        doc_metadata = {}

        for doc in self.doc_topics.keys():
            total = float(sum(self.doc_topics[doc].values()))
            for k in self.doc_topics[doc].keys():
                self.doc_topics[doc][k] /= total

        for (id, topics) in self.doc_topics.iteritems():
            try:
                filename = self.docs[int(id)]

                itemid = self.metadata[filename]['itemID']

                doc_metadata[itemid] = \
                    {'label': self.metadata[filename]['label'],
                     'title': self.metadata[filename]['title']}

                freqs = topics
                main_topic = None
                topic_max = 0.0
                for i in freqs.keys():
                    if freqs[i] > topic_max:
                        main_topic = i
                        topic_max = freqs[i]
                doc_metadata[itemid]['main_topic'] = main_topic
                self.metadata[filename]["topics"] = freqs
            except KeyboardInterrupt:
                sys.exit(1)
            except:
                logging.error(traceback.format_exc())

        self.template_filename = os.path.join(self.cwd, 'templates',
                                              self.template_name + '.html')

        if getattr(self, "index", None) is not None:
            for term in self.index:
                if isinstance(self.index[term], set):
                    self.index[term] = list(self.index[term])
            self.index = dict(self.index)

        params = {
            "CATEGORICAL": self.categorical,
            "TOPIC_LABELS": labels,
            "TOPIC_COHERENCE": {},
            "TAGS": getattr(self, "tags", {}),
            "INDEX": getattr(self, "index", {})
        }

        self.write_html(params)
예제 #22
import lucene
from java.io import File
from org.apache.lucene.analysis.standard import StandardAnalyzer
from org.apache.lucene.document import Document, Field
from org.apache.lucene.search import IndexSearcher
from org.apache.lucene.index import IndexReader
from org.apache.lucene.queryparser.classic import QueryParser
from org.apache.lucene.store import SimpleFSDirectory
from org.apache.lucene.util import Version
 
if __name__ == "__main__":
    lucene.initVM()
    print "lucene version is:", Version
    # Get the analyzer
    analyzer = StandardAnalyzer(Version.LUCENE_CURRENT)

    # Get index storage
    indexDir = SimpleFSDirectory(File("index/"))
    reader = IndexReader.open(indexDir)
    searcher = IndexSearcher(reader)
 
    query = QueryParser(Version.LUCENE_CURRENT, "country", analyzer).parse("India")
    MAX = 1000
    hits = searcher.search(query, MAX)
 
    print "Found %d document(s) that matched query '%s':" % (hits.totalHits, query)
    for hit in hits.scoreDocs:
        print hit.score, hit.doc, hit.toString()
        doc = searcher.doc(hit.doc)
        print doc.get("country").encode("utf-8")
예제 #23
import org.openlca.core.model as model
from org.openlca.core.database import UnitGroupDao, FlowPropertyDao, CategoryDao,\
    FlowDao, Daos, EntityCache
# DerbyDatabase is used below; it lives in olca-core's derby sub-package
from org.openlca.core.database.derby import DerbyDatabase
from java.util import UUID, Date
from org.openlca.core.model import FlowPropertyFactor

import util
from org.openlca.core.matrix import ProductSystemBuilder
from org.openlca.core.math import CalculationSetup, SystemCalculator
from org.openlca.eigen import NativeLibrary
from org.openlca.eigen.solvers import DenseSolver
from org.openlca.core.matrix.cache import MatrixCache
from org.openlca.core.results import FullResultProvider

folder = 'C:/Users/Besitzer/openLCA-data-1.4/databases/example_db1'
db = DerbyDatabase(File(folder))

mass = util.find(db, model.FlowProperty, 'Mass')
if mass is None:
    
    kg = model.Unit()
    kg.name = 'kg'
    kg.conversionFactor = 1.0
    
    mass_units = model.UnitGroup()
    mass_units.name = 'Units of mass'
    mass_units.units.add(kg)
    mass_units.referenceUnit = kg
    mass_units.refId = UUID.randomUUID().toString()
    dao = UnitGroupDao(db)
    dao.insert(mass_units)
예제 #24
    def get_file_contents(self, path):
        return FileUtils.readFileToString(File(path))
예제 #25
def saveGitUrl(url):
    RELOAD_LOCK.lock()
    try:
        FileUtils.writeStringToFile(File(getGitUrlFile()), url)
    finally:
        RELOAD_LOCK.unlock()
예제 #26
    if model_file_name is None:
        add_to_archive = True
        try:
            domain_name = model_context.get_domain_name()
            model_file = File.createTempFile(domain_name,
                                             '.yaml').getCanonicalFile()
            model_file_name = model_context.get_domain_name() + '.yaml'
        except (IllegalArgumentException, IOException), ie:
            ex = exception_helper.create_discover_exception(
                'WLSDPLY-06008', ie.getLocalizedMessage(), error=ie)
            __logger.throwing(ex,
                              class_name=_class_name,
                              method_name=_method_name)
            raise ex
    else:
        model_file = FileUtils.getCanonicalFile(File(model_file_name))

    try:
        model_translator.PythonToFile(model.get_model()).write_to_file(
            model_file.getAbsolutePath())
    except TranslateException, ex:
        # Jython 2.2.1 does not support try/except/finally in one statement,
        # so emulate the finally block here...
        if add_to_archive and not model_file.delete():
            model_file.deleteOnExit()
        raise ex

    if add_to_archive:
        try:
            archive_file = model_context.get_archive_file()
            archive_file.addModel(model_file, model_file_name)
            if not model_file.delete():
                model_file.deleteOnExit()
예제 #27
    def process(self, dataSource, progressBar):

        self.log(Level.INFO,
                 "Starting to process Hiberfil.sys and Crash Dumps")

        # we don't know how much work there is yet
        progressBar.switchToIndeterminate()

        # Get the temp directory and create the sub directory
        if self.hiber_flag:
            Mod_Dir = Case.getCurrentCase().getModulesOutputDirAbsPath()
            try:
                ModOut_Dir = os.path.join(Mod_Dir,
                                          "Volatility\\Memory-Image-hiberfil")
                self.log(Level.INFO,
                         "Module Output Directory ===>  " + ModOut_Dir)
                #dir_util.mkpath(ModOut_Dir)
                os.mkdir(Mod_Dir + "\\Volatility")
                os.mkdir(ModOut_Dir)
            except:
                self.log(
                    Level.INFO,
                    "***** Error Module Output Directory already exists " +
                    ModOut_Dir)

            # Get the current case and its file manager, then locate
            # hiberfil.sys at the root of the data source
            skCase = Case.getCurrentCase().getSleuthkitCase()
            fileManager = Case.getCurrentCase().getServices().getFileManager()
            files = fileManager.findFiles(dataSource, "hiberfil.sys", "/")
            numFiles = len(files)
            self.log(Level.INFO,
                     "Number of files to process ==> " + str(numFiles))

            for file in files:
                self.log(Level.INFO, "File to process is ==> " + str(file))
                self.log(Level.INFO,
                         "File name to process is ==> " + file.getName())
                tmp_Dir = Case.getCurrentCase().getTempDirectory()
                Hiber_File = os.path.join(tmp_Dir, file.getName())
                ContentUtils.writeToFile(file, File(Hiber_File))
                self.log(Level.INFO,
                         "File name to process is ==> " + Hiber_File)
                # Create the directory to dump the hiberfil
                dump_file = os.path.join(ModOut_Dir,
                                         "Memory-Image-from-hiberfil.img")
                if self.Python_Program:
                    self.log(Level.INFO, "Running program ==> " + self.Volatility_Executable + " imagecopy -f " + Hiber_File + " " + \
                             " -O " + dump_file)
                    pipe = Popen([
                        "Python.exe", self.Volatility_Executable, "imagecopy",
                        "-f", Hiber_File, "-O" + dump_file
                    ],
                                 stdout=PIPE,
                                 stderr=PIPE)
                else:
                    self.log(Level.INFO, "Running program ==> " + self.Volatility_Executable + " imagecopy -f " + Hiber_File + " " + \
                             " -O " + dump_file)
                    pipe = Popen([
                        self.Volatility_Executable, "imagecopy", "-f",
                        Hiber_File, "-O" + dump_file
                    ],
                                 stdout=PIPE,
                                 stderr=PIPE)
                out_text = pipe.communicate()[0]
                self.log(Level.INFO, "Output from run is ==> " + out_text)

                # Add hiberfil memory image to a new local data source
                services = IngestServices.getInstance()

                progress_updater = ProgressUpdater()
                newDataSources = []

                dump_file = os.path.join(ModOut_Dir,
                                         "Memory-Image-from-hiberfil.img")
                dir_list = []
                dir_list.append(dump_file)

                # skCase = Case.getCurrentCase().getSleuthkitCase();
                fileManager_2 = Case.getCurrentCase().getServices(
                ).getFileManager()
                skcase_data = Case.getCurrentCase()

                # Get a Unique device id using uuid
                device_id = UUID.randomUUID()
                self.log(Level.INFO, "device id: ==> " + str(device_id))

                skcase_data.notifyAddingDataSource(device_id)

                # Add data source with files
                newDataSource = fileManager_2.addLocalFilesDataSource(
                    str(device_id), "Hiberfile Memory Image", "", dir_list,
                    progress_updater)

                newDataSources.append(newDataSource.getRootDirectory())

                # Get the files that were added
                files_added = progress_updater.getFiles()
                #self.log(Level.INFO, "Fire Module1: ==> " + str(files_added))

                for file_added in files_added:
                    skcase_data.notifyDataSourceAdded(file_added, device_id)
                    self.log(Level.INFO,
                             "Fire Module1: ==> " + str(file_added))

        # After processing, post a summary message to the ingest inbox.
        message = IngestMessage.createMessage(
            IngestMessage.MessageType.DATA, "HiberFil_Crash",
            " Hiberfil/Crash Dumps have been extracted fro Image. ")
        IngestServices.getInstance().postMessage(message)

        return IngestModule.ProcessResult.OK
예제 #28
def main(*args):

    i18nManager = ToolsLocator.getI18nManager()
    i18nManager.addResourceFamily("text", File(getResource(__file__, "i18n")))

    selfRegister()
예제 #29
		assert(textToSynthesize != null);
		assert(subscriptionKey != null);

		String outputFormat = AudioOutputFormat.Riff24Khz16BitMonoPcm;
//		String deviceLanguage = "en-US";
		String deviceLanguage = language;
//		String subscriptionKey = "9853527f1472f8c49be158d6d4fb1903"; // redeclares the asserted parameter, so kept commented out
		String genderName = Gender.Male;
		String voiceName = "Microsoft Server Speech Text to Speech Voice (en-US, Guy24KRUS)";

		try{
			byte[] audioBuffer = TTSService.Synthesize(textToSynthesize, outputFormat, deviceLanguage, genderName, voiceName, subscriptionKey);

			// write the pcm data to the file
			String outputWave = ".\\output.pcm";
			File outputAudio = new File(outputWave);
			FileOutputStream fstream = new FileOutputStream(outputAudio);
			fstream.write(audioBuffer);
			fstream.flush();
			fstream.close();


			// specify the audio format
			AudioFormat audioFormat = new AudioFormat(
				AudioFormat.Encoding.PCM_SIGNED,
				24000,
				16,
				1,
				1 * 2,
				24000,
				false);
예제 #30
def track_cells(folder_w, filename, imp, correction):
    #imp = IJ.openImage(os.path.join(folder,filename))
    #imp.show()

    #get image dimensions, set ROI to remove part of the fluorescent ring
    x_size = ImagePlus.getDimensions(imp)[0]
    y_size = ImagePlus.getDimensions(imp)[1]
    x_start = 0
    y_start = 0
    #calculate alternative ROI
    if crop_ring:
        x_start = 170 / 2
        y_start = 170 / 2
        x_size = x_size - 170
        y_size = y_size - 170
    print(
        str(x_start) + ", " + str(y_start) + ", " + str(x_size) + ", " +
        str(y_size))
    imp.setRoi(OvalRoi(x_start, y_start, x_size, y_size))
    #imp_dup = imp.duplicate()
    #imp_dup.show()
    #red_corrected_img.show()

    IJ.run(imp, "Make Inverse", "")
    IJ.setForegroundColor(0, 0, 0)
    IJ.run(imp, "Fill", "stack")
    imp.killRoi()

    #imp.show()
    #sys.exit()

    #img_filename = filename+"_corrected_red_stack.tif"
    #folder_filename= os.path.join(well_folder,img_filename)
    #IJ.save(imp, folder_filename)

    #----------------------------
    # Create the model object now
    #----------------------------

    # Some of the parameters we configure below need to have
    # a reference to the model at creation. So we create an
    # empty model now.

    model = Model()

    # Send all messages to ImageJ log window.
    model.setLogger(Logger.IJ_LOGGER)

    #------------------------
    # Prepare settings object
    #------------------------

    settings = Settings()
    settings.setFrom(imp)

    # Configure detector - We use the Strings for the keys
    settings.detectorFactory = LogDetectorFactory()
    settings.detectorSettings = {
        'DO_SUBPIXEL_LOCALIZATION': SUBPIXEL_LOCALIZATION,
        'RADIUS': RADIUS,
        'TARGET_CHANNEL': TARGET_CHANNEL,
        'THRESHOLD': THRESHOLD,
        'DO_MEDIAN_FILTERING': MEDIAN_FILTERING,
    }

    # Configure spot filters - Classical filter on quality
    settings.initialSpotFilterValue = SPOT_FILTER
    settings.addSpotAnalyzerFactory(SpotIntensityAnalyzerFactory())
    settings.addSpotAnalyzerFactory(SpotContrastAndSNRAnalyzerFactory())
    settings.addSpotAnalyzerFactory(SpotMorphologyAnalyzerFactory())
    settings.addSpotAnalyzerFactory(SpotRadiusEstimatorFactory())

    filter1 = FeatureFilter('QUALITY', QUALITY, True)
    filter2 = FeatureFilter('CONTRAST', CONTRAST, True)
    filter2a = FeatureFilter('ESTIMATED_DIAMETER', MAX_ESTIMATED_DIAMETER,
                             False)
    filter2b = FeatureFilter('MEDIAN_INTENSITY', MAX_MEDIAN_INTENSITY, False)

    settings.addSpotFilter(filter1)
    settings.addSpotFilter(filter2)
    settings.addSpotFilter(filter2a)
    settings.addSpotFilter(filter2b)
    print(settings.spotFilters)

    # Configure tracker - We want to allow merges and fusions
    settings.trackerFactory = SparseLAPTrackerFactory()
    settings.trackerSettings = LAPUtils.getDefaultLAPSettingsMap(
    )  # almost good enough

    ##adapted from https://forum.image.sc/t/trackmate-scripting-automatically-exporting-spots-in-tracks-links-in-tracks-tracks-statistics-and-branching-analysis-to-csv/6256
    #linking settings
    settings.trackerSettings['LINKING_MAX_DISTANCE'] = LINKING_MAX_DISTANCE
    if LINKING_FEATURE_PENALTIES == True:
        settings.trackerSettings['LINKING_FEATURE_PENALTIES'] = {
            LINKING_FEATURE_PENALTIES_TYPE: LINKING_FEATURE_PENALTIES_VALUE
        }
    else:
        settings.trackerSettings['LINKING_FEATURE_PENALTIES'] = {}

    #gap closing settings
    settings.trackerSettings['ALLOW_GAP_CLOSING'] = ALLOW_GAP_CLOSING
    if ALLOW_GAP_CLOSING == True:
        settings.trackerSettings[
            'GAP_CLOSING_MAX_DISTANCE'] = GAP_CLOSING_MAX_DISTANCE
        settings.trackerSettings['MAX_FRAME_GAP'] = MAX_FRAME_GAP
        if GAP_CLOSING_FEATURE_PENALTIES == True:
            settings.trackerSettings['GAP_CLOSING_FEATURE_PENALTIES'] = {
                GAP_CLOSING_FEATURE_PENALTIES_TYPE:
                GAP_CLOSING_FEATURE_PENALTIES_VALUE
            }
        else:
            settings.trackerSettings['GAP_CLOSING_FEATURE_PENALTIES'] = {}

    #splitting settings
    settings.trackerSettings['ALLOW_TRACK_SPLITTING'] = ALLOW_TRACK_SPLITTING
    if ALLOW_TRACK_SPLITTING == True:
        settings.trackerSettings[
            'SPLITTING_MAX_DISTANCE'] = SPLITTING_MAX_DISTANCE
        if SPLITTING_FEATURE_PENALTIES == True:
            settings.trackerSettings['SPLITTING_FEATURE_PENALTIES'] = {
                SPLITTING_FEATURE_PENALTIES_TYPE:
                SPLITTING_FEATURE_PENALTIES_VALUE
            }
        else:
            settings.trackerSettings['SPLITTING_FEATURE_PENALTIES'] = {}

    #merging settings
    settings.trackerSettings['ALLOW_TRACK_MERGING'] = ALLOW_TRACK_MERGING
    if ALLOW_TRACK_MERGING == True:
        settings.trackerSettings['MERGING_MAX_DISTANCE'] = MERGING_MAX_DISTANCE
        if MERGING_FEATURE_PENALTIES == True:
            settings.trackerSettings['MERGING_FEATURE_PENALTIES'] = {
                MERGING_FEATURE_PENALTIES_TYPE: MERGING_FEATURE_PENALTIES_VALUE
            }
        else:
            settings.trackerSettings['MERGING_FEATURE_PENALTIES'] = {}

    print(settings.trackerSettings)

    # Configure track analyzers - Later on we want to filter out tracks
    # based on their displacement, so we need to state that we want
    # track displacement to be calculated. By default, outside the GUI,
    # no features are calculated.

    # The displacement feature is provided by the TrackDurationAnalyzer.

    settings.addTrackAnalyzer(TrackDurationAnalyzer())
    settings.addTrackAnalyzer(TrackSpotQualityFeatureAnalyzer())

    # Configure track filters - We want to get rid of the two immobile spots at
    # the bottom right of the image. Track displacement must be above 10 pixels.

    filter3 = FeatureFilter('TRACK_DISPLACEMENT', TRACK_DISPLACEMENT, True)
    filter4 = FeatureFilter('TRACK_START', TRACK_START, False)
    #filter5 = FeatureFilter('TRACK_STOP', float(imp.getStack().getSize())-1.1, True)

    settings.addTrackFilter(filter3)
    settings.addTrackFilter(filter4)
    #settings.addTrackFilter(filter5)

    #-------------------
    # Instantiate plugin
    #-------------------

    trackmate = TrackMate(model, settings)

    #--------
    # Process
    #--------

    ok = trackmate.checkInput()
    if not ok:
        sys.exit(str(trackmate.getErrorMessage()))

    ok = trackmate.process()

    #if not ok:
    #    sys.exit(str(trackmate.getErrorMessage()))

    #----------------
    # Display results
    #----------------

    #Set output folder and filename and create output folder
    well_folder = os.path.join(folder_w, filename)
    output_folder = os.path.join(well_folder, "Tracking")
    create_folder(output_folder)
    xml_file_name = filename + "_" + correction + "_trackmate_analysis.xml"
    folder_filename_xml = os.path.join(output_folder, xml_file_name)

    #ExportTracksToXML.export(model, settings, File(folder_filename_xml))
    outfile = TmXmlWriter(File(folder_filename_xml))
    outfile.appendSettings(settings)
    outfile.appendModel(model)
    outfile.writeToFile()

    # Echo results with the logger we set at start:
    #model.getLogger().log(str(model))

    #create an array of timepoint length, filled with 0
    cell_counts = zerolistmaker(imp.getStack().getSize())
    if ok:
        for id in model.getTrackModel().trackIDs(True):
            # Fetch the track feature from the feature model.
            track = model.getTrackModel().trackSpots(id)
            for spot in track:
                # Fetch spot features directly from spot.
                t = spot.getFeature('FRAME')
                print(t)
                cell_counts[int(t)] = cell_counts[int(t)] + 1
    else:
        print("No spots detected!")

    if HEADLESS == False:
        selectionModel = SelectionModel(model)
        displayer = HyperStackDisplayer(model, selectionModel, imp)
        displayer.render()
        displayer.refresh()
    del imp
    return (cell_counts + [len(model.getTrackModel().trackIDs(True))])