Example No. 1
    def test_jarray(self):
        import jarray
        from java.lang import String

        self.assertEqual(sum(jarray.array(range(5), "i")), 10)
        self.assertEqual(",".join(jarray.array([String("a"), String("b"), String("c")], String)), u"a,b,c")
        self.assertEqual(sum(jarray.zeros(5, "i")), 0)
        self.assertEqual([x for x in jarray.zeros(5, String)], [None, None, None, None, None])
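For context, a minimal standalone sketch (assuming Jython 2.x with java.lang on the classpath) of the two jarray constructors the test exercises: zeros(n, typecode) allocates a Java primitive array, and array(seq, type) copies a Python sequence into a Java object array.

import jarray
from java.lang import String

ints = jarray.zeros(5, 'i')                 # same as Java's new int[5]
names = jarray.array(["a", "b"], String)    # new String[] {"a", "b"}
print sum(ints), ",".join(names)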
Example No. 2
 def test_jarray(self): # until it is fully formally removed
     # While jarray is still being phased out, just flex the initializers.
     # The rest of the test for array will catch all the big problems.
     import jarray
     from java.lang import String
     jarray.array(range(5), 'i')
     jarray.array([String("a"), String("b"), String("c")], String)
     jarray.zeros(5, 'i')
     jarray.zeros(5, String)
Example No. 3
def listGroup(list, group):
	from jarray import zeros
	from java.lang import Thread, ThreadGroup
	threads = zeros(group.activeCount(), Thread)
	group.enumerate(threads, 0)
	groups = zeros(group.activeGroupCount(), ThreadGroup)
	group.enumerate(groups, 0)
	for t in threads:
		if t is not None: list.append(t.getName())
	for g in groups:
		if g is not None: listGroup(list, g)
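A hedged usage sketch for the helper above: walk up to the root ThreadGroup and collect every live thread name (assumes listGroup is in scope).

from java.lang import Thread

root = Thread.currentThread().getThreadGroup()
while root.getParent() is not None:
	root = root.getParent()
names = []
listGroup(names, root)
print names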
Example No. 4
def gCalc(ndo,beta,g0=None):
    """ Calculates antecedent outflow from a stream of ndo
        Arguments:
          ndo: a regular time series. Must be 15MIN, 1HOUR. Thus, NDO has been interpolated first.
          g0:  initial condition. If g0 is not given it is equal to ndo at the first time step.
          beta: g-model parameter.
        Output:
          g:  a regular time series, same sampling rate as input
              with the same start time as ndo
    """

    from jarray import zeros
    from math import sqrt
    from vista.set import Constants, RegularTimeSeries  # assumed imports, matching the vista-based examples below

    ti = ndo.getTimeInterval()
    if ti.toString() not in ("15MIN", "1HOUR"):
        raise ValueError("NDO time step must be 15MIN or 1HOUR.")
    dt=1
    if ti.toString() == "15MIN":
        dt=0.25
    nstep=ndo.size()
    g=zeros(nstep,'d')
    g=map(lambda x: -901.0, g)
    beta=beta*24*365
    div2dt=2*beta/dt
    dsi=ndo.getIterator()
    q0 = dsi.getElement().getY()
    # Set initial condition
    if g0==None:
        g[0]= q0        # ???
    else:
        g[0]=g0

    # Loop through and integrate gmodel using trapezoidal rule.
    atend=0
    i=1
    dsi.advance()

    while atend == 0:
        el=dsi.getElement()
        if el and Constants.DEFAULT_FLAG_FILTER.isAcceptable(el):
            q=el.getY()
            qterm=(q-div2dt)
            g[i]= 0.5*( qterm + sqrt(qterm*qterm + 4*g[i-1]*(q0-g[i-1]+div2dt)) )
            if dsi.atEnd():
                atend=1
            else:
                dsi.advance()
                q0=q
                i=i+1
        else:
            atend=1


    rts = RegularTimeSeries("/gcalc//////",ndo.getStartTime().toString(),
			    ndo.getTimeInterval().toString(),g)
    return rts
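The recurrence at the heart of the loop above, isolated as a plain function (an illustrative sketch; the name g_step is not from the original):

import math

def g_step(g_prev, q_prev, q, div2dt):
    # one trapezoidal step of the g-model: solve the quadratic for g[i]
    qterm = q - div2dt
    return 0.5 * (qterm + math.sqrt(qterm * qterm + 4.0 * g_prev * (q_prev - g_prev + div2dt)))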
Example No. 5
File: mig.py Project: amunozNFX/lss
def getSourcesAndReceivers():
    src = zeros(ns, Source)  # source
    rco = zeros(ns, Receiver)  # observed data
    xr = rampint(0, 1, 102)
    zr = zeroint(102)
    for isou in range(ns):
        xs = isou * ds
        src[isou] = Source.RickerSource(xs, 0, dt, fpeak)
        rco[isou] = Receiver(xr, zr, nt)
    return src, rco
Example No. 6
 def createMBeanInfo( self ):
     
     import jarray
     import javax.management as jmanage  # assumed alias: the snippet's 'jmanage' matches javax.management
     mmbai = jarray.zeros( 2, jmanage.modelmbean.ModelMBeanAttributeInfo )
     mmbai[ 0 ] = jmanage.modelmbean.ModelMBeanAttributeInfo( "Value", "java.lang.String", "what is the value", 1, 1, 0 )
     mmbai[ 1 ] = jmanage.modelmbean.ModelMBeanAttributeInfo( "Name", "java.lang.String", "what is the name", 1,1, 0 )
     
     mmboi = jarray.zeros( 0, jmanage.modelmbean.ModelMBeanOperationInfo )
     mmbci = jarray.zeros( 0, jmanage.modelmbean.ModelMBeanConstructorInfo )
     mmis = jmanage.modelmbean.ModelMBeanInfoSupport( str( self.__class__ ), 'support', mmbai, mmbci, mmboi, None )
     return mmis
Example No. 7
def toreg(irts,tw=None,ti='1hour'):
    """
    toreg(irts,tw=None,ti='1hour'):
    Converts irregular time series to regular time series with
    a time window and time interval.
    """
    import string, jarray  # stdlib modules used below (assumed imported at module level in the original)
    ti = vutils.timeinterval(ti)
    if isinstance(irts,vutils.DataReference):
	irts = irts.getData() # initialize time window accurately
    if tw == None:
	tw = irts.getTimeWindow()
    else:
	tw = vutils.timewindow(tw)
    st = vutils.time(tw.getStartTime().ceiling(ti))
    et = vutils.time(tw.getEndTime().floor(ti))
    #print 'Start time: ',st
    #print 'End time: ',et
    nvals = st.getNumberOfIntervalsTo(et,ti)+1
    path = irts.getName()
    parts = string.split(path,'/')
    if len(parts) == 8: parts[6] = parts[6]+'-REG'
    name = string.join(parts,'/')
    #print name
    yvals = jarray.zeros(nvals,'d')
    flags = jarray.zeros(nvals,'i')
    index=0
    iterator = irts.getIterator()
    last_val = vutils.Constants.MISSING_VALUE
    last_flag = 0
    # get first time value in irregular time series
    next_time = vutils.time(long(iterator.getElement().getX()))
    # get starting time of regular time series
    time_val = vutils.time(st)
    # loop to fill values
    while index < nvals:
	#print index,time_val
	#print next_time,last_val,last_flag
	# if time value of rts is >= irts then update values
	if not iterator.atEnd() and time_val.compare(next_time) >= 0:
	    last_val = iterator.getElement().getY()
	    last_flag = iterator.getElement().getFlag()
	    # advance by one & update next time value
	    iterator.advance()
	    if not iterator.atEnd():
		next_time = vutils.time(long(iterator.getElement().getX()))
	yvals[index] = last_val
	flags[index] = last_flag
	time_val.incrementBy(ti)
	index=index+1
    attr = irts.getAttributes().createClone()
    attr.setType(vutils.DataType.REGULAR_TIME_SERIES)
    rts = vutils.RegularTimeSeries(name,str(st),str(ti),yvals,flags,attr)
    return rts
Example No. 8
 def read(self, size=None):
     if size is not None:
         buffer = jarray.zeros(size, 'c')
         r = self._input.read(buffer, 0, size)
         if r == -1: return None
         else: return unicode(java.lang.String(buffer, 0, r))
     else:
         buffer = jarray.zeros(64, 'c')
         ret = java.lang.StringBuilder()
         while True:
             r = self._input.read(buffer, 0, len(buffer))  # bug fix: 'size' is None on this branch
             if r == -1: return unicode(ret.toString())
             else: ret.append(buffer, 0, r)
Example No. 9
def _test4():
    from ANN import fnet_cccec
    sac = vutils.RegularTimeSeries('inp1','31jan1990 2400', '1mon', [10000.0, 11000.0, 12000.0, 15000.0])
    sac = sac*5.40394e-06+0.178546
    sjr = vutils.RegularTimeSeries('inp1','31jan1990 2400', '1mon', [1000.0, 1100.0, 1200.0, 1500.0])
    sjr = sjr*1.34396e-05+0.199247
    exp = vutils.RegularTimeSeries('inp1','31jan1990 2400', '1mon', [5000.0, 6000.0, 7000.0, 8000.0])
    exp = exp*-4.86697e-05+0.178537
    dxc = vutils.RegularTimeSeries('inp1','31jan1990 2400', '1mon', [0.0, 0.0, 0.0, 0.0])
    dxc = (1.-dxc/31)*0.6+0.2
    inps = [dxc,exp,sac,sjr]
    inps = map(lambda x: vutils.interpolate(x,'1day'), inps)
    inputs = []
    for i in range(len(inps)):
	print 'Building inputs for ',inps[i]
	inputs=inputs+buildinput(inps[i],7,10,7) # weekly averages
    print 'Built inputs'
    #outputs = []
    #ccc_ref = vutils.findpath(g2,'//ccc/ec///%s/'%fpart)[0]
    #outputs.append(vutils.interpolate(DataReference.create(ccc_ref,tw).getData(),'1day'))
    mi = MultiIterator(inputs)
    import jarray
    ninps = len(inputs)
    input = jarray.zeros(ninps,'f')
    output = jarray.zeros(1,'f')
    ann = fnet_cccec()
    ndata = len(inputs[0])
    for input_no in range(31+28+31+1):
	mi.advance()
    outdata = jarray.zeros(ndata,'f')
    fh = open('junk.out','w',15000)
    while input_no < ndata:
	el = mi.getElement()
	i=0
	while i < ninps:
	    input[i] = el.getY(i)
	    i=i+1
	ann.engine(input,output,0)
	fh.write('Input #: %d\n'%input_no)
	i=0
	while i < ninps:
	    fh.write('%13.6f'%input[i])
	    if i%5 == 4: fh.write('\n')
	    i = i+1
	fh.write('\nOutput #: %d\n'%input_no)
	fh.write('%13.6f\n'%output[0])
	outdata[input_no] = output[0]
	mi.advance()
	input_no=input_no+1
    fh.close()
Example No. 10
def _test3():
    from ANN import fnet_cccec
    fpart = 'dxc-op'
    g1 = vutils.opendss('anninputs.dss')
    g2 = vutils.opendss('annoutputs.dss')
    g1.filterBy(fpart)
    g2.filterBy(fpart)
    refs = g1[:]
    tw = vutils.timewindow('01oct1975 0000 - 01sep1991 0000')
    #tw = vutils.timewindow('01oct1975 0000 - 01oct1976 0000')
    inps = []
    for i in range(len(refs)):
	inps.append(vutils.interpolate(DataReference.create(refs[i],tw).getData(),'1day'))
    inputs = []
    for i in range(len(inps)):
	print 'Building inputs for ',inps[i]
	inputs=inputs+buildinput(inps[i],7,10,7) # weekly averages
    print 'Built inputs'
    outputs = []
    ccc_ref = vutils.findpath(g2,'//ccc/ec///%s/'%fpart)[0]
    outputs.append(vutils.interpolate(DataReference.create(ccc_ref,tw).getData(),'1day'))
    mi = MultiIterator(inputs)
    import jarray
    ninps = len(inputs)
    input = jarray.zeros(ninps,'f')
    output = jarray.zeros(1,'f')
    ann = fnet_cccec()
    ndata = len(inputs[0])
    for input_no in range(365):
	mi.advance()
    outdata = jarray.zeros(ndata,'f')
    while input_no < ndata:
	el = mi.getElement()
	i=0
	while i < ninps:
	    input[i] = el.getY(i)
	    i=i+1
	ann.engine(input,output,0)
	outdata[input_no] = output[0]
	mi.advance()
	input_no=input_no+1
    #
    stime = inputs[0].getStartTime()
    ti = inputs[0].getTimeInterval()
    rtsout = vutils.RegularTimeSeries('/ann/ccc_out/ec///annutils/',str(stime),\
				      str(ti),outdata)
    vutils.plot(rtsout,outputs[0])
    rtsout = (rtsout-0.140516)/0.000396563
    vutils.writedss('annout.dss','/ann/ccc_out/ec///annutils/',rtsout)
Example No. 11
def extractres(file='calibrate.res',stime='01jan1975 0000',time_intvl='1day'):
    """
    extractres(file='calibrate.res',stime='01jan1975 0000',time_intvl='1day'):

    Used to extract from result file a target and a simulated time series which
    starts at the start time and of the given time interval.

    The return value is a tuple of the target and simulated regular time series

    e.g.
    target,simulated = extractres('calibrate.res','01jan1975 0000','1day')
    """
    fh = open(file)
    nvals=0
    # get number of values... & loop till first pattern start
    while 1:
	line = fh.readline()
	if string.find(line,'#') >=0 :
	    fh.readline()
	    break
	if string.find(line,'No. of patterns') >= 0 :
	    line = string.strip(line)
	    nvals=int(string.split(line,':')[1])
    # make array of nvals
    if nvals == 0: raise ValueError("No data found --> No. of patterns = %d" % nvals)
    print 'nvals = %d'%nvals
    target = jarray.zeros(nvals,'d')
    simulated = jarray.zeros(nvals,'d')
    # loop till end of file
    index=0
    last1 = ''
    last2 = ''
    display_intvl = 500
    while 1:
	line = fh.readline()
	if line == None or line == '': break
	if string.find(line,'#') >= 0:
	    if index%display_intvl == 0: print 'Done: %d of %d'%(index,nvals)
	    target[index]=float(last2)
	    simulated[index]=float(last1)
	    index=index+1
	else:
	    last2=last1
	    last1=line
    fh.close()
    rts_target = vutils.RegularTimeSeries('target',stime,time_intvl,target)
    rts_simulated = vutils.RegularTimeSeries('simulated',stime,time_intvl,simulated)
    return rts_target, rts_simulated
Example No. 12
        def write(self, f, encoding=None, method="xml", pretty_print=False, xml_declaration=None, with_tail=True, standalone=None, compression=0, exclusive=False, with_comments=True, inclusive_ns_prefixes=None):
            if encoding is not None or method != "xml" or pretty_print is not False or xml_declaration is not None or with_tail is not True or standalone is not None or exclusive is not False or with_comments is not True or inclusive_ns_prefixes is not None:
                raise NotImplementedError

            if compression == 0 and isinstance(f, (basestring, file, File)):
                # direct
                source = DOMSource(self._document.getDocumentElement())
                result = StreamResult(f)
                identityTransformation.transform(source, result)
            else:
                # first to a BAOS
                f2 = ByteArrayOutputStream()
                source = DOMSource(self._document.getDocumentElement())
                result = StreamResult(f2)
                identityTransformation.transform(source, result)

                if compression > 0:
                    bytes = f2.toByteArray()
                    deflater = Deflater(compression)
                    deflater.setInput(bytes)
                    deflater.finish()
                    output = jarray.zeros(2 * len(bytes), "b")
                    length = deflater.deflate(output)
                    output = output[:length]
                else:
                    output = f2.toByteArray()

                if isinstance(f, basestring):
                    open(f, "wb").write(output.tostring())
                else:
                    f.write(output.tostring())
Example No. 13
def pn_data_get_uuid(data):
  from jarray import zeros
  from java.nio import ByteBuffer
  u = data.getUUID()
  ba = zeros(16, 'b')
  bb = ByteBuffer.wrap(ba)
  bb.putLong(u.getMostSignificantBits())
  bb.putLong(u.getLeastSignificantBits())
  return ba.tostring()
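For illustration, the inverse mapping (not part of the original snippet): packing the 16 raw bytes back into a java.util.UUID with the same ByteBuffer layout.

from java.nio import ByteBuffer
from java.util import UUID

def uuid_from_bytes(ba):
    # ba: a 16-element Java byte[]; most-significant long first, as above
    bb = ByteBuffer.wrap(ba)
    return UUID(bb.getLong(), bb.getLong())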
Example No. 14
 def stringToJavaByteArray(s):
     import jarray
     bytes = jarray.zeros(len(s), 'b')
     for count, c in enumerate(s):
         x = ord(c)
         if x >= 128: x -= 256
         bytes[count] = x
     return bytes
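A quick check of the conversion above (a sketch; the expected output is in the comment): values at or above 0x80 wrap into Java's negative byte range.

ba = stringToJavaByteArray('\x00\x7f\x80\xff')
print list(ba)   # [0, 127, -128, -1]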
Example No. 15
def extend_flow(nodes_to_extend):
    """ Copies WY1922 data to WY1921 to allow preprocessing and running DSM2
        from 01Jan1921.
    """
    calsimfile=getAttr("CALSIMFILE")
    f=opendss(calsimfile)           # open CALSIM file
    outfile=getAttr("BOUNDARYFILE")
    if not outfile:
        raise ValueError("Config variable BOUNDARYFILE not set and needed for prepro output")
    tw=timewindow("01OCT1921 0000 - 01OCT1922 0000")

    for calsimname in nodes_to_extend:    
        print calsimname
        dsspath = calsim_path(calsimname)
        paths = findpath(f,dsspath)
        if not paths or len(paths)>1:
            print "File: %s" % calsimfile
            raise ValueError("Path %s not found or not unique" % dsspath)
        ref=DataReference.create(paths[0],tw)
        monthly=ref.getData()

        itr = monthly.getIterator()
        d=zeros(len(monthly),'d')
        count=0
        while not itr.atEnd():
           el = itr.getElement()
           d[count] = el.getY()
           count = count + 1
           itr.advance()
        stime = "01OCT1920 0000" 
        rts = RegularTimeSeries(monthly.getName(),stime, \
              timeinterval("1MON").toString(), d, None, monthly.getAttributes())
        writedss(calsimfile,ref.getPathname().toString(),rts)
Example No. 16
 def getRGBData(self):
     "Return byte array of RGB data as string"
     if self._data is None:
         if sys.platform[0:4] == 'java':
             import jarray
             from java.awt.image import PixelGrabber
             width, height = self.getSize()
             buffer = jarray.zeros(width*height, 'i')
             pg = PixelGrabber(self._image, 0,0,width,height,buffer,0,width)
             pg.grabPixels()
             # there must be a way to do this with a cast not a byte-level loop,
             # I just haven't found it yet...
             pixels = []
             a = pixels.append
             for i in range(len(buffer)):
                 rgb = buffer[i]
                 a(chr((rgb>>16)&0xff))
                 a(chr((rgb>>8)&0xff))
                 a(chr(rgb&0xff))
             self._data = ''.join(pixels)
             self.mode = 'RGB'
         else:
             im = self._image
             mode = self.mode = im.mode
             if mode not in ('L','RGB','CMYK'):
                 im = im.convert('RGB')
                 self.mode = 'RGB'
             self._data = im.tostring()
     return self._data
Example No. 17
def get_rom():
    """
    Returns the ROM in bytes.
    """
    rom_array = jarray.zeros(Gb.ROM_SIZE, "i")
    Gb.getROM(rom_array)
    return RomList(rom_array)
Example No. 18
 def recurse_dir(self, root, outdir):
    #Cache DirectoryNode and directory name
    dircache = {'object': False, 'directory': False}
    
    for obj in root:   
       fname = obj.getShortDescription()
       
       if type(obj) is DirectoryNode:
          tmpoutdir = outdir + '/' + fname
          os.makedirs(tmpoutdir)
          if dircache['object'] is False:
             dircache['object'] = obj
             dircache['directory'] = tmpoutdir
          else:
             sys.stderr.write("Check container in 7-Zip, likely more dirs at a root DirectoryNode than expected.")
       else:         
          #replace strange ole2 characters we can't save in filesystem, todo: check spec
          #this seems to be the convention in 7-Zip, and it seems to work...
          fname = fname.replace(self.replacechar1, '[1]').replace(self.replacechar5, '[5]')
          
          f = open(outdir + "/" + fname, "wb")
          size = obj.getSize()
          stream = DocumentInputStream(obj)
          bytes = zeros(size, 'b')
          n_read = stream.read(bytes)
          data = bytes.tostring()         
          f.write(data)
          f.close()
    
    #only recurse if we have an object to recurse into after processing DocumentNodes
    if dircache['object'] != False:
       self.recurse_dir(dircache['object'], dircache['directory'])
Example No. 19
 def init(self, frequency=22050, size=-16, channels=2, buffer=4096):
     """
     Mixer initialization.
     Argument sampled frequency, bit size, channels, and buffer.
     Currently implements PCM 16-bit audio.
     Plays WAV, AIFF, and AU sampled audio.
     To specify the BigEndian format of AIFF and AU, use -16L for size.
     The mixing is done by Mixer.class, compiled with 'javac Mixer.java'.
     When a JAR is created, include with 'jar uvf Pyj2d_App.jar pyj2d/Mixer.class'.
     """
     if not self._initialized:
         encoding = {True:AudioFormat.Encoding.PCM_SIGNED, False:AudioFormat.Encoding.PCM_UNSIGNED}[size<0]
         channels = {True:1, False:2}[channels<=1]
         framesize = int((abs(size)/8) * channels)
         isBigEndian = isinstance(size,long)
         self._audio_format = AudioFormat(encoding, int(frequency), int(abs(size)), channels, framesize, int(frequency), isBigEndian)
         self._bufferSize = buffer
         try:
             self._mixer = AudioMixer(self._audio_format, self._bufferSize)
         except TypeError:
             self._mixer = None
             return None
         if not self._mixer.isInitialized():
             return None
         self._bufferSize = self._mixer.getBufferSize()
         self._byteArray = jarray.zeros(self._bufferSize, 'b')
         self._initialized = True
         self._thread = Thread(self)
         self._thread.start()
     return None
Example No. 20
def get_ram():
    """
    Returns the RAM in bytes.
    """
    ram_array = jarray.zeros(Gb.RAM_SIZE, "i")
    Gb.getRAM(ram_array)
    return RomList(ram_array)
Example No. 21
    def copy_zip_input_stream(self, zip_input_stream, parent=None):
        """Given a `zip_input_stream`, copy all entries to the output jar"""

        chunk = jarray.zeros(8192, "b")
        while True:
            entry = zip_input_stream.getNextEntry()
            if entry is None:
                break
            try:
                # NB: cannot simply use old entry because we need
                # to recompute compressed size
                if parent:
                    name = "/".join([parent, entry.name])
                else:
                    name = entry.name
                output_entry = JarEntry(name)
                output_entry.time = entry.time
                self.jar.putNextEntry(output_entry)
                while True:
                    read = zip_input_stream.read(chunk, 0, 8192)
                    if read == -1:
                        break
                    self.jar.write(chunk, 0, read)
                self.jar.closeEntry()
            except ZipException, e:
                if not "duplicate entry" in str(e):
                    log.error("Problem in copying entry %r", output_entry, exc_info=True)
                    raise
Example No. 22
def MakeMultiChannelPhantom (ops, size):
	# assumed ImageJ2/imglib2 imports; Add3DShapes is defined elsewhere in the original script
	from jarray import zeros
	from net.imglib2.type.numeric.real import FloatType
	from net.imglib2.view import Views
	from net.imagej import ImgPlus
	from net.imagej.axis import Axes

	if len(size)>3:
		numChannels=size[3]
	else:
		numChannels=1

	image=ops.run("create", size, FloatType())
	ax=[Axes.X, Axes.Y, Axes.Z, Axes.CHANNEL]
	imgPlus=ImgPlus(image, "phantom", ax)

	location=zeros(3,'i')
	location[0]=40
	location[1]=size[1]/2
	location[2]=size[2]/2

	#ops.run("addsphere",  image, location, radius, 1.0)
	#ops.run("addassymetricspherel",  image, location, 1.0, radius1, radius2)

	shapes=Add3DShapes(ops, size)

	def AddShapes(hyperSlice):
		#shapes.addRandomPointsInROI(hyperSlice, 100.0, 20)
		shapes.addCenterSphere(hyperSlice, 5.0, 20)

	if (numChannels>1):
		for d in range(0,numChannels):
			hyperSlice= Views.hyperSlice(image, 3, d)
			AddShapes(hyperSlice)
			location[0]+=10
	else:
		AddShapes(image)

	return imgPlus
Example No. 23
    def copy_zip_input_stream(self, zip_input_stream, parent=None):
        """Given a `zip_input_stream`, copy all entries to the output jar"""

        chunk = jarray.zeros(8192, "b")
        while True:
            entry = zip_input_stream.getNextEntry()
            if entry is None:
                break
            try:
                # NB: cannot simply use old entry because we need
                # to recompute compressed size
                if parent:
                    name = "/".join([parent, entry.name])
                else:
                    name = entry.name
                if name.startswith("META-INF/") and name.endswith(".SF"):
                    # Skip signature files - by their nature, they do
                    # not work when their source jars are copied
                    log.debug("Skipping META-INF signature file %s", name)
                    continue
                output_entry = JarEntry(name)
                output_entry.time = entry.time
                self.jar.putNextEntry(output_entry)
                while True:
                    read = zip_input_stream.read(chunk, 0, 8192)
                    if read == -1:
                        break
                    self.jar.write(chunk, 0, read)
                self.jar.closeEntry()
            except ZipException, e:
                if not "duplicate entry" in str(e):
                    log.error("Problem in copying entry %r", output_entry, exc_info=True)
                    raise
Example No. 24
def get_registers():
    """
    Returns a list of current register values.
    """
    register_array = jarray.zeros(Gb.NUM_REGISTERS, "i")
    Gb.getRegisters(register_array)
    return list(register_array)
Example No. 25
 def recv(self, n):
     assert self.sock
     data = jarray.zeros(n, 'b')
     try:
         m = self.istream.read(data)
     except java.io.InterruptedIOException, jiiie:
         raise timeout('timed out')
     if m <= 0:
         return ''
     return data[:m].tostring()  # assumed completion: return only the bytes actually read
Example No. 26
 def getRGBData(self):
     "Return byte array of RGB data as string"
     if self._data is None:
         self._dataA = None
         if sys.platform[0:4] == 'java':
             import jarray  # TODO: Move to top.
             from java.awt.image import PixelGrabber
             width, height = self.getSize()
             buffer = jarray.zeros(width * height, 'i')
             pg = PixelGrabber(self._image, 0, 0, width, height, buffer, 0, width)
             pg.grabPixels()
             # there must be a way to do this with a cast not a byte-level loop,
             # I just haven't found it yet...
             pixels = []
             a = pixels.append
             for rgb in buffer:
                 a(chr((rgb >> 16) & 0xff))
                 a(chr((rgb >> 8) & 0xff))
                 a(chr(rgb & 0xff))
             self._data = ''.join(pixels)
             self.mode = 'RGB'
         else:
             im = self._image
             mode = self.mode = im.mode
             if mode == 'RGBA':
                 im.load()
                 self._dataA = PmlImageReader(im.split()[3])
                 im = im.convert('RGB')
                 self.mode = 'RGB'
             elif mode not in ('L', 'RGB', 'CMYK'):
                 im = im.convert('RGB')
                 self.mode = 'RGB'
             self._data = im.tostring()
     return self._data
Example No. 27
def pn_transport_peek(trans, size):
  from jarray import zeros
  size = min(trans.impl.pending(), size)
  ba = zeros(size, 'b')
  if size:
    bb = trans.impl.head()
    bb.get(ba)
  return 0, ba.tostring()
Example No. 28
    def run():
        parser = get_args_parser()
        try:
            parse_result = parser.parse_args()

            topic_name = parse_result.topic
            num_records = parse_result.num_records
            record_size = parse_result.record_size
            producer_props = parse_result.producer_config

            props = {}
            for prop in producer_props:
                k, v = prop.split('=')
                props[k] = v

            props[ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG] = 'org.apache.kafka.common.serialization.ByteArraySerializer'
            props[ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG] = 'org.apache.kafka.common.serialization.ByteArraySerializer'
            producer = KafkaProducer(props)

            payload = jarray.zeros(record_size, "b")  # sized by the parsed record_size; the original hard-coded 100
            record = ProducerRecord(topic_name, payload)
            stats = Stats(num_records, 5000)

            for i in xrange(num_records):
                send_start_ms = get_time_millis()
                cb = stats.next_completion(send_start_ms, record_size, stats)
                producer.send(record, cb)

            producer.close()
            stats.print_total()
        except Exception as e:
            exc_info = sys.exc_info()
            traceback.print_exception(*exc_info)
            sys.exit(1)
Example No. 29
def urandom(n):
    import jarray
    from java.security import SecureRandom
    global urandom_source
    if urandom_source is None:
        urandom_source = SecureRandom()
    buffer = jarray.zeros(n, 'b')
    urandom_source.nextBytes(buffer)
    return buffer.tostring()
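Hedged usage sketch: like os.urandom, the function returns n random bytes as a str; binascii (in the Jython stdlib) renders them as hex.

import binascii
print binascii.hexlify(urandom(16))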
Example No. 30
 def _read(self):
     data = ""
     if self._stdout.available():
         buf = jarray.zeros(self._stdout.available(), "b")
         self._stdout.read(buf)
         data += "".join([chr(b) for b in buf])
     return data
Example No. 31
    def getRGBData(self):
        "Return byte array of RGB data as string"
        if self._data is None:
            self._dataA = None
            if sys.platform[0:4] == 'java':
                import jarray  # TODO: Move to top.
                from java.awt.image import PixelGrabber

                width, height = self.getSize()
                buffer = jarray.zeros(width * height, 'i')
                pg = PixelGrabber(self._image, 0, 0, width, height, buffer, 0,
                                  width)
                pg.grabPixels()
                # there must be a way to do this with a cast not a byte-level loop,
                # I just haven't found it yet...
                pixels = []
                a = pixels.append
                for rgb in buffer:
                    a(chr((rgb >> 16) & 0xff))
                    a(chr((rgb >> 8) & 0xff))
                    a(chr(rgb & 0xff))
                self._data = ''.join(pixels)
                self.mode = 'RGB'
            else:
                im = self._image
                mode = self.mode = im.mode
                if mode == 'RGBA':
                    im.load()
                    self._dataA = PmlImageReader(im.split()[3])
                    im = im.convert('RGB')
                    self.mode = 'RGB'
                elif mode not in ('L', 'RGB', 'CMYK'):
                    im = im.convert('RGB')
                    self.mode = 'RGB'
                self._data = im.tobytes()
        return self._data
Example No. 32
def seq_better_applicable(val, cand_cl, cl, parm_ar, j):
    """
       >>> import Dbg,jarray; a=jarray.zeros(1,lang.Object)
       >>> seq_better_applicable([1,2],atyp("I[]"),None,a,0)
       1
       >>> seq_better_applicable([1,2],atyp("I[]"),lang.Object,a,0)
       1
       >>> seq_better_applicable([1,2],atyp("I[]"),None,None,0)
       1
       >>> seq_better_applicable([1,2],atyp("I[]"),atyp("Z[]"),None,0)
       1
       >>> seq_better_applicable([1,2],atyp("I[]"),atyp("Z[]"),a,0)
       1
       >>> Dbg.types(a) == jarray.array([atyp("I[]")],lang.Class)
       1
       >>> seq_better_applicable([1,2],atyp("I[]"),atyp("J[]"),None,0)
       -1
       >>> seq_better_applicable([1,2],atyp("I[]"),atyp("J[]"),a,0)
       -1
       >>> seq_better_applicable([1,2],atyp("I[]"),core.PyList,a,0)
       -1
       >>> seq_better_applicable([1,2],core.PyList,atyp("J[]"),a,0)
       1
       >>> Dbg.types(a) == jarray.array([core.PyList],lang.Class)
       1
       >>> seq_better_applicable([['a','b']],atyp("C[][]"),None,a,0)
       1
       >>> Dbg.types(a) == jarray.array([atyp("C[][]")],lang.Class)
       1
       >>> seq_better_applicable([['a','b'],[]],atyp("C[][]"),None,a,0)
       1
       >>> seq_better_applicable(['ab','bc'],atyp("C[][]"),atyp("java.lang.Character[]"),a,0)
       1
       >>> seq_better_applicable(['a','b'],atyp("java.lang.Character[]"),atyp("C[][]"),a,0)
       -1
       >>> seq_better_applicable(['a','b'],atyp("java.lang.Character[]"),atyp("org.python.core.PyObject[]"),a,0)
       1
       >>> seq_better_applicable(['a','bc'],atyp("java.lang.Character[]"),None,None,0)
       -1
       >>> seq_better_applicable([['a','b'],[]],atyp("C[][]"),atyp("org.python.core.PyList[]"),a,0)
       -1
       >>> seq_better_applicable([['a','b'],[]],atyp("org.python.core.PyList[]"),atyp("C[][]"),a,0)
       1
       >>> seq_better_applicable([['a','b'],[]],atyp("C[][]"),atyp("java.lang.Object[]"),a,0)
       1
       >>> seq_better_applicable([['a','b'],[]],atyp("java.lang.Object[]"),atyp("org.python.core.PyList[]"),None,0)
       -1
       >>> seq_better_applicable([['a','b'],"ab"],atyp("C[][]"),atyp("org.python.core.PyObject[]"),a,0)
       >>> # ambiguous
       >>> seq_better_applicable([[],[]],atyp("C[][][]"),None,a,0)
       1
       >>> Dbg.types(a) == jarray.array([atyp("C[][][]")],lang.Class)
       1
       >>> seq_better_applicable([[],[['a','b'],"ab"]],atyp("C[][][]"),atyp("org.python.core.PyObject[][]"),a,0) 
       >>> # ambiguous
       >>> seq_better_applicable([],lang.Object,atyp("java.lang.Object[]"),None,0)
       -1
       >>> seq_better_applicable([],lang.Integer,atyp("java.lang.Object[]"),None,0)
       -1
       >>> seq_better_applicable([],core.PyList,lang.Object,a,0)
       1
       >>> seq_better_applicable([],core.PyList,core.PyObject,a,0)
       1
       >>> seq_better_applicable([],core.PyObject,None,None,0)
       1
       >>> seq_better_applicable([],lang.Integer,core.PyList,a,0)
       -1
    """

    if cand_cl is cl:
        return 0
    if cl and is_ass_from(cand_cl, cl):  # cl < cand_cl
        return -1

    # order: ... PyObject arrays ... interfaces ... Object

    if is_array(cand_cl):
        cl_comp_type = None
        if not cl:  # ignore cl
            pass
        elif is_array(cl):  # cl,cand_cl in <arrays>
            cl_comp_type = comp_type(cl)
        else:
            if is_ass_from(core.PyObject,
                           cl):  # cl in <...PyObject>, cand_cl in <arrays>
                return -1
            # cand_cl in <arrays>, cl in <interfaces ... Object>
        cand_comp_type = comp_type(cand_cl)
    else:
        if cl and is_array(cl):  # cl in <arrays>
            if is_ass_from(core.PyObject, cand_cl) and is_ass_from(
                    cand_cl, type(val)):  # cand_cl applicable in <...PyObject>
                if parm_ar: parm_ar[j] = val  # no conv
                return 1
            else:
                return -1
        else:
            if (not cl or is_ass_from(cl, cand_cl)) and is_ass_from(
                    cand_cl, type(val)):  # type(v) < cand_cl < cl
                if parm_ar: parm_ar[j] = val  # no conv
                return 1
            else:
                return -1

    prim = 0
    if parm_ar:
        import jarray
        dest = jarray.zeros(len(val), cand_comp_type)
        if is_prim(cand_comp_type):
            data = jarray.zeros(1, lang.Object)
            prim = 1
        else:
            data = dest
    else:
        dest = None
        data = None

    n = len(val)
    lt = 0
    gt = 0

    for k in range(n):
        cmp = arg_better_applicable(val[k], cand_comp_type, cl_comp_type, data,
                                    (not prim and k or 0))
        if cmp == 1:
            lt += 1
            if prim:
                dest[k] = data[0]  # ??? tricky: use Array.set
        else:
            if cmp == -1:
                # inefficiency to favor all other situations,
                # otherwise the better_applicable
                # interface needs to be even
                # more complex and distinguish between
                # non-applicable and > cases
                # when cl is not None
                if arg_better_applicable(val[k], cand_comp_type, None, None,
                                         -1) == -1:
                    return -1
                gt += 1
            data = None
            prim = 0

    if lt == n:
        if parm_ar:
            parm_ar[j] = dest
        return 1

    if gt == n:
        return -1

    return None
Example No. 33
File: sim.py Project: cygmris/enos
def javaByteArray(a):
    import jarray, struct
    b = jarray.zeros(len(a), 'b')
    for i in range(len(a)):
        b[i] = struct.unpack('b', struct.pack('B', a[i]))[0]
    return b
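The struct round-trip above reinterprets an unsigned 0..255 value as a signed Java byte; the equivalent arithmetic (illustrative only) is:

def to_signed_byte(x):
    # 0..127 map to themselves, 128..255 to -128..-1
    if x >= 128:
        return x - 256
    return x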
Example No. 34
File: util.py Project: supkk/WDR
def generateUuid(length):
    import jarray, string
    import java.security, java.math
    rnd = java.security.SecureRandom()
    bytes = jarray.zeros(length, 'b')
    rnd.nextBytes(bytes)
    bigInt = java.math.BigInteger(bytes)
    return string.upper(bigInt.toString(16))
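Hedged usage note: a 16-byte seed yields roughly a 32-hex-digit token, though BigInteger can produce fewer digits or a leading '-' depending on the random sign bit and leading zeros.

print generateUuid(16)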
Example No. 35
def linear(ref,myfilter=Constants.DEFAULT_FLAG_FILTER):
    '''
    Linearly interpolate missing data in a time series
    Eli Ateljevich 9/27/99

    '''
    got_ref = 0
    if isinstance(ref, DataReference):
        data = ref.getData()
        got_ref = 1
    else:
        data = ref
        got_ref = 0
    # check for regular time series
    if not isinstance(data,RegularTimeSeries):
        print ref, " is not a regular time-series data set"
        return None
    
    yt = data.getIterator()
    st = data.getStartTime()
    et = data.getEndTime()
    ti = data.getTimeInterval()
    dsi = data.getIterator()
    n = st.getExactNumberOfIntervalsTo(et,ti) + 1

    from jarray import zeros
    vals = zeros(n,'d')
    vals=map(lambda x: -901.0, vals)
    i=0
    if myfilter.isAcceptable(dsi.getElement()):
        firstmissing=0
    else:
        firstmissing=1

    while not dsi.atEnd():
         el=dsi.getElement()
         if el:
             if myfilter.isAcceptable(el):
                 vals[i]=el.getY()
                 lasty=vals[i]
                 lasti=i
             else:
                 while not dsi.atEnd():
                     el=dsi.getElement()
                     if myfilter.isAcceptable(el):
                         nexty=el.getY()
                         nexti=i
                         if not firstmissing:    # no interpolation at begin or end of record
                             for ii in range(lasti,nexti):
                                 vals[ii]=lasty + (ii-lasti)*(nexty-lasty)/(nexti-lasti)
                         vals[nexti]=nexty       # this one gets filled even at beginning of record.
                         firstmissing=0
                         break
                     else:
                         i=i+1
                         dsi.advance()
                        
                 #
         else:
             if not firstmissing: break
             #
         #
         dsi.advance()
         i=i+1
    #

    rts = RegularTimeSeries(data.getName(),
			    data.getStartTime().toString(),
			    data.getTimeInterval().toString(),vals)
 
    return rts
Example No. 36
 def read(self):
     if self._output_available():
         read_bytes = jarray.zeros(self._output_available(), 'b')
         self._stdout.read(read_bytes)
         return ''.join(chr(b & 0xFF) for b in read_bytes)
     return ''
Example No. 37
    def eventGrabber(self, ev, vs):
        self.ev = ev

        mainNode = self.mainNodeOfEvent(vs, ev, self.fenPDF)

        # if there has been some kind of change!
        if self.drewMainNode != mainNode:
            self.fenPDF.structure.canvas2d.removeNode(self.node)
            if self.isCanvas(mainNode, self.fenPDF):
                canvas = mainNode.getPlane()
                self.fenPDF.structure.canvas2d.placeOnCanvas(
                    canvas, self.node, self.x, self.y)
            p('regenerate')
            self.fenPDF.animation.regenerateVS()
        else:
            if self.isCanvas(mainNode, self.fenPDF):
                # set coordinates if inside of some foci
                xy = mainNode.getXYHit(vs, ev.getX(), ev.getY())
                self.fenPDF.structure.canvas2d.setCoordinates(
                    self.node, xy[0], xy[1])
                mainNode.chgFast(vs, -1)
            else:
                vs.coords.setTranslateParams(vs.matcher.getCS('NODE_CS'),
                                             ev.getX(), ev.getY())
            self.fenPDF.animation.reuseVS = 1
            self.fenPDF.animation.animate = 0

        # if there are a change by offset!
        if self.isCanvas(mainNode, self.fenPDF):
            node = self.getNodeOnPlane(vs, ev, self.node)

            if node != None:
                #p('there\'s a node')
                viewFunction = self.fenPDF.views.getMultiplexerNodeContentFunction(
                )
                placeable = viewFunction.f(self.fenPDF.fen.graph, node)
                if isinstance(placeable, vob.lava.placeable.TextPlaceable):
                    #p('textplaceable')
                    cs = self.getNodeCSbyNode(mainNode, self.fenPDF, vs, node)
                    ptsIn = jarray.array([ev.getX(), ev.getY(), 0], 'f')
                    ptsOut = jarray.zeros(3, 'f')
                    vs.coords.inverseTransformPoints3(cs, ptsIn, ptsOut)
                    offset = placeable.getCursorPos(ptsOut[0], ptsOut[1])
                    if self.tipOffset != offset:
                        p('regenerate')
                        self.fenPDF.animation.regenerateVS()
                    self.tipOffset = offset

                    self.fenPDF.structure.ff.setContent(self.tipNode, \
                         self.fenPDF.structure.ff.getContent(node))
                    self.fenPDF.structure.ff.insert(self.tipNode, offset, \
                         self.fenPDF.structure.ff.getContent(self.node))
                else:
                    self.hideTipNode(vs)
            else:
                self.hideTipNode(vs)

        if ev.getType() != ev.MOUSE_DRAGGED:
            # XXX others also ?
            self.fenPDF.structure.canvas2d.removeNode(self.node)
            if not self.isCanvas(mainNode, self.fenPDF):
                self.fenPDF.structure.ff.insert(
                    self.originalNode, self.originalOffset,
                    self.fenPDF.structure.ff.getContent(self.node))
            else:
                node = self.getNodeOnPlane(vs, ev, self.node)
                viewFunction = self.fenPDF.views.getMultiplexerNodeContentFunction(
                )
                placeable = viewFunction.f(self.fenPDF.fen.graph, node)

                # check if there are text under the mouse cursor
                if node != None and isinstance(
                        placeable, vob.lava.placeable.TextPlaceable):
                    self.fenPDF.structure.canvas2d.removeNode(self.node)
                    cs = self.getNodeCSbyNode(mainNode, self.fenPDF, vs, node)
                    ptsIn = jarray.array([ev.getX(), ev.getY(), 0], 'f')
                    ptsOut = jarray.zeros(3, 'f')
                    vs.coords.inverseTransformPoints3(cs, ptsIn, ptsOut)
                    offset = placeable.getCursorPos(ptsOut[0], ptsOut[1])
                    self.fenPDF.structure.ff.insert(node, offset, \
                        self.fenPDF.structure.ff.getContent(self.node))

                # there's no text so just drop the text on canvas
                else:
                    xy = mainNode.getXYHit(vs, ev.getX(), ev.getY())
                    self.fenPDF.structure.canvas2d.placeOnCanvas(
                        mainNode.getPlane(), self.node, xy[0], xy[1])

            # temporarily solution, flush drags away...
            self.fenPDF.events.mouse.mainMouse.flush()
            self.fenPDF.events.eventHandler.eventGrabber = None
            self.fenPDF.animation.regenerateVS()

        vob.AbstractUpdateManager.chg()
Example No. 38
def gCalcFlatQ(ndo, beta, g0, out="inst"):
    """ Calculates antecedent outflow from ndo based on the flat ndo 
        assumption in the g documentation. In this case, the integration of g is exact
        rather than numeric, but the approximation to ndo is a series of flat lines. In the
        case of daily data this is probably acceptable. In the case of monthly data it leads
        to large errors, though it is commonly done.
          Arguments:
           ndo: a regular time series. Must be 1DAY, 1MONTH
           g0:  initial condition. If g0 is not given it is equal to ndo at the first time step.
           beta: g-model parameter.
           out: must be "inst" to calculate instantaneous values of g or "ave" to calculate averages over
               the time step. 
          Output:
           g:  a regular time series, same sampling rate as input
               with the same start time as ndo, ending at the end of ndo or the first piece of bad
                 data in ndo.

    """

    if ndo.getTimeInterval().toString(
    ) != "1DAY" | ndo.getTimeInterval().toString() != "1MONTH":
        raise "Time step for input must be 1DAY or 1MONTH"

    dsi = ndo.getIterator()
    nstep = ndo.size()
    g = zeros(nstep, 'd')
    g = map(lambda x: -901.0, g)
    bdt = beta / dt
    if g0 == None:
        g[0] = q[0]
    else:
        g[0] = g0

    atend = 0
    i = 1
    if out[:4] == "inst":
        while atend == 0:
            el = dsi.getElement()
            if Constants.DEFAULT_FLAG_FILTER.isAcceptable(el):
                q = el.getY()
                g[i] = q / (1 + (q / g[i - 1] - 1) * exp(-q / bdt))
                i = i + 1
                if not dsi.atEnd():
                    dsi.advance()
                else:
                    atend = 1
            else:
                atend = 1

    elif out[:3] == "ave":
        while atend == 0:
            el = dsi.getElement()
            if Constants.DEFAULT_FLAG_FILTER.isAcceptable(el):
                q = el.getY()
                g[i] = q / (1 + (q / g[i - 1] - 1) * exp(-q / bdt))
                i = i + 1
                if not dsi.atEnd():
                    dsi.advance()
                else:
                    atend = 1
            else:
                atend = 1
    else:
        raise ValueError('Argument out must be either "inst" or "ave"')
    rts = RegularTimeSeries("gcalc",
                            ndo.getStartTime().toString(),
                            ndo.getTimeInterval().toString(), g)
    if out[:3] == "ave":
        rts.getAttributes().setXType("PER-VAL")
    return rts
Example No. 39
    def process(self, file):

        skCase = Case.getCurrentCase().getSleuthkitCase()

        artID_ns_rgh_id = skCase.getArtifactTypeID("TSK_ART_NS_RGH")
        artID_ns_rgh = skCase.getArtifactType("TSK_ART_NS_RGH")

        attID_ns_rgh_gid = skCase.getAttributeType("TSK_ATT_NS_RGH_GAME")
        attID_ns_rgh_ts = skCase.getAttributeType("TSK_ATT_NS_RGS_TS")
        attID_ns_rgh_e = skCase.getAttributeType("TSK_ATT_NS_RGS_E")

        # Skip non-files
        if ((file.getType() == TskData.TSK_DB_FILES_TYPE_ENUM.UNALLOC_BLOCKS)
                or
            (file.getType() == TskData.TSK_DB_FILES_TYPE_ENUM.UNUSED_BLOCKS)
                or (file.isFile() == False)):
            return IngestModule.ProcessResult.OK

        if (file.getParentPath().upper()
                == "/SAVE/") and (file.getName().upper()
                                  == "80000000000000A2"):

            self.log(Level.INFO, "Found game history")
            self.filesFound += 1

            buf = zeros(file.getSize(), 'b')
            file.read(buf, 0, file.getSize())

            entries = re.findall("sys_info.*sequence", buf.tostring())  # re needs a str, not a byte array
            for entry in entries:
                app_id = binascii.hexlify(
                    re.search("app_id.{2}(?P<app>.{8}).*?type",
                              entry).group('app')).upper()
                if app_id in self.gids:
                    game = self.gids[app_id]
                else:
                    game = "Unknown"  # avoid an unbound name when the app id is not in the lookup table

                event = re.search("digital.event.(?P<event>.*?).sequence",
                                  entry)
                if not event:
                    event_group = "N/A"
                else:
                    event_group = event.group('event')

                timestamp = re.search("nc_recorded_at.(?P<ts>.*?).nsa_id",
                                      entry).group('ts')

                art = file.newArtifact(artID_ns_rgh_id)

                art.addAttribute(
                    BlackboardAttribute(
                        BlackboardAttribute.ATTRIBUTE_TYPE.TSK_SET_NAME.
                        getTypeID(), GameHistoryIngestModuleFactory.moduleName,
                        "Nintendo Switch - Game Save"))
                art.addAttribute(
                    BlackboardAttribute(
                        attID_ns_rgh_gid,
                        GameHistoryIngestModuleFactory.moduleName, game))
                art.addAttribute(
                    BlackboardAttribute(
                        attID_ns_rgh_ts,
                        GameHistoryIngestModuleFactory.moduleName, timestamp))
                art.addAttribute(
                    BlackboardAttribute(
                        attID_ns_rgh_e,
                        GameHistoryIngestModuleFactory.moduleName,
                        event_group))

                # Fire an event to notify the UI and others that there is a new artifact
                IngestServices.getInstance().fireModuleDataEvent(
                    ModuleDataEvent(GameHistoryIngestModuleFactory.moduleName,
                                    artID_ns_rgh, None))

            return IngestModule.ProcessResult.OK
Example No. 40
def analyze_homogeneity(image_title):
    IJ.selectWindow(image_title)
    raw_imp = IJ.getImage()
    IJ.run(raw_imp, "Duplicate...", "title=Homogeneity duplicate")
    IJ.selectWindow('Homogeneity')
    hg_imp = IJ.getImage()

    # Get a 2D image
    if hg_imp.getNSlices() > 1:
        IJ.run(hg_imp, "Z Project...", "projection=[Average Intensity]")
        hg_imp.close()
        IJ.selectWindow('AVG_Homogeneity')  # Average Intensity projections are titled AVG_, not MAX_
        hg_imp = IJ.getImage()
        hg_imp.setTitle('Homogeneity')

    # Blur and BG correct the image
    IJ.run(hg_imp, 'Gaussian Blur...', 'sigma=' + str(HOMOGENEITY_RADIUS) + ' stack')

    # Detect the spots
    IJ.setAutoThreshold(hg_imp, HOMOGENEITY_THRESHOLD + " dark")
    rm = RoiManager(True)
    table = ResultsTable()
    pa = ParticleAnalyzer(ParticleAnalyzer.ADD_TO_MANAGER,
                          ParticleAnalyzer.EXCLUDE_EDGE_PARTICLES,
                          Measurements.AREA, # measurements
                          table, # Output table
                          0, # MinSize
                          500, # MaxSize
                          0.0, # minCirc
                          1.0) # maxCirc
    pa.setHideOutputImage(True)
    pa.analyze(hg_imp)

    areas = table.getColumn(table.getHeadings().index('Area'))

    median_areas = compute_median(areas)
    st_dev_areas = compute_std_dev(areas, median_areas)
    thresholds_areas = (median_areas - (2 * st_dev_areas), median_areas + (2 * st_dev_areas))

    roi_measurements = {'integrated_density': [],
                        'max': [],
                        'area': []}
    IJ.setForegroundColor(0, 0, 0)
    for roi in rm.getRoisAsArray():
        hg_imp.setRoi(roi)
        stats = hg_imp.getStatistics()
        if REMOVE_CROSS and stats.area > thresholds_areas[1]:
            rm.runCommand('Fill')
        else:
            # ImageStatistics exposes lower-case value fields; the originals
            # (AREA, MIN_MAX, ...) are Measurements flag constants, not values
            roi_measurements['integrated_density'].append(stats.mean * stats.area)
            roi_measurements['max'].append(stats.max)
            roi_measurements['area'].append(stats.area)

        rm.runCommand('Delete')

    measurements = {'mean_integrated_density': compute_mean(roi_measurements['integrated_density']),
                    'median_integrated_density': compute_median(roi_measurements['integrated_density']),
                    'std_dev_integrated_density': compute_std_dev(roi_measurements['integrated_density']),
                    'mean_max': compute_mean(roi_measurements['max']),
                    'median_max': compute_median(roi_measurements['max']),
                    'std_dev_max': compute_std_dev(roi_measurements['max']),
                    'mean_area': compute_mean(roi_measurements['area']),
                    'median_area': compute_median(roi_measurements['area']),
                    'std_dev_area': compute_std_dev(roi_measurements['area']),
                    }

    # generate homogeneity image
    # calculate interpoint distance in pixels
    nr_point_columns = int(sqrt(len(roi_measurements['max'])))
    # TODO: This is a rough estimation that does not take into account margins or rectangular FOVs
    inter_point_dist = hg_imp.getWidth() / nr_point_columns
    IJ.run(hg_imp, "Maximum...", "radius=" + str(inter_point_dist * 1.22))
    # Normalize to 100
    IJ.run(hg_imp, "Divide...", "value=" + str(max(roi_measurements['max']) / 100))
    IJ.run(hg_imp, "Gaussian Blur...", "sigma=" + str(inter_point_dist / 2))
    hg_imp.getProcessor().setMinAndMax(0, 255)

    # Create a LUT based on a predefined threshold
    red = zeros(256, 'b')
    green = zeros(256, 'b')
    blue = zeros(256, 'b')
    acceptance_threshold = HOMOGENEITY_ACCEPTANCE_THRESHOLD * 256 / 100
    for i in range(256):
        red[i] = (i - acceptance_threshold)
        green[i] = (i)
    homogeneity_LUT = LUT(red, green, blue)
    hg_imp.setLut(homogeneity_LUT)

    return hg_imp, measurements
Example No. 41
    def process(self, dataSource, progressBar):

        # we don't know how much work there is yet
        progressBar.switchToIndeterminate()

        # Use blackboard class to index blackboard artifacts for keyword search
        blackboard = Case.getCurrentCase().getServices().getBlackboard()

        # For our example, we will use FileManager to get all
        # files with the word "test"
        # in the name and then count and read them
        # FileManager API: http://sleuthkit.org/autopsy/docs/api-docs/latest/classorg_1_1sleuthkit_1_1autopsy_1_1casemodule_1_1services_1_1_file_manager.html
        fileManager = Case.getCurrentCase().getServices().getFileManager()

        files = fileManager.findFiles(dataSource, "%wallet%")

        numFiles = len(files)
        self.log(Level.INFO, "found " + str(numFiles) + " files")
        progressBar.switchToDeterminate(numFiles)
        fileCount = 0

        for file in files:
            if self.context.isJobCancelled():
                return IngestModule.ProcessResult.OK

            self.log(Level.INFO, "Processing file: " + file.getName())

            fileCount += 1
            # Make an artifact on the blackboard.  TSK_INTERESTING_FILE_HIT is a generic type of
            # artifact.  Refer to the developer docs for other examples.
            # TODO Make a more interesting artifact
            art = file.newArtifact(
                BlackboardArtifact.ARTIFACT_TYPE.TSK_INTERESTING_FILE_HIT)
            att = BlackboardAttribute(
                BlackboardAttribute.ATTRIBUTE_TYPE.TSK_SET_NAME,
                CryptoWalletDataSourceIngestModuleFactory.moduleName,
                "Possible Wallet File")
            art.addAttribute(att)

            try:
                # index the artifact for keyword search
                blackboard.indexArtifact(art)
            except Blackboard.BlackboardException as e:
                self.log(Level.SEVERE,
                         "Error indexing artifact " + art.getDisplayName())

            # To further the example, this code will read the contents of the file and count the number of bytes

            inputStream = ReadContentInputStream(file)
            buffer = jarray.zeros(1024, "b")
            totLen = 0
            readLen = inputStream.read(buffer)
            while (readLen != -1):
                totLen = totLen + readLen
                readLen = inputStream.read(buffer)

            # Update the progress bar
            progressBar.progress(fileCount)

            #Post a message to the ingest messages in box.
            message = IngestMessage.createMessage(
                IngestMessage.MessageType.DATA, "Wallet Finder Module",
                "Found %d files" % fileCount)
            IngestServices.getInstance().postMessage(message)

        return IngestModule.ProcessResult.OK
Example No. 42
if len(args) != 2:
	parser.print_usage()
	sys.exit(1)

vtpFile = args[0]
outDir = args[1]

Utils.loadVTKLibraries()
reader = vtkXMLPolyDataReader()
reader.SetFileName(vtpFile)
reader.Update()
 
polydata = reader.GetOutput()

mesh = Mesh(MeshTraitsBuilder())
vertices = jarray.zeros(polydata.GetNumberOfPoints(), Vertex)
coord = jarray.zeros(3, "d")
for i in xrange(len(vertices)):
	polydata.GetPoint(i, coord)
	vertices[i] = mesh.createVertex(coord)

indices = Utils.getValues(polydata.GetPolys())
i = 0
while i < len(indices):
	if (indices[i] == 3):
		mesh.add(mesh.createTriangle(
			vertices[indices[i+1]],
			vertices[indices[i+2]],
			vertices[indices[i+3]]))
	i += indices[i] + 1
Example No. 43
    def generateSecretKey(self, keyLength):
        bytes = jarray.zeros(keyLength, "b")
        secureRandom = SecureRandom()
        secureRandom.nextBytes(bytes)

        return bytes
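Hedged usage sketch (treating the method above as a free function, with self omitted): generate a 32-byte key and Base64-encode it for storage using Jython's stdlib base64 module.

import base64
key = generateSecretKey(32)
print base64.b64encode(key.tostring())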
Example No. 44
def spline(ref,outint,offset=0):
    '''
    Usage example:  interpolate(ref,outint = timeinterval("15min"),offset = 48)

    Interpolating spline
    Eli Ateljevich 9/27/99

    This function is designed to map a coarser time series into a finer one
    covering the same time window. The spline is monotonicity-preserving
    and fourth order accurate (except near boundaries).
    
    offset shifts the output as appropriate. Typically, offset will be
    zero for inst-val input. For per-ave input, offset will often be
    half of the output frequency. In the example above, NDO
    input is treated as "daily averaged". Output is in units of
    15 minutes. Since there are 96 15min samples per 24 hours,
    offset = 0.5*96 = 48.
    
    Output is a regular time series (rts).
    
    Reference: Huynh, HT "Accurate Monotone Cubic Interpolation",
    SIAM J. Numer. Analysis V30 No. 1 pp 57-100 
    All equation numbers refer to this paper. The variable names are 
    also almost the same. Double letters like "ee" to indicate 
    that the subscript should have "+1/2" added to it.
    '''
    got_ref = 0
    if isinstance(ref, DataReference):
        data = ref.getData()
        got_ref = 1
    else:
        data = ref
        got_ref = 0
    # check for regular time series
    if not isinstance(data,RegularTimeSeries):
        print ref, " is not a regular time-series data set"
        return None
    
    yt = data.getIterator()
    div =  TimeFactory.getInstance().createTimeInterval(outint)
    nsub = data.getTimeInterval()/div
    from jarray import zeros
    vals = zeros(1+nsub*(data.size()-1),'d')
    vals=map(lambda x: -901.0, vals)
    vallength = len(vals)
    dflags = zeros(vallength,'l')
    lastone = vallength + 4*nsub  -1
    firstone = 4*nsub
 
    y4,y3,y2,y1,y0,ss3,ss2,ss1,ss0,s1,s0 = 0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.
    dd1,dd0,d3,d2,d1,d0,e1,e0,ee2,ee1,ee0,eem1,df0,df1 = 0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.
    count = 0
    subcount = 0
    nextbad = 5
    atstart=1
    atend =0
    while not yt.atEnd() or nextbad>1: 
	if not (yt.atEnd() or atend==1):
            el = yt.getElement()     #Read new value, good or bad
            count = count + 1
            if Constants.DEFAULT_FLAG_FILTER.isAcceptable(el):           
                y4 = el.getY()
             
                #
                ss3 = y4-y3
                d3 = ss3 - ss2
                ee2 = d3-d2
                atstart = 0
            else:
                if atstart == 0:
                    atend = 1
                    nextbad = nextbad - 1
                #NoMissing = "Internal missing values not allowed."
                #raise NoMissing
        else:
            nextbad = nextbad - 1
        #
        # minmod-based estimates
        s2 = minmod(ss1,ss2)
        dd2 = minmod(d2,d3)
        e2 = minmod(ee1,ee2)

        #polynomial slopes
        dpp1adj1 = ss1 - dd1
        dpp0adj1 = ss0 + dd0

        t1 = minmod(dpp0adj1,dpp1adj1)
        dqq1adj1 = ss1 - minmod(d1+e1,d2-2*e2)   #Eq4.7a
        dqq0adj1 = ss0 + minmod(d0+2*e0,d1-e1)   #Eq4.7b
        ttilde1 = minmod(dqq0adj1,dqq1adj1)

        df1 = 0.5*(dqq0adj1 + dqq1adj1)          #First cut, Eq. 4.16
        tmin = min(0.,3*s1,1.5*t1,ttilde1)
        tmax = max(0.,3*s1,1.5*t1,ttilde1)

#	#If count == 3:         # have enough to make up boundary quantities
#	gamma = (ee1 - ee0)/4   #5.8, 
#        eex0 = ee0 - 4*gamma  #5.9 x is the boundary value
#        qqx = median3(ssm1,qqx,
        
        df1 = df1 + minmod(tmin-df1, tmax-df1)   #Revise,  Eq. 4.16
        
        for j in range(nsub):
            jfrac = (float(j)+offset)/ float(nsub)
            c0 = y0                  # from eq. 2 in the paper
            c1 = df0
            c2 = 3*ss0 - 2*df0 - df1
            c3 = df0 + df1 - 2*ss0
            if count > 4:
                if subcount <= lastone and subcount >= firstone:  
                    vals[subcount-4*nsub] = c0 + c1*jfrac + c2*jfrac**2 + c3*jfrac**3
   
            subcount = subcount + 1

        # Now lag all data and estimates to make room for next time step
        y3,y2,y1,y0 = y4,y3,y2,y1
        ss2,ss1,ss0,ssm1 = ss3,ss2,ss1,ss0
        s1,s0 = s2,s1
        dd1,dd0 = dd2,dd1
        d2,d1,d0 = d3,d2,d1
        e1,e0 = e2,e1
        ee1,ee0,eem1 = ee2,ee1,ee0
        df0 = df1

        if not  yt.atEnd():
            yt.advance()
        #
    #
    #refpath=ref.getPathname()
    #refpath.setPart(Pathname.E_PART,outint)
    rts = RegularTimeSeries(data.getName(),
			    data.getStartTime().toString(),
			    outint,vals)
 
    return rts
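
For reference, here is a minimal sketch of how spline might be called, together
with a plain-Python version of the minmod slope limiter the function assumes is
defined elsewhere in its module. Both the limiter implementation and the input
series name are illustrative assumptions, not the original code:

def minmod(a, b):
    # Standard minmod limiter: 0 when the arguments differ in sign,
    # otherwise the argument of smaller magnitude.
    if a * b <= 0:
        return 0.
    if abs(a) < abs(b):
        return a
    return b

# Hypothetical usage: map a daily-averaged series (daily_ndo, assumed to be a
# RegularTimeSeries with a 1DAY interval) onto a 15 minute series, shifting by
# half the output frequency per the docstring: offset = 0.5*96 = 48.
ndo15 = spline(daily_ndo, "15MIN", offset=48)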
Exemplo n.º 45
0
def createByteArrayZeros(howMany):
    return jarray.zeros(
        howMany, 'h')  # use short ('h') instead of byte, because Java bytes are signed
Exemplo n.º 46
0
def ECEst(stage, ndo, so, sb, beta, npow1, npow2, g0, zrms, c):
    """ Estimate 15min EC at the boundary.
        Inputs:
          stage   astronomical tide estimate. Only 15min data are acceptable
          ndo     ndo estimate -- e.g., from CALSIM
    """
    import interpolate
    from vista.set import Units
    if not isinstance(stage,RegularTimeSeries) or \
           not isinstance(ndo,RegularTimeSeries):
        raise "stage and ndo must be RegularTimeSeries"

    if ndo.getTimeInterval().toString() == "1DAY":
        ndo = interpolate.spline(ndo, "15MIN", 0.5) << 95
    elif ndo.getTimeInterval().toString() != "15MIN":
        raise "ndo must be a one day or 15 minute series"

    if not stage.getTimeInterval().toString() == "15MIN":
        raise "stage must be a 15 minute series"
    #
    if ndo.getTimeInterval().toString() != stage.getTimeInterval().toString():
        raise "stage and ndo must have the same time interval"
    #
    #if not len(c) ==9:
    #    raise "Wrong number (%s) of coefficients in the array c" % len(c)

    if (first_missing(ndo)) >= 0:
        raise "missing data not allowed in ndo. First missing data at index: %s" % first_missing(
            ndo)
    if (first_missing(stage)) >= 0:
        raise "missing data not allowed in stage. First missing data at index: %s" % first_missing(
            stage)

    newstart = ndo.getStartTime() - "21HOURS"
    newend = ndo.getEndTime() - "3HOURS"
    if (stage.getStartTime().getTimeInMinutes() - newstart.getTimeInMinutes() >
            0):
        print "Stage record starts %s and NDO starts %s" % (
            stage.getStartTime().toString(), ndo.getStartTime().toString())
        raise "stage record must begin at least 21 hours before ndo"
    if (newend.getTimeInMinutes() - stage.getEndTime().getTimeInMinutes() > 0):
        raise "stage record must end no more than 3 hours before end of ndo"
    ztw = timewindow(newstart.toString() + ' - ' + newend.toString())
    z = stage.createSlice(ztw)
    g = gCalc(ndo, beta, g0)
    # writedss("planning_ec_input.dss","/CALC/RSAC054/G//15MIN/CALC",g) # for debug
    zarr = z.getYArray()
    giter = g.getIterator()
    ec = zeros(g.size(), 'd')
    ec = map(lambda x: -901.0, ec)

    zrmsiter = zrms.getIterator()

    i = 0
    while (not giter.atEnd()):
        gval = giter.getElement().getY()
        zrmsval = zrmsiter.getElement().getY()
        ecfrac = gval * c[0] + 1.1 * gval**npow1 * (
            c[1] * zarr[i + 72] + c[2] * zarr[i + 60] + c[3] * zarr[i + 48] +
            c[4] * zarr[i + 36] + c[5] * zarr[i + 24] + c[6] * zarr[i + 12] +
            c[7] * zarr[i])
        ec[i] = max(200, exp(ecfrac) * (so - sb) + sb)
        zrmsiter.advance()
        giter.advance()
        i = i + 1
    # ec units need to be set to umhos/cm
    rts = RegularTimeSeries("/ECest//////",
                            g.getStartTime().toString(),
                            g.getTimeInterval().toString(), ec)
    rts.getAttributes().setYUnits(Units.UMHOS_CM)
    return [rts, gval]
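
A hypothetical call, purely for illustration -- every numeric argument and input
series below is a placeholder, not a calibrated value from the original model:

# stage15: 15MIN astronomical stage, ndo_daily: 1DAY net Delta outflow,
# zrms15: 15MIN RMS stage; all assumed RegularTimeSeries built elsewhere.
c = [1.0e-4, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.0]  # placeholder coefficients
ec15, last_g = ECEst(stage15, ndo_daily, so=32000., sb=200., beta=600.,
                     npow1=0.75, npow2=1., g0=None, zrms=zrms15, c=c)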
Exemplo n.º 47
0
def createBytesMessage(session, size):
    bytes = zeros(size, 'b')
    random.nextBytes(bytes)
    message = session.createBytesMessage()
    message.writeBytes(bytes)
    return message
Exemplo n.º 48
0
    def process(self, file):

        skCase = Case.getCurrentCase().getSleuthkitCase()

        artID_ns_mph = skCase.getArtifactType("TSK_ART_NS_MPH")
        artID_ns_mph_id = skCase.getArtifactTypeID("TSK_ART_NS_MPH")

        attID_ns_cd_mph_user = skCase.getAttributeType("TSK_ATT_MPH_USER")
        attID_ns_cd_mph_game = skCase.getAttributeType("TSK_ATT_MPH_GAME")
        # Not implemented.
        # attID_ns_cd_mph_ts = skCase.getAttributeType("TSK_ATT_MPH_TS")

        # Skip non-files
        if ((file.getType() == TskData.TSK_DB_FILES_TYPE_ENUM.UNALLOC_BLOCKS)
                or
            (file.getType() == TskData.TSK_DB_FILES_TYPE_ENUM.UNUSED_BLOCKS)
                or (file.isFile() is False)):
            return IngestModule.ProcessResult.OK

        if (file.getParentPath().upper()
                == "/SAVE/") and (file.getName().upper()
                                  == "0000000000000001"):

            self.log(Level.INFO, "Found MP user history save")
            self.filesFound += 1

            buf = zeros(file.getSize(), 'b')
            file.read(buf, 0, file.getSize())

            tmp_file_path = os.path.join(self.tmp_path, file.getName())

            with open(tmp_file_path, 'wb+') as tmp_file:
                tmp_file.write(buf)

            hac_cmd = [
                self.hac_path, "-t", "save", "--outdir", self.tmp_path,
                tmp_file_path
            ]
            subprocess.call(hac_cmd)

            mp_hist_file = os.path.join(self.tmp_path, "history.bin")

            users = []

            with open(mp_hist_file, "rb") as hist_file:
                while True:
                    chunk = binascii.hexlify(hist_file.read(256))
                    if not chunk:
                        break
                    user = {}
                    user["block_a"] = chunk[:48]
                    user["block_b"] = chunk[48:64]
                    user["block_c"] = chunk[64:80]
                    user["block_d"] = chunk[80:192]
                    user["block_e"] = chunk[192:224]
                    user["block_f"] = chunk[224:-1]
                    user["username"] = binascii.unhexlify(
                        user["block_d"]).split("\x00")[0]
                    user["game_id"] = "".join(
                        reversed([
                            user["block_b"][i:i + 2]
                            for i in range(0, len(user["block_b"]), 2)
                        ])).upper()
                    if user["game_id"] in self.gids:
                        user["game"] = self.gids[user["game_id"]]
                    users.append(user)

            # Don't add to blackboard if already exists - TODO improve when timestamp is implemented
            artifactList = file.getArtifacts(artID_ns_mph_id)
            seen_users = []
            for artifact in artifactList:
                seen_users.append(
                    artifact.getAttribute(
                        attID_ns_cd_mph_user).getValueString())
            for u in seen_users:
                self.log(
                    Level.INFO,
                    "Ingest MP User - Online multiplayer user found: %s" % u)

            for user in users:

                # Don't add to blackboard if already exists - TODO improve when timestamp is implemented
                if user["username"] in seen_users:
                    continue

                art = file.newArtifact(artID_ns_mph_id)

                art.addAttribute(
                    BlackboardAttribute(
                        BlackboardAttribute.ATTRIBUTE_TYPE.TSK_SET_NAME.
                        getTypeID(),
                        MpUserHistoryIngestModuleFactory.moduleName,
                        "Nintendo Switch - MP User History"))
                art.addAttribute(
                    BlackboardAttribute(
                        attID_ns_cd_mph_user,
                        MpUserHistoryIngestModuleFactory.moduleName,
                        user["username"]))
                if "game" in user:
                    art.addAttribute(
                        BlackboardAttribute(
                            attID_ns_cd_mph_game,
                            MpUserHistoryIngestModuleFactory.moduleName,
                            user["game"]))

                # Fire an event to notify the UI and others that there is a new artifact
                IngestServices.getInstance().fireModuleDataEvent(
                    ModuleDataEvent(
                        MpUserHistoryIngestModuleFactory.moduleName,
                        artID_ns_mph, None))

            return IngestModule.ProcessResult.OK
Exemplo n.º 49
0
        except NotEnoughDataPointsException, e:
            exception = e
        return model, modelFound, inliers, exception

    matrices = {}

    futures = {
        ti: exe.submit(Task(fit, RigidModel3D(), pointmatches))
        for ti, pointmatches in ti_pointmatches.iteritems()
    }
    for ti, f in futures.iteritems():
        model, modelFound, inliers, exception = f.get()
        if modelFound:
            print ti, "inliers:", len(inliers)
            # Write timepoint filepath and model affine matrix into csv file
            matrix = list(model.getMatrix(zeros(12, 'd')))
        else:
            print "Model not found for", ti
            if exception:
                print exception
            matrix = [
                1,
                0,
                0,
                0,  # identity
                0,
                1,
                0,
                0,
                0,
                0,
                1,
                0
            ]
Exemplo n.º 50
0
def createSecurityProviders(resourcesProperties, domainProperties):

    authenticationProviders = resourcesProperties.getProperty(
        'security.providers')

    if authenticationProviders is None or len(authenticationProviders) == 0:
        log.info('No security providers specified, skipping.')
    else:
        defaultRealm = cmo.getSecurityConfiguration().getDefaultRealm()

        initialAuthProviders = defaultRealm.getAuthenticationProviders()

        newAuthProvidersList = []

        authenticationProvidersList = authenticationProviders.split(",")
        for authProvider in authenticationProvidersList:

            name = resourcesProperties.getProperty('security.provider.' +
                                                   authProvider + '.name')
            type = resourcesProperties.getProperty('security.provider.' +
                                                   authProvider + '.type')
            controlFlag = resourcesProperties.getProperty(
                'security.provider.' + authProvider + '.controlFlag')
            userBaseDN = resourcesProperties.getProperty('security.provider.' +
                                                         authProvider +
                                                         '.userBaseDN')
            groupBaseDN = resourcesProperties.getProperty(
                'security.provider.' + authProvider + '.groupBaseDN')
            principal = resourcesProperties.getProperty('security.provider.' +
                                                        authProvider +
                                                        '.principal')
            host = resourcesProperties.getProperty('security.provider.' +
                                                   authProvider + '.host')
            credential = resourcesProperties.getProperty('security.provider.' +
                                                         authProvider +
                                                         '.credential')
            groupFromNameFilter = resourcesProperties.getProperty(
                'security.provider.' + authProvider + '.groupFromNameFilter')
            staticGroupDNs = resourcesProperties.getProperty(
                'security.provider.' + authProvider +
                '.staticGroupDNsfromMemberDNFilter')
            staticGroupObject = resourcesProperties.getProperty(
                'security.provider.' + authProvider +
                '.staticGroupObjectClass')
            staticMember = resourcesProperties.getProperty(
                'security.provider.' + authProvider +
                '.staticMemberDNAttribute')
            userFromNameFilter = resourcesProperties.getProperty(
                'security.provider.' + authProvider + '.userFromNameFilter')
            userNameAttribute = resourcesProperties.getProperty(
                'security.provider.' + authProvider + '.userNameAttribute')
            userObjectClass = resourcesProperties.getProperty(
                'security.provider.' + authProvider + '.userObjectClass')
            useTokenGroup = resourcesProperties.getProperty(
                'security.provider.' + authProvider +
                '.useTokenGroupsForGroupMembershipLookup')
            port = resourcesProperties.getProperty('security.provider.' +
                                                   authProvider + '.port')

            if name is not None:

                if type == ACTIVE_DIRECTORY_AUTHENTICATOR or type == NOVELL_AUTHENTICATOR:
                    if type == ACTIVE_DIRECTORY_AUTHENTICATOR:
                        auth = defaultRealm.createAuthenticationProvider(
                            name,
                            'weblogic.security.providers.authentication.ActiveDirectoryAuthenticator'
                        )
                        if useTokenGroup is not None and useTokenGroup.upper() == 'TRUE':
                            auth.setUseTokenGroupsForGroupMembershipLookup(
                                TRUE)
                    elif type == NOVELL_AUTHENTICATOR:
                        auth = defaultRealm.createAuthenticationProvider(
                            name,
                            'weblogic.security.providers.authentication.NovellAuthenticator'
                        )

                    if controlFlag is not None:
                        auth.setControlFlag(controlFlag)
                    if userBaseDN is not None:
                        auth.setUserBaseDN(userBaseDN)
                    if groupBaseDN is not None:
                        auth.setGroupBaseDN(groupBaseDN)
                    if principal is not None:
                        auth.setPrincipal(principal)
                    if host is not None:
                        auth.setHost(host)
                    if credential is not None:
                        auth.setCredential(credential)
                    if groupFromNameFilter is not None:
                        auth.setGroupFromNameFilter(groupFromNameFilter)
                    if staticGroupDNs is not None:
                        auth.setStaticGroupDNsfromMemberDNFilter(staticGroupDNs)
                    if staticGroupObject is not None:
                        auth.setStaticGroupObjectClass(staticGroupObject)
                    if staticMember is not None:
                        auth.setStaticMemberDNAttribute(staticMember)
                    if userFromNameFilter is not None:
                        auth.setUserFromNameFilter(userFromNameFilter)
                    if userNameAttribute is not None:
                        auth.setUserNameAttribute(userNameAttribute)
                    if userObjectClass is not None:
                        auth.setUserObjectClass(userObjectClass)
                    if port is not None:
                        auth.setPort(int(port))
                    newAuthProvidersList.append(auth)

        # Re-order the authentication providers so that the new ones are at the start

        initialAuthSize = len(initialAuthProviders)
        newAuthSize = len(newAuthProvidersList)
        authSize = initialAuthSize + newAuthSize

        authProviders = zeros(authSize, AuthenticationProviderMBean)

        i = 0
        for auth in newAuthProvidersList:
            authProviders[i] = auth
            i = i + 1

        for initialAuth in initialAuthProviders:
            authProviders[i] = initialAuth
            i = i + 1

        defaultRealm.setAuthenticationProviders(authProviders)
Exemplo n.º 51
0
def readAddr(addr):
    # Read 4 bytes at addr and assemble them into a big-endian 32-bit
    # pointer; the & 0xFF masks widen Java's signed bytes to unsigned.
    arr = jarray.zeros(4, "b")
    mem.getBytes(addr, arr)
    v = (((arr[0] & 0xFF) << 24) | ((arr[1] & 0xFF) << 16) |
         ((arr[2] & 0xFF) << 8) | (arr[3] & 0xFF))
    return intToAddr(v)
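
A possible use, assuming the helpers from this script (mem, intToAddr) and
Ghidra's script-level toAddr are in scope; the address is illustrative only:

# Follow a pointer stored at a (hypothetical) fixed location
target = readAddr(toAddr(0x00401000))
print "pointer target:", target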
Exemplo n.º 52
0
#Invoke a Java static method:
lang.System.out.println("Hello Jython from Java")

#Create a Java instance the same way as a Python object, without the Java new keyword
myStr = lang.String("Zot")

#A Java method can be invoked in two ways:
#1. bound, on the instance
print myStr.startsWith("Z")

#2. unbound, via the class
print lang.String.startsWith(myStr, "Z")
  
#jarrays
import jarray
x = jarray.zeros(200, "i")
print x

import java.lang.Math
y = java.lang.Math.sqrt(256)
print y

#=================GDA Track Number (Scan Number)=============================================
from gda.data import NumTracker

nt = NumTracker("tmp")

#get current scan number
nt.getCurrentFileNumber()

#set new scan number
Exemplo n.º 53
0
sigmaLarger = 30  # pixels: half the radius of an embryo
extremaType = DogDetection.ExtremaType.MAXIMA
minPeakValue = 10
normalizedMinPeakValue = False

# In the difference of Gaussians peak detection, the img acts as the interval
# within which to look for peaks. The processing is done on the infinite imgE.
dog = DogDetection(imgE, img, calibration, sigmaSmaller, sigmaLarger,
                   extremaType, minPeakValue, normalizedMinPeakValue)

peaks = dog.getPeaks()

# Create a PointRoi from the DoG peaks, for visualization
roi = PointRoi(0, 0)
# A temporary array of integers, one per dimension the image has
p = zeros(img.numDimensions(), 'i')
# Load every peak as a point in the PointRoi
for peak in peaks:
    # Read peak coordinates into an array of integers
    peak.localize(p)
    roi.addPoint(imp, p[0], p[1])

imp.setRoi(roi)

# Now, iterate each peak, defining a small interval centered at each peak,
# and measure the sum of total pixel intensity,
# and display the results in an ImageJ ResultTable.
table = ResultsTable()

for peak in peaks:
    # Read peak coordinates into an array of integers
Exemplo n.º 54
0
def noconv_(val, parm_ar, j):
    # Store val into parm_ar[j] unchanged by staging it in a one-element
    # PyObject array and copying with System.arraycopy.
    import jarray
    py = jarray.zeros(1, core.PyObject)
    py[0] = val
    lang.System.arraycopy(py, 0, parm_ar, j, 1)
Exemplo n.º 55
0
def run():
    """ Loads an image stack which contains both reference and target images for the registration
		Scales the images to have their largest side equal to longSide
		Registration is performed:
			- translation (brute force optimization)
			- rotation (brute force optimization)
			- sift registration
			- bunwarpj registration
		Calculation of the errors by different methods """

    # load the input stack as an ImagePlus
    imp = IJ.openImage(filePath.getAbsolutePath())
    stack = imp.getStack()
    sizeZ = imp.getStackSize()

    LAND_MARKS = 0

    # Copy the reference and target slice
    refId = 1
    ref = stack.getProcessor(refId).duplicate()

    if (Scale == 1):
        [ref, s] = scaleLongSide(ref, longSide)

    sizeZ = min(sizeZ, maxSlice)
    stackReg = ImageStack(ref.getWidth(), ref.getHeight())
    stackReg.addSlice(ref)
    # = stack.duplicate()
    for i in range(2, sizeZ + 1):
        targetId = i
        target = stack.getProcessor(targetId).duplicate()

        # Scale the slices: scale the reference slice using the longSide parameter, and use the same scale for the target slice.
        if (Scale == 1):
            target = scale(target, s)
            #ImagePlus('Ref',ref).show()
            #ImagePlus('Target',target).show()

        if (Reg == 1):

            if (translationReg == 1):
                target = translation(ref, target)

            if (rotationReg == 1):
                [rot, target] = rotationSingle(ref, target, rotationStep)

            if (siftReg == 1):
                [roiRef, roiTarget] = siftSingle(ref, target)
                impTarget = ImagePlus('Target', target)
                impTarget.setRoi(roiTarget)
                #impTarget.show()
                impRef = ImagePlus('Ref', ref)
                impRef.setRoi(roiRef)
                #impRef.show()
                LAND_MARKS = 1

            if (bunwarpjReg == 1):
                target = bunwarpjSingle(impRef, impTarget, LAND_MARKS,
                                        'direct_transfo_' + str(i) + '.txt',
                                        'inverse_transfo_' + str(i) + '.txt')
                impTarget = ImagePlus('unwarpj_target', target)
                #impTarget.show()
                fileName = 'target_id' + str(targetId) + '.tif'
                IJ.saveAs(impTarget, "Tiff",
                          os.path.join(saveDir.getAbsolutePath(), fileName))

            #stackReg.setProcessor(target.convertToShortProcessor(), i)
            stackReg.addSlice(target)

    if (calculateError == 1):
        eCorrelation = zeros(sizeZ, 'f')
        eMSE = zeros(sizeZ, 'f')
        eMSE_ROI = zeros(sizeZ, 'f')
        eRMSE = zeros(sizeZ, 'f')
        eNRMSE = zeros(sizeZ, 'f')
        eCVRMSE = zeros(sizeZ, 'f')
        for i in range(1, sizeZ + 1):
            ip = stackReg.getProcessor(i).duplicate()
            #ImagePlus('test',ip).show()
            eCorrelation[i - 1], eMSE[i - 1], eMSE_ROI[i - 1], eRMSE[
                i - 1], eNRMSE[i - 1], eCVRMSE[i - 1] = measureError(ref, ip)
        errorFileName = 'error.txt'
        errorFilePath = os.path.join(saveDir.getAbsolutePath(), errorFileName)
        writeCSV(
            errorFilePath,
            [eCorrelation, eMSE, eMSE_ROI, eRMSE, eNRMSE, eCVRMSE],
            ["Correlation", "MSE", "MSE_ROI", "RMSE", "N_RMSE", "CV_RMSE"])
Exemplo n.º 56
0
# Check for "magic" number in header
start = mem.getMinAddress()
magic = mem.getShort(start)
if magic != 0x601a:
    raise Exception("Not a TOS program!")

# Data from PRG header
len_text = mem.getInt(start.add(0x2))
len_data = mem.getInt(start.add(0x6))
len_bss = mem.getInt(start.add(0xa))
len_sym = mem.getInt(start.add(0xe))
has_relo = (mem.getShort(start.add(0x1a)) == 0)

# Keep symbol table for later use
if len_sym > 0:
    sym_table = jarray.zeros(len_sym, "b")
    mem.getBytes(start.add(0x1c + len_text + len_data), sym_table)
    sym_table = bytearray(sym_table)  # to native Python type

if has_relo:
    # Relocate program
    prg = start.add(0x1c)
    ptr = start.add(0x1c + len_text + len_data +
                    len_sym)  # start of relocation table
    rea = mem.getInt(ptr)  # first address to relocate
    ptr = ptr.add(4)
    if rea != 0:
        # print("Relocating %x (%08x => %08x)" % (rea, mem.getInt(prg.add(rea)), mem.getInt(prg.add(rea))+reloc_addr))
        mem.setInt(prg.add(rea), mem.getInt(prg.add(rea)) + reloc_addr)
        while True:
            offs = mem.getByte(ptr)
Exemplo n.º 57
0
    # 0 corresponds to the first mass image (e.g. mass 12.0)
    # 1 corresponds to the second mass image (e.g. mass 13.0)
    ratioProps1 = RatioProps(1, 0)
    mp1 = MimsPlus(ui, ratioProps1)
    imageArray.add(mp1)
    IJ.log("Opening ratio: " + mp1.getTitle())

    # Ratio images
    # 2 corresponds to the first mass image (e.g. mass 26.0)
    # 3 corresponds to the second mass image (e.g. mass 27.0)
    ratioProps2 = RatioProps(3, 2)
    mp2 = MimsPlus(ui, ratioProps2)
    imageArray.add(mp2)
    IJ.log("Opening ratio: " + mp2.getTitle())

    images = jarray.zeros(imageArray.size(), MimsPlus)
    images = imageArray.toArray(images)

    #////////////////////////////
    # Create and display table.
    #////////////////////////////
    table = MimsJTable(ui)
    table.setStats(stats)
    table.setRois(rois)
    table.setImages(images)
    table.setPlanes(planes)
    #append=false
    nPlanes = ui.getOpener().getNImages()
    if nPlanes > 1:
        table.createTable(False)
    else:
Exemplo n.º 58
0
    def process(self, dataSource, progressBar):

        # we don't know how much work there is yet
        progressBar.switchToIndeterminate()

        # Use FileManager to get .tbl files
        # Currently only checking for files in a parent folder whose name contains "Telemetry"
        # to get the standard %USERPROFILE%/AppData/Local/Microsoft/Office/16.0/Telemetry
        fileManager = Case.getCurrentCase().getServices().getFileManager()
        sln_tbl_files = fileManager.findFiles(dataSource, "sln.tbl")
        evt_tbl_files = fileManager.findFiles(dataSource, "evt.tbl")
        usr_tbl_files = fileManager.findFiles(dataSource, "user.tbl")

        # Build a dict correlating each AbstractFile object and its Autopsy object ID
        tbl_file_dict = {}
        if (len(sln_tbl_files) > 0 and len(evt_tbl_files) > 0
                and len(usr_tbl_files) > 0):
            for file in sln_tbl_files:
                tbl_file_dict[file.getId()] = file
            for file in evt_tbl_files:
                tbl_file_dict[file.getId()] = file
            for file in usr_tbl_files:
                tbl_file_dict[file.getId()] = file

        # Get total # of files for the progress bar
        numFiles = (len(sln_tbl_files) + len(evt_tbl_files) +
                    len(usr_tbl_files))
        self.log(Level.INFO,
                 "Found " + str(numFiles) + " Office telemetry files")
        progressBar.switchToDeterminate(numFiles)
        artifactCount = 0

        files_to_analyze = correlate_tbl_files(sln_tbl_files, evt_tbl_files,
                                               usr_tbl_files)

        for tbl_set in files_to_analyze:

            sln_object = tbl_file_dict[tbl_set[0]]
            evt_object = tbl_file_dict[tbl_set[1]]
            usr_object = tbl_file_dict[tbl_set[2]]

            # Read the contents of each file into a Java byte array
            sln_size = int(sln_object.getSize())
            sln_buffer = zeros(sln_size, 'b')
            sln_object.read(sln_buffer, 0, sln_size)

            evt_size = int(evt_object.getSize())
            evt_buffer = zeros(evt_size, 'b')
            evt_object.read(evt_buffer, 0, evt_size)

            usr_size = int(usr_object.getSize())
            usr_buffer = zeros(usr_size, 'b')
            usr_object.read(usr_buffer, 0, usr_size)

            # Ensure the .tbl files are valid Office telemetry files
            if validate_tbl_format(sln_buffer) != 'sln':
                continue
            if validate_tbl_format(evt_buffer) != 'evt':
                continue
            if validate_tbl_format(usr_buffer) != 'user':
                continue

            # If the tables have validated, parse them
            sln_table = slnTable(sln_buffer)
            sln_table.parse_entries()
            evt_table = evtTable(evt_buffer, evt_object.getId())
            evt_table.parse_entries()
            user_table = userTable(usr_buffer)
            user_table.parse_entries()

            # Set some local references for the user data that will be added to the output file
            user = user_table.entries[1]
            host = user_table.entries[3] + "." + user_table.entries[4]

            # docid offsets will be a dict formatted as:
            # docid : [[sln_table_offsets], [evt_table_offsets]]
            docid_offsets = build_entry_dict(sln_table, evt_table)
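            # Hypothetical illustration (made-up offsets): a docid that appears
            # once in sln.tbl and three times in evt.tbl might map as
            #   { docid: [[4], [7, 19, 33]] }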

            # Check if the user pressed cancel while we were busy
            if self.context.isJobCancelled():
                return IngestModule.ProcessResult.OK

            # Create a 2 dimensional list to hold the final entries before writing to file.
            # Each entry will be appended to this list as a sub-list.
            results = []

            for docid in docid_offsets:

                # Get all the sln table values for this document
                # Assume the SLN table does not contain duplicate entries for this.
                doc_path = (
                    sln_table.entries[docid_offsets[docid][0][0]][3]
                ) + "\\" + (sln_table.entries[docid_offsets[docid][0][0]][2])
                doc_id = sln_table.entries[docid_offsets[docid][0][0]][1]
                doc_type = sln_table.entries[docid_offsets[docid][0][0]][0]
                doc_title = sln_table.entries[docid_offsets[docid][0][0]][4]
                doc_author = sln_table.entries[docid_offsets[docid][0][0]][5]
                addin_name = sln_table.entries[docid_offsets[docid][0][0]][6]
                desc = sln_table.entries[docid_offsets[docid][0][0]][7]

                # Get the evt table values for this document. There can be multiple entries per docid.
                for entry in range(len(docid_offsets[docid][1])):
                    if entry:
                        timestamp = evt_table.entries[docid_offsets[docid][1][
                            entry]][5].strftime('%Y-%m-%d %H:%M:%S.%f')
                        entry_num = evt_table.entries[docid_offsets[docid][1]
                                                      [entry]][0]
                        event_id = evt_table.entries[docid_offsets[docid][1]
                                                     [entry]][2]
                        event_desc = evt_table.entries[docid_offsets[docid][1]
                                                       [entry]][3]
                        objId = evt_table.entries[docid_offsets[docid][1]
                                                  [entry]][6]

                        results.append([
                            timestamp, entry_num, event_id, event_desc, doc_id,
                            doc_title, doc_path, doc_type, doc_author,
                            addin_name, desc, user, host, objId
                        ])
                        artifactCount += 1

            for result in results:

                # Get the Autopsy file object for the evt.tbl on which to create the artifact
                sourcefile = tbl_file_dict[result[13]]

                # Make an artifact on the blackboard.
                artifact = sourcefile.newArtifact(
                    BlackboardArtifact.ARTIFACT_TYPE.TSK_RECENT_OBJECT)
                # Add Path attribute: MS Office document reported by telemetry
                artifact.addAttribute(
                    BlackboardAttribute(
                        BlackboardAttribute.ATTRIBUTE_TYPE.TSK_PATH.getTypeID(
                        ), MSOfficeTelemProcessFactory.moduleName, result[6]))
                # Add Datetime attribute: time of document open/closed reported by telemetry
                event_datetime = int(
                    (datetime.strptime(result[0], "%Y-%m-%d %H:%M:%S.%f") -
                     datetime(1970, 1, 1)).total_seconds())
                artifact.addAttribute(
                    BlackboardAttribute(
                        BlackboardAttribute.ATTRIBUTE_TYPE.TSK_DATETIME.
                        getTypeID(), MSOfficeTelemProcessFactory.moduleName,
                        event_datetime))
                # Add Description attribute based on event ID
                artifact.addAttribute(
                    BlackboardAttribute(
                        BlackboardAttribute.ATTRIBUTE_TYPE.TSK_COMMENT.
                        getTypeID(), MSOfficeTelemProcessFactory.moduleName,
                        result[3]))
                # Add Username attribute
                artifact.addAttribute(
                    BlackboardAttribute(
                        BlackboardAttribute.ATTRIBUTE_TYPE.TSK_USER_NAME.
                        getTypeID(), MSOfficeTelemProcessFactory.moduleName,
                        result[11]))
Exemplo n.º 59
0
# by using the 'transformed' float array.

# We compute the bounds by, for every corner, checking if the floor of each dimension
# of a corner coordinate is smaller than the previously found minimum value,
# and by checking if the ceil of each corner coordinate is larger than the
# previously found value, packing the new pair of minimum and maximum values
# into the list of pairs that is 'bounds'.

# Notice the min coordinates can have negative values, as the rotated image
# has pixels now somewhere to the left and up from the top-left 0,0,0 origin
# of coordinates. That's why we use Views.zeroMin, to ensure that downstream
# uses of the transformed image see it as fitting within bounds that start at 0,0,0.

bounds = repeat(
    (sys.maxint, 0)
)  # initial upper- and lower-bound values for min, max to compare against
transformed = zeros(img.numDimensions(), 'f')

for corner in product(*zip(repeat(0), Intervals.maxAsLongArray(img))):
    rotation.apply(corner, transformed)
    bounds = [(min(vmin, int(floor(v))), max(vmax, int(ceil(v))))
              for (vmin, vmax), v in zip(bounds, transformed)]

minC, maxC = map(list, zip(*bounds))  # transpose list of lists
imgRot2dFit = IL.wrap(Views.zeroMin(Views.interval(rotated, minC, maxC)),
                      imp.getTitle() + " - rot2dFit")
imgRot2dFit.show()

matrix = rotation.getRowPackedCopy()
pprint([list(matrix[i:i + 4]) for i in xrange(0, 12, 4)])
Exemplo n.º 60
0
def generateNonce(keyLength):
    bytes = jarray.zeros(keyLength, "b")
    secureRandom = SecureRandom()
    secureRandom.nextBytes(bytes)
    return BaseEncoding.base64().omitPadding().encode(bytes)