Example 1
def sTOsAffine(infile, outfile, locale, globale):
    global LL
    global GG
    layer_name = None  # never set below (the parsing branch is commented out)
    i = 1
    while i < len(sys.argv):
        arg = sys.argv[i]
        if infile is None:
            infile = arg
        elif outfile is None:
            outfile = arg
        elif locale is None:
            locale = arg
        elif globale is None:
            globale = arg
        # elif layer_name is None:
        #     layer_name = arg
        else:
            Usage()
        i = i + 1
    if outfile is None:
        Usage()
    in_ds = ogr.Open(str(infile), update=0)
    if layer_name is not None:
        in_layer = in_ds.GetLayerByName(layer_name)
    else:
        in_layer = in_ds.GetLayer(0)
    in_defn = in_layer.GetLayerDefn()
    LL = io.read_array(str(locale))
    GG = io.read_array(str(globale))
    shp_driver = ogr.GetDriverByName('ESRI Shapefile')
    shp_driver.DeleteDataSource(str(outfile))
    shp_ds = shp_driver.CreateDataSource(str(outfile))
    shp_layer = shp_ds.CreateLayer(in_defn.GetName(),
                                   geom_type=in_defn.GetGeomType(),
                                   srs=in_layer.GetSpatialRef())
    in_field_count = in_defn.GetFieldCount()
    for fld_index in range(in_field_count):
        src_fd = in_defn.GetFieldDefn(fld_index)
        fd = ogr.FieldDefn(src_fd.GetName(), src_fd.GetType())
        fd.SetWidth(src_fd.GetWidth())
        fd.SetPrecision(src_fd.GetPrecision())
        shp_layer.CreateField(fd)
    in_feat = in_layer.GetNextFeature()
    while in_feat is not None:
        geom = in_feat.GetGeometryRef().Clone()
        geom = WalkAndTransformAff(geom)
        out_feat = ogr.Feature(feature_def=shp_layer.GetLayerDefn())
        out_feat.SetFrom(in_feat)
        out_feat.SetGeometryDirectly(geom)
        shp_layer.CreateFeature(out_feat)
        out_feat.Destroy()
        in_feat.Destroy()
        in_feat = in_layer.GetNextFeature()
    shp_ds.Destroy()
    in_ds.Destroy()
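Note: read_array and write_array belong to the legacy scipy.io interface that was removed from SciPy long ago. A minimal modern sketch of the two matrix loads above, assuming NumPy and plain whitespace-delimited ASCII files:

import numpy as np

LL = np.loadtxt(str(locale))    # local transform parameters
GG = np.loadtxt(str(globale))   # global transform parameters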
Example 2
def main():

    # First example: read the first and second columns from an ASCII file,
    # skipping the first header line.
    # Note the use of lines=(1, -1) to skip the first line and read to the end of the file.
    # Note the use of columns=(0,) to pick the first column; since it's a tuple it needs a trailing comma.
    x = read_array("test.txt", lines=(1, -1), columns=(0, ))
    y = read_array("test.txt", lines=(1, -1), columns=(1, ))

    # Second example: read the file into a single array
    z = read_array("test.txt", lines=(1, -1), columns=(0, 2))

    # Plot the data
    plot(x, y, 'r--', z[:, 0], z[:, 1])
    show()
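The lines/columns keywords of the old read_array map directly onto numpy.loadtxt arguments: lines=(1, -1) skips the first line and reads to the end of the file (skiprows=1), and columns picks columns by index (usecols). A sketch of the same three loads, assuming modern NumPy:

import numpy as np

x = np.loadtxt("test.txt", skiprows=1, usecols=(0,))
y = np.loadtxt("test.txt", skiprows=1, usecols=(1,))
z = np.loadtxt("test.txt", skiprows=1, usecols=(0, 2))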
Example 3
def test_float(self):
    a = rand(3, 4) * 30
    fname = tempfile.mktemp('.dat')
    io.write_array(fname, a)
    b = io.read_array(fname)
    assert_array_almost_equal(a, b, decimal=4)
    os.remove(fname)
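The same write/read round-trip on current NumPy would use savetxt/loadtxt; a sketch (tempfile.mkstemp replaces the deprecated mktemp):

import os, tempfile
import numpy as np

def test_float_roundtrip():
    a = np.random.rand(3, 4) * 30
    fd, fname = tempfile.mkstemp('.dat')
    os.close(fd)                      # savetxt reopens the file by name
    np.savetxt(fname, a)
    b = np.loadtxt(fname)
    np.testing.assert_array_almost_equal(a, b, decimal=4)
    os.remove(fname)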
Example 4
def make_linelist(files, cutoff, fluxlimit, linefile):
    """
	make_linelist(files,cutoff,fluxlimit,linefile)

	Cleans/combines linelists.

	Inputs:
	  files     - list containing the names of the arc files
	  cutoff    - maximum wavelength
	  fluxlimit - minimum flux
	  linefile  - name of output file

	Outputs:
	  outputs the data for 'good' arc lines to a single file.
	"""
    outfile = open(linefile, 'w')
    if fluxlimit is None:
        fluxlimit = 0.
    for name in files:
        f = open(name)
        lines = sio.read_array(f)
        f.close()
        for i, j in lines:
            if i > cutoff or j < fluxlimit:
                continue
            else:
                outfile.write("%8.3f    %8.3f\n" % (i, j))
    outfile.close()
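A hypothetical call (the hg.dat/cd.dat lamp-list names and the 5650 Å / 150-flux values appear in Example 29), merging two lamp lists into one cleaned file:

make_linelist(['hg.dat', 'cd.dat'], cutoff=5650., fluxlimit=150.,
              linefile='mask_lines.dat')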
Example 5
def make_arc(filename, dispersion, wave):
    """
	make_arc(filename,dispersion,wave)

	Create models of the arclamps used.

	Inputs:
	  filename   - name of file containing arc wavelengths and amplitudes
	  dispersion - width of arcs
	  wave       - wavelength scale on which to evaluate arcs

	Outputs:
	  resolution-matched arc spectrum and 3x broadened arc spectrum
	"""
    f = open(filename)
    lines = sio.read_array(f)
    f.close()

    n = lines.shape[0]
    fitmodel = scipy.zeros(3 * n + 1)
    fitmodel2 = scipy.zeros(3 * n + 1)
    index = 1
    for i in range(n):
        fitmodel[index] = lines[i, 1]
        fitmodel[index + 1] = lines[i, 0]
        fitmodel[index + 2] = dispersion
        fitmodel2[index] = 1.
        fitmodel2[index + 1] = lines[i, 0]
        fitmodel2[index + 2] = dispersion * 3.
        index += 3
    return sf.ngauss(wave, fitmodel), sf.ngauss(wave, fitmodel2)
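sf.ngauss is project code, so the following is only a sketch of what it plausibly evaluates, given the packing above (p[0] a constant offset, then one (amplitude, center, width) triplet per arc line):

import numpy as np

def ngauss_sketch(x, p):
    # p[0] is a constant offset; then (amp, center, sigma) triplets
    y = np.full(x.shape, float(p[0]))
    for k in range(1, len(p) - 2, 3):
        amp, mu, sigma = p[k], p[k + 1], p[k + 2]
        y += amp * np.exp(-0.5 * ((x - mu) / sigma) ** 2)
    return y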
Example 6
def test_complex(self):
    a = rand(13, 4) + 1j * rand(13, 4)
    fname = tempfile.mktemp('.dat')
    io.write_array(fname, a)
    b = io.read_array(fname, atype=N.Complex)
    assert_array_almost_equal(a, b, decimal=4)
    os.remove(fname)
Example 7
def test_integer(self):
    from scipy import stats
    a = stats.randint.rvs(1, 20, size=(3, 4))
    fname = tempfile.mktemp('.dat')
    io.write_array(fname, a)
    b = io.read_array(fname, atype=a.dtype.char)
    assert_array_equal(a, b)
    os.remove(fname)
Example 8
def getlines(linefile):
	from scipy import io as sio
	file = open(linefile)

	lines = sio.read_array(file)
	file.close()
	lines = lines[:,0]
	lines.sort()

	return lines
Example 9
def aband(inwave,airmass=1.,scale=0.85):
	path = spectra.__path__[0]
	file = path+"/data/aband.dat"

	aband = sio.read_array(file)

	wave = aband[:,0]
	data = aband[:,1].astype(scipy.float32)

	return get_correction(inwave,airmass,scale,wave,data)
Example 10
def response(wave, file1, file2):
    """ Not used. """
    from scipy import interpolate
    f = open(file1)
    resp1 = sio.read_array(f)
    f.close()
    x = resp1[:, 0]
    z = resp1[:, 1]
    spline = interpolate.splrep(x, z, s=0)
    resp1 = interpolate.splev(wave, spline)

    f = open(file2)
    resp2 = sio.read_array(f)
    f.close()
    x = resp2[:, 0]
    z = resp2[:, 1]
    spline = interpolate.splrep(x, z, s=0)
    resp2 = interpolate.splev(wave, spline)

    return resp1 / resp2
Example 11
def main():
    """ Build/train/test RBF net
    """
    from scipy.io import read_array
    print "\nCreating RBF net"
    net = rbf(12, 2)
    print "\nLoading training and test sets...",
    X_trn = read_array('data/oil-trn.dat', columns=(0, (1, 12)), lines=(3, -1))
    Y_trn = read_array('data/oil-trn.dat', columns=(12, -1), lines=(3, -1))
    X_tst = read_array('data/oil-tst.dat', columns=(0, (1, 12)), lines=(3, -1))
    Y_tst = read_array('data/oil-tst.dat', columns=(12, -1), lines=(3, -1))
    print "done."
    #print "\nInitial SSE:\n"
    #print "\ttraining set: ",net.test_all(X_trn,Y_trn)
    #print "\ttesting set: ",net.test_all(X_tst,Y_tst),"\n"
    print "Training...",
    net.train(X_trn, Y_trn)
    print "done."
    print "\nFinal SSE:\n"
    print "\ttraining set: ", net.test_all(X_trn, Y_trn)
    print "\ttesting set: ", net.test_all(X_tst, Y_tst), "\n"
Example 12
def main():
    """ Build/train/test MLP 
    """
    from scipy.io import read_array, write_array
    print "\nCreating 2-2-1 MLP with logistic outputs"
    net = mlp(2,2,1,'logistic')
    print "\nLoading training and test sets...",
    trn_input = read_array('data/xor-trn.dat',lines=(3,-1),columns=(0,(1,2)))
    trn_targs = read_array('data/xor-trn.dat',lines=(3,-1),columns=(2,-1))
    trn_targs = trn_targs.reshape(N.size(trn_targs),1)
    tst_input = read_array('data/xor-tst.dat',lines=(3,-1),columns=(0,(1,2)))
    tst_targs = read_array('data/xor-tst.dat',lines=(3,-1),columns=(2,-1))
    tst_targs = tst_targs.reshape(N.size(tst_targs),1)
    print "done."
    print "\nInitial SSE:\n"
    print "\ttraining set: ",net.test_all(trn_input,trn_targs)
    print "\ttesting set: ",net.test_all(tst_input,tst_targs),"\n"
    net.wp = net.train(trn_input,trn_targs)[0]
    print "\nFinal SSE:\n"
    print "\ttraining set: ",net.test_all(trn_input,trn_targs)
    print "\ttesting set: ",net.test_all(tst_input,tst_targs),"\n"
Example 13
def main():
    """ Set up a 1-2-1 SRN to solve the temporal-XOR problem from Elman 1990.
    """
    from scipy.io import read_array, write_array
    print "\nCreating 1-2-1 SRN for 'temporal-XOR'"
    net = srn(1, 2, 1, 'logistic')
    print "\nLoading training and test sets...",
    trn_input = read_array('data/txor-trn.dat')
    trn_targs = N.hstack([trn_input[1:], trn_input[0]])
    trn_input = trn_input.reshape(N.size(trn_input), 1)
    trn_targs = trn_targs.reshape(N.size(trn_targs), 1)
    tst_input = read_array('data/txor-tst.dat')
    tst_targs = N.hstack([tst_input[1:], tst_input[0]])
    tst_input = tst_input.reshape(N.size(tst_input), 1)
    tst_targs = tst_targs.reshape(N.size(tst_targs), 1)
    print "done."
    print "\nInitial SSE:\n"
    print "\ttraining set: ", net.test_all(trn_input, trn_targs)
    print "\ttesting set: ", net.test_all(tst_input, tst_targs), "\n"
    net.wp = net.train(trn_input, trn_targs)[0]
    print "\nFinal SSE:\n"
    print "\ttraining set: ", net.test_all(trn_input, trn_targs)
    print "\ttesting set: ", net.test_all(tst_input, tst_targs), "\n"
Example 14
def analyze_data(ref_files,transp):
    """ Plots histograms of the data.
    """
    data = np.zeros((0, 3))
    data_ind = []
    for n in range(len(ref_files)):
        target_file = DATA_PATH + ref_files[n][:-4] + "REF.txt"
        tmp = read_array(target_file,',') + transp[n]
        data = np.r_[data, tmp]
        data_ind.append(tmp)
    
    pitch = data[:,1]
    pitch = pitch[ np.where(pitch > 10) ]
    unique = list(set(pitch))
    unique.sort()
    print "pitch classes:",unique
    print "nr of classes:",len(unique)+1
    
    xmin = unique[0]
    xmax = unique[-1]
    xbins = xmax-xmin
    
    # plot global statistics
    pylab.figure()
    pylab.subplot(211)
    pylab.hist(pitch,bins=xbins)
    pylab.xlim(xmin,xmax)
    pylab.title("global pitch classes histogram")
    pylab.subplot(212)
    pylab.hist(data[:,2],bins=13)
    pylab.xlim(-1,11)
    pylab.title("global chromas histogram")
    
    # plot individual pieces
    pylab.figure()
    nr = len(data_ind)
    for n in range(nr):
        pitch = data_ind[n][:,1]
        pitch = pitch[ np.where(pitch > 10) ]
        unique = list(set(pitch))
        unique.sort()
        print n,":",unique
#        xmin = unique[0]
#        xmax = unique[-1]
        # plot it
        pylab.subplot(nr,1,n+1)
        pylab.hist(pitch,bins=(xmax-xmin))
        pylab.xlim(xmin,xmax)
        pylab.title(ref_files[n])
Example 15
def gist_load(fn):
    import string

    f = open(fn)
    not_done = True
    while not_done:
        p = f.tell()
        s = f.readline()
        ss = string.split(s)
        if len(ss) == 3 and reduce(lambda b, c: b and c in string.digits,
                                   "".join(ss)):
            f.seek(p)
            arr = io.read_array(f, atype='b')
            not_done = False
    return arr
Example 16
    def load(self, fname):
        """
		Loading data from a csv or a pickle file of the dbase class
		"""
        fext = self.__ext(fname)
        f = open(fname, 'r')
        if fext == 'csv':
            self.varnm = self.__vardic(f.readline().split(','))
            self.data = read_array(f, separator=',', lines=(0, -1))
        elif fext == 'pickle':
            a = pickle.load(f)
            self.varnm = a.varnm
            self.data = a.data
        else:
            raise ValueError('This class only works on csv and pickle files')
        f.close()
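On a current stack the csv branch has a simple NumPy sketch (the pickle branch is unchanged; binary pickle files should also be opened in 'rb' mode):

import numpy as np

def load_csv(fname):
    with open(fname) as f:
        varnm = f.readline().strip().split(',')   # header row -> variable names
        data = np.loadtxt(f, delimiter=',')       # remaining rows -> float array
    return varnm, data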
Example 17
def transform_target_data(audiofile,analyze=0):
    """ Gets the target data (in MIDI pitch classes) out of the REF.txt files.
    (for my own new datasets)
    """
    target_file = DATA_PATH + audiofile[:-4] + "REF.txt"
    data = read_array(target_file,',')
    
    if analyze:
        pylab.subplot(211)
        pylab.plot(data[:,1])
        pylab.subplot(212)
        pylab.plot(data[:,2])
        
        pylab.figure()
        pylab.subplot(211)
        pylab.hist(data[:,1],bins=25)
        pylab.xlim(40,65)
        pylab.subplot(212)
        pylab.hist(data[:,2],bins=13)
        pylab.xlim(0,12)
    
    # get the unique element (nr. of different notes) of the piece
    midi_data = data[:,1]
    unique = list(set(midi_data))
    target = np.zeros((len(midi_data), len(unique)))
    target_chromas = np.zeros((len(midi_data), 13))
    for n in range( len(midi_data) ):
        ind = unique.index( midi_data[n] )
        target[n,ind] = 1
        target_chromas[n, int(data[n, 2]) + 1] = 1
    
    if analyze:
        print "pitch classes:",len(unique)
        pylab.show()
        exit(0)
    
    savefile = OUTPUT_DIR + audiofile[:-4] + "_STFT.dat"
    data = shelve.open(savefile)
    lenge = data["features"].shape[0]
    data["targets"] = target[:lenge]
    data["target_midi"] = midi_data[:lenge]
    data["target_chromas"] = target_chromas[:lenge]
    data["pitch_classes"] = unique
    data.close()
Example 18
def convert_to_midi(reffile,savefile):
    """ converts REF files from mirex05 to MIDI data
    """
    target_file = DATA_PATH + reffile
    data = read_array(target_file)
    
    # remove time axes and convert to midi
    midi = np.round( audiotools.freq_to_midi(data[:,1]) )
    
    # calc chromas
    chromas = np.ones(len(midi)) * (-1)
    for n in range(len(midi)):
        if midi[n]>0:
            chromas[n] = midi[n] % 12
    
    # write data to disk
    writedata = np.c_[data[:,0], midi, chromas]
    FILE = open(DATA_PATH + savefile,"w")
    write_array(FILE,writedata,',')
    FILE.close()
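audiotools.freq_to_midi is project code; the standard frequency-to-MIDI conversion it presumably implements is midi = 69 + 12*log2(f/440). A sketch that also leaves unvoiced (0 Hz) frames at 0, consistent with the midi[n] > 0 check above:

import numpy as np

def freq_to_midi_sketch(f):
    f = np.asarray(f, dtype=float)
    midi = np.zeros_like(f)
    voiced = f > 0
    midi[voiced] = 69.0 + 12.0 * np.log2(f[voiced] / 440.0)
    return midi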
Example 19
    def update_coefficients(self):
        """ Uses the samples file to extract parameters using linear multifit """
        import pygsl
        import pygsl._numobj as Numeric
        import pygsl.rng
        import pygsl.multifit

        from scipy.io import read_array
        from numpy import array

        data = read_array(file(self['sample_file']))
        dep_var = data[:,-3:]        # Last 3 columns are supposed to be Renergy, Wenergy, Leakage
        indep_var = data[:,:5]       # First 3 columns are of interest, 2 are just used as fillers

        indep_var[:,-2] = indep_var[:,0] * indep_var[:,1]  # We also consider words * bits as a parameter
        indep_var[:,-1:] = 1                               # Coefficient used as a constant

        work = pygsl.multifit.linear_workspace(len(indep_var), 5)
        self.c_read, cov, chisq = pygsl.multifit.linear(indep_var, dep_var[:,0], work)
        self.c_write, cov, chisq = pygsl.multifit.linear(indep_var, dep_var[:,1], work)
        self.c_leakage, cov, chisq = pygsl.multifit.linear(indep_var, dep_var[:,2], work)
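pygsl.multifit.linear solves an ordinary least-squares problem; a plain-NumPy sketch of the same fit under the same data layout ('samples.dat' stands in for self['sample_file']):

import numpy as np

data = np.loadtxt('samples.dat')            # hypothetical sample file
indep = data[:, :5].copy()
indep[:, -2] = indep[:, 0] * indep[:, 1]    # words * bits regressor
indep[:, -1] = 1.0                          # constant term
c_read = np.linalg.lstsq(indep, data[:, -3], rcond=None)[0]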
Example 20
def transform_target_data_old(audiofile,analyze=0):
    """ Calculates the target data (in MIDI pitch classes) out of the REF.txt files.
    (for the old datasets of Ellis and MIREX)
    """
    target_file = DATA_PATH + audiofile[:-4] + "REF.txt"
    data = read_array(target_file)
    
    # remove time axes and convert to midi
    data = data[:,1]
    midi_data = audiotools.freq_to_midi(data)
    midi_data = np.round(midi_data)
    
    if analyze:
        pylab.plot(midi_data)
    
    # get the unique element (nr. of different notes) of the piece
    unique = list(set(midi_data))
    target = np.zeros((len(midi_data), len(unique)))
    for n in range( len(midi_data) ):
        ind = unique.index( midi_data[n] )
        target[n,ind] = 1
    
    if analyze:
        print "classes:",len(unique)
        pylab.figure()
        pylab.psd(target.flatten())
        pylab.show()
        exit(0)
    
    savefile = OUTPUT_DIR + audiofile[:-4] + "_STFT.dat"
    data = shelve.open(savefile)
    lenge = data["features"].shape[0]
    data["targets"] = target[:lenge]
    data["target_midi"] = midi_data[:lenge]
    data["pitch_classes"] = unique
    data.close()
Example 21
#outname=olmodel.RunMaxima('new_sym.tex')
#outname=olmodel.RunMaxima('new_sym_test.tex')
#RunLatex(outname)
runf=1
if runf:
    olmodel.CreateFortranandPythonModules('new_sym')
    olmodel.CreateFortranandPythonModules('old_sym',newsym=False)
#    myfinalfnames=olmodel.PrepareFortranFiles(bodenames)
#    mypynames=olmodel.PreparePythonFiles(bodenames)
#    fmodnames=olmodel.CallF2py(myfinalfnames)



#olmodel.SymBodeMaximaAll(texname='old_sym.tex',basebodename='old_sym_bode')

dfcomp=read_array(os.path.join(fitdir,'dumbfit_comp_w1.txt'))
dfdb=read_array(os.path.join(fitdir,'dumbfit_db.txt'))

nsp='new_sym'
osp='old_sym'
nl='New Sym'
ol='Old Sym'
myreport.NewBodeDataSetFromFortranModels(nsp, 'new_sym1', nl ,bodenums=[0,1],ucv=dfcomp,expkey='fullsweep')
myreport.NewBodeDataSetFromFortranModels(osp, 'old_sym1', ol ,bodenums=[0,1],ucv=dfcomp,expkey='fullsweep')

myreport.NewBodeDataSetFromFortranModels(nsp, 'new_sym2', nl ,bodenums=[0,1],ucv=dfdb,expkey='fullsweep')
myreport.NewBodeDataSetFromFortranModels(osp, 'old_sym2', ol ,bodenums=[0,1],ucv=dfdb,expkey='fullsweep')

key1='test'
myreport.GenListofBodeFigs(key1,['fullsweep','new_sym1','old_sym1','new_sym2','old_sym2'],[3,3],[212,212],maglims=[[-40,5],[-25,25]],phaselims=[[-400,200],[-220,-50]],magticks=[arange(-40,5,10),arange(-20,25,10)],phaseticks=[arange(-360,220,90),arange(-220,-55,40)])
Example 22
from numpy import *
from scipy.io import read_array
import qld, quapro
import pylab
from mystic.svc import *
import os.path, time

def myshow():
    import Image, tempfile
    name = tempfile.mktemp()
    pylab.savefig(name,dpi=150)
    im = Image.open('%s.png' % name)
    im.show()

c1 = read_array(os.path.join('DATA','g1x.pts'))
c2 = read_array(os.path.join('DATA','g2.pts'))
c1[:,0] += 5 # to make the two sets overlap a little

# interchange ?
#c1, c2 = c2, c1

# the Kernel Matrix (with the linear kernel)
# Q = multiply.outer(X,X) <--- unfortunately only works when X is a list of scalars...
# In Mathematica, this would be implemented simply via Outer[K, X, X, 1]

XX = concatenate([c1,-c2])
nx = XX.shape[0]

# quadratic and linear terms of QP
Q = KernelMatrix(XX)
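With the linear kernel, the kernel matrix is just the Gram matrix of the rows; an equivalent sketch in plain NumPy:

import numpy as np

def linear_kernel_matrix(X):
    X = np.asarray(X)
    return X @ X.T    # K[i, j] = <x_i, x_j>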
Example 23
                  "--max",
                  action="store_true",
                  dest="max",
                  help="show max only?")
options, args = parser.parse_args()

if len(args) != 3:
    parser.error("incorrect number of arguments")
print options
jaclistfile = args[0]
outactsfile = args[1]
outfile = args[2]
print "jacobian list file", jaclistfile
print "output activations file", outactsfile
print "output file", outfile
a = io.read_array(file(outactsfile), lines=[2, -1])
labels = file(outactsfile).readline().split()[1:]
T = shape(a)[1]
maxindices = []
for t in range(T):
    c = list(a[:, t])
    m = c.index(max(c))
    if not options.blank and labels[m] == 'blank':
        c = c[:-1]
        m = c.index(max(c))
    maxindices.append(m)
print maxindices
print len(labels)
print labels
if options.bylab:
    v = zeros((len(labels), T), 'f')
Example 24
from numpy import *
from scipy.io import read_array
import qld, quapro
import matplotlib.pyplot as plt
from mystic.svc import *
import os.path, time


def myshow():
    import Image, tempfile
    name = tempfile.mktemp()
    plt.savefig(name, dpi=150)
    im = Image.open('%s.png' % name)
    im.show()


c1 = read_array(os.path.join('DATA', 'g1x.pts'))
c2 = read_array(os.path.join('DATA', 'g2.pts'))
c1[:, 0] += 5  # to make the two sets overlap a little

# interchange ?
#c1, c2 = c2, c1

# the Kernel Matrix (with the linear kernel)
# Q = multiply.outer(X,X) <--- unfortunately only works when X is a list of scalars...
# In Mathematica, this would be implemented simply via Outer[K, X, X, 1]

XX = concatenate([c1, -c2])
nx = XX.shape[0]

# quadratic and linear terms of QP
Q = KernelMatrix(XX)
Example 25
"""Plot a histogram."""


from pyxgraph import *
from scipy import io

def plot_histogram(epsoutfile, x):
    
    g = pyxgraph(width=6, height=6,
                 key=None,
                 xlabel="x", 
                 xlimits=(min(x),max(x))
                )
    g.pyxplothist(x, Nbins = 100, bin_range=(min(x),max(x)), bars=0)
    
    g.pyxsave(epsoutfile)


if __name__=="__main__":    
    x = io.read_array("data_histogram.dat")
    plot_histogram("histogram.eps", x)
Example 26
def lris_pipeline(prefix,
                  dir,
                  science,
                  arc,
                  flats,
                  out_prefix,
                  useflat=0,
                  usearc=0,
                  cache=0,
                  offsets=None):
    print "Processing mask", out_prefix

    scinums = science.split(",")
    flatnums = flats.split(",")

    for i in range(len(flatnums)):
        flatnums[i] = dir + prefix + flatnums[i] + ".fits"
    scinames = []
    for i in range(len(scinums)):
        name = dir + prefix + scinums[i] + ".fits"
        scinames.append(name)
    arcname = dir + prefix + arc + ".fits"

    nsci = len(scinums)

    print "Preparing flatfields"
    if useflat == 1:
        yforw, yback, slits, starboxes, flatnorm = flatload(out_prefix)
    else:
        yforw, yback, slits, starboxes, flatnorm = flatpipe(
            flatnums, out_prefix)
    axis1 = flatnorm.shape[0]
    axis2 = flatnorm.shape[1]

    print "Preparing arcs for line identification"
    if usearc == 1:
        arcname = out_prefix + "_arc.fits"
        arc_tmp = pyfits.open(arcname)
        arc_ycor = arc_tmp[0].data.astype(scipy.float32)
        lamps = arc_tmp[0].header['LAMPS']
        del arc_tmp
    else:
        arcname = dir + prefix + arc + ".fits"
        arc_tmp = pyfits.open(arcname)
        arcdata = arc_tmp[0].data.copy()
        lamps = arc_tmp[0].header['LAMPS']
        del arc_tmp
        arcdata = biastrim(arcdata)
        arc_ycor = spectools.resampley(arcdata, yforw).astype(scipy.float32)
        arcname = out_prefix + "_arc.fits"
        arc_hdu = pyfits.PrimaryHDU(arc_ycor)
        arc_hdu.header.update('LAMPS', lamps)
        arc_hdu.writeto(arcname)
        del arc_hdu

    wide_stars = []
    for i, j in starboxes:
        mod = scipy.where((yback < j) & (yback > i))
        a = mod[0].min() - 3
        b = mod[0].max() + 3
        if a < 0:
            a = 0
        if b > axis1:
            b = axis1
        wide_stars.append([a, b])

    print "Bias trimming and flatfielding science data"
    scidata = scipy.zeros((nsci, axis1, axis2), 'f4')
    center = scipy.zeros((nsci, len(starboxes)), 'f4')
    flux = scipy.zeros((nsci), 'f4')
    airmass = []
    for i in range(nsci):
        filename = scinames[i]
        scitmp = pyfits.open(filename)

        scidatatmp = scitmp[0].data.copy()
        scidatatmp = biastrim(scidatatmp).astype(scipy.float32)

        #Remove screwed columns (this should already be done though...)
        bad = scipy.where(scidatatmp > 56000.)
        nbad = bad[0].size
        for k in range(nbad):
            y = bad[0][k]
            x = bad[1][k]
            scidatatmp[y, x] = (scidatatmp[y, x - 1] + scidatatmp[y, x + 1]) / 2.
        # Don't flatfield blueside data
        scidatatmp = scidatatmp / flatnorm
        scidata[i, :, :] = scidatatmp.copy()

        try:
            mswave = scitmp[0].header['MSWAVE']
        except:
            mswave = 6500.
        if len(slits) == 1:
            try:
                mswave = scitmp[0].header['WAVELEN']
            except:
                pass
        disperser = scitmp[0].header['GRANAME']
        airmass.append(scitmp[0].header['AIRMASS'])

        # Old data mightn't have a dichroic keyword!
        try:
            dichroic = scitmp[0].header['DICHNAME']
        except:
            dichroic = None

        flux[i] = scipy.sort(scipy.ravel(scidatatmp))[scidatatmp.size / 4]
        for j in range(len(starboxes)):
            a, b = starboxes[j]
            m, n = wide_stars[j]
            a -= 4
            b += 4
            m -= 2
            n += 2
            center[i, j] = offset.findoffset(scidatatmp[m:n], yforw[a:b], m)

        del scitmp
        del scidatatmp
    del flatnorm

    if offsets is not None:
        center = scipy.asarray(offsets)
    else:
        center = stats.stats.nanmean(center, axis=1)

    center[scipy.isnan(center)] = 0.
    print "Normalizing Fluxes"
    cmax = center.max()
    fmax = flux.max()
    for i in range(center.size):
        center[i] -= cmax
        ratio = fmax / flux[i]
        scidata[i] *= ratio
    cmax = ceil(fabs(center.min()))

    if disperser == "150/7500":
        scale = 4.8
    elif disperser == "300/5000":
        scale = 2.45
    elif disperser == "400/8500":
        scale = 1.85
    elif disperser == "600/5000":
        scale = 1.25
    elif disperser == "600/7500":
        scale = 1.25
    elif disperser == "600/10000":
        scale = 1.25
    elif disperser == "831/8200":
        scale = 0.915
    elif disperser == "900/5500":
        scale = 0.85
    elif disperser == "1200/7500":
        scale = 0.64

    if dichroic == 'mirror':
        redcutoff = 4000.
        dich_file = ''
    elif dichroic == '460':
        redcutoff = 4600.
        dich_file = '460'
    elif dichroic == '500':
        redcutoff = 5000.
        dich_file = '500'
    elif dichroic == '560':
        redcutoff = 5500.
        dich_file = '560'
    elif dichroic == '680':
        redcutoff = 6700.
        dich_file = '680'
    else:
        redcutoff = 3500.
        dich_file = ''

    nsize = 0
    csize = 0
    wide_slits = []
    linewidth = []
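    # NOTE: the hard-coded slit list below overrides the slits returned by
    # flatload/flatpipe above; it looks like a debugging leftover.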
    slits = [[1150, 1250]]
    for i, j in slits:
        csize += int(j - i + cmax) + 5
        nsize += j - i + 5
        mod = scipy.where((yback > i) & (yback < j))
        a = mod[0].min() - 4
        b = mod[0].max() + 4
        if a < 0:
            a = 0
        if b > axis1:
            b = axis1
        wide_slits.append([a, b])
        if len(wide_slits) % 7 == 0 or len(slits) == 1:
            linewidth.append(
                measure_width.measure(arc_ycor[(i + j) / 2, :], 15))
    csize -= 5
    nsize -= 5

    linewidth = scipy.median(scipy.asarray(linewidth))

    print "Loading wavelength model"
    lris_path = lris_red.__path__[0]

    filename = lris_path + "/uves_sky.model"
    infile = open(filename, "r")
    wavecalmodel = load(infile)
    infile.close()
    wave = scipy.arange(3400., 10400., 0.1)

    if dich_file != '':
        filename = lris_path + "/dichroics/dichroic_" + dich_file + "_t.dat"
        infile = open(filename, "r")
        input = sio.read_array(infile)
        infile.close()
        spline = interpolate.splrep(input[:, 0], input[:, 1])
        dich = interpolate.splev(wave, spline)
        dich[wave < 4500.] = 1.
        dich[wave > 8800.] = 1.
        del input, spline
    else:
        dich = scipy.ones(wave.size)
    wavemodel = interpolate.splev(wave, wavecalmodel)
    finemodel = ndimage.gaussian_filter1d(wavemodel, linewidth * scale / 0.1)
    wavemodel = ndimage.gaussian_filter1d(finemodel, 5. / 0.1)
    finemodel *= dich
    finemodel = interpolate.splrep(wave, finemodel)
    wavemodel *= dich
    widemodel = interpolate.splrep(wave, wavemodel)
    goodmodel = finemodel
    del dich, wave, wavemodel

    extractwidth = 10

    print "Creating output arrays"
    outlength = int(axis2 * 1.6)
    out = scipy.zeros((nsci, nsize, outlength), scipy.float32) * scipy.nan
    out2 = scipy.zeros((2, csize, outlength), scipy.float32) * scipy.nan

    if cache:
        print "Caching..."
        strtfile = out_prefix + "_TMPSTRT.fits"
        bgfile = out_prefix + "_TMPBSUB.fits"
        try:
            os.remove(strtfile)
        except:
            pass

        outfile = pyfits.PrimaryHDU(out)
        outfile.header.update('CTYPE1', 'LINEAR')
        outfile.header.update('CRPIX1', 1)
        outfile.header.update('CRVAL1', mswave - (0.5 * out2.shape[2]) * scale)
        outfile.header.update('CD1_1', scale)
        outfile.header.update('CTYPE2', 'LINEAR')
        outfile.header.update('CRPIX2', 1)
        outfile.header.update('CRVAL2', 1)
        outfile.header.update('CD2_2', 1)
        if nsci > 1:
            outfile.header.update('CTYPE3', 'LINEAR')
            outfile.header.update('CRPIX3', 1)
            outfile.header.update('CRVAL3', 1)
            outfile.header.update('CD3_3', 1)
        outfile.writeto(strtfile)
        del outfile, out

        try:
            os.remove(bgfile)
        except:
            pass

        outfile = pyfits.PrimaryHDU(out2)
        outfile.header.update('CTYPE1', 'LINEAR')
        outfile.header.update('CRPIX1', 1)
        outfile.header.update('CRVAL1', mswave - (0.5 * out2.shape[2]) * scale)
        outfile.header.update('CD1_1', scale)
        outfile.header.update('CTYPE2', 'LINEAR')
        outfile.header.update('CRPIX2', 1)
        outfile.header.update('CRVAL2', 1)
        outfile.header.update('CD2_2', 1)
        outfile.header.update('CTYPE3', 'LINEAR')
        outfile.header.update('CRPIX3', 1)
        outfile.header.update('CRVAL3', 1)
        outfile.header.update('CD3_3', 1)
        outfile.writeto(bgfile)
        del outfile, out2

    posc = 0
    posn = 0
    count = 1
    for k in range(len(slits)):
        i, j = slits[k]
        a, b = wide_slits[k]
        ##
        if count < 1:
            count += 1
            continue
        ##
        print "Working on slit %d (%d to %d)" % (count, i, j)
        sky2x, sky2y, ccd2wave = wavematch(a, scidata[:, a:b], arc_ycor[i:j],
                                           yforw[i:j], widemodel, finemodel,
                                           goodmodel, scale, mswave, redcutoff)

        strt, bgsub, varimg = doskysub(i, j - i, outlength, scidata[:, a:b],
                                       yback[a:b], sky2x, sky2y, ccd2wave,
                                       scale, mswave, center, redcutoff,
                                       airmass)

        h = strt.shape[1]
        if cache:
            file = pyfits.open(strtfile, mode="update")
            out = file[0].data
        out[:, posn:posn + h] = strt.copy()
        if cache:
            file.close()
            del file, out
        posn += h + 5

        ##
        #		lris_red.skysub.RESAMPLE = 1
        #		count += 1
        #		continue
        ##

        h = bgsub.shape[0]
        if cache:
            file = pyfits.open(bgfile, mode="update")
            out2 = file[0].data
        out2[0, posc:posc + h] = bgsub.copy()
        out2[1, posc:posc + h] = varimg.copy()
        if cache:
            file.close()
            del file, out2
        posc += h + 5
        ##
        #		count += 1
        #		continue
        ##
        tmp = scipy.where(scipy.isnan(bgsub), 0., bgsub)
        filter = tmp.sum(axis=0)
        mod = scipy.where(filter != 0)
        start = mod[0][0]
        end = mod[0][-1] + 1
        del tmp
        slit = bgsub[:, start:end]
        spectra = extract(slit, varimg[:, start:end], extractwidth)
        num = 1
        crval = mswave - (0.5 * bgsub.shape[1] - start) * scale
        for spec in spectra:
            for item in spec:
                if item.size == 4:
                    hdu = pyfits.PrimaryHDU()
                    hdu.header.update('CENTER', item[2])
                    hdu.header.update('WIDTH', item[3])
                    hdulist = pyfits.HDUList([hdu])
                else:
                    thdu = pyfits.ImageHDU(item)
                    thdu.header.update('CRVAL1', crval)
                    thdu.header.update('CD1_1', scale)
                    thdu.header.update('CRPIX1', 1)
                    thdu.header.update('CRVAL2', 1)
                    thdu.header.update('CD2_2', 1)
                    thdu.header.update('CRPIX2', 1)
                    thdu.header.update('CTYPE1', 'LINEAR')
                    hdulist.append(thdu)
            outname = out_prefix + "_spec_%02d_%02d.fits" % (count, num)
            hdulist.writeto(outname)
            num += 1

        count += 1

##
#	file = pyfits.open(bgfile)
#	file.writeto(out_prefix+"_save.fits")
#	return
##

    if cache:
        file = pyfits.open(bgfile)
        out2 = file[0].data.copy()
        del file
    tmp = out2[0].copy()
    tmp = scipy.where(scipy.isnan(tmp), 0, 1)
    mod = scipy.where(tmp.sum(axis=0) != 0)
    start = mod[0][0]
    end = mod[0][-1] + 1
    del tmp

    outname = out_prefix + "_bgsub.fits"
    outfile = pyfits.PrimaryHDU(out2[0, :, start:end])
    outfile.header.update('CTYPE1', 'LINEAR')
    outfile.header.update('CRPIX1', 1)
    outfile.header.update('CRVAL1',
                          mswave - (0.5 * out2.shape[2] - start) * scale)
    outfile.header.update('CD1_1', scale)
    outfile.header.update('CRPIX2', 1)
    outfile.header.update('CRVAL2', 1)
    outfile.header.update('CD2_2', 1)
    outfile.writeto(outname)
    hdr = outfile.header.copy()

    outname = out_prefix + "_var.fits"
    outfile = pyfits.PrimaryHDU(out2[1, :, start:end])
    outfile.header = hdr
    outfile.writeto(outname)
    del out2, hdr

    if cache:
        file = pyfits.open(strtfile)
        out = file[0].data.copy()
        del file
    outname = out_prefix + "_straight.fits"
    outfile = pyfits.PrimaryHDU(out[:, :, start:end])
    outfile.header.update('CTYPE1', 'LINEAR')
    outfile.header.update('CRPIX1', 1)
    outfile.header.update('CRVAL1',
                          mswave - (0.5 * out.shape[2] - start) * scale)
    outfile.header.update('CD1_1', scale)
    outfile.header.update('CRPIX2', 1)
    outfile.header.update('CRVAL2', 1)
    outfile.header.update('CD2_2', 1)
    if nsci > 1:
        outfile.header.update('CRPIX3', 1)
        outfile.header.update('CRVAL3', 1)
        outfile.header.update('CD3_3', 1)
    outfile.writeto(outname)

    del out, outfile
Example 27
from __future__ import division
from past.utils import old_div
from scipy import *
import scipy.io as io

from SloppyCell.ReactionNetworks import *

alb_dat = io.read_array('Brodersen_1987/Albumin.ens.dat')
alb_dat = transpose(reshape(alb_dat, (6, 30)))
ua, va = Ensembles.PCA_eig_log_params(alb_dat)

alb_chi2_hess, temp, temp = Utility.load('Brodersen_1987/hess_dict.albumin.bp')
ua2, va2 = Utility.eig(alb_chi2_hess)

heme_dat = io.read_array('Brodersen_1987/Hemoglobin.ens.dat')
heme_dat = transpose(reshape(heme_dat, (4, 30)))
uh, vh = Ensembles.PCA_eig_log_params(heme_dat)

heme_chi2_hess, temp, temp = Utility.load(
    'Brodersen_1987/hess_dict.hemeglobin.bp')
uh2, vh2 = Utility.eig(heme_chi2_hess)

Plotting.figure(figsize=(6, 3))
Plotting.plot_eigval_spectrum(old_div(ua, ua[0]), widths=0.8)
Plotting.plot_eigval_spectrum(old_div(ua2, ua2[0]), widths=0.8, offset=1.0)
Plotting.plot_eigval_spectrum(old_div(uh, uh[0]), widths=0.8, offset=2.0)
Plotting.plot_eigval_spectrum(old_div(uh2, uh2[0]), widths=0.8, offset=3.0)
Plotting.gca().set_xlim(-0.1, 3.9)
Plotting.gca().set_ylim(0.5e-6, 2)
Plotting.ylabel(r'$\lambda/\lambda_0$', fontsize='large')
Plotting.xticks([0.4, 1.4, 2.4, 3.4],
Example 28
#!/usr/bin/python

# Author: Varun Hiremath <*****@*****.**>
# Created: Thu,  2 Apr 2009 05:21:30 -0400

import scipy, pylab
from scipy import io
import plot_settings as ps

error = io.read_array("error.op")
nrs = error[:,0]

# Publishable quality image
ps.set_mode("publish")
pylab.figure(1)
pylab.axes([0.125,0.2,0.95-0.125,0.95-0.3])

for i in range(6, len(error[0])):
    pylab.plot(nrs, error[:,i], ps.lps[i], label="$\Phi_{"+str(i-5)+"}$")

pylab.legend(numpoints=1)
pylab.xlabel("$n_{rs}$")
pylab.ylabel("error ($\epsilon$)")
pylab.title("Plot of error")
pylab.savefig("error_publish.eps")
pylab.savefig("error_publish.png")

# Medium size image
ps.set_mode("medium")
pylab.figure(2)
# pylab.axes([0.125,0.2,0.95-0.125,0.95-0.3])
Example 29
def lris_pipeline(prefix,
                  dir,
                  scinames,
                  arcname,
                  flatnames,
                  out_prefix,
                  useflat=0,
                  usearc=0,
                  cache=0,
                  offsets=None,
                  logfile=None):
    # Create a logfile for this session
    if logfile is None:
        logfile = open('%s.log' % out_prefix, 'w')
    else:
        logfile = open(logfile, 'w')
    stime = time.strftime("%d/%m/%y %H:%M:%S")
    logfile.write('%s\n' % stime)

    print "Processing mask", out_prefix
    logfile.write('Processing mask %s\n' % out_prefix)
    """ Prepare image names. """

    nsci = len(scinames)
    YMID = 2048  # offset for the second detector

    print "Preparing flatfields"
    if useflat == 1:
        logfile.write('Using pre-made flats\n')
        yforw, yback, slits, starboxes = flatload(out_prefix)
    else:
        logfile.write('Making new flats\n')
        yforw, yback, slits, starboxes = flatpipe(flatnames, out_prefix)

    print "Preparing arcs for line identification"
    if usearc == 1:
        logfile.write('Using pre-made arcs\n')
        arc_ycor = {}
        for i in ['bottom', 'top']:
            arcname = out_prefix + "_arc_%s.fits" % i
            arc_tmp = pyfits.open(arcname)
            arc_ycor[i] = arc_tmp[0].data.astype(scipy.float32)
            lamps = arc_tmp[0].header['LAMPS']
            filter = arc_tmp[0].header['BLUFILT']
            del arc_tmp
    else:
        logfile.write('Making new arcs\n')
        """ Load arc data from fits file """
        arc_tmp = pyfits.open(arcname)
        arcdata = arc_tmp[0].data.copy()
        """ Determine which lamps were used """
        lamps = arc_tmp[0].header['LAMPS']
        try:
            filter = arc_tmp[0].header['BLUFILT']
        except:
            filter = 'clear'
        del arc_tmp
        """ Process arcs for the top and bottom separately """
        arcdata = biastrim(arcdata)
        arc_ycor = {}
        arc_ycor['bottom'] = spectools.resampley(
            arcdata[:YMID], yforw['bottom']).astype(scipy.float32)
        arcname = out_prefix + "_arc_bottom.fits"
        arc_hdu = pyfits.PrimaryHDU(arc_ycor['bottom'])
        arc_hdu.header.update('LAMPS', lamps)
        arc_hdu.header.update('BLUFILT', filter)
        arc_hdu.writeto(arcname)

        arc_ycor['top'] = spectools.resampley(
            arcdata[YMID:], yforw['top']).astype(scipy.float32)
        arcname = out_prefix + "_arc_top.fits"
        arc_hdu = pyfits.PrimaryHDU(arc_ycor['top'])
        arc_hdu.header.update('LAMPS', lamps)
        arc_hdu.header.update('BLUFILT', filter)
        arc_hdu.writeto(arcname)

        del arc_hdu, arcdata

    axis1 = 4096
    axis2 = 4096
    """
	We create 'wide' starboxes that describe the minimum and maximum
	  y-position of the slit in the *unstraightened* frame.
	"""
    wide_stars = {}
    logfile.write('\n')
    for l in ['bottom', 'top']:
        wide_stars[l] = []
        bmax = arc_ycor[l].shape[0]
        logfile.write('Star boxes for %s\n' % l)
        for i, j in starboxes[l]:
            logfile.write('[:,%d:%d]\n' % (i, j))

            mod = scipy.where((yback[l] < j) & (yback[l] > i))
            a = mod[0].min() - 3  # We include a small amount of
            b = mod[0].max() + 3  #  padding for resampling.
            if a < 0:
                a = 0
            if b > bmax:
                b = bmax
            wide_stars[l].append([a, b])

    print "Bias trimming science data"
    nstars = len(starboxes['bottom']) + len(starboxes['top'])
    scidata = scipy.zeros((nsci, axis1, axis2), 'f4')
    center = scipy.zeros((nsci, nstars), 'f4')
    flux = scipy.zeros((nsci), 'f4')
    for i in range(nsci):
        filename = scinames[i]
        scitmp = pyfits.open(filename)

        scidatatmp = scitmp[0].data.copy()
        scidatatmp = biastrim(scidatatmp).astype(scipy.float32)
        """
		Remove screwed columns (this should already be done by biastrim
		  though...).
		"""
        bad = scipy.where(scidatatmp > 56000.)
        nbad = bad[0].size
        for k in range(nbad):
            y = bad[0][k]
            x = bad[1][k]
            scidatatmp[y, x] = (scidatatmp[y, x - 1] + scidatatmp[y, x + 1]) / 2.
        """
		We don't flatfield blueside data because of ghosts and
		  reflections. Milan Bogosavljevic has data that show that
		  flatfielding is important if using very blue data--the bluest
		  end looks like it has fringes! I should add a flag to allow
		  flatfielding to be turned on....
		"""
        scidata[i, :, :] = scidatatmp.copy()
        """
		The try/except code is because sometimes the data just don't
		  have these keywords (I think this might be correlated to
		  stopping exposures early, though I'm not sure). Plus old data
		  might not have used the dichroic at all....
		"""
        try:
            disperser = scitmp[0].header['GRISNAME']
        except:
            pass
        try:
            dichroic = scitmp[0].header['DICHNAME']
        except:
            dichroic = None
        """
		We use the first quartile for the flux normalization.
		"""
        flux[i] = scipy.sort(scipy.ravel(scidatatmp))[scidatatmp.size / 4]
        """
		The starboxes are used to determine relative y-shifts between
		  mask exposures. If the offsets keyword was used, this will
		  be ignored.
		"""
        for l in ['bottom', 'top']:
            if offsets is not None:
                continue
            for j in range(len(starboxes[l])):
                a, b = starboxes[l][j]
                m, n = wide_stars[l][j]
                a -= 4
                b += 4
                m -= 2
                n += 2
                if a < 0:
                    a = 0
                if l == 'top':
                    m += YMID
                    n += YMID
                center[i, j] = offset.findoffset(scidatatmp[m:n],
                                                 yforw[l][a:b], m)

        del scitmp
        del scidatatmp

    if offsets is not None:
        center = scipy.asarray(offsets)
    else:
        center = stats.stats.nanmean(center, axis=1)
    center[scipy.isnan(center)] = 0.

    print "Normalizing Fluxes"
    cmax = center.max()
    fmax = flux.max()
    logfile.write('\nMask pixel and flux offsets\n')
    logfile.write('-----------------------------\n')
    logfile.write('Mask   Pixels   Flux\n')
    for i in range(center.size):
        center[i] -= cmax
        ratio = fmax / flux[i]
        scidata[i] *= ratio
        logfile.write('%4d   %6.2f   %4.2f\n' % (i, center[i], ratio))
    cmax = ceil(fabs(center.min()))
    logfile.write('\n')

    if disperser == "300/5000":
        scale = 1.41
        mswave = 5135.
    elif disperser == "400/3400":
        scale = 1.05
        mswave = 3990.
    elif disperser == "600/4000":
        scale = 0.63
        mswave = 4590.
    elif disperser == "1200/3400":
        scale = 0.24
        mswave = 3505.

    if dichroic == 'mirror':
        bluecutoff = 0.
        dich_file = ''
    elif dichroic == '460':
        bluecutoff = 4650.
        dich_file = '460'
    elif dichroic == '500':
        bluecutoff = 5100.
        dich_file = '500'
    elif dichroic == '560':
        bluecutoff = 5650.
        dich_file = '560'
    elif dichroic == '680':
        #		bluecutoff = 6800.
        bluecutoff = 5650.
        dich_file = '680'
    else:
        bluecutoff = 8000.
        dich_file = ''
    """
	We create 'wide' slits that describe the minimum and maximum y-position
	  of the slit in the *unstraightened* frame. We also determine the mask
	  resolution using every seventh slit.
	"""
    nsize = 0  # Size of straightened mask
    csize = 0  # Size of coadded mask
    wide_slits = {}
    linewidth = []
    print "Determining mask resolution"
    for l in ['bottom', 'top']:
        wide_slits[l] = []
        bmax = arc_ycor[l].shape[0]
        logfile.write('Slits for %s (%d total)\n' % (l, len(slits[l])))
        for i, j in slits[l]:
            logfile.write('[:,%d:%d]\n' % (i, j))
            csize += int(j - i + cmax) + 5
            nsize += j - i + 5
            mod = scipy.where((yback[l] > i) & (yback[l] < j))
            a = mod[0].min() - 4
            b = mod[0].max() + 4
            if a < 0:
                a = 0
            if b > bmax:
                b = bmax
            wide_slits[l].append([a, b])
            if len(wide_slits[l]) % 7 == 0:
                linewidth.append(
                    measure_width.measure(arc_ycor[l][(i + j) / 2, :], 15))
    csize -= 5
    nsize -= 5
    logfile.write("\n\n")
    logfile.close()

    linewidth = scipy.median(scipy.asarray(linewidth))
    """ We can temporarily delete the top CCD from memory """
    yforw = yforw['bottom']
    yback = yback['bottom']
    arc_ycor = arc_ycor['bottom']
    """
	Create the arclamp model by using only the blue lamps that were turned
	  on. Turning off the red lamps (Ne and Ar) reduces reflections on the
	  blue side, and it is difficult to use these lines for the wavelength
	  solution because 2nd order blue lines show up starting at ~5800.
	"""
    print "Loading wavelength model"
    lris_path = lris.__path__[0]
    lamps = lamps.split(',')
    wave = scipy.arange(2000., 8000., 0.1)
    filenames = []
    if lamps[0] == '1':
        filenames.append(lris_path + "/data/bluearcs/hg.dat")
    if lamps[3] == '1':
        filenames.append(lris_path + "/data/bluearcs/cd.dat")
    if lamps[4] == '1':
        filenames.append(lris_path + "/data/bluearcs/zn.dat")
    fluxlimit = None
    if filter == 'SP580' and bluecutoff > 5650:
        cutoff = 5650.
    else:
        fluxlimit = 150.
        cutoff = bluecutoff
    linefile = out_prefix + "_lines.dat"
    make_linelist(filenames, cutoff, fluxlimit, linefile)
    """
	The relative amplitudes of the lines in the hg, cd, and zn.dat files
	  are more appropriate for the bluer grisms. A separate linelist is
	  used for the 300 grism and assumes all three lamps were on. This is
	  one of the problems with (1) not knowing the throughput for each
	  setup and (2) not having stable lamps.
	"""
    if disperser == "300/5000":
        filename = lris_path + "/data/bluearcs/300_lines.dat"
        arc, lines = make_arc(filename, linewidth * scale, wave)
    else:
        arc, lines = make_arc(linefile, linewidth * scale, wave)
    finemodel = interpolate.splrep(wave, arc, s=0)
    smooth = ndimage.gaussian_filter1d(arc, 9. / 0.1)
    widemodel = interpolate.splrep(wave, smooth, s=0)
    linemodel = interpolate.splrep(wave, lines, s=0)

    filename = lris_path + "/data/uves_sky.model"
    infile = open(filename, "r")
    wavecalmodel = pickle.load(infile)
    infile.close()
    wave = scipy.arange(3400., 10400., 0.1)
    """ We attempt to model the dichroic cutoff for the sky mode. """
    if dichroic == '680' and disperser == "300/5000":
        filename = lris_path + "/data/dichroics/dichroic_680_t.dat"
        infile = open(filename, "r")
        input = sio.read_array(infile)
        infile.close()
        input[:, 1] = 1. - input[:, 1]
        spline = interpolate.splrep(input[:, 0], input[:, 1], s=0)
        dich = interpolate.splev(wave, spline)
        dich[wave < 4500.] = 1.
        dich[wave > 8800.] = 1.
        filename = lris_path + "/data/grisms/grism_300.dat"
        infile = open(filename, "r")
        input = sio.read_array(infile)
        infile.close()
        spline = interpolate.splrep(input[:, 0], input[:, 1], s=0)
        eff = interpolate.splev(wave, spline)
        eff[wave < 5100.] = 1.
        eff[wave > 7200.] = 1.
        dich *= eff
        del input, spline
    else:
        dich = scipy.ones(wave.size)
    wave = scipy.arange(3400., 10400., 0.1)
    wavemodel = interpolate.splev(wave, wavecalmodel)
    goodmodel = ndimage.gaussian_filter1d(wavemodel, linewidth * scale / 0.12)
    goodmodel *= dich
    goodmodel = interpolate.splrep(wave, goodmodel, s=0)

    extra = [linefile, cutoff, 3000]
    extractwidth = 15

    del arc, wave, smooth
    """
	Use the skyarcmatch routine if the 300grism is employed, otherwise
	  just match the arclines.
	"""
    if dichroic == '680' and disperser == "300/5000":
        from lris.lris_blue.skyarcmatch import arcmatch as wavematch
        extra2 = [linefile, 6850, 3500]
    else:
        from lris.lris_blue.arcmatch import arcmatch as wavematch
        extra2 = extra
    """
	This could be improved by making the arrays the (pre-determined) size
	  stipulated by the red and blue cutoffs.
	"""
    print "Creating output arrays"
    outlength = int(axis2 * 1.6)
    out = scipy.zeros((nsci, nsize, outlength), scipy.float32) * scipy.nan
    out2 = scipy.zeros((2, csize, outlength), scipy.float32) * scipy.nan
    """
        For systems with limited RAM, it might make sense to cache the output
          arrays to disk. This increases the time it takes to run but may be
          necessary and also allows the progress of the reduction to be
          monitored.
        """
    if cache:
        import os
        print "Caching..."
        strtfile = out_prefix + "_TMPSTRT.fits"
        bgfile = out_prefix + "_TMPBSUB.fits"
        try:
            os.remove(strtfile)
        except:
            pass

        outfile = pyfits.PrimaryHDU(out)
        outfile.header.update('CTYPE1', 'LINEAR')
        outfile.header.update('CRPIX1', 1)
        outfile.header.update('CRVAL1', mswave - (0.5 * out2.shape[2]) * scale)
        outfile.header.update('CD1_1', scale)
        outfile.header.update('CTYPE2', 'LINEAR')
        outfile.header.update('CRPIX2', 1)
        outfile.header.update('CRVAL2', 1)
        outfile.header.update('CD2_2', 1)
        if nsci > 1:
            outfile.header.update('CTYPE3', 'LINEAR')
            outfile.header.update('CRPIX3', 1)
            outfile.header.update('CRVAL3', 1)
            outfile.header.update('CD3_3', 1)
        outfile.writeto(strtfile)
        del outfile, out

        try:
            os.remove(bgfile)
        except:
            pass

        outfile = pyfits.PrimaryHDU(out2)
        outfile.header.update('CTYPE1', 'LINEAR')
        outfile.header.update('CRPIX1', 1)
        outfile.header.update('CRVAL1', mswave - (0.5 * out2.shape[2]) * scale)
        outfile.header.update('CD1_1', scale)
        outfile.header.update('CTYPE2', 'LINEAR')
        outfile.header.update('CRPIX2', 1)
        outfile.header.update('CRVAL2', 1)
        outfile.header.update('CD2_2', 1)
        outfile.header.update('CTYPE3', 'LINEAR')
        outfile.header.update('CRPIX3', 1)
        outfile.header.update('CRVAL3', 1)
        outfile.header.update('CD3_3', 1)
        outfile.writeto(bgfile)
        del outfile, out2

    logfile = open(logfile.name, 'a')
    logfile.write('Beginning Wavelength Solution and Resampling\n')
    logfile.write('--------------------------------------------\n')
    logfile.close()
    """
        Loop through all of the slits, determining the wavelength solution and
          performing the background subtraction. It might be more robust to
          determine all wavelength solutions, then jointly determine a 'master'
          solution.... posc stores the current (starting) position of the
          coadded array, and posn stores the current position of the straight
          array. off is 0 while looping over the bottom and YMID for the top. n
	  is the number of bottom slits, so that the 1st top slit is n+1.
        """
    nbottom = len(slits['bottom'])
    nslits = nbottom + len(slits['top'])
    posc = 0
    posn = 0
    count = 1
    off = 0
    n = 0
    narrow = slits['bottom']
    wide = wide_slits['bottom']
    """ Debugging feature; set to 1 to skip background subtraction """
    lris.lris_blue.skysub.RESAMPLE = 0
    for k in range(nslits):
        """
		When we have finished all of the bottom slits, switch
		  parameters over to their top values.
		"""
        if k == nbottom:
            arc_ycor = get_arc(out_prefix)
            yforw = get_yforw(out_prefix)
            yback = get_yback(out_prefix)
            n = nbottom
            off = YMID
            narrow = slits['top']
            wide = wide_slits['top']
        i, j = narrow[k - n]
        a, b = wide[k - n]
        """ Debugging feature; change number to skip initial slits """
        if count < 1:
            count += 1
            continue

        print "Working on slit %d (%d to %d)" % (count, i + off, j + off)
        logfile = open(logfile.name, 'a')
        logfile.write("Working on slit %d (%d to %d)\n" %
                      (count, i + off, j + off))
        logfile.close()
        sky2x, sky2y, ccd2wave = wavematch(a, scidata[:, a + off:b + off],
                                           arc_ycor[i:j], yforw[i:j],
                                           widemodel, finemodel, goodmodel,
                                           linemodel, scale, mswave, extra,
                                           logfile)
        logfile = open(logfile.name, 'a')
        logfile.write("\n")
        logfile.close()
        strt, bgsub, varimg = doskysub(i, j - i, outlength,
                                       scidata[:, a + off:b + off], yback[a:b],
                                       sky2x, sky2y, ccd2wave, scale, mswave,
                                       center, extra2)
        """ Store the resampled 2d spectra """
        h = strt.shape[1]
        if cache:
            file = pyfits.open(strtfile, mode="update")
            out = file[0].data
        out[:, posn:posn + h] = strt.copy()
        if cache:
            file.close()
            del file, out
        posn += h + 5

        if lris.lris_blue.skysub.RESAMPLE == 1:
            count += 1
            continue
        """ Store the resampled, background subtracted 2d spectra """
        h = bgsub.shape[0]
        if cache:
            file = pyfits.open(bgfile, mode="update")
            out2 = file[0].data
        out2[0, posc:posc + h] = bgsub.copy()
        out2[1, posc:posc + h] = varimg.copy()
        if cache:
            file.close()
            del file, out2
        posc += h + 5
        """ Find and extract object traces """
        tmp = scipy.where(scipy.isnan(bgsub), 0., bgsub)
        colsum = tmp.sum(axis=0)  # per-column totals; zero where no data
        mod = scipy.where(colsum != 0)
        start = mod[0][0]
        end = mod[0][-1] + 1
        del tmp
        slit = bgsub[:, start:end]
        spectra = extract(slit, varimg[:, start:end], extractwidth)
        num = 1
        """ Wavelength of the first extracted column: offset the central
            wavelength by the cutout's starting pixel """
        crval = mswave - (0.5 * bgsub.shape[1] - start) * scale
        for spec in spectra:
            for item in spec:
                if item.size == 4:
                    # a 4-element item holds the trace parameters:
                    # item[2] is the center, item[3] the width
                    hdu = pyfits.PrimaryHDU()
                    hdu.header.update('CENTER', item[2])
                    hdu.header.update('WIDTH', item[3])
                    hdulist = pyfits.HDUList([hdu])
                else:
                    thdu = pyfits.ImageHDU(item)
                    thdu.header.update('CRVAL1', crval)
                    thdu.header.update('CD1_1', scale)
                    thdu.header.update('CRPIX1', 1)
                    thdu.header.update('CRVAL2', 1)
                    thdu.header.update('CD2_2', 1)
                    thdu.header.update('CRPIX2', 1)
                    thdu.header.update('CTYPE1', 'LINEAR')
                    hdulist.append(thdu)
            outname = out_prefix + "_spec_%02d_%02d.fits" % (count, num)
            hdulist.writeto(outname)
            num += 1

        count += 1
    """ Output 2d spectra"""
    if cache:
        file = pyfits.open(bgfile)
        out2 = file[0].data.copy()
        del file
    tmp = out2[0].copy()
    tmp = scipy.where(scipy.isnan(tmp), 0, 1)
    mod = scipy.where(tmp.sum(axis=0) != 0)
    start = mod[0][0]
    end = mod[0][-1] + 1
    del tmp

    outname = out_prefix + "_bgsub.fits"
    outfile = pyfits.PrimaryHDU(out2[0, :, start:end])
    outfile.header.update('CTYPE1', 'LINEAR')
    outfile.header.update('CRPIX1', 1)
    outfile.header.update('CRVAL1',
                          mswave - (0.5 * out2.shape[2] - start) * scale)
    outfile.header.update('CD1_1', scale)
    outfile.header.update('CRPIX2', 1)
    outfile.header.update('CRVAL2', 1)
    outfile.header.update('CD2_2', 1)
    outfile.writeto(outname)
    hdr = outfile.header.copy()

    outname = out_prefix + "_var.fits"
    outfile = pyfits.PrimaryHDU(out2[1, :, start:end])
    outfile.header = hdr
    outfile.writeto(outname)
    del out2, hdr

    if cache:
        file = pyfits.open(strtfile)
        out = file[0].data.copy()
        del file
    outname = out_prefix + "_straight.fits"
    outfile = pyfits.PrimaryHDU(out[:, :, start:end])
    outfile.header.update('CTYPE1', 'LINEAR')
    outfile.header.update('CRPIX1', 1)
    outfile.header.update('CRVAL1',
                          mswave - (0.5 * out.shape[2] - start) * scale)
    outfile.header.update('CD1_1', scale)
    outfile.header.update('CRPIX2', 1)
    outfile.header.update('CRVAL2', 1)
    outfile.header.update('CD2_2', 1)
    if nsci > 1:
        outfile.header.update('CRPIX3', 1)
        outfile.header.update('CRVAL3', 1)
        outfile.header.update('CD3_3', 1)
    outfile.writeto(outname)

    del out, outfile
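
The linear-WCS header block above is stamped onto every output file with the
same run of update calls. It can be factored into a small helper using the
same old pyfits API (Header.update) the example relies on; a minimal sketch,
where add_linear_wcs is an illustrative name, not part of the original
pipeline:

import pyfits

def add_linear_wcs(hdu, crval1, cd1_1):
    """Stamp a linear wavelength WCS on axis 1 and unit pixel
    coordinates on axis 2, as the example above does by hand."""
    hdu.header.update('CTYPE1', 'LINEAR')
    hdu.header.update('CRPIX1', 1)
    hdu.header.update('CRVAL1', crval1)  # wavelength of the first pixel
    hdu.header.update('CD1_1', cd1_1)    # wavelength step per pixel
    hdu.header.update('CTYPE2', 'LINEAR')
    hdu.header.update('CRPIX2', 1)
    hdu.header.update('CRVAL2', 1)
    hdu.header.update('CD2_2', 1)
    return hdu

Each header block above then collapses to a single call, e.g.
add_linear_wcs(pyfits.PrimaryHDU(out2[0]), mswave - 0.5 * out2.shape[2] * scale, scale).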
Example no. 38
parser.add_option("-l", "--bylab", action="store_true", dest="bylab", help="calculate score by label?")
parser.add_option("-b", "--blank", action="store_true", dest="blank", help="allow blank?")
parser.add_option("-x", "--softmax", action="store_true", dest="softmax", help="apply softmax?")
parser.add_option("-m", "--max", action="store_true", dest="max", help="show max only?")
options, args = parser.parse_args()

if len(args) != 3:
    parser.error("incorrect number of arguments")
print options
jaclistfile = args[0]
outactsfile = args[1]
outfile = args[2]
print "jacobian list file", jaclistfile
print "output activations file", outactsfile
print "output file", outfile
a = io.read_array(file(outactsfile), lines=[2,-1])
labels = file(outactsfile).readline().split()[1:]
T = shape(a)[1]
maxindices = []
for t in range(T):
    c = list(a[:,t])
    m = c.index(max(c))
    if not options.blank and labels[m] == 'blank':
        # 'blank' is assumed to be the last label; drop it and re-take the max
        c = c[:-1]
        m = c.index(max(c))
    maxindices.append(m)
print maxindices
print len(labels)
print labels
if options.bylab:
    v = zeros((len(labels),T),'f')
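
The per-timestep maximum with the optional 'blank' exclusion above can be
written more compactly with numpy. A minimal sketch, assuming (as the loop
above does) that the 'blank' label occupies the last row of the activation
matrix; best_labels is an illustrative name, not part of the original script:

import numpy as np

def best_labels(acts, labels, allow_blank=True):
    """Return the index of the highest-scoring label at each timestep.

    acts is a (num_labels, T) activation matrix; labels lists the names.
    """
    if not allow_blank and labels[-1] == 'blank':
        # dropping the blank row never changes a non-blank winner,
        # so this is equivalent to the re-take-the-max loop above
        acts = acts[:-1]
    return np.argmax(acts, axis=0)  # argmax over labels at each timestep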
Example no. 39
from scipy import io
import numpy as np


def load_data():
    """ Reads the Japanese Vowel Dataset from text files, as in Jaeger's
    paper, and puts it into Python lists / numpy arrays.
    """
    aetrain = io.read_array("./JaegerPaper/data/ae.train")
    aetest = io.read_array("./JaegerPaper/data/ae.test")

    # value for bias input
    bias = 0.07

    # aetrain and aetest contain the 12-dim time series, which have
    # different lengths, concatenated vertically and separated by ones(1,12)
    # rows. We now sort them into lists, such that each element represents
    # one time series
    trainInputs = []
    readindex = 0
    for c in range(270):
        l = 0
        while aetrain[readindex, 0] != 1.0:
            l += 1
            readindex += 1
        readindex += 1
        trainInputs.append(aetrain[readindex - l - 1:readindex - 1, :])
        # add bias input and input which indicates the length
        trainInputs[c] = np.c_[trainInputs[c], bias * np.ones((l, 1)),
                               (l / 30.) * np.ones((l, 1))]

    # now the same with the test inputs
    testInputs = []
    readindex = 0
    for c in range(370):
        l = 0
        while aetest[readindex, 0] != 1.0:
            l += 1
            readindex += 1
        readindex += 1
        testInputs.append(aetest[readindex - l - 1:readindex - 1, :])
        # add bias input and input which indicates the length
        testInputs[c] = np.c_[testInputs[c], bias * np.ones((l, 1)),
                              (l / 30.) * np.ones((l, 1))]

    # produce teacher signals. For each input time series of size N x 12 this
    # is a time series of size N x 9, all zeros except in the column indicating
    # the speaker, where it is 1.
    trainOutputs = []
    for c in range(270):
        l = trainInputs[c].shape[0]
        teacher = np.zeros((l, 9))
        speakerIndex = int(np.ceil((c + 1) / 30.) - 1)
        teacher[:, speakerIndex] = np.ones(l)
        trainOutputs.append(teacher)

    # produce test output signal
    testOutputs = []
    speakerIndex = 0
    blockCounter = 0
    blockLengthes = [31, 35, 88, 44, 29, 24, 40, 50, 29]
    for c in range(370):
        if blockCounter == blockLengthes[speakerIndex]:
            speakerIndex += 1
            blockCounter = 0
        blockCounter += 1
        l = testInputs[c].shape[0]
        teacher = np.zeros((l, 9))
        teacher[:, speakerIndex] = np.ones(l)
        testOutputs.append(teacher)

    return trainInputs, trainOutputs, testInputs, testOutputs
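
The two separator-scanning loops above can be condensed into a single helper.
A minimal sketch under the same convention (all-ones separator rows, detected
via the first column exactly as the original does); split_on_separators is an
illustrative name, not part of the original code:

import numpy as np

def split_on_separators(data):
    """Split a vertically concatenated array into individual time series,
    treating rows whose first element equals 1.0 as separators."""
    sep = np.where(data[:, 0] == 1.0)[0]  # indices of the separator rows
    series = []
    start = 0
    for s in sep:
        series.append(data[start:s, :])
        start = s + 1                     # skip the separator row itself
    return series

With this, the scanning loops reduce to split_on_separators(aetrain) and
split_on_separators(aetest), followed by the bias/length augmentation.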
Example no. 40
#!/usr/bin/env python

from scipy import io
from scipy import stats
from math import sqrt
from decimal import Decimal

fname = raw_input('>Give name of bootstrap results file:  ')

data = io.read_array(fname)
chisq = data[:, 0]
q = data[:, 1]
dphi = data[:, 2]
rd = data[:, 3]
rwd = data[:, 4]
ulimb = data[:, 5]
bsScale = data[:, 6]
bsAz = data[:, 7]
bsFis = data[:, 8]
dExp = data[:, 9]
incl = data[:, 10]
phi0 = data[:, 11]


def printParam(par, parString):
    mean = stats.mean(par)
    sd = sqrt(stats.var(par))
    print parString, ' = ', mean, ' +/- ', sd


printParam(q, 'Q')
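
scipy.stats.mean and scipy.stats.var date from old SciPy releases and were
removed later. A minimal numpy-only equivalent of printParam, assuming the
sample (N-1) variance convention matches what the old scipy.stats.var
returned:

import numpy as np

def printParam(par, parString):
    """Report a bootstrap parameter's mean and standard deviation."""
    mean = np.mean(par)
    sd = np.std(par, ddof=1)  # ddof=1 gives the sample (N-1) estimate
    print parString, ' = ', mean, ' +/- ', sd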