def assertStdout(inputString, result):
	# evaluate every expression in inputString and assert the captured stdout equals result
	inputStream = Stream(inputString)
	environment = createStandardEnvironment()
	outputStream = Stream()
	environment["*standard-output*"] = outputStream
	nextExpression = read(inputStream)
	while nextExpression:
		evaluate(environment, nextExpression)
		nextExpression = read(inputStream)
	assert environment["*standard-output*"].read() == result
def runCode(code):
	inputStream = Stream(code)
	environment = createStandardEnvironment()
	outputStream = Stream()
	environment["*standard-output*"] = outputStream
	lastReturnValue = None  # stays None if the input contains no expressions
	nextExpression = read(inputStream)
	while nextExpression:
		lastReturnValue = evaluate(environment, nextExpression)
		nextExpression = read(inputStream)
	stdout = environment["*standard-output*"].read()
	return lastReturnValue, stdout
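A quick usage sketch, assuming the surrounding interpreter defines a print form that writes to *standard-output* (the form name here is an assumption, not taken from the source):

value, stdout = runCode('(print "hello")')   # value: last expression's result; stdout: captured output
assertStdout('(print "hello")', 'hello')     # hypothetical expected output; depends on how the dialect formats strings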
Example #3
def exchange_X_Y(filename):

  basename = os.path.splitext(filename)[0]
  ext = os.path.splitext(filename)[1]
  savefile = "%s_exchanged_X_Y%s" % (basename, ext)
  print("savefile is", savefile)

  (X, Y) = read.read(filename)

  # write the columns back out in swapped order
  with open(savefile, 'w') as output_file:
    for x, y in zip(X, Y):
      output_file.write('%e %e\n' % (y, x))
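A hedged example of calling it; the filename and the two-column layout that read.read() is assumed to parse are both hypothetical:

exchange_X_Y('data.txt')   # writes data_exchanged_X_Y.txt with the X and Y columns swapped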
Example #4
def test_classification():
    from read import read
    import numpy, tfidf
    # LDA and MLPClassifier were undefined in the original; the sklearn
    # imports below are the likely sources
    from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
    from sklearn.neural_network import MLPClassifier

    m, files = read("training.json")
    y_map = [str(file["topic"]) for file in files]
    topics = []
    for label in y_map:
        if label not in topics:
            topics.append(label)
    y = numpy.array([topics.index(label) for label in y_map])

    print("Building TF-IDF...")
    X, vectorizer = tfidf.vectorizeTFIDF(files)
    print(X.shape)

    print("Performing dimensionality reduction using LDA...")

    lda = LDA(n_components=9)
    X = X.toarray()
    lda.fit(X, y)
    X = lda.transform(X)

    mlp = MLPClassifier()
    mlp.fit(X, y)
    training_score = mlp.score(X, y)
    print("training accuracy: %f" % training_score)
Example #5
def download(url, local):
    r = read(url)
    if r != '':
        with open(local, 'w') as f:
            f.write(r)
        return True
    else:
        return False
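Usage might look like this; the URL is a placeholder, and read() is assumed to return '' on failure, as the guard implies:

if download('http://example.com/data.txt', 'data.txt'):
    print('saved local copy')
else:
    print('download failed or empty response')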
Example #6
def collect(device_path):
    rrds = listrrd(device_path)

    if len(rrds) == 0:
        return

    hostname = device_path.split('/')[-1]
    ts = get_timestamp()
    
    xml = '<ServerMeasurement xmlns="http://mms/cwo/types/1.2.0" origin="generator.py" site="Blunck WAN" state="UP" hostname="%s" timestamp="%s">' % (hostname, ts)

    hit = False

    for rrd in rrds:
        millis = INTERVAL * 1000
        name = os.path.basename(rrd)
        value = read.read(rrd)
        if value is None:
            continue
        
        numeric = '<NumericMeasurement intervalInMilliseconds="%d" measurementName="%s" measurementType="RATE" units="unknown" value="%f"/>' % (millis, name, value)
        xml += numeric

        hit = True

    mvnm = collect_interfaces(device_path)
    if mvnm is not None:
        xml += mvnm

    xml += "</ServerMeasurement>"

    if hit:
        return xml
    else:
        return None
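A minimal driver sketch, assuming device_path names a directory of RRD files laid out the way listrrd() expects (the path is hypothetical):

xml = collect('/opt/monitor/devices/host01')
if xml is not None:
    with open('host01_measurements.xml', 'w') as f:
        f.write(xml)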
Example #7
def collect_interfaces(device_path):
    root = device_path + "/os/interfaces"
    interfaces = listdir(root)
    if len(interfaces) == 0:
        return None

    interface = os.path.basename(interfaces[0])
    rrds = listrrd(root + "/" + interface)

    xml = ''

    for rrd in rrds:
        millis = INTERVAL * 1000
        name = os.path.basename(rrd)

        mvnm = '<MultiValueNumericMeasurement intervalInMilliseconds="%d" measurementName="%s" measurementType="COUNTER" units="unknown">' % (millis, name)

        numerics = "<numericValues>"
        for interface in interfaces:
            interface = os.path.basename(interface)
            try:
                filename = root + "/" + interface + "/" + os.path.basename(rrd)
                value = read.read(filename)

                numeric = '<numericValue key="%s">%f</numericValue>' % (interface, value)
                numerics += numeric
            except Exception:
                # skip interfaces whose RRD value is missing or unreadable
                pass
        numerics += "</numericValues>"

        mvnm += numerics + "</MultiValueNumericMeasurement>"
        
        xml += mvnm

    return xml
Example #8
 def getCaptcha(self):
     resp = self.s.get("http://202.193.80.58:81/academic/getCaptcha.do")
     image_bytes = resp.content
     data_stream = io.BytesIO(image_bytes)
     captcha = Image.open(data_stream)
     strcat = read(captcha)    # recognize the captcha by comparing space vectors
     return strcat
Example #9
def get_reads(samtools_comm, rev_comp):
    """Run the given samtools command and parse each output line into a read."""
    read_strs = check_output(samtools_comm, shell=True).split('\n')
    read_strs.remove("")
    reads = [read(y) for y in read_strs]
    for t_read in reads:
        t_read.set_rev_comp(rev_comp)

    return reads
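A possible call, assuming the command is a samtools view invocation whose output is one read per line; the BAM path and region are placeholders:

reads = get_reads('samtools view aln.bam chr1:10000-10100', False)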
Example #10
def read_csf():
    """
    Read in CSF results from each file and concatenate them into a
    single data frame
    """
    data = []
    for csf_file in CSF_FILES:
        data.append(read(BASE_DIR+csf_file))

    return pd.concat(data, ignore_index=True)
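Given BASE_DIR and CSF_FILES defined elsewhere in the module, usage is a single call:

csf = read_csf()
print(csf.shape)   # one concatenated frame with a fresh index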
Example #11
def read_(id):
    data = read.read(id, addr).split("\n")
    id = data[2]
    time = data[3]
    title = data[4]
    from_ = data[5]
    message = ' '.join(data[7:])
    num = check()
    to_ = title.split()[1]
    title_ = from_.split()[1]
    return render_template("read.html", title_=title_, to_=to_, num=str(len(num)), id=id, time=time, from_=from_, message=message, title=title, addr=addr)
Example #12
    def do_read(self, line):
        line = line.split()
        id = line[0]
        data = read.read(id, self.username, self.password)
        data = json.loads(data)
        print("""
        ID: {1}
        From: {0}

{2}
        """.format(data['from'], id, data['message']))
Example #13
	def nextEntry(self):
		# load the next reviewer record and refill the form fields
		name, title, org, keyword = read.read('reviewer/' + fileList[self.cnt].strip())
		wordList = " ".join(keyword)
		self.entry.delete(0, len(self.entry.get()))
		self.entry2.delete(0, len(self.entry2.get()))
		self.entry3.delete(0, len(self.entry3.get()))
		self.entry4.delete(0, len(self.entry4.get()))
		self.entry.insert(0, name)
		self.entry2.insert(0, org)
		self.entry3.insert(0, title)
		self.entry4.insert(0, wordList)
		self.cnt = self.cnt + 1
Example #14
def shebang(path):
    """return file/string shebang"""
    if not path:
        return
    path = str(path)
    if not os.path.exists(path):
        return
    if isbinaryfile(path):
        return
    content = read(path)
    lines = content.splitlines()
    if lines:
        l = lines[0]  # first line
        if isshebang(l):
            l = l.replace("#!", "", 1)
            return l.strip()
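For example, on a script whose first line is #!/usr/bin/env python, the function returns the part after #! (the filename is hypothetical):

print(shebang('run.py'))   # -> '/usr/bin/env python'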
Example #15
File: main.py Project: bilguun/alpr
def main(i):
	from read import read
	from preproc import preproc
	from blob_identification import blob_identification
	from recognition import recognition
	from os import popen,mkdir,environ
	
	path = environ.get("HOME")
	popen("rm -rf blobs")
	mkdir("blobs")
	name = path + "/alpr/latest/images/" + str(i) + ".jpg"
	print(name)
	image=read(name)
	binary_image=preproc(image)
	blob_identification(binary_image)
	number = recognition()
	return number
Example #16
"""Read and clean the UCSF Free-surfer data"""

import pandas as pd
from read import read
from patient_info import clean_visits
import numpy as np
import matplotlib.pyplot as plt
from patient_info import get_dx, get_baseline_classes, get_dx_with_time
from read_clinical import MMSE, CDR

BASE_DIR = '/phobos/alzheimers/adni/'

FDG_FILE = BASE_DIR + 'UCBERKELEYFDG_03_13_14.csv'
AV_FILE = BASE_DIR + 'UCBERKELEYAV45_07_30_14.csv'

FDG = read(FDG_FILE)
AV = read(AV_FILE)
FDG['ROI'] = FDG['ROINAME'] + '_' + FDG['ROILAT']

if 'VISCODE2' in FDG.columns:
    FDG = clean_visits(FDG)
else:
    FDG['VISCODE2'] = FDG['VISCODE']

if 'VISCODE2' in AV.columns:
    AV = clean_visits(AV)
else:
    AV['VISCODE2'] = AV['VISCODE']

def flatten_pet():
    """
Example #17
import matplotlib.pyplot as plt
import numpy as np
from mfcc import mfcc 
from read import read
from stft import stft

fname = "sineSweep.wav"
(srate, data) = read(fname, "mono")
N = 1024
X = stft(data, N)
X = np.abs(X)
X = X[:N//2 + 1]   # keep only the non-negative frequency bins
X = mfcc(X, 44100)
# magnitude-to-dB conversion (optional)
#X = 10 * np.log10(X)
plt.imshow(X[1:], interpolation='nearest', aspect='auto', origin='lower')
plt.show()
Example #18
 # read in data in UTC
 if bys[buoy]['inmysql']:  # mysql tables
     if table == 'sum':
         # need to have this choose most recent data available
         # choose to look for ven since sum mostly shows ven data
         dend = tools.query_setup_recent(engine, buoy, 'ven').tz_localize('utc')
     else:
         dend = tools.query_setup_recent(engine, buoy, table).tz_localize('utc')
 else:
     dend = pd.Timestamp('now', tz='utc')
 # start 5 days earlier from 00:00 on day of last data, and account for time zones
 # so that enough data is read in for time zone conversion
 tzoffset = (dend.tz_localize(None) - dend.tz_convert(tz).tz_localize(None)).seconds/3600.
 dstart = (dend - pd.Timedelta('5 days')).normalize() + pd.Timedelta(str(tzoffset) + ' hours')
 dend += pd.Timedelta(str(tzoffset) + ' hours')
 df = read.read(buoy, dstart, dend, table=table, usemodel=False,
                userecent=True, tz=tz)
 if len(buoy) == 1:
     fname = path.join('..', 'daily', 'tabs_' + buoy + '_' + table)
 else:
     fname = path.join('..', 'daily', buoy)
 # write daily data file, for whatever most recent time period
 # data was available
 if df is not None:
     tools.write_file(df, fname)
 # if there are too few rows to plot, set as None
 if df is not None and len(df) < 2:
     df = None
 # no model output for stations in bays or outside domain
 now = pd.Timestamp('now', tz='utc').normalize()
 past = now - pd.Timedelta('5 days')
 future = now + pd.Timedelta('4 days')
Example #19
 def do_read(self, id):
     addr = db.data.find("data", "all")[0]['addr']
     print(read.read(id, addr))
Example #20
 def do_read(self, id):
     addr = db.data.find("data", "all")[0]["addr"]
     print(read.read(id, addr).decode("utf-8"))
     self.lastcmd = ""
Example #21
if 'tabs_' in fname:  # only need table name for tabs
    table = fname.split('/')[-1].split('_')[2]
    buoy = fname.split('/')[-1].split('_')[1]
else:
    buoy = fname.split('/')[-1].split('_')[0]
    table = bys[buoy]['table1']

# force the use of metric units if making a plot since both units shown anyway
if datatype == 'pic':
    units = 'M'

## Read in data ##
# from daily file, only for showing table since images created in run_daily.py
if dstart is None:

    df = read.read(fname, dstart=None, dend=None, table=table, units=units, tz=tz, datum=datum)
    dfmodelhindcast = None
    dfmodelrecent = None
    dfmodelforecast = None

# Call to database if needed
else:
    ## Read data ##
    if not modelonly:
        df = read.read(buoy, dstart, dend, table=table, units=units, tz=tz, datum=datum)
        if df is not None:  # won't work if data isn't available in this time period
            tools.write_file(df, fname)

    ## Read model ##
    # To use NOAA-provided model predictions
    if usemodel and bys[buoy]['table1'] == 'ports' and buoy != 'cc0101':
Example #22
from read import read
from simulate import simulate


init_dict = read('model.prefpy.ini')

simulate(init_dict)
Example #23
"""Read in data from clinical tests"""

from read import read

BASE_DIR = '/phobos/alzheimers/adni/'

MMSE_FILE = BASE_DIR + 'MMSE.csv'
CDR_FILE = BASE_DIR + 'CDR.csv'

MMSE = read(MMSE_FILE)
CDR = read(CDR_FILE)

MMSE.loc[MMSE['VISCODE2'] == 'sc', 'VISCODE2'] = 'bl'
CDR.loc[CDR['VISCODE2'] == 'sc', 'VISCODE2'] = 'bl'
Example #24
# coding=utf-8
import read
import geturl

fr = open('fileList.txt','r')
fileList = fr.readlines()
fr.close()

fw = open('ans.txt','a')

cnt = 24
n = len(fileList)

while cnt <= n:
	name,title,org,keyword = read.read('reviewer/'+fileList[cnt-1].strip())
	url = geturl.search(name,org,title,keyword)
	fw.write('%s %s %s %s\n' % (name.encode('utf-8'), title.encode('utf-8'), org.encode('utf-8'), url))
	print(cnt)
	cnt += 1

fw.close()
Example #25
BASE_DIR = '/phobos/alzheimers/adni/'

# diagnostic summary data
DXSUM_FILE = BASE_DIR + 'DXSUM_PDXCONV_ADNIALL.csv'

# data dictionary for all ADNI data
DATADIC_FILE = BASE_DIR + 'DATADIC.csv'

# data dictionary for the ARM assignments
ARM_FILE = BASE_DIR + 'ARM.csv'

# data file for the Registries
REG_FILE = BASE_DIR + 'REGISTRY.csv'

DXSUM = read(DXSUM_FILE)
DICT = read(DATADIC_FILE)
ARM = read(ARM_FILE)
REG = read(REG_FILE)

"""
1: Normal
2: Serious Memory Complaints (SMC)
3: Early MCI
4: Late MCI
5: Alzheimer's Disease
"""
NORMAL = 1
SMC = 2
EMCI = 3
LMCI = 4
Example #26
def createExpressionFromString(string):
	inputStream = Stream(string)
	return read(inputStream)
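As with runCode above, a brief sketch; the s-expression syntax is an assumption about the dialect being parsed:

expr = createExpressionFromString('(+ 1 2)')   # returns the parsed expression, not its evaluated value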
Example #27
def custom_list():
	from linked_list import Link
	li = []
	with open("ips.txt", 'r') as f:
		for i in f:
			word = i.strip()
			index = word.find(':')
			l = Link(word[0:index], word[index+1:])
			li.append(l)
	return li

if __name__ == "__main__":
	subprocess.call("bash get_latest.sh", shell=True)
	ip4list = read.read("latest.json")
	# ip4list = custom_list()
	print("starting to create all nodes")
	unique_id = 10000  # start here to ensure len() = 5
	try:
		for link in ip4list:
			# a_thread(link=link)  # use only this line for single-threaded
			th = threading.Thread(target=a_thread, kwargs={'link':link})
			th.name = unique_id
			th.start()
			unique_id += 1
		
		print("waiting for all threads to complete")
		master_thread = threading.current_thread().ident
		for i in threading.enumerate():
			if master_thread != i.ident:
Example #28
from read import read
import sys


if __name__ == "__main__":
    reads = []

    with open(sys.argv[1], "r") as read_f:
        for line in read_f:
            reads.append(read(line))

    pos = 8878
    bases = {}
    # loop over "r" rather than "read" so the imported function is not shadowed
    for r in reads:
        base = r.get_base_at_pos(pos)

        bases[base] = bases.get(base, 0)
        bases[base] += 1

        print(r.get_base_at_pos(pos), "  ", r.get_base_qual_at_pos(pos), "   ", r.MAPQ)

    print(bases)
Example #29
path = '.'
#files = os.listdir(path)
px_dict = pixel_dict()
#np.seterr(divide='ignore', invalid='ignore')

#for ifile in files:
for ifile in sys.argv[1:]:
    print('Processing {}...'.format(ifile))
    if ifile.endswith(".tif"):

        print('getting pixel sizes')
        [name, ext] = os.path.splitext(ifile)
        px = px_dict[name]

        print('converting to array')
        intensity_array = read(ifile)

        print('binarizing')
        intensity_array = (intensity_array < 2000)

        print('getting void labels')
        lbl, num = label(intensity_array, np.ones((3,3,3)))

        print("finding part com")
        part_com = np.mean(np.argwhere(lbl==0), axis=0)

        print('find blobs')
        #blobs = [(i, np.argwhere(lbl==i)) for i in range(2,num+1)]
        try:
            start = time.time()
            indices = np.argwhere(lbl > 1)
Example #30
import pandas as pd
from read import read
from patient_info import clean_visits

BASE_DIR = '/phobos/alzheimers/adni/'

# data from ADNIGO/ADNI2
DICTIONARY_51_FILE = BASE_DIR + 'UCSFFSX51_DICT_08_01_14.csv'
DATA_51_FILE = BASE_DIR + 'UCSFFSX51_08_01_14.csv'

# data from ADNI1
DICTIONARY_FILE = BASE_DIR + 'UCSFFSX_DICT_08_01_14.csv'
DATA_FILE = BASE_DIR + 'UCSFFSX_08_01_14.csv'

FSX_51 = read(DATA_51_FILE)
FSX = read(DATA_FILE)

if 'VISCODE2' in FSX.columns:
    FSX = clean_visits(FSX)
else:
    FSX['VISCODE2'] = FSX['VISCODE']

if 'VISCODE2' in FSX_51.columns:
    FSX_51 = clean_visits(FSX_51)
else:
    FSX_51['VISCODE2'] = FSX_51['VISCODE']

def find_unique(src, target):
    """
    Keyword Arguments: