Example #1
def Listen():
    # Snippet context: `signal` is imported and handler(), checkConn() and
    # getData() are defined elsewhere; handler presumably raises AssertionError
    # when the alarm fires.
    try:
        signal.signal(signal.SIGALRM, handler)
        signal.alarm(100)  # allow at most 100 seconds for the exchange
        flag = checkConn()
        if flag:
            getData()
        signal.alarm(0)  # cancel the pending alarm
    except AssertionError:
        exit(0)
Example #3
def linearRegreSin(url,degree):
    [a,b] = getData(url)
    trainA = a[0:140]   # first 140 samples for training (0:139 silently dropped sample 139)
    trainB = b[0:140]
    testA = a[140:]     # remaining samples for testing
    testB = b[140:]

    poly = PolynomialFeatures(degree)
    trainA = np.float64(poly.fit_transform(trainA))
    testA = np.float64(poly.fit_transform(testA))
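    # Closed-form least squares (normal equation): theta = (A^T A)^{-1} A^T b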
    theta = np.dot(np.dot(np.linalg.inv(np.dot(trainA.T,trainA)),trainA.T),trainB)
    plt.figure(1)
    plt.xlabel('x')
    plt.ylabel('y')
    plt.title('data')
    plt.plot(trainA[:,1],trainB,"r*")
    y=np.dot(trainA, theta)
    print(pow(sum((y-trainB)**2),1/2)/140)  # training error: sqrt(SSE)/n, not a true MSE

    y=np.dot(testA, theta)
    #plt.plot(testA[:,1], testB, "r.")
    plt.plot(testA[:,1],y,"k*")
    print(pow(sum((y-testB)**2),1/2)/60)  # test error: sqrt(SSE)/n, not a true MSE
    plt.show()
    print(theta)
Example #4
    def main():

        # Get data (trainx/trainy/testx/testy, scaler, data, split, modeling,
        # rmse and test_Dates are assumed to come from the enclosing scope)
        training_set, test_set = getData()

        # predict
        prediction = modeling(trainx, trainy, testx, testy).predict(testx)

        # Transform
        prediction_i = scaler.inverse_transform(prediction.reshape(-1, 1))
        test_y_inv = scaler.inverse_transform(testy.reshape(-1, 1))
        prediction2_i = np.array(prediction_i[:, 0][1:])
        test2_y_inv = np.array(test_y_inv[:, 0])

        # Train
        _, predictions = flow(data,
                              split,
                              modeling,
                              rmse,
                              n_train=600,
                              n_test=60)

        # Get dif
        mean, _ = validate(data, split, modeling, rmse, flow)

        # Show chart
        getChart(test_Dates, test2_y_inv, predictions - mean)
Example #5
def bestMatch(data):  #finds other jobs for which your personality is a match

    jobnames = data.titles
    currAvg = data.avg
    totalComp = []
    betterJobs = []
    for i in range(len(data.textJobnames)):
        comparisons = 0
        jobname = jobnames[data.z + i]
        # print(jobname)
        txt = findJob(jobname)
        callPersonalityInsights(txt, jobname)
        a, b, c = getData(jobname + ".json")
        things = [a[0], a[1], b]
        for ele in range(len(things)):
            #print(candidateData[ele])
            match, res = Comparing(data.candidatedata[ele], things[ele])
            comparisons += res
        # print(ele)
        totalAvg = comparisons / len(things)
        totalComp += [(jobname, totalAvg)]
    for element in totalComp:
        name, avg = element
        if currAvg < avg:
            betterJobs += [element]
    data.bestJobs = betterJobs
Example #6
def train(f_hr=8, b_hr=3, thr=40, method="HCR"):
    p = DATA_PATH

    # Set logger
    logger = generateLogger(p + "log.log")
    log(
        "--------------------------------------------------------------------------",
        logger)
    log(
        "---------------------------------  Train  --------------------------------",
        logger)

    # Get data
    end_dt = datetime.now() - timedelta(hours=24)
    start_dt = end_dt - timedelta(hours=8000)
    log("Get data from " + str(start_dt) + " to " + str(end_dt), logger)
    df_esdr_array_raw, df_smell_raw = getData(start_dt=start_dt,
                                              end_dt=end_dt,
                                              logger=logger)
    df_esdr, df_smell = preprocessData(df_esdr_array_raw=df_esdr_array_raw,
                                       df_smell_raw=df_smell_raw,
                                       logger=logger)

    # Compute features
    df_X, df_Y, df_C = computeFeatures(df_esdr=df_esdr,
                                       df_smell=df_smell,
                                       f_hr=f_hr,
                                       b_hr=b_hr,
                                       thr=thr,
                                       is_regr=False,
                                       add_inter=False,
                                       add_roll=False,
                                       add_diff=False,
                                       logger=logger,
                                       out_p_mean=p + "mean.csv",
                                       out_p_std=p + "std.csv")

    # Select features
    # NOTE: currently, the best model uses all the features
    #df_X, df_Y = selectFeatures(df_X, df_Y, logger=logger, out_p=p+"feat_selected.csv")

    # Train, save, and evaluate model
    model = trainModel({
        "X": df_X,
        "Y": df_Y,
        "C": df_C
    },
                       method=method,
                       out_p=p + "model.pkl",
                       logger=logger)
    metric = computeMetric(df_Y, model.predict(df_X, df_C), False)
    for m in metric:
        log(metric[m], logger)
Example #7
    def GET(self, name):
        data = ''
        i = web.input()
        user = i.get('user')
        if user == "explorer":
            latitude = i.get("latitude")
            longitude = i.get("longitude")
            data = getData(latitude, longitude)
        else:
            data = "Name Error"

        if data is None:
            data = "No Data"

        return data
Example #8
def getProportions(location):
    flatten = lambda l: [item for sublist in l for item in sublist]
    fullData = getData(location)
    newData = []
    for x in range(len(fullData)):
        if x not in exclusions:
            newData.append(fullData[x])
    data = flatten(newData)
    proportions = {
        -1: data.count('-1'),
        0: data.count('0'),
        1: data.count('1')
    }
    proportions['total'] = proportions[-1] + proportions[0] + proportions[1]
    return proportions
Example #10
def get_data():
    # Assumes `ast` and `pickle` are imported; getData() returns a dict whose
    # "GPSData:" value is a string containing a Python list literal.
    data = ast.literal_eval(getData()["GPSData:"])
    text_data = " "
    for i in range(len(data)):
        if i % 2 == 1:
            text_data += data[i] + " "  # insert a space after every second item
        else:
            text_data += data[i]
    with open('current_log.txt', 'a') as fi:
        fi.write(text_data + "\n")
    with open('log.txt', 'a') as f:
        f.write(text_data + "\n")
    with open('take_screenshot.pickle', 'wb') as f:
        pickle.dump('True', f)
    return text_data
Example #11
async def i(ctx, *args):
    errorColor = 0xed4337
    mainColor = 0xfde107
    if len(args) > 1:
        embed = embedFunc("Hata", "Lütfen tek kelime giriniz.", errorColor)  # "Error: please enter a single word."
    elif len(args) == 0:
        embed = embedFunc("Hata", "Lütfen kelime giriniz", errorColor)  # "Error: please enter a word."
    else:
        title, gif, description = getData(args[0])
        if title != "0":
            embed = embedFunc(title, description, mainColor)
            embed.set_image(url=gif)
            embed.set_footer(text="isaretce.com")
        else:
            title = "Kelime Bulunamadı"  # "Word not found"
            embed = embedFunc(title, args[0], errorColor)
    await ctx.send(embed=embed)
Example #12
def analyzeStimulus(location):
    stimulusData = getData(location)

    def analyzeNeurons(data, neuronNum1, neuronNum2, delay):
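        # Shift one spike train against the other so the correlation is
        # evaluated at a positive or negative lag.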
        neuron1 = data[neuronNum1]
        neuron2 = data[neuronNum2]

        if delay > 0:
            neuron1 = neuron1[delay:]
            neuron2 = neuron2[:-delay]
        elif delay < 0:
            neuron1 = neuron1[:delay]
            neuron2 = neuron2[-delay:]

        neuron1 = list(map(float, neuron1))  # list(): Py3 map is a lazy iterator
        neuron2 = list(map(float, neuron2))

        correlationVal = scipy.stats.pearsonr(neuron1, neuron2)[0]

        return [neuronNum1 + 1, neuronNum2 + 1, delay, correlationVal]

    analyzedNeurons = []
    for combo in neuronCombinations:
        for delay in range(-3, 4):
            analyzedNeurons.append(
                analyzeNeurons(stimulusData, combo[0], combo[1], delay))

    for x in range(len(analyzedNeurons)):
        if math.isnan(analyzedNeurons[x][-1]):
            analyzedNeurons[x][-1] = 0

    analyzedNeuronsSorted = sorted(analyzedNeurons, key=lambda cor: cor[3])
    highPositiveCorrelations = analyzedNeuronsSorted[
        len(analyzedNeuronsSorted) * 95 // 100:]  # top 5% of correlations
    highNegativeCorrelations = analyzedNeuronsSorted[
        0:len(analyzedNeuronsSorted) * 5 // 100]  # bottom 5%

    return highPositiveCorrelations, highNegativeCorrelations
Example #13
    def __init__(self, params):
        """
			Constructor method of the class which initializes variables and calls function to read image names and labels 
		"""
        self._data_file_path = params['data_file_path']
        self._num_of_artists = params['num_of_artists']
        self._num_of_images_per_artist = params['num_of_images_per_artist']
        self._saveData_to_file = params['saveData_to_file']
        self._read_prefetched_Data = params['read_prefetched_Data']
        self._batch_size = params['batch_size']
        self._data_dir = params['data_dir']

        self._height = 256
        self._width = 256

        self._left_image = {}
        self._right_image = {}
        self._labels = {}
        self._params = {}
        self._trainFlag = True

        # Object for the getData class which reads filenames and labels from csv/text file. Defined in the getData.py
        _csv_obj = getData()

        # Fetches the training data either from csv or text file.
        if not self._read_prefetched_Data:
            print('Loading file names and labels from csv.')
            self.data, self.label = _csv_obj.read_csv(
                self._data_file_path, self._data_dir, self._num_of_artists,
                self._num_of_images_per_artist, self._saveData_to_file)
        else:
            print('Loading existing file names and labels from text file.')
            self.data, self.label = _csv_obj.read_from_file(
                self._data_file_path)

        # Splits the training data into training and validation sets (80-20)
        self.train_data = self.data[:int(len(self.data) * 0.8)]
        self.train_labels = self.label[:int(len(self.label) * 0.8)]

        self.valid_data = self.data[int(len(self.data) * 0.8):]
        self.valid_labels = self.label[int(len(self.label) * 0.8):]
Example #14
	matEps = [lamb, eps.real, eps.imag]
	return matEps


import matplotlib.pyplot as plt

# Plotting Ag
# hag = getData("database/main/Ag/Hagemann.yml")
lam_range = (0.280, 0.790)  # avoids shadowing the built-in `range`
lambdasAg = np.linspace(lam_range[0], lam_range[1], 100)
ag = getData("database/main/Ag/Johnson.yml", lambdasAg)
# print(ag)
lambdasTiO2 = np.linspace(0.280, 0.790)
# tio2 = getData("database/main/TiO2/Devore-o.yml", lambdasTiO2)
tio2 = getData("database/main/SiO2/Malitson.yml", lambdasTiO2)

fig = plt.figure()
plt.plot(lambdasTiO2 * 1e3, [x.real for x in tio2], 'g-', label=r'Real($n_{SiO_{2}}$)')

plt.legend()
plt.title(r"Współczynnik załamania $SiO_2$ ($n$)")  # "Refractive index of SiO2 (n)"
plt.ylabel('')
plt.xlabel('wavelength [nm]')
Example #15
# @Time    : 2020/9/3 11:23
# @Author  : Jianyuan Lu
# @FileName: Homework2.1.py
# @Software: PyCharm

import numpy as np
import math
import matplotlib.pyplot as plt
import scipy.io as spio
import scipy.sparse.linalg as ll
import sklearn.preprocessing as skpp
from getData import *

A = getData('../data/food-consumption.csv')

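# Standardize the matrix below: subtract column means and divide by the
# (normalized) standard deviations, presumably as preprocessing for PCA.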
stdA = np.std(A, axis=0)  # per-column standard deviation (computed across rows)
stdA = skpp.normalize(stdA.reshape(
    1, -1))  # scale so the squared stds sum to 1; sklearn's normalize differs from MATLAB's
stdA2 = np.std(A, axis=1)  # per-row standard deviation (computed across columns)
stdA2 = skpp.normalize(stdA2.reshape(1, -1))

meanA = (np.mean(A, axis=0))
mean2A = (np.mean(A, axis=1))
temp = A - meanA
temp2 = (A.T - mean2A)

Anew = (A - meanA) @ np.diag(
    np.ones(stdA.shape[1]) / stdA[0])  # divide each element of Anew by its standard deviation
Anew2 = temp2 @ np.diag(np.ones(stdA2.shape[1]) / stdA2[0])  # same for Anew2

Anew = Anew.T  # the normalized, transposed A
Example #16
def drawResult(canvas, data):  #RESULT PAGE 1
    canvas.create_rectangle(0, 0, data.width, data.height, fill="slategray4")
    margin = 50
    canvas.create_text(data.width // 2,
                       margin,
                       text="RESULTS",
                       font="Times 20 bold")
    personality, needs, values = getData(
        "comparison.json")  #data.candidate +".json"
    first, second = personality[0], personality[1]
    data.comparisondata = []
    comparisonData = {}
    things = [first, second, needs]

    for dictionary in things:
        comphell = {}
        for key in dictionary:
            comphell[key] = dictionary[key]
            comparisonData[key] = dictionary[key]
        data.comparisondata += [comphell]
    personalitycomp, needscomp, valuescomp = getData("candidate.json")
    firstcomp, secondcomp = personalitycomp[0], personalitycomp[1]
    things1 = [firstcomp, secondcomp, needscomp]

    data.candidatedata = []
    candidateData = {}
    for dictionary1 in things1:
        candhell = {}
        for key1 in dictionary1:
            candhell[key1] = dictionary1[key1]
            candidateData[key1] = dictionary1[key1]
        data.candidatedata += [candhell]
    match, data.avg = Comparing(candidateData, comparisonData)
    canvas.create_text(data.width // 2,
                       3 * margin,
                       text="You are a %s match to the job" % data.avg,
                       font="Times 15")
    margin = 30
    border = 25
    width = 4 * margin
    centeringx1, centeringy = 1.7, 1.5
    canvas.create_rectangle(data.width - width,
                            data.height - 2 * margin,
                            data.width - border,
                            data.height - border,
                            fill="pink",
                            outline="white")
    canvas.create_text(data.width - width / centeringx1,
                       data.height - centeringy * margin,
                       text="DETAILS")
    data.result1bounds = (data.width - width, data.height - 2 * margin,
                          data.width - border, data.height - border)

    data.candidateList = []
    for dictionary1 in things1:
        candidatedata = {}
        for key1 in dictionary1:
            candidatedata[key1] = dictionary1[key1]
        data.candidateList += [[candidatedata]]
    canvas.create_text(
        data.width // 2,
        4 * margin,
        text="To consider similar and better-suited jobs press 'r'; your match to each job is also displayed:"
    )
    i = 0
    if data.bestMatch:
        for ele in data.bestJobs:
            name, match = ele
            name = name[:name.index("cover") - 1]
            if match >= 100:
                canvas.create_text(data.width // 2, (8 + i) * margin,
                                   text=str(name) + " : " + str(match),
                                   fill="red",
                                   font="Times 15 ")
            else:
                canvas.create_text(data.width // 2, (8 + i) * margin,
                                   text=str(name) + " : " + str(match),
                                   fill="black",
                                   font="Times 15 ")
            i += 1
Example #17
def predict(f_hr=8, b_hr=3, thr=40):
    p = DATA_PATH

    # Set logger
    logger = generateLogger(p + "log.log")
    log(
        "--------------------------------------------------------------------------",
        logger)
    log(
        "--------------------------------  Predict  -------------------------------",
        logger)

    # Get data for previous b_hr hours
    end_dt = datetime.now()
    start_dt = end_dt - timedelta(hours=b_hr + 1)
    log("Get data from " + str(start_dt) + " to " + str(end_dt), logger)
    df_esdr_array_raw, df_smell_raw = getData(start_dt=start_dt,
                                              end_dt=end_dt,
                                              logger=logger)
    df_esdr, df_smell = preprocessData(df_esdr_array_raw=df_esdr_array_raw,
                                       df_smell_raw=df_smell_raw,
                                       logger=logger)
    if len(df_esdr) < b_hr + 1:
        log("ERROR: Length of esdr is less than " + str(b_hr + 1) + " hours",
            logger)
        log("Length of esdr = " + str(len(df_esdr)), logger)
        return

    # Compute features
    df_X, _, df_C = computeFeatures(df_esdr=df_esdr,
                                    df_smell=df_smell,
                                    f_hr=f_hr,
                                    b_hr=b_hr,
                                    thr=thr,
                                    is_regr=False,
                                    add_inter=False,
                                    add_roll=False,
                                    add_diff=False,
                                    logger=logger,
                                    in_p_mean=p + "mean.csv",
                                    in_p_std=p + "std.csv")
    if len(df_X) != 1:
        log("ERROR: Length of X is not 1", logger)
        log("Length of X = " + str(len(df_X)), logger)
        return

    # Select features
    # NOTE: currently, the best model uses all the features
    #df_feat_selected = pd.read_csv(p+"feat_selected.csv")
    #df_X = df_X[df_feat_selected.columns]

    # Load model
    log("Load model...", logger)
    model = joblib.load(p + "model.pkl")

    # Predict result
    # For the hybrid crowd classifier
    # if pred==0, no event
    # if pred==1, event predicted by the base estimator
    # if pred==2, event detected by the crowd
    # if pred==3, event both predicted by the base estimator and detected by the crowd
    y_pred = model.predict(df_X, df_C)[0]
    log("Prediction for " + str(end_dt) + " is " + str(y_pred), logger)
    if y_pred in (1, 3): pushType1(end_dt, logger)
    if y_pred in (2, 3): pushType2(end_dt, logger)
Example #18
#!/usr/bin/python
# -*- coding: utf-8 -*-

#
#Author: Pengfei Shi <*****@*****.**>
#

from getData import *
from usingNeo4j import *
#import getData 

if __name__=="__main__":
    
    url1="http://www.sjtu.edu.cn/xbdh/yjdh/gk/jgsz/jgbc.htm"
    s=getData(url1)
    s.getPages()
    s.getContext()
    
    department=["党务部门","行政部门","直属单位","附属医院","附属学校","学院(系)","研究院"]  # Party offices, administrative offices, affiliated units, hospitals, schools, colleges/departments, research institutes
    
#    list=s.re(r'(?<=#8a9046">).*(?=</font>)')
    
 
    list0=s.re(r'(?<="c51084">).*(?=</a>)')
    list1=s.re(r'(?<="c51085">).*(?=</a>)')
    list2=s.re(r'(?<="c51273">).*(?=</a>)')
    list3=s.re(r'(?<="c51244">).*(?=</a>)')
    list4=s.re(r'(?<="c51245">).*(?=</a>)')
    
Example #19
from sklearn import datasets
import numpy as np  # needed below for np.zeros / np.std

# Q1
# [x,y]=getData("https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data")
# x = x[0:100,0]
# y = y[0:100]
# clf = LDA()
# clf.fit(x,y)
# LDA(n_components=None, priors=None, shrinkage=None, solver='svd',
#   store_covariance=False, tol=0.0001)
# for i in range(100):
#     y_pre = clf.predict([x[i]])
#     print(y_pre)

# Q2
[x,y]=getData("https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data")
x = x[0:100]
y = y[0:100]
mean0 = np.zeros((4,1))  # renamed from `sum` to avoid shadowing the built-in
for i in range(4):
    for j in range(50):
        mean0[i] = mean0[i] + x[j][i]
mean0 = mean0/50  # mean of the first class (equivalently np.mean(x[0:50], axis=0))
std = np.std(x[0:50],axis=0)

x = x[50:100]
mean1 = np.zeros((4,1))
for i in range(4):
    for j in range(50):
        mean1[i] = mean1[i] + x[j][i]
mean1 = mean1/50  # mean of the second class
Example #20
def analyzeStimulus(location):
	stimulusData = getData(location)
	def analyzeNeurons(data,neuronNum1,neuronNum2,delay):
		neuron1=data[neuronNum1]
		neuron2=data[neuronNum2]

		if delay>0:
			neuron1=neuron1[delay:]
			neuron2=neuron2[:-delay]
		elif delay<0:
			neuron1=neuron1[:delay]
			neuron2=neuron2[-delay:]

		neuron1 = list(map(float, neuron1))  # list() needed: Py3 map is a one-shot iterator
		neuron2 = list(map(float, neuron2))
		def findSimilarityCorrelation(neuron1,neuron2):
			sims=0.
			total=0.
			for x in range(len(neuron1)):
				if neuron1[x]==neuron2[x]:
					sims+=1.
				total+=1.
			return sims/total

		correlationVal=findSimilarityCorrelation(neuron1,neuron2)
		tempNeuron2=neuron2[:]
		lessThanCounter=0.0
		

		if math.isnan(correlationVal):
			percentile=correlationVal

			significance=0
		else:
			for x in range(number_of_shuffles):
				random.shuffle(tempNeuron2)
				tempCorVal=findSimilarityCorrelation(neuron1,tempNeuron2)
				
				if tempCorVal<=correlationVal:
					lessThanCounter+=1.0

			
			percentile=lessThanCounter/number_of_shuffles
			
			if percentile>.95:
				significance=1
			else:
				significance=0



		return [neuronNum1+1, neuronNum2+1, delay, correlationVal, percentile, significance]



	analyzedNeurons=[]
	analyzedNeurons.append(['neuron1','neuron2','delay','correlation coefficient','percentile','significance'])
	for combo in neuronCombinations:
		for delay in range(-3,4):
			analyzedNeurons.append(analyzeNeurons(stimulusData,combo[0],combo[1],delay))

	return analyzedNeurons
Example #21
def linearRegreMul(url):
    [a,b] = getData(url)
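    # Closed-form least squares (normal equation): theta = (a^T a)^{-1} a^T b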
    theta = np.dot(np.dot(np.linalg.inv(np.dot(a.T,a)),a.T),b)
    y=np.dot(a,theta)
    print(pow(sum((y-b)**2),1/2))  # sqrt of the squared-error sum, not a true MSE
Example #22
  rho = rho * dens
  return rho

radius=10
dist='40.240'
path="two_colloid_dist_" + str(dist)
filename="two_colloid_dist_" + str(dist) + "/ipbs_solution.vtu"
dist=float(dist)

xmin=-dist
xmax=0
ymin=0
ymax=dist/2.
resolution=.125

x,y,sol=getData(filename, xmin, xmax, ymin, ymax, resolution, resolution)

print(x.shape, y.shape, sol.shape)

phi = scipy.interpolate.RectBivariateSpline(x, y, sol, kx=3, ky=3)
print(phi(2, 3))
numpy.savetxt("sol.txt", sol.transpose())
#rho=scipy.interpolate.interp2d(x,y,dens(sol), kind='cubic')
#rho=scipy.interpolate.RectBivariateSpline(x,y,sol)
#x = numpy.linspace(xmin,xmax,100)# x coordinates
#y = numpy.linspace(ymin,ymax,100)# y coordinates
#z = solutiondata(x,y)
#print z
print(phi(x, y).shape)

Example #23
from scipy import sparse
from scipy.sparse import find
from sklearn.cluster import KMeans
from matplotlib import pyplot as plt
from getData import *
from filterIsolatedDate import *
from getWebName import *
from calMisrate import *




n=1490
largest_K =101

edgeData = getData('../data/edges.txt')
p1 = edgeData[:, 0] - 1  # node IDs in the file are 1-based; shift to 0-based
p2 = edgeData[:, 1] - 1

v = np.ones((edgeData.shape[0], 1)).flatten()  # one entry per edge

#A = sparse.csc_matrix((v, (p1, p2)), shape=(n, n))
temp = np.zeros((n, n)).astype(int)
temp[p1,p2] = 1
A = temp
A = (A + np.transpose(A)) / 2  # symmetrize; one-directional links become 0.5
webName,orientation = getwebname('../data/nodes.txt')
A, zeroIndex,webName,orientation = filterData(A,webName,orientation)
n=A.shape[0]
temp = np.sum(A, axis=1)
D = np.diag(1 / np.sqrt(np.sum(A, axis=1)))  # inverse square-root degree matrix
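# D holds D^(-1/2), presumably used next to form the normalized adjacency
# D^(-1/2) A D^(-1/2) for spectral clustering (KMeans is imported above).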
Example #24
    N = n[:] + 1j * k[:]
    eps = N[:] * N[:]

    matEps = [lamb, eps.real, eps.imag]
    return matEps


import matplotlib.pyplot as plt


# Plotting Ag
# hag=getData("database/main/Ag/Hagemann.yml");
lam_range = (0.280, 0.790)  # avoids shadowing the built-in `range`
lambdasAg = np.linspace(lam_range[0], lam_range[1], 100)
ag = getData("database/main/Ag/Johnson.yml", lambdasAg)
# print ag
lambdasTiO2 = np.linspace(0.280, 0.790)
# tio2=getData("database/main/TiO2/Devore-o.yml",lambdasTiO2);
tio2 = getData("database/main/SiO2/Malitson.yml", lambdasTiO2)


fig = plt.figure()
plt.plot(lambdasTiO2 * 1e3, [x.real for x in tio2], "g-", label=r"Real($n_{SiO_{2}}$)")


plt.legend()
plt.title(r"Współczynnik załamania $SiO_2$ ($n$)")
plt.ylabel("")
plt.xlabel("wavelength [nm]")
plt.xlim([280, 750])
Example #25
#!/usr/bin/python
# -*- coding: utf-8 -*-

from __future__ import unicode_literals
import numpy as np
import pylab as plt
from getData import *


import matplotlib
matplotlib.rcParams.update({'font.size': 30})

lam_range = (1, 17)  # wavelength range in µm; avoids shadowing the built-in `range`
lambdas = np.linspace(lam_range[0], lam_range[1], 100)
ag = getData("database/main/GaAs/Skauli.yml", lambdas)

fig=plt.figure()
plt.plot(lambdas,[ (x*x).real for x in ag ],'r-',label=r'Re($\varepsilon_{GaAs}$)',lw=4)

#plt.plot(lambdas,[ (x*x).imag for x in ag] ,'b-',label=r'Imag($\varepsilon_{Ag}$)')
#plt.plot(lambdasTiO2*1e3,[ (x*x).real for x in tio2],'g-',label=r'Real($\varepsilon_{TiO_{2}}$)')

c = 299792458
lambdas = lambdas * 1e-6  # µm -> m (the original factor 10e-6 equals 1e-5)
freq = c / lambdas
freq = freq / 1e12  # Hz -> THz; freq is only used by the commented-out plot below

#plt.plot(freq,[ (x*x).real for x in ag ],'r-',label=r'Real($\varepsilon_{Ag}$)')

plt.legend()
#plt.title(r"Współczynnik przenikalnośći elektrycznej ($\varepsilon$)");
Example #26
				print "\t\t "+sp.join(mailto[3*n+2])+"\n\n\n"
				found=1; break
		if found==0:
			print "Mail to:"
			print "\t\tUnable to locate address for "+cLoc+"\n\n\n" 


	print "-------------------------------------------------------------------"
	print "					'exit' or 'quit' to escape"
	






#%%%%
#MAIN
#%%%%%

lu=""

mailto = getData("./MOD/prison_add.dict",1)

while lu != "exit" and lu != "Exit" and lu != "quit" and lu != "Quit" and lu !="q" and lu != "Q":
	i_no=raw_input("\n\n\n\n\nPrisoner ID:")
	run_query(i_no)
	lu=raw_input()
#print "\n\n\n\n\nPrisoner ID:"

Example #27
#!/usr/bin/python
# -*- coding: utf-8 -*-

from __future__ import unicode_literals
import numpy as np
import pylab as plt
from getData import *

import matplotlib
matplotlib.rcParams.update({'font.size': 30})

lam_range = (1, 280)  # wavelength range in µm; avoids shadowing the built-in `range`
lambdas = np.linspace(lam_range[0], lam_range[1], 100)
ag = getData("database/main/Au/Ordal.yml", lambdas)

fig=plt.figure()
plt.plot(lambdas,[ -(x*x).real for x in ag ],'r-',label=r'-Re($\varepsilon_{Au}$)',lw=4)

plt.plot(lambdas,[ (x*x).imag for x in ag] ,'b-',label=r'Im($\varepsilon_{Au}$)',lw=4)
#plt.plot(lambdasTiO2*1e3,[ (x*x).real for x in tio2],'g-',label=r'Real($\varepsilon_{TiO_{2}}$)')

c = 299792458
lambdas = lambdas * 1e-6  # µm -> m (the original factor 10e-6 equals 1e-5)
freq = c / lambdas
freq = freq / 1e12  # Hz -> THz; freq is only used by the commented-out plot below

#plt.plot(freq,[ (x*x).real for x in ag ],'r-',label=r'Real($\varepsilon_{Ag}$)')

plt.legend(loc=4)
#plt.title(r"Współczynnik przenikalnośći elektrycznej ($\varepsilon$)");
plt.xlabel('wavelength [um]')
Example #28
def run():
    print('available countries are: ' + displayCountries(getCountries()))
    country = input('Select a country: ')
    status = input('Enter Status number (1: confirmed, 2: recovered, 3: deaths): ')

    possibleProvinceCity = searchProvince(getData(country))  # provinces and/or cities tied to the country

    possibleProvinces = ', '.join(list(possibleProvinceCity.keys()))

    if possibleProvinces:  # ask for a province and/or city when the country has them
        print('Available provinces are: ' + possibleProvinces)
        province = input('Enter a province: ').title()
        possibleCities = ', '.join(list(possibleProvinceCity[province]))
        if possibleCities:
            print('Available cities are: ' + possibleCities)
            city = input('Enter a City: ')
        else:
            city = ''
    elif possibleProvinceCity.get(''):
        possibleCities = ', '.join(list(possibleProvinceCity[''].values()))
        print('Available cities are: ' + possibleCities)
        city = input('Enter a City: ')
        province = ''  # fixes a NameError: this branch never set `province`
    else:
        province = ''
        city = ''

    if province:  # confirm the input
        if city:
            print('Processing input of: Country: ' + country + ', Province: ' + province + ', City: ' + city)
        else:
            print('Processing input of: Country: ' + country + ', Province: ' + province)
    else:
        print('Processing input of: ' + country)


    # set the data
    data = machineLearning(parseData(getData(country, status), province, city))
    if data['daysPredicted'] and data['type'] == 'confirmed':
        if data['i']  != LIMIT:
            print('expected resolution on day '+ str(data['daysPredicted'][-1]))

    x1 = (data['days'])
    x2 = data['daysPredicted']
    y1 = (data['totalCases'])
    y2 = (data['casesPerDay'])
    y11 = (data['perDayPredicted'])
    y22 = (data['totalPredicted'])

    # plot the data
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    if data['i'] != LIMIT:
        ax.plot(x2, y22,'--', color='tab:blue', label = 'predicted total cases')
        ax.plot(x2, y11,'--', color='tab:orange', label = 'predicted cases per day')
    plt.scatter(x1, y1,s= 1, color='tab:blue', label = 'total cases')
    plt.scatter(x1, y2,s = 1, color='tab:orange', label = 'cases per day')


    ax.set_xlabel('number of days')
    ax.set_ylabel(('cases of status : ' + data['type']))
    ax.legend()
    ax.set_title('# of cases of status: ' + data['type'] + ', with respect to time, starting from ' + data['day1'] +' in '+ data['province'] + data['city'] + ', ' + data['country'])

    # display the plot
    return plt.show()
Example #29
            b = np.array([np.sum(weights * y), np.sum(weights * y * x)])
            A = np.array([[np.sum(weights), np.sum(weights * x)],
                          [np.sum(weights * x), np.sum(weights * x * x)]])
            beta = linalg.solve(A, b)
            yest[i] = beta[0] + beta[1] * x[i]

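        # Robust re-weighting: scale residuals by 6 times their median absolute
        # value, then apply the bisquare function so outliers get little weight
        # in the next lowess iteration.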
        residuals = y - yest
        s = np.median(np.abs(residuals))
        delta = np.clip(residuals / (6.0 * s), -1, 1)
        delta = (1 - delta ** 2) ** 2

    return yest

if __name__ == '__main__':
    import math
    n = 200
    [x, y] = getData("http://www.cs.iit.edu/~agam/cs584/data/regression/svar-set4.dat")
    x = x.T[0]
    y = y.T[0]
    #x = np.linspace(0, 2 * math.pi, n)
    #y = np.sin(x) + 0.3 * np.random.randn(n)

    f = 0.25
    yest = lowess(x, y, f=f, iter=3)

    import pylab as pl
    pl.clf()
    pl.plot(x, y, '.',label='y noisy')
    pl.plot(x, yest, '.',label='y pred')
    pl.legend()
    pl.show()
Example #30
comRange = np.empty(shape=(2, 1))
comRange[0] = max(matRange1[0], matRange2[0])  # intersection of the two materials' ranges
comRange[1] = min(matRange1[1], matRange2[1])

sys.stderr.write("comRange=" + str(comRange))

# Sampling definition:
lambdas = np.linspace(comRange[0], comRange[1], 100)

# alternatively, a hard-coded sampling range:
lambdas = np.linspace(7, 10, 100)

#Change refractive index to epsilon
matData1 = getData(file1, lambdas)
for i in range(0, len(matData1)):
    matData1[i] = matData1[i] * matData1[i]

matData2 = getData(file2, lambdas)
for i in range(0, len(matData2)):
    matData2[i] = matData2[i] * matData2[i]

R1 = np.empty(shape=(0, len(lambdas)))
I1 = np.empty(shape=(0, len(lambdas)))

R2 = np.empty(shape=(0, len(lambdas)))
I2 = np.empty(shape=(0, len(lambdas)))

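# Sweep the fill fraction f from 0 to 1; effEpsilon presumably returns the two
# effective-medium permittivities of the mixture at each f.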
for f in np.linspace(0, 1, 100):
    (efw1, efw2) = effEpsilon(matData1, matData2, f)
Example #31
def searchRef(yamlFile,bookName,nmin,nmax):

	#List for result
	result=[]

	# Helper (commented out below) used internally to print material parameter values.
	# NOTE: its `comment` argument also acted as a command string for the function (admittedly hacky).
#	def printParams(paramsArray,paramList,comment):
#		for param in paramList:
#			sys.stdout.write(str(paramsArray[-1][param])+" ");
#		sys.stdout.write(comment+"\n");
#		if comment.find("start_range")!=-1:
	#		printParams.lastStart=paramsArray[-1]["l"]
	#	elif comment.find("end_range")!=-1:
	#		result.append((printParams.lastStart,paramsArray[-1]["l"]))
	#printParams.lastStart=0;

	# Flag: if the first element is already in range it may not be shown.
	# It is only initialized here for the first material; for the rest it starts undefined.
	print_in_my_range = False


	# Decides whether to emit additional output when parameters fall inside the range
	my_range = False
	
	with open(yamlFile, 'r') as yamlStream:
		allData = yaml.safe_load(yamlStream)  # safe_load avoids arbitrary object construction

	materialData=allData["DATA"][0]

	#dataRange - where we can check the data
	dataRange=getRange(yamlFile)
	if dataRange[0]==0:
		return []

	#wavelengths to check for this material:
	#HERE WE DEFINE THE SAMPLING
	lambdas=np.linspace(dataRange[0],dataRange[1],100)

	#refractive index
	nList=np.empty((1,0))
#	try:
	nList=getData(yamlFile,lambdas[:])
	if len(nList) == 1:
		return []
	eps=np.zeros(len(nList),dtype="complex")

	for i in range(0,len(nList)):
		eps[i]=nList[i]*nList[i]
#	except UnsupportedDataType as e:
#		sys.stderr.write(e)

	if len(nList)<100:
		return result
	inRange=False
	rangeStart=0
	
	for i in range(0,len(lambdas)):
		if eps[i].real>nmin and eps[i].real<nmax:
			if inRange==False:
				inRange=True
				rangeStart=lambdas[i]
		
		elif inRange:
			result.append((rangeStart,lambdas[i]))
			inRange=False
			

	return result
Example #32
			,x.strip().split(','))
		,data)

	return data
		
def writeFile(location, data):
	with open(location + ".txt", "w") as f:
		f.write(str(data)[1:-1])  # strip the surrounding brackets of the list repr


fullData=[]
temp=[]
for stimulus in stimuliNames:
	
	temp=getData(outputDirectory + "exportingN/" + stimulus)
	# list(...) wrappers: Py3 map() is lazy, and np.transpose needs concrete lists
	tempPositive = list(map(lambda x:
			list(map(lambda y: 0 if y==-1 else y,x)),
		temp))
	tempNegative = list(map(lambda x:
			list(map(lambda y: 0 if y==1 else y,x)),
		temp))


	newDataPositive = list(map(lambda x: sum(x) / len(x), (np.transpose(tempPositive)).tolist()))
	newDataNegative = list(map(lambda x: -1 * sum(x) / len(x), (np.transpose(tempNegative)).tolist()))

	writeFile(outputDirectory + "histogramData/" + stimulus + "Positive", newDataPositive)
	writeFile(outputDirectory + "histogramData/" + stimulus + "Negative", newDataNegative)

Example #33
        preDataFrameDict["instrumentalness"].append(
            playlistSongFeatures[i]["instrumentalness"])
        preDataFrameDict["liveness"].append(
            playlistSongFeatures[i]["liveness"])
        preDataFrameDict["loudness"].append(
            playlistSongFeatures[i]["loudness"])
        preDataFrameDict["speechiness"].append(
            playlistSongFeatures[i]["speechiness"])
        preDataFrameDict["tempo"].append(playlistSongFeatures[i]["tempo"])
        preDataFrameDict["valence"].append(playlistSongFeatures[i]["valence"])
    df = pd.DataFrame(preDataFrameDict)
    return df.set_index("Song")


#getting data to play with
playlistIDs, playlistSongs, playlistSongFeatures = getData(userID, accessToken)

#example of basic visual
#DeathflowFrame = playlistToDataFrame("Deathflow", playlistSongs["Deathflow"], playlistSongFeatures["Deathflow"])
#DeathflowFrame[["acousticness","danceability","energy"]].plot.area()
#plt.show()

SavedTracksFrame = playlistToDataFrame("savedTracks",
                                       playlistSongs["savedTracks"],
                                       playlistSongFeatures["savedTracks"])
SavedTracksFrame[["acousticness", "danceability", "energy"]].plot.area()
plt.show()

# using KNN to determine which playlist a song belongs in:
# to do this, average the song features of each playlist dataframe and construct
# a new dataframe where the index is the playlist and the features are the
# averaged features of that playlist's songs, as sketched below
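A minimal sketch of that idea, assuming per-playlist dataframes like the ones playlistToDataFrame builds above; playlistCentroids, classifySong and the 1-NN setup are illustrative, not part of the original script:

import pandas as pd
from sklearn.neighbors import KNeighborsClassifier

def playlistCentroids(frames):
    # frames: {playlist_name: DataFrame of per-song audio features}
    # -> one averaged feature row (centroid) per playlist
    return pd.DataFrame({name: df.mean() for name, df in frames.items()}).T

def classifySong(frames, song_features):
    # assign a song to the playlist whose averaged features are nearest (1-NN)
    centroids = playlistCentroids(frames)
    knn = KNeighborsClassifier(n_neighbors=1)
    knn.fit(centroids.values, list(centroids.index))
    return knn.predict([song_features])[0]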
Example #34
from getData import *
import pybrain as pb
from pybrain.tools.shortcuts import buildNetwork
from pybrain.datasets import SupervisedDataSet
from pybrain.supervised.trainers import BackpropTrainer

trainData = getData('data_2.csv')
testData = getData('data_1_2.csv')

# training
fnn = buildNetwork(24, 30, 8, 1, bias=True)
ds = SupervisedDataSet(24, 1)
for i in trainData:
	ds.addSample([i[j] for j in Name[1:]], i[Name[0]])
trainer = BackpropTrainer(fnn, ds)
print("Begin....")
trainer.trainEpochs(epochs=100)
print("Ing...")
out = SupervisedDataSet(24, 1)
for i in testData:
	out.addSample([i[j] for j in Name[1:]], [0])

res = fnn.activateOnDataset(out)

for i, o in out:
	print(i, "->", o)
print(res)
Example #36
def main(argv):
    p = "data_main/"
    mode = None
    if len(argv) >= 2:
        mode = argv[1]

    # Parameters
    is_regr = False  # False for classification, True for regression
    smell_thr = 40  # threshold to define a smell event
    start_dt = datetime(2016, 10, 31, 0, tzinfo=pytz.timezone("US/Eastern"))
    end_dt = datetime(2019, 8, 5, 0, tzinfo=pytz.timezone("US/Eastern"))

    # Set mode
    get_data, preprocess_data, analyze_data, compute_features, cross_validation = False, False, False, False, False
    if mode == "pipeline":
        get_data = True
        preprocess_data = True
        compute_features = True
        cross_validation = True
    elif mode == "data":
        get_data = True
    elif mode == "preprocess":
        preprocess_data = True
    elif mode == "feature":
        compute_features = True
    elif mode == "validation":
        cross_validation = True
    elif mode == "analyze":
        analyze_data = True
    else:
        get_data = True
        preprocess_data = True
        compute_features = True
        cross_validation = True

    # Get data
    # OUTPUT: raw esdr and raw smell data
    if get_data:
        getData(out_p=[p + "esdr_raw/", p + "smell_raw.csv"],
                start_dt=start_dt,
                end_dt=end_dt)

    # Preprocess data
    # INPUT: raw esdr and raw smell data
    # OUTPUT: preprocessed esdr and smell data
    if preprocess_data:
        preprocessData(in_p=[p + "esdr_raw/", p + "smell_raw.csv"],
                       out_p=[p + "esdr.csv", p + "smell.csv"])

    # Analyze data
    if analyze_data:
        analyzeData(in_p=[p + "esdr.csv", p + "smell.csv"],
                    out_p_root=p,
                    start_dt=start_dt,
                    end_dt=end_dt)

    # Compute features
    # INPUT: preprocessed esdr and smell data
    # OUTPUT: features and labels
    if compute_features:
        computeFeatures(in_p=[p + "esdr.csv", p + "smell.csv"],
                        out_p=[p + "X.csv", p + "Y.csv", p + "C.csv"],
                        is_regr=is_regr,
                        f_hr=8,
                        b_hr=3,
                        thr=smell_thr,
                        add_inter=False,
                        add_roll=False,
                        add_diff=False)

    # Cross validation
    # INPUT: features
    # OUTPUT: plots or metrics
    if cross_validation:
        #methods = ["ET", "RF", "SVM", "RLR", "LR", "LA", "EN", "MLP", "KN", "DMLP"] # regression
        #methods = ["ET", "RF", "SVM", "LG", "MLP", "KN", "DMLP", "HCR", "CR", "DT"] # classification
        methods = ["RF", "ET"]  # default for random forest and extra trees
        #methods = genModelSet(is_regr)
        p_log = p + "log/"
        if is_regr: p_log += "regression/"
        else: p_log += "classification/"
        checkAndCreateDir(p_log)
        num_folds = (end_dt - start_dt).days // 7  # one fold per week (integer division for Py3)
        for m in methods:
            start_time_str = datetime.now().strftime("%Y-%d-%m-%H%M%S")
            lg = generateLogger(p_log + m + "-" + start_time_str + ".log",
                                format=None)
            crossValidation(in_p=[p + "X.csv", p + "Y.csv", p + "C.csv"],
                            out_p_root=p,
                            event_thr=smell_thr,
                            method=m,
                            is_regr=is_regr,
                            logger=lg,
                            num_folds=num_folds,
                            skip_folds=48,
                            train_size=8000)
Example #37
from getData import *
from math import *
import matplotlib.pyplot as plt

def d2p(x):
	# fold a 0-360 degree direction onto [0, 90] degrees, then convert to radians
	if x < 90:
		pass
	elif x < 180:
		x = 180 - x
	elif x < 270:
		x = x - 180
	else:
		x = 360 - x
	return x/180*pi

data = getData('data_2.csv')
l = len(data)
x = range(l)
y1 = [i['power'] for i in data]
y2 = [(i['WS30']*cos(d2p(i['DIRECTION30'])) + i['WS31']*cos(d2p(i['DIRECTION31'])) +
	 i['WS32']*cos(d2p(i['DIRECTION32'])) + i['WS10']*cos(d2p(i['DIR10'])) +
	 i['WS10S']*cos(d2p(i['DIR10S'])))**3 for i in data]
print(y1)
print(y2)

plt.figure(figsize=(16, 8))
plt.plot(y2, y1, 'o')
#plt.plot(x, y1, 'r', linewidth=2)
#plt.plot(x, y2, 'b', linewidth=2)
plt.legend()
Example #38
        for n in range(len(mailto) // 3):
            if mailto[3 * n][0] == parts[1]:
                print("Mail to:")
                print("\t\t" + pName.upper() + " - " + i_no)
                print("\t\t" + cLoc)
                print("\t\t " + sp.join(mailto[3 * n + 1]))
                print("\t\t " + sp.join(mailto[3 * n + 2]) + "\n\n\n")
                found = 1
                break
        if found == 0:
            print("Mail to:")
            print("\t\tUnable to locate address for " + cLoc + "\n\n\n")

    print("-------------------------------------------------------------------")
    print("					'exit' or 'quit' to escape")


#%%%%
#MAIN
#%%%%%

lu = ""

mailto = getData("./MOD/prison_add.dict", 1)

while lu != "exit" and lu != "Exit" and lu != "quit" and lu != "Quit" and lu != "q" and lu != "Q":
    i_no = raw_input("\n\n\n\n\nPrisoner ID:")
    run_query(i_no)
    lu = raw_input()
#print "\n\n\n\n\nPrisoner ID:"
Example #39
#!/usr/bin/python
# -*- coding: utf-8 -*-

from __future__ import unicode_literals
import numpy as np
import pylab as plt
from getData import *

import matplotlib
matplotlib.rcParams.update({'font.size': 30,'font.weight':'bold'})

lam_range = (7, 10)  # wavelength range in µm; avoids shadowing the built-in `range`
lambdas = np.linspace(lam_range[0], lam_range[1], 100)
ag = getData("database/main/NaCl/Li.yml", lambdas)

fig=plt.figure()
plt.plot(lambdas,[ (x*x).real for x in ag ],'r-',label=r'Re',lw=4)

plt.plot(lambdas,[ (x*x).imag for x in ag] ,'b--',label=r'Im',lw=4)
#plt.plot(lambdasTiO2*1e3,[ (x*x).real for x in tio2],'g-',label=r'Real($\varepsilon_{TiO_{2}}$)')

c = 299792458
lambdas = lambdas * 1e-6  # µm -> m (the original factor 10e-6 equals 1e-5)
freq = c / lambdas
freq = freq / 1e12  # Hz -> THz; freq is only used by the commented-out plot below

#plt.plot(freq,[ (x*x).real for x in ag ],'r-',label=r'Real($\varepsilon_{Ag}$)')

plt.legend(loc=1,borderpad=0,frameon=False)
#plt.title(r"Współczynnik przenikalnośći elektrycznej ($\varepsilon$)");
plt.xlabel(r'$\lambda$ [$\mu m$]')