def loadFile(self):
    # load the data series from self.path, compute E, and produce the plots
    print(self.path)
    self.dataList = loader.load(self.path)
    print(self.dataList[0])
    self.calculateE()
    self.min = self.dataList[0][0]
    self.max = self.dataList[0][-1]
    self.savePlot()
    self.print_graph()
Example #2
def get(self):
    # read the pagination offset from the query string, defaulting to 0;
    # int(float(...)) also tolerates values like "10.0"
    offset = self.request.get('offset')
    if offset == '':
        offset = 0
    offset = int(float(offset))
    (meals, entrees) = loadData.load(offset)
    #(meals, entrees) = models.getMealsAndEntrees()
    #menus = models.getHomeMenus()
    #self.response.out.write(menus)

    for m in meals:
        self.response.out.write(m.html_string())
    for e in entrees:
        self.response.out.write(e.html_string())
Example #3
def showDWT():
    path = './bci2003'
    subject = 'trainData_DWT'.split(' ')
    loadData = load(subNames=subject, path=path)
    (x_train, y_train) = loadData.loadTrainDataFromTxt()

    d_avg = dict()
    for tp in range(2):
        tmp_index = indices(y_train, tp)
        #print(len(tmp_index)) #show class quantity
        tempList = x_train[tmp_index]
        fixList = np.sum(tempList, axis=0)
        resultList = np.divide(fixList, tempList.shape[0])
        d_avg[tp] = resultList
    plt.subplot(211)
    plt.plot(d_avg[0])
    plt.legend(['c3', 'cZ', 'c4'], loc='upper left')
    plt.subplot(212)
    plt.plot(d_avg[1])
    plt.legend(['c3', 'cZ', 'c4'], loc='upper left')
    plt.show()
    print("end")
Example #4
def showAveragepower():
    path = './bci2003/'
    subject = 'trainData'.split(' ')
    loadData = load(subNames=subject, path=path, isRand=True)
    (x_train, y_train) = loadData.loadTrainDataFromTxt()

    d_avg = dict()
    for tp in range(2):
        tmp_index = indices(y_train, tp)
        #print(len(tmp_index)) #show class quantity
        tempList = x_train[tmp_index]
        fixList = np.sum(tempList, axis=0)
        resultList = np.divide(fixList, tempList.shape[0])
        d_avg[tp] = resultList
    LH = np.array(d_avg[0])
    RH = np.array(d_avg[1])
    time = np.arange(LH[:, 0].size) / 128
    plt.subplot(211)
    plt.plot(time, LH)
    plt.legend(['c3', 'cZ', 'c4'], loc='upper left')
    plt.subplot(212)
    plt.plot(time, RH)
    plt.legend(['c3', 'cZ', 'c4'], loc='upper left')
    plt.show()
Example #5
import numpy as np
import loadData as ld
import writeToCSV as wc

if __name__ == '__main__':
    # path = './result/mistake_rate_0.0.csv'
    # data = ld.load(path)
    # matrix = np.asmatrix(data, dtype=float)
    # print(matrix)
    result_path = './result/mistake_rate_'
    mistake_rate = ['0.0', '0.05', '0.1', '0.2', '0.3', '0.5']
    for mr in mistake_rate:
        matrix = np.zeros([50, 5], dtype=float)
        max_iter = 100
        file = result_path + mr + '.csv'
        # average the per-run result matrices over max_iter iterations
        for i in range(0, max_iter):
            path = result_path + mr + '_' + str(i) + '.csv'
            data = ld.load(path)
            matrix += np.asmatrix(data, dtype=float)
        matrix /= max_iter
        wc.writecsvByName(matrix, file)
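
writeToCSV.writecsvByName is not shown in these excerpts; a minimal stand-in, assuming it simply dumps the averaged matrix to the named CSV file:

def writecsvByName(matrix, filename):
    # hypothetical stand-in for writeToCSV.writecsvByName:
    # write the matrix as comma-separated values
    np.savetxt(filename, np.asarray(matrix), delimiter=",")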
Example #6
"""
Created on Mon Nov 28 20:56:36 2016

@author: xierhacker
"""

from __future__ import print_function, division
import numpy as np
import tensorflow as tf
from scipy.io import loadmat
from scipy.io import savemat
import matplotlib.pyplot as plt
import loadData
#import dp

#load data
train_samples, train_labels = loadData.load("../data/train_32x32")
test_samples, test_labels = loadData.load("../data/test_32x32")
'''
#print the information of the data
#uncomment this block to run it
print("the shape of train_set:",train_samples.shape)
print("the shape of train_labels:",train_labels.shape[0])
print("type of samples element:",train_samples.dtype)
print("type of labels element:",train_labels.dtype)
print("1-20:\n",train_labels[1:20])
'''

#data transformat
train_samples, train_labels = loadData.transformat(train_samples, train_labels)
test_samples, test_labels = loadData.transformat(test_samples, test_labels)
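
loadData.transformat is not defined in the excerpt; a plausible sketch, assuming the usual SVHN .mat layout (samples stored H x W x C x N, labels 1-10 with 10 standing for digit 0) and one-hot label output -- the real implementation may differ:

def transformat(samples, labels, num_labels=10):
    # hypothetical reimplementation: move the sample axis first,
    # (H, W, C, N) -> (N, H, W, C)
    samples = np.transpose(samples, (3, 0, 1, 2)).astype(np.float32)
    # map SVHN label 10 to digit 0, then one-hot encode
    digits = labels.flatten().astype(int) % 10
    one_hot = np.eye(num_labels, dtype=np.float32)[digits]
    return samples, one_hot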
Example #7
import numpy as np
import tensorflow as tf
from keras.utils import np_utils
from loadData import load
from keras.layers.recurrent import LSTM
from keras.layers.wrappers import Bidirectional
from keras.optimizers import RMSprop
from keras.callbacks import Callback, ModelCheckpoint
from keras.models import load_model
from keras.utils import plot_model

np.random.seed(1337)  # for reproducibility
gpu_options = tf.GPUOptions(allow_growth=True)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))

# load train dataset
OUTPUT_SIZE = 2
path = './bci2003'
subject = 'trainData_DWT'.split(' ')
getDataClass = load(subNames=subject, path=path, notEqual='n', isRand=True)
(x_train, y_train) = getDataClass.loadTrainDataFromTxt()
y_train = np_utils.to_categorical(y_train, num_classes=OUTPUT_SIZE)
print('x_train shape:', x_train.shape)
print('y_train shape:', y_train.shape)
# load test dataset
path = './bci2003'
subject = 'testData_DWT'.split(' ')
getDataClass = load(subNames=subject, path=path, notEqual='n', isRand=True)
(x_test, y_test) = getDataClass.loadTrainDataFromTxt()
print('x_test shape:', x_test.shape)
print('y_test shape:', y_test.shape)

BATCH_SIZE = 16
EPOCHS = 200
TIME_STEPS = x_train.shape[1]
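
The snippet breaks off after the hyperparameters; the imports (LSTM, Bidirectional, RMSprop) suggest a bidirectional LSTM classifier. A minimal continuation consistent with those imports, offered only as an assumption, not the original model:

from keras.models import Sequential
from keras.layers import Dense

model = Sequential()
# bidirectional LSTM over (TIME_STEPS, channels) inputs
model.add(Bidirectional(LSTM(32), input_shape=(TIME_STEPS, x_train.shape[2])))
model.add(Dense(OUTPUT_SIZE, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer=RMSprop(), metrics=['accuracy'])
model.fit(x_train, y_train, batch_size=BATCH_SIZE, epochs=EPOCHS)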
Example #8
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from loadData import load
from sklearn.linear_model import LogisticRegression as LR


def logisticRegressionTrain(trainData, trainLabels, solver="liblinear"):
  model = LR(solver=solver)
  model.fit(trainData, np.array(trainLabels).ravel())
  return model

def logisticRegressionEvaluate(model, data, labels):
  predictions = model.predict(data)
  errors = (predictions != np.array(labels).ravel())
  return 100 * np.mean(errors)


if __name__ == '__main__':
  trainData, trainLabels, testData, testLabels = load()
  model = logisticRegressionTrain(trainData, trainLabels)

  trainError = logisticRegressionEvaluate(model, trainData, trainLabels)
  print "Training Error: " + trainError

  testError = logisticRegressionEvaluate(model, testData, testLabels)
  print "Test Error: " + testError
Example #9
# -*- coding:utf-8 -*-

# learn and fit a classification problem

import math
import random
import numpy as np
import scipy.optimize as op
import matplotlib.pyplot as plt
import time
from loadData import load

#-- load the training data
data = load(r"F:\SomeMLProjects\ex2data1.txt")
# the first two columns of data are x, the last column is y; note y must be converted to matrix form
x = np.c_[data[:, 0], data[:, 1]]
y = np.array([data[:, 2]]).T

# seed the random number generator with the current timestamp as an int
np.random.seed(int(time.time()))

# even for a single-row matrix, np.random.randn returns an array of arrays
# theta has as many entries as one input x has dimensions; here the 1 selects the column count
m = np.size(x, 1)
theta = np.random.randn(m, 1)
#print(theta)

testX = []
testY = []
J_history = []
i_history = []
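
The excerpt ends before the model itself; a scipy.optimize-based logistic regression on ex2data1 typically needs a sigmoid and a cost function, sketched here as an assumption (the original definitions are not shown):

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def cost(theta, x, y):
    # negative mean log-likelihood for logistic regression
    h = sigmoid(x @ theta.reshape(-1, 1))
    return float(-np.mean(y * np.log(h) + (1 - y) * np.log(1 - h)))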
Example #10
import numpy as np
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, LSTM, BatchNormalization, Conv1D, MaxPooling1D
import matplotlib.pyplot as plt
import loadData
import preprocess
from sklearn import preprocessing
import pandas as pd

df = loadData.load()

# features=len(df.columns)
training_size = 0.8

split_point = int(training_size * len(df))

# split the data into training and testing sets in an 8:2 ratio
train_df = df[:split_point]
test_df = df[split_point:]

print(f"train_df {train_df[:10]}")
print(f"test_df {test_df[:10]}")

train_x, train_y = preprocess.process_data(train_df)

test_x, test_y = preprocess.process_data(test_df)

# print(f"train_x.shape[1:]{train_x.shape[1:]}")

NAME = "NIFTY50PRED"
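
The example stops right after naming the run; a minimal continuation consistent with the imports above (Sequential, LSTM, Dropout, Dense), purely an assumed sketch rather than the original architecture, treating the task as regression on the next value:

model = Sequential()
model.add(LSTM(64, input_shape=train_x.shape[1:]))
model.add(Dropout(0.2))
model.add(Dense(1))
model.compile(loss='mse', optimizer='adam')
model.fit(train_x, train_y, epochs=10, batch_size=64,
          validation_data=(test_x, test_y))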
Example #11
    args = parser.parse_args()

    # setup correct name
    args.save_name = args.sampling + "_" + str(args.no_lines) + "lines_" + str(
        args.seed) + "seed"

    # set up the gpu
    args.device = cudaDeviceChecker.device(args)

    # put a limit on cpu resources used by pytorch
    torch.set_num_threads(8)
    torch.random.manual_seed(args.seed)

    # load data
    dataloader_train, dataloader_val, dataloader_test = loadData.load(args)

    # %% create network depending on the type of sampling performed
    if args.sampling in ["random_uniform", "low_pass", "VDS", "GMS"]:
        Network = myModel.fixedMask(args)
    if args.sampling == "LOUPE":
        Network = myModel.LOUPEMask(args)
    if args.sampling == "DPS":
        Network = myModel.DPSMask(args)
    if args.sampling == "ADPS":
        Network = myModel.ADPSMask(args)

    Network = Network.to(args.device)

    # discriminator network
    Discriminator = discriminatorModel.Discriminator(args)
Example #12
def cluster():
    #   INITIALIZE PARAMETERS
    y, dates = loadData.load()

    sampling_rate = 1
    start = 400
    end = 1500
    eps = 26
    min_samples = 25

    threshold = 0.9
    branching_factor = 5

    #   CUSTOMIZE TRAIN DATA
    array = []
    for i in range(151):
        # x = y[i][0]
        x = y[0][i]
        # x = x[2000:4000]
        x = x[start:end:sampling_rate]
        array.append(x)

    X_train = np.array(array)
    X_train = preprocessing.scale(X_train, axis=1)

    array = []
    for x in X_train:
        x = x.flatten()

        fs = float(end - start)
        fc = float(20)
        w = float(fc / (fs / 2))

        b, a = signal.butter(5, w, 'low')
        xx1 = signal.filtfilt(b, a, x)

        array.append(xx1)

    X_train = np.array(array)

    print(np.linalg.norm(X_train[15] - X_train[14]))

    # nbrs = NearestNeighbors(n_neighbors=len(X_train)).fit(X_train)
    # distances, indices = nbrs.kneighbors(X_train)
    # # print(distances.flatten().shape)
    # # print(distances.shape)
    # sortedDiss = distances.flatten()
    # sortedDiss = np.sort(sortedDiss)
    # index = np.arange(0,22801)
    # plt.plot(sortedDiss, index)
    # plt.ylabel('indices')
    # plt.xlabel('distance')
    # plt.show()

    # clustering = OPTICS(min_samples=5).fit(X_train)
    clustering = DBSCAN(eps=eps, min_samples=min_samples).fit(X_train)
    # clustering = KMeans(n_clusters=3, random_state=0).fit(X_train)
    # clustering = AgglomerativeClustering(n_clusters=3).fit(X_train)
    # clustering = Birch(branching_factor=branching_factor, n_clusters=3, threshold=threshold, compute_labels=True).fit(X_train)

    for n, i in enumerate(clustering.labels_):
        if i == -1:
            clustering.labels_[n] = 2
    y_pred = clustering.labels_

    sink_indices = [u'040928', u'050606', u'061211']
    source_indices = [u'071004']
    for i in range(151):
        if dates[i][0] in sink_indices:
            y_pred[i] = 1
        if dates[i][0] in source_indices:
            y_pred[i] = 0

    print(type(y_pred))

    append_title = "DBSCAN Algorithm with minSamples={}, eps={}".format(
        min_samples, eps)

    # fcluster2 = filter_data.filter(X_train, 50, 6)

    plots.plotCluster(0,
                      y_pred,
                      X_train,
                      dates,
                      sampling_rate,
                      end,
                      start,
                      append_title,
                      analyse=True)
    # plots.plotCluster(1, y_pred, X_train, dates, sampling_rate, end, start, append_title, analyse=True)
    # plots.plotCluster(2, y_pred, X_train, dates, sampling_rate, end, start, append_title)
    # plots.plotCluster(2, y_pred, fcluster2, dates, sampling_rate, end, start, append_title)

    return y_pred, X_train
Example #13
    dataList[0] = tempRange
    dataList[1] = dCInterp
    return dataList


def drawPlot(x, y):
    plot.xlabel("Temp")
    plot.ylabel("Enthalpy")
    plot.plot(x, y)
    plot.show()


if __name__ == "__main__":
    filename = "dane.txt"
    dataFolderPath = "resources/"
    path = dataFolderPath + filename

    dataList = loader.load(path)

    dataList = interpolate(dataList)

    resultList = calculateE(dataList[0], dataList[1])

    p = Process(target=drawPlot, args=(dataList[0], resultList))
    p.start()
    for x in range(len(dataList[0])):
        print("Temp: {}\tdC: {}\t Result: {}".format(dataList[0][x],
                                                     dataList[1][x],
                                                     resultList[x]))
    p.join()
Example #14
import pandas as pd
import numpy as np
import os

import loadData as ld
dir_path = ld.dir_path

train = ld.load('train.csv')

userFeatures = ld.load('user.csv')

data = pd.merge(train, userFeatures, on=['userID'], how='left')

print(data.head(10))

#data.to_csv(r'data.csv', encoding='gbk')
Example #15
import loadData as load
import pandas as pd
from sklearn import linear_model as lin
import numpy as np

traincsv = load.load('train100.csv')

reg = lin.LinearRegression()


Example #16
from keras.models import load_model
from keras.utils import np_utils
from loadData import load

X_train, y_train, X_test, y_test = load()

model = load_model("my_model.h5")

score = model.predict(X_test)

c = 0
for i in range(10000):
	pos = 0
	maxEle = -1.00
	for j in range(len(score[i])):
		if(score[i][j] > maxEle):
			maxEle = score[i][j]
			pos = j
	if(pos == y_test[i]):
		c += 1
print((c / 10000) * 100)
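
The manual scan for the largest score is an argmax; a vectorized equivalent of the whole accuracy computation, assuming score is a NumPy array and y_test holds integer labels:

import numpy as np
accuracy = 100.0 * np.mean(np.argmax(score, axis=1) == np.asarray(y_test).ravel()[:len(score)])
print(accuracy)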
Example #17
    # setup correct name
    args.save_name = args.sampling + "_" + str(args.no_lines) + "lines_" + str(
        args.seed) + "seed"

    if args.Pineda == True:
        args.save_name += "_Pineda"

    # set up the gpu
    args.device = cudaDeviceChecker.device(args)

    # put a limit on cpu resources used by pytorch
    torch.set_num_threads(8)
    torch.random.manual_seed(args.seed)

    # load data
    _, _, dataloader_test = loadData.load(args, only_test=True)

    # create network depending on the type of sampling performed
    if args.sampling in ["random_uniform", "low_pass", "VDS", "GMS"]:
        Network = myModel.fixedMask(args)
    if args.sampling == "LOUPE":
        Network = myModel.LOUPEMask(args)
    if args.sampling == "DPS":
        Network = myModel.DPSMask(args)
    if args.sampling == "ADPS" and args.Pineda == False:
        Network = myModel.ADPSMask(args)
    if args.sampling == "ADPS" and args.Pineda == True:
        Network = myModel.ADPSMask_legacy(args)

    Network = Network.to(args.device)
Example #18
    mode = 'rgb'
else:
    mode = 'opt'
model = Inception_Inflated3d(input_shape=input_shape, dropout_prob=0.5,
                             data_format=args.data_format, mode=mode, nclass=args.nclass)
model.summary()
print("Number of layers in model : ", len(model.layers))
model.compile(loss=categorical_crossentropy, optimizer=Adam(), metrics=['accuracy'])
#-------------------------------------------LOAD DATA FOR TRAINING-----------------------------------------

print("[INFOR] : Starting to load data from disk...........")

# color: True RGB
# color: False OPT Flow RGB

from loadData import load
(X_train, y_train, X_test, y_test) = load(mode='opt')

print("[INFOR] : Loading Done !")

from sklearn.preprocessing import LabelBinarizer
label_binary = LabelBinarizer()
y_train = label_binary.fit_transform(y_train)
print(label_binary.classes_)

y_train = np.asanyarray(y_train)

y_test = label_binary.transform(y_test)
y_test = np.asanyarray(y_test)


# #------------------------------------------TRAINING-------------------------------------------------------
Example #19
    args.no_pixels = int(28**2 * args.percentage / 100)

    # setup correct name
    args.save_name = args.sampling + "_" + str(
        args.percentage) + "percent_" + str(args.seed) + "seed"

    # set up the gpu
    args.device = cudaDeviceChecker.device(args)

    # put a limit on cpu resources used by pytorch
    torch.set_num_threads(8)
    torch.random.manual_seed(args.seed)

    # load data
    train_loader, val_loader, test_loader = loadData.load(args)

    # %% create network depending on the type of sampling performed
    Network = myModel.Network(args)
    Network = Network.to(args.device)

    # optimizers
    if args.sampling == 'DPS':
        optimizer = optim.Adam([
            {
                'params': Network.f1.parameters(),
                'lr': args.learning_rate
            },
            {
                'params': Network.f2.parameters(),
                'lr': args.learning_rate
Example #20
import matplotlib.pyplot as plt
from scipy.io import loadmat, savemat
import numpy as np
from scipy import signal
import pywt
import umap
from sklearn.cluster import DBSCAN, KMeans
import copy
from tslearn.preprocessing import TimeSeriesScalerMeanVariance, TimeSeriesResampler

import sys
sys.path.insert(1, '../')
import loadData

y, dates = loadData.load()

eps = 24.5
min_samples = 27
sampling_rate = 1.5
start = 400
end = 1500

array = []
for i in range(151):
    x = y[i][0]
    x = x[2000:4000]
    x = x[start:end:1]
    array.append(x)
X_train = np.array(array)

X_train = TimeSeriesScalerMeanVariance().fit_transform(X_train[:151])