Example #1
def Main(filename,
         features_file,
         ALGORITHMS,
         number_of_parameters,
         number_of_features,
         filter_target,
         list_features=None):
    print("\n---------------------------------\n")
    print("Load Data", end="")
    DATA = Load_Data.load_data(filename,
                               ALGORITHMS,
                               filter_target=filter_target)
    print("  [OK]\nLoad ELA Features", end="")
    P, D, F = Load_Data.load_ELA_features(features_file)
    print("  [OK]\nLink ELA Features to problems", end="")
    Problem.link_all_features(DATA, P, D, F)
    print("  [OK]\nInitialize Empirical Performance Model", end="")
    model = epm.EmpiricalPerformanceModel(
        number_of_parameters,
        number_of_features,
        len(ALGORITHMS),
        input_type="parameters",
        selector=Selector.Random_selector(probability=0.7),
        list_features=list_features)
    print("  [OK]\nFix Training and Testing sets", end="")
    model.build_training_and_testing_sets(DATA)
    print("\nNumber of problems : " + str(len(model.get_results())) + "\n")
    '''
    print("  [OK]\nTrain EPM",end="")
    model.train_model()
    print("  [OK]\nTest EPM",end="")
    model.test_model()
    print("  [OK]\n")
  
    SBS=Statistic.SingleBestSolver(model)
    VBS=Statistic.VirtualBestSolver(model)
    RS=Statistic.RealSolver(model)
    Merit=Statistic.Merit(SBS,VBS,RS)
    print("SBS "+str(SBS))
    print("VBS "+str(VBS))
    print("RS "+str(RS))
    print("Merit "+str(Merit))
    '''
    model.reset_model()
    model.set_input_type('features')
    print("Train EPM", end="")
    model.train_model()
    print("  [OK]\nTest EPM", end="")
    model.test_model()
    print("  [OK]\n")
    SBS = Statistic.SingleBestSolver(model)
    VBS = Statistic.VirtualBestSolver(model)
    RS = Statistic.RealSolver(model)
    Merit = Statistic.Merit(SBS, VBS, RS)
    print("SBS " + str(SBS))
    print("VBS " + str(VBS))
    print("RS " + str(RS))
    print("Merit " + str(Merit))
Example #2
def oneSample_ttest(map_list, timeseries, coord):
    print("hOLII")
    template = ld.load_timserie_mask()
    print("alooooo")
    design_matrix = pd.DataFrame([1] * len(map_list), columns=['intercept'])
    print("Hasta aqui si")
    nifti_masker = NiftiMasker(standardize=True,
                               mask_strategy='epi',
                               memory="nilearn_cache",
                               memory_level=2,
                               smoothing_fwhm=8)
    print("me iamgino que hasta aqui llega.")
    ts = ants.matrix_to_timeseries(template, timeseries[0])
    print("Esto es nuevo")
    nifti_masker.fit(ts)
    print("No estoy seguro de donde ha reventado")
    zf = np.asmatrix(map_list[0].transpose())
    imgUsar = nifti_masker.inverse_transform(zf)
    print("No estoy seguro de donde ha reventado 2")
    second_level_model = SecondLevelModel().fit(pd.DataFrame(zf),
                                                design_matrix=design_matrix)
    print("Creo que peta aqui")
    z_map = second_level_model.compute_contrast(output_type='z_score')

    return z_map
Example #3
def Load_Historical_Data(tag, Base):

    import json
    from datetime import datetime

    import Load_Data as Lod_data_module

    test_tra = Lod_data_module.Loda_Historic_Data(tag)    # Load data
    date = []
    value = []
    for test_data in test_tra:
        date_str = test_data['Sample_Date']
        format_str = '%d/%m/%Y %H:%M'                     # The time format
        datetime_obj = datetime.strptime(date_str, format_str)
        date.append(datetime_obj)
        i_pu = float(test_data['Val']) / Base
        value.append(i_pu)

    # Sort by day
    Monday = Sort_Data_By_Date(date, value, 0)
    Tuesday = Sort_Data_By_Date(date, value, 1)
    Wednesday = Sort_Data_By_Date(date, value, 2)
    Thursday = Sort_Data_By_Date(date, value, 3)
    Friday = Sort_Data_By_Date(date, value, 4)
    Saturday = Sort_Data_By_Date(date, value, 5)
    Sunday = Sort_Data_By_Date(date, value, 6)

    Day = {'Monday': Monday, 'Tuesday': Tuesday, 'Wednesday': Wednesday,
           'Thursday': Thursday, 'Friday': Friday, 'Saturday': Saturday,
           'Sunday': Sunday}
    data = Load_Historic_Data(date, value, Day)
    return data
Example #4
    def step(self):
 
        num_train = self.X_train.shape[0]
        batch_mask = np.random.choice(num_train, self.batch_size)
        X_batch = self.X_train[batch_mask]
        y_batch = self.y_train[batch_mask]

        X_batch = ld.augment_batch(X_batch,
                                   rotation_range=5,
                                   height_shift_range=0.16,
                                   width_shift_range=0.16,
                                   img_row_axis=1,
                                   img_col_axis=2,
                                   img_channel_axis=0,
                                   horizontal_flip=True,
                                   vertical_flip=False)

        loss = self.model.loss(X_batch, y_batch)
        self.loss_history.append(loss)

        self.model.update_Layer()
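
A minimal sketch of how a step() method like this is typically driven from a training loop; the solver object and the iteration count are placeholders, not part of the original code:

num_iterations = 1000                          # placeholder
for t in range(num_iterations):
    solver.step()                              # sample a batch, augment it, record the loss
    if t % 100 == 0:
        print("iteration %d, loss %.4f" % (t, solver.loss_history[-1]))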
Example #5
    def __init__(self, datastart, dataend, analysestart):
        self.datastart = datastart
        self.dataend = dataend
        self.analysestart = analysestart
        self.bound = [[0.0001, 0.6], [0.01, 3], [1.05, 1.4], [0.4, 0.7]]
        DataLoader = loader.load_data(analysestart, datastart, dataend)
        cmf.set_parallel_threads(1)

        ###################### Forcing data ####################################
        ClimateFilename = 'Climate_Face_new2.csv'
        try:
            self.meteoarray = np.load(ClimateFilename + str(datastart.date()) +
                                      str(dataend.date()) + '.npy')
            self.rain = cmf.timeseries.from_array(
                begin=self.datastart, step=timedelta(hours=1),
                data=self.meteoarray['Nd_mm_day'])  # in mm/day
            self.rHmean = cmf.timeseries.from_array(
                begin=self.datastart, step=timedelta(hours=1),
                data=self.meteoarray['Rh'])
            self.Windspeed = cmf.timeseries.from_array(
                begin=self.datastart, step=timedelta(hours=1),
                data=self.meteoarray['Wind'])
            self.Rs = cmf.timeseries.from_array(
                begin=self.datastart, step=timedelta(hours=1),
                data=self.meteoarray['Rs_meas'])
            self.T = cmf.timeseries.from_array(
                begin=self.datastart, step=timedelta(hours=1),
                data=self.meteoarray['Temp'])

        except:
            DataLoader.climate_pickle(ClimateFilename)
            self.meteoarray = np.load(ClimateFilename + str(datastart.date()) +
                                      str(dataend.date()) + '.npy')
            self.rain = cmf.timeseries.from_array(
                begin=self.datastart, step=timedelta(hours=1),
                data=self.meteoarray['Nd_mm_day'])  # in mm/day
            self.rHmean = cmf.timeseries.from_array(
                begin=self.datastart, step=timedelta(hours=1),
                data=self.meteoarray['Rh'])
            self.Windspeed = cmf.timeseries.from_array(
                begin=self.datastart, step=timedelta(hours=1),
                data=self.meteoarray['Wind'])
            self.Rs = cmf.timeseries.from_array(
                begin=self.datastart, step=timedelta(hours=1),
                data=self.meteoarray['Rs_meas'])
            self.T = cmf.timeseries.from_array(
                begin=self.datastart, step=timedelta(hours=1),
                data=self.meteoarray['Temp'])

        self.piezometer = 'P4'
        self.gw_array = DataLoader.groundwater(self.piezometer)
        ###########################################################################

        ###################### Evaluation data ####################################
        eval_soil_moisture = DataLoader.soil_moisture('A1')
        self.eval_dates = eval_soil_moisture['Date']
        self.observations = eval_soil_moisture['A1']
Example #6
'''
@author: abhijit.tomar

Module for predicting digits with RandomForestClassifier
'''
import Load_Data
import pickle
import pandas as pd
from predict import Get_Optimal
import json

from sklearn.ensemble import RandomForestClassifier

if __name__ == '__main__':
    # Load the training and test data
    X_train,y_train,X_test = Load_Data.load_data()
    # Initialize classifier
    clf = RandomForestClassifier()
    # Set up candidate values for the hyper-parameters; GridSearch uses these to derive the optimal set of hyper-parameters
    tuned_parameters = [{'n_estimators': [10,20,30,40,50,60,70,80,90,100,150,200], 
                         'bootstrap': [True],'warm_start': [True,False],
                         'oob_score': [True,False],'verbose': [100]}]
    # Initialize scoring metric for this problem space
    scores = ['accuracy']
    # Generate optimal set of hyper-parameters for the above classifier
    Get_Optimal.generate_optimal(X_train, y_train, clf, tuned_parameters, scores)
    # Load the optimal hyper-parameters
    param_map = json.load(open('../../resources/params/'+type(clf).__name__+'_for_accuracy_params.json'))
    # Reinitialize the classifier but now with the optimal hyper-parameters
    clf = RandomForestClassifier(**param_map)
    # Train the classifier
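    # The listing stops at the training comment. One plausible continuation, assuming the
    # X_test returned by Load_Data.load_data() above is the set to predict on; the printed
    # preview is purely illustrative:
    clf.fit(X_train, y_train)              # train with the optimal hyper-parameters
    predictions = clf.predict(X_test)      # predict digit labels for the test set
    print(predictions[:10])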
Example #7
import Load_Data as ld
from keras import layers, models, optimizers
import tensorflow as tf
from keras.utils import np_utils
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler

# GPU memory management (only applied when a GPU is present)
gpu = tf.config.experimental.list_physical_devices('GPU')
if gpu:
    tf.config.experimental.set_memory_growth(gpu[0], True)

dir_path_train = r"C:/Users/ASUS/Desktop/AMLS_Assignment/AMLS_PROJECT/UCI_HAR_Dataset/train"
dir_path_test = r"C:/Users/ASUS/Desktop/AMLS_Assignment/AMLS_PROJECT/UCI_HAR_Dataset/test"

data_train, label_train = ld.build_dataset(dir_path_train, 'Inertial Signals',
                                           'train')
data_test, label_test = ld.build_dataset(dir_path_test, 'Inertial Signals',
                                         'test')
ld.distribution_ana(label_train)
ld.distribution_ana(label_test)
print(data_train.shape)
print(label_train.shape)
print(data_test.shape)
print(label_test.shape)


def evaluate_cnn_1d(data_train, label_train, data_test, label_test):
    label_train = np_utils.to_categorical(label_train - 1)
    label_test = np_utils.to_categorical(label_test - 1)
    #print("Model building begin:")
    cnn_1d = models.Sequential()
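    # The snippet is cut off right after the Sequential constructor. A minimal 1-D CNN in the
    # same spirit, assuming windows shaped (128 timesteps, 9 channels) and 6 activity classes;
    # the layer sizes are illustrative, not the original architecture.
    cnn_1d.add(layers.Conv1D(64, kernel_size=3, activation='relu',
                             input_shape=(128, 9)))    # assumed window shape
    cnn_1d.add(layers.MaxPooling1D(pool_size=2))
    cnn_1d.add(layers.Flatten())
    cnn_1d.add(layers.Dense(100, activation='relu'))
    cnn_1d.add(layers.Dense(6, activation='softmax'))  # assumed 6 HAR classes
    cnn_1d.compile(optimizer='adam',
                   loss='categorical_crossentropy',
                   metrics=['accuracy'])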
Example #8

import torch
from torch.utils.data import DataLoader, random_split

import Load_Data


def get_parameter():
    global parameter
    for i in range(len(parameter)):
        t = parameter[i][0]
        for par in parameter[i]:
            t = torch.add(t, par)
        t = torch.sub(t, parameter[i][0])
        result_parameter[i] = t


if __name__ == '__main__':
    predicts = []
    reals = []
    dataset = Load_Data.Sample_Dataset()
    result_parameter = [[], []]
    parameter = [[], []]
    for j in range(2):
        front = int(dataset.len * 0.8)
        back = int(dataset.len * 0.2) + 1
        train_data, test_data = random_split(
            dataset=dataset,
            lengths=[front, back],
            generator=torch.Generator().manual_seed(0))
        train_Loader = DataLoader(dataset=train_data,
                                  batch_size=320,
                                  shuffle=True)
        test_Loader = DataLoader(dataset=test_data,
                                 batch_size=320,
                                 shuffle=True)
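        # The model and the training loop are not shown in this excerpt. A generic PyTorch
        # loop over train_Loader, assuming the dataset yields (features, target) pairs; the
        # model, optimizer, and feature size are placeholders, not from the original project.
        net = torch.nn.Linear(8, 1)                    # placeholder model
        optimizer = torch.optim.SGD(net.parameters(), lr=0.01)
        loss_fn = torch.nn.MSELoss()
        for x, y in train_Loader:
            optimizer.zero_grad()
            loss = loss_fn(net(x.float()), y.float().view(-1, 1))
            loss.backward()
            optimizer.step()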
Example #9
num_output = 64

model.add(keras.layers.Dense(num_output, activation='relu'))
model.add(keras.layers.Dropout(0.3))

model.add(keras.layers.Dense(2 * num_output, activation='relu'))
model.add(keras.layers.Dropout(0.3))

model.add(keras.layers.Dense(num_classes, activation='softmax'))

model.compile(optimizer='Adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])

datasets = Load_Data.init(epochs)

# step_per_epoch = len(one_queue_images) / batch_size
spe = int((len(datasets.train_images) / epochs) / batch_size)
history = model.fit_generator(Load_Data.DATA_ITERATOR(datasets.train_images,
                                                      datasets.train_labels,
                                                      batch_size=batch_size),
                              steps_per_epoch=spe,
                              epochs=epochs)

print('Visualizing data.....')
# visualize data
# list all data in history
print(history.history.keys())
# summarize history for accuracy
plt.plot(history.history['acc'])
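
The plotting section is cut off after the first curve. A common way to finish the accuracy plot with the same history object; the title, labels, and legend text are illustrative:

plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train'], loc='upper left')
plt.show()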
Example #10
    # Map ROOT_PATH to the directory on your machine that contains the images.
    # Download the images from http://btsd.ethz.ch/shareddata/
    # (BelgiumTSC_Training and BelgiumTSC_Testing).

    # Location of the images
    ROOT_PATH = "C:/Users/gfsilva/Documents/DataSetClassifierImage/"
    train_data_dir = os.path.join(ROOT_PATH, "BelgiumTSC_Training/Training")
    Val_data_dir = os.path.join(ROOT_PATH, "BelgiumTSC_Testing/Testing")

    # Load the trained model
    #print("Starting to read the trained model")
    #ClassLoad = Class_Load()
    #ClassLoad.load_model(model,"modeloTreinado.h5")

    # Load the images
    LoadData = Load_Data()
    images, labels = LoadData.load_data(train_data_dir)

    # Resize the images to 32x32
    ClassRedimImage = Class_Redim_Image()
    images32 = ClassRedimImage.redim_image(images)

    # Split the images into training and test sets
    ClassTraining = Class_Training(epochs=500)
    #ClassTraining.div_training_test(images32, labels)
    X_train, X_test, y_train, y_test = ClassTraining.div_training_test(
        images32, labels)

    # Divide the image pixels by 255 so every pixel has a value between 0 and 1 (this eases training and improves the model's accuracy)
    X_train, X_test, y_train, y_test, input_shape = ClassRedimImage.redim_pixels(
        X_train, X_test, y_train, y_test)
Example #11
    def __init__(self, datastart, dataend, analysestart):
        self.datastart = datastart
        self.dataend = dataend
        self.analysestart = analysestart
        self.bound = [[0.0001, 0.6], [0.01, 3], [1.05, 1.4], [0.4, 0.7]]
        DataLoader = loader.load_data(analysestart, datastart, dataend)
        cmf.set_parallel_threads(1)

        ###################### Forcing data ####################################
        ClimateFilename = 'Climate_Face_new2.csv'
        try:
            self.meteoarray = np.load(ClimateFilename + str(datastart.date()) +
                                      str(dataend.date()) + '.npy')
            self.rain = cmf.timeseries.from_array(
                begin=self.datastart,
                step=timedelta(hours=1),
                data=self.meteoarray['Nd_mm_day'])  #in mm/day
            self.rHmean = cmf.timeseries.from_array(begin=self.datastart,
                                                    step=timedelta(hours=1),
                                                    data=self.meteoarray['Rh'])
            self.Windspeed = cmf.timeseries.from_array(
                begin=self.datastart,
                step=timedelta(hours=1),
                data=self.meteoarray['Wind'])
            self.Rs = cmf.timeseries.from_array(
                begin=self.datastart,
                step=timedelta(hours=1),
                data=self.meteoarray['Rs_meas'])
            self.T = cmf.timeseries.from_array(begin=self.datastart,
                                               step=timedelta(hours=1),
                                               data=self.meteoarray['Temp'])

        except:
            DataLoader.climate_pickle(ClimateFilename)
            self.meteoarray = np.load(ClimateFilename + str(datastart.date()) +
                                      str(dataend.date()) + '.npy')
            self.rain = cmf.timeseries.from_array(
                begin=self.datastart,
                step=timedelta(hours=1),
                data=self.meteoarray['Nd_mm_day'])  #in mm/day
            self.rHmean = cmf.timeseries.from_array(begin=self.datastart,
                                                    step=timedelta(hours=1),
                                                    data=self.meteoarray['Rh'])
            self.Windspeed = cmf.timeseries.from_array(
                begin=self.datastart,
                step=timedelta(hours=1),
                data=self.meteoarray['Wind'])
            self.Rs = cmf.timeseries.from_array(
                begin=self.datastart,
                step=timedelta(hours=1),
                data=self.meteoarray['Rs_meas'])
            self.T = cmf.timeseries.from_array(begin=self.datastart,
                                               step=timedelta(hours=1),
                                               data=self.meteoarray['Temp'])

        self.piezometer = 'P4'
        self.gw_array = DataLoader.groundwater(self.piezometer)
        ###########################################################################

        ###################### Evaluation data ####################################
        eval_soil_moisture = DataLoader.soil_moisture('A1')
        self.eval_dates = eval_soil_moisture['Date']
        self.observations = eval_soil_moisture['A1']
Example #12
from tensorflow import keras

# Import helper libraries
import numpy as np
import matplotlib.pyplot as plt

# Import the custom data-loading module (the direct dataset download described on the official site is unavailable, so the dataset is downloaded manually and read from disk)
import Load_Data

#print(tf.__version__)
# Show the TensorFlow version info

# Load the training data and labels, and the test data and labels
# See the load_data function for the details of how they are loaded
(train_images, train_labels), (test_images,
                               test_labels) = Load_Data.load_data()

# The labels are the class indices 0-9, so the class names are written out here
class_names = [
    'T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal', 'Shirt',
    'Sneaker', 'Bag', 'Ankle boot'
]

# Inspect the data
print(train_images.shape)
print(len(train_labels))
print(train_labels)
print(test_images.shape)
print(len(test_labels))
'''
# Inspect the training images
Example #13
import numpy as np
import Load_Data as ld
import ants
import nibabel as nib
import ImagesFunctions as imf
from scipy import stats
from nilearn import plotting, datasets
import matplotlib
from timeit import default_timer
startTime = default_timer()
stat_map_list = list()
coord, timeseries, indiceTimeseries, coordMatrix = ld.Load_Timeseries()
coord = np.int_(coord)
time2 = default_timer()

print("Tiempo en cargar los datos: ", time2 - startTime)
stat_map_list, zfisher_list = imf.Get_Pearson_Correlation(
    timeseries, indiceTimeseries)
print(stat_map_list[0].shape)
print("Matriz zfisher", np.asmatrix(zfisher_list[0]).shape)
fsaverage = datasets.fetch_surf_fsaverage()
print(fsaverage['pial_left'])
time3 = default_timer()
print("Tiempo que tarda en hacer el analisis: ", time3 - time2)
x = imf.oneSample_ttest(zfisher_list, timeseries, coord)
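
A possible way to inspect the returned z-map, reusing the nilearn plotting module already imported above; the threshold and title are illustrative:

plotting.plot_stat_map(x, threshold=2.3, title="one-sample t-test (z-map)")
plotting.show()
print("Total time: ", default_timer() - startTime)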