Example #1
import os
import pickle

import Read_Data as rd  # alias assumed, matching the other examples


def main():
    # Reading Hart's essential genes
    if not os.path.isfile('./pickled/essential_nonessential_genes.p'):
        rd.ReadEssential_NonEssential_Genes()
        print("Finished reading Hart's essentiality genes")
    essential_genes, nonessential_genes = pickle.load(
        open("./pickled/essential_nonessential_genes.p", "rb"))

    if not os.path.isfile('./input/GBM-lib1_l2fc.csv'):
        rd.ProcessTKODataForDESEQ()

    # RunAlgorithms('Simulation', 'All')

    # rd.ProcessTKODataForDESEQ()
    with open(ff_name, "a") as ff:
        print('#################################', file=ff)

    filenames = [
        'HCT116_1-lib1', 'HCT116_2-lib1', 'DLD1-lib1', 'HeLa-lib1',
        'RPE1-lib1', 'GBM-lib1'
    ]
    # RunAlgorithms(filenames[1], 'All')

    for filename in filenames:
        with open(ff_name, "a") as ff:
            print(filename, file=ff)
        RunAlgorithms(filename, 'All')
    return
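main() assumes rd.ReadEssential_NonEssential_Genes() pickles an (essential, nonessential) tuple into ./pickled/. A minimal sketch of such a helper; the input file names and one-gene-per-line layout are assumptions, not taken from the original Read_Data module:

import pickle

def ReadEssential_NonEssential_Genes():
    # Hypothetical input files: one gene symbol per line.
    with open('./input/essential_genes.txt') as f:
        essential = [line.strip() for line in f if line.strip()]
    with open('./input/nonessential_genes.txt') as f:
        nonessential = [line.strip() for line in f if line.strip()]
    with open('./pickled/essential_nonessential_genes.p', 'wb') as out:
        pickle.dump((essential, nonessential), out)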
Example #2
def main(): 
    
    depth_file_2016_names = read_file_in_folder(bird_depth_2016_path)
    depth_file_2017_names = read_file_in_folder(bird_depth_2017_path)
    depth_file_2018_names = read_file_in_folder(bird_depth_2018_path)
    
    GPS_file_2016_names = read_file_in_folder(bird_GPS_2016_path)
    GPS_file_2017_names = read_file_in_folder(bird_GPS_2017_path)
    GPS_file_2018_names = read_file_in_folder(bird_GPS_2018_path)
    
    depth_files = [depth_file_2016_names, depth_file_2017_names, depth_file_2018_names]
    GPS_files = [GPS_file_2016_names, GPS_file_2017_names, GPS_file_2018_names]
    
    for i in range(len(depth_files)):
        for j in range(len(depth_files[i])):
            Trip_time = Read_Data.Read_bird_trip(GPS_files[i][j], 0, 99)
            GPS_Lon = Read_Data.Read_bird_trip(GPS_files[i][j], 5, 0)
            GPS_Lat = Read_Data.Read_bird_trip(GPS_files[i][j], 6, 0)
            water_depth_trip = read_depth_trip_txt(depth_files[i][j])

            bird_trip_define_label(Trip_time, GPS_Lon, GPS_Lat, trip_day_define, trip_hour_define, index_sex, sst_data, trip_index)
            bird_trip_number_of_dives_hour(Trip_time, trip_day_define, trip_hour_define, index_sex, water_depth_data)

    t_sum.write_xlsm_sum('features_' + str(trip_start_de) + '-' + str(trip_hour_define))

    print('fin')
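read_file_in_folder is not shown; given how its return value is indexed above, it presumably returns the list of file paths in a folder. A plausible sketch:

import os

def read_file_in_folder(folder_path):
    # Return the files in folder_path as a sorted list of full paths.
    return sorted(os.path.join(folder_path, name)
                  for name in os.listdir(folder_path))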
Example #3
def search():
	a = request.args.get('a', 0, type=str)
	user_id = request.args.get('uid', 0, type=str)
	query = a
	db = dbms.Database()
	res = db.find_queryresults(query)
	non_relevant = db.find_nonrelevant(query) 
	update = False
	#print "NR: "
	#print non_relevant
	if res is None:
		res2 = Read_Data.work(query)
		temp = copy.deepcopy(res2)    	#Important - Deep copy of returned results list
	else:
		#print "Reading from DB..."
		if len(res) < 10:
			res = Read_Data.work(query)
			update = True
			print "Results < 10"
		temp = copy.deepcopy(res)
	
	non_relevant_flag = False
	
	temp2 = copy.deepcopy(temp)
		
	for course in temp2:
		if course[1] in non_relevant:
			print "Removing non-relevant course ", course[1]
			temp.remove(course)
			non_relevant_flag = True


	if non_relevant_flag or update:
		#print "Updating DB..."
		db.update_queryresults(query, temp)
	else:
		db.insert_queryresults(query, temp)
	
	if len(temp) > 10:
		temp = temp[0:10]	
	
	user_info = conn_data(db, user_id, temp)	
	print "User Info list: ", user_info
	#print "User not interested list: ", db.find_notinterested(user_id)
	for i in range(len(user_info)):
		print user_info[i]
		temp[i].append(user_info[i]["basic"])
		temp[i].append(user_info[i]["adv"])
		temp[i].append(user_info[i]["likes"])

	ret = isbn.getisbnData(query)
	return jsonify(result = temp, result2 = ret)
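The deep copies above matter: the loop iterates over temp2 while removing from temp, because removing from the list you are iterating over skips elements. A minimal illustration:

items = [1, 2, 3, 4]
for x in list(items):    # iterate over a copy...
    if x % 2 == 0:
        items.remove(x)  # ...so removing from the original is safe
print(items)             # [1, 3]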
Example #4
    def DataReader(self, Input_filename, NumDataSet):

        ### Read Data ###
        print('Read Data')
        self.Data_header1, self.Data_array1, self.Data_stddev1 = ReadData.readData(
            Input_filename, NumDataSet)
        self.Input_filename_ = Input_filename
        self.NumDataSet_ = NumDataSet

        ReadData.plot_inputdata(self.Data_header1, self.Data_array1,
                                self.Data_stddev1)

        ### Find Sample Size ###
        Data_array_numrows = self.Data_array1.shape[0]  # Time + All Inducers
        Data_array_numcols = self.Data_array1.shape[1]  # Number of RFP Data Per Inducer
        self.Sample_size = (Data_array_numrows - 1) * Data_array_numcols
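For example, if Data_array1 has 9 rows (one time row plus 8 inducer rows) and 12 columns of RFP readings, Sample_size = (9 - 1) * 12 = 96.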
Example #5
    def DataReader(self, Input_filename, NumDataSet, Inducer_unit):

        ### Read Data ###
        print('Read Data')
        self.Data_header1, self.Data_array1, self.Data_stddev1, self.Inducer1, self.Inducer_log1, self.Sample_size \
            = ReadData.run_readdata(Input_filename, NumDataSet, Inducer_unit)

        self.Input_filename_ = Input_filename
        self.NumDataSet_ = NumDataSet
Example #6
import numpy as np
import Read_Data as RD  # alias assumed, matching the other examples

Num_Cross_Folders = 5
G_Mean = np.zeros(Num_Cross_Folders)
Sensitivity = np.zeros(Num_Cross_Folders)
Specificity = np.zeros(Num_Cross_Folders)
G_Mean_GAN = np.zeros(Num_Cross_Folders)
Sensitivity_GAN = np.zeros(Num_Cross_Folders)
Specificity_GAN = np.zeros(Num_Cross_Folders)

for j in range(Num_Cross_Folders):
    #        dir_train = "glass1-5-fold/glass1-5-" + str(j+1) + "tra.dat"
    #        dir_test = "glass1-5-fold/glass1-5-" + str(j+1) + "tst.dat"
    dir_train = "page-blocks0-5-fold/page-blocks0-5-" + str(j + 1) + "tra.dat"
    dir_test = "page-blocks0-5-fold/page-blocks0-5-" + str(j + 1) + "tst.dat"

    RD.Initialize_Data(dir_train)
    Train_Feature = RD.get_feature()
    Train_Label = RD.get_label()
    Train_Label = Train_Label.ravel()
    print(Train_Feature.shape)
    print(Train_Label.size)

    #    clf = svm.SVC(C=1, kernel='rbf', gamma= 0.2)
    #    clf.fit(Train_Feature, Train_Label)

    Feature_samples = RD.get_positive_feature()
    G = GAN_Build(Feature_samples)
    Sudo_Samples = Over_Sampling(G, RD.Num_negative - RD.Num_positive, 6)
    print(Sudo_Samples[0])
    print(Sudo_Samples[-1])
    Train_Feature = np.concatenate((Train_Feature, Sudo_Samples))
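The metric arrays are filled later in the fold loop, which is cut off here. A common way to compute them, assuming a fitted classifier clf (cf. the commented-out SVC) and test arrays Test_Feature, Test_Label (both assumptions), with G-mean = sqrt(sensitivity * specificity):

    from sklearn.metrics import confusion_matrix

    tn, fp, fn, tp = confusion_matrix(Test_Label, clf.predict(Test_Feature)).ravel()
    Sensitivity[j] = tp / (tp + fn)
    Specificity[j] = tn / (tn + fp)
    G_Mean[j] = np.sqrt(Sensitivity[j] * Specificity[j])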
Example #7
from gurobipy import *
import Read_Data
import numpy as np
import time
import DJRP_20190704_V1
#filenumbers = ['01','02','03','04','05','06','07','08','09','10']
filenumbers = ['01', '02', '03', '04', '05']
for number in filenumbers:
    folder = "_Daten_und_Loesungen\\E\\"
    filename = "setE-0" + number + ".txt"
    #nitems, planningHorizon, MJC, DPP, DAVG, HLC, MNC, CNT_WEIGHT, CNT_VALUE, REQ_WEIGHT, REQ_VALUE, MIN_WEIGHT, MIN_VALUE = \
    nitems, planningHorizon, S, D, DAVG, h, s_minor, w, v, weightREQ, valueREQ, minWeight, minValue = \
        Read_Data.read_Data (folder+filename)

    # create periods and items to iterate through
    periods = range(planningHorizon)
    items = range(nitems)
    maxIter = 100
    minAmountItems = np.linspace(0.85, 0.99, num=maxIter)
    # Big M with sum over Demand
    M = {}
    for i in items:
        M[i] = 0
        for t in periods:
            M[i] = M[i] + D[i, t]
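    # The loop above is equivalent to a dict comprehension:
    # M = {i: sum(D[i, t] for t in periods) for i in items}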

    start = time.time()
    DJRP_Model, DJRP_z, DJRP_y, DJRP_B, DJRP_I = \
    DJRP_20190704_V1.DJRP_run (nitems, planningHorizon, S, D, DAVG, h, s_minor, w, v, weightREQ, valueREQ, minWeight, minValue)
    #%% Create patterns
    # Create subpatterns and initialize for every period a pattern that can order everything
Example #8
from gurobipy import *
import Read_Data
import Static_Model
import matplotlib.pyplot as plt
import numpy as np
import time

#nitems, planningHorizon, MJC, DPP, DAVG, HLC, MNC, CNT_WEIGHT, CNT_VALUE, REQ_WEIGHT, REQ_VALUE, MIN_WEIGHT, MIN_VALUE = \
inst_set = 'setB-002'
instance = inst_set + '.txt'
nitems, planningHorizon, S, D, DAVG, h, s, w, v, weightREQ, valueREQ, minWeight, minValue = \
    Read_Data.read_Data (instance)

# Periods and items to iterate through
periods = range(planningHorizon)
items = range(nitems)

#Start modeling
m = Model("DJRP_model")

#Introduction of variables
z = {}  #An order is placed at period t
y = {}  #An order is placed at period t for item i
B = {}  #Order quantity of item i arriving at the beginning of period t
I = {}  #Level of inventory of item i at the end of period t
for t in periods:
    z[t] = m.addVar(vtype=GRB.BINARY, name="Z_%s" % str(t))
    for i in items:
        y[i, t] = m.addVar(vtype=GRB.BINARY, name="Y_%s%s" % (str(i), str(t)))
        B[i, t] = m.addVar(vtype=GRB.INTEGER,
                           name="B_%s%s" % (str(i), str(t)),
Example #9
	authentication = linkedin.LinkedInAuthentication(API_KEY, API_SECRET, RETURN_URL, linkedin.PERMISSIONS.enums.values())
	webbrowser.open(authentication.authorization_url)  # open this url on your browser
	return

@app.route('/')
def home():
	return render_template('login.html')

@app.route('/redir.html')
def redir():
	authentication = linkedin.LinkedInAuthentication(API_KEY, API_SECRET, RETURN_URL, linkedin.PERMISSIONS.enums.values())
	application = linkedin.LinkedInApplication(authentication)	

	if request.args.get('code', '') != '':
		authentication.authorization_code = request.args.get('code', '')
		authentication.get_access_token()
		ret = {}
		ret = application.get_profile(selectors=['id', 'first-name', 'last-name', 'location', 'distance', 'num-connections', 'skills', 'educations', 'interests', 'courses', 'following', 'related-profile-views', 'job-bookmarks', 'certifications'])
		db = dbms.Database()
		db.insert_user(ret)
		name = ret['firstName']
		return render_template('index.html')		
	else:
		print "No Auth Code\n"
		return render_template('login.html')

if __name__ == '__main__':
	app.debug = True
	Read_Data.preprocess()
	app.run()
Example #10
import pandas as pd
import numpy as np

import OptimizationFile
import Date_Gen as dg
import CovarianceCalc_updated
import Read_Data as rd
import TrendIndcatorCal
import RegionalFactor
import tvcw
import calendar

incep_date = pd.to_datetime('2014-05-20')
data_incep_date = pd.to_datetime('2013-05-8')
end_date = pd.to_datetime('2014-05-25')
#calDate = pd.to_datetime('2014-05-16')
#calDate = pd.to_datetime('2013-05-10')
"""Input Data"""
df_input_px, df_input_div, df_reg_factor, df_descrip, df_tax, sr_trading_days = rd.read_data(
    data_incep_date, end_date)
"""Cleaning Data"""
""" Cleaning data to replace nan with previous day value"""
df_input_clean = df_input_px.fillna(method='pad')
"""reading long term Vol"""
LongTermVol = np.transpose(np.array(df_descrip.loc[:, "Long_Term_Volatility"]))
"""reading Gap"""
Gap = np.transpose(np.array(df_descrip.loc[:, "Gap"]))
"""reading max and minimum"""
minWeight = df_descrip.loc[:, "MinWeight_EF"]
maxWeight = df_descrip.loc[:, "MaxWeight_EF"]
weigthTuple = []
for k in range(0, len(minWeight)):
    a = minWeight.iloc[k]
    b = maxWeight.iloc[k]
    weigthTuple.append((a, b))
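The loop that builds weigthTuple can be written in one line, since zip iterates over the Series values in order:

weigthTuple = list(zip(minWeight, maxWeight))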
Example #11
            if ii not in nominal_feature:
                z[ii] = np.max(
                    np.where(bounds[:, ii] <= initial_sample[ii])[0])
                if z[ii] > 99:
                    z[ii] -= 1
            else:
                z[ii] = initial_sample[ii]
        data[k, :] = z


file = 'High_IR_Data/shuttle-2_vs_5.dat'

name = file.split('.')[0]
print(name)

RD.Initialize_Data(file)

print('Number of Positive: ', RD.Num_positive)
print('Number of Negative: ', RD.Num_negative)

nominal_feature = []
data = RD.get_feature()
num_samples = data.shape[0]
num_features = data.shape[1]
num_bins = 100
bounds = np.zeros((num_bins + 1, num_features))
for i in range(num_features):
    if i not in nominal_feature:
        bounds[:, i] = np.histogram(data[:, i], bins=num_bins)[1]

nf = RD.get_negative_feature()
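bounds[:, i] holds the 101 histogram bin edges for feature i; the truncated loop at the top of this example uses them to discretise a sample into bin indices. The lookup it performs, in isolation:

# Bin index of value x along feature i: the largest edge not exceeding x.
x = data[0, 0]
bin_idx = np.max(np.where(bounds[:, 0] <= x)[0])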
Example #12
#! /usr/bin/env python
# -*- coding: utf-8 -*-

"利用pytorch的构架来实现BP神经网络"
"本示例用于实现澳大利亚电力数据的拟合实验,每天半个小时采样一点,采样为48个点,输入数据为7周相同时刻的点,输出为第8周同时刻的点."
"模型结构为,输入-隐藏-输出:7-50-1"

import torch
from torch.autograd import Variable
import matplotlib.pyplot as plt
import numpy as np
import math
import Read_Data

# Read the data; the max and min are kept for undoing the normalisation
training_in, training_out, test_in, test_out, maxE, minE = Read_Data.ReadElectricity()
# Convert the numpy arrays to torch Tensors (cast to FloatTensor)
x = torch.from_numpy(training_in).float()
y = torch.from_numpy(training_out).float()

# Wrap in autograd Variables
x = Variable(x)
y = Variable(y)

x_test = torch.from_numpy(test_in).float()
y_test = torch.from_numpy(test_out).float()
x_test = Variable(x_test)
y_test = Variable(y_test)

step = 7000
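The snippet stops before the network is defined. A minimal sketch of the 7-50-1 model and training loop described in the header comments (layer sizes are from the comments; the activation, loss, and learning rate are assumptions):

net = torch.nn.Sequential(
    torch.nn.Linear(7, 50),
    torch.nn.Sigmoid(),
    torch.nn.Linear(50, 1),
)
loss_fn = torch.nn.MSELoss()
optimizer = torch.optim.SGD(net.parameters(), lr=0.05)  # lr is an assumption

for i in range(step):
    pred = net(x)
    loss = loss_fn(pred, y)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()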
Example #13
#nominal_value = ['M', 'F', 'I']

nominal_index = [1,2,3]
nominal_value = [['icmp', 'tcp', 'udp'],
                 ['auth', 'bgp', 'courier', 'csnet_ns', 'ctf', 'daytime', 'discard', 'domain', 'domain_u',
                  'echo', 'eco_i', 'ecr_i', 'efs', 'exec', 'finger', 'ftp', 'ftp_data', 'gopher', 'hostnames',
                  'http', 'http_443', 'imap4', 'IRC', 'iso_tsap', 'klogin', 'kshell', 'ldap', 'link', 'login',
                  'mtp', 'name', 'netbios_dgm', 'netbios_ns', 'netbios_ssn', 'netstat', 'nnsp', 'nntp', 'ntp_u',
                  'other', 'pm_dump', 'pop_2', 'pop_3', 'printer', 'private', 'red_i', 'remote_job', 'rje', 'shell',
                  'smtp', 'sql_net', 'ssh', 'sunrpc', 'supdup', 'systat', 'telnet', 'tftp_u', 'time', 'tim_i', 'urh_i',
                  'urp_i', 'uucp', 'uucp_path', 'vmnet', 'whois', 'X11', 'Z39_50'],
                 ['OTH', 'REJ', 'RSTO', 'RSTOS0', 'RSTR', 'S0', 'S1', 'S2', 'S3', 'SF', 'SH']]


#RD.Initialize_Data(file)
RD.Initialize_Data(file, has_nominal=True, nominal_index=nominal_index, nominal_value=nominal_value)
print('Number of Positive: ', RD.Num_positive)
print('Number of Negative: ', RD.Num_negative)

nominal_feature = [1,2,3,6,7,8,10,11,13,14,17,18,19,20,21]
#nominal_feature = [0,1,2,3,4,5,6,7,8,9]
data = RD.get_feature()
num_samples = data.shape[0]
num_features = data.shape[1]
num_bins = 100
bounds = np.zeros((num_bins+1, num_features))
for i in range(num_features):
    if i not in nominal_feature:
        bounds[:, i] = np.histogram(data[:, i], bins=num_bins)[1]

nf = RD.get_negative_feature()
Example #14
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 25 09:14:27 2019
@author: LeaGrahn
"""
from gurobipy import *
import Read_Data

#nitems, planningHorizon, MJC, DPP, DAVG, HLC, MNC, CNT_WEIGHT, CNT_VALUE, REQ_WEIGHT, REQ_VALUE, MIN_WEIGHT, MIN_VALUE = \
nitems, planningHorizon, S, D, DAVG, h, s, w, v, weightREQ, valueREQ, minWeight, minValue = \
    Read_Data.read_Data ('setB-001.txt')

# create periods and items to iterate through
periods = range(planningHorizon)
items = range(nitems)

#%% Create patterns
import itertools
# Initialization
patterns = []
elements = []
combinations = []


#
## Fill patterns with one-item-patterns
#demand={}
#maxweight={}
#maxvalue={}
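The commented stubs suggest patterns are 0/1 vectors over items. One way to enumerate them with the itertools import above, exponential in nitems and so only viable for small instances; this is an assumption about what the elided code does:

for combination in itertools.product([0, 1], repeat=nitems):
    patterns.append(combination)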
Example #15
import time

import numpy as np
from scipy.optimize import minimize

import Read_Data


def computeInitialSolution():
    # http://apmonitor.com/che263/index.php/Main/PythonOptimization

    #import statistics

    #nitems, planningHorizon, MJC, DPP, DAVG, HLC, MNC, CNT_WEIGHT, CNT_VALUE, REQ_WEIGHT, REQ_VALUE, MIN_WEIGHT, MIN_VALUE = \
    nitems, planningHorizon, S, D, DAVG, h, s, w, v, weightREQ, valueREQ, minWeight, minValue = \
        Read_Data.read_Data ('setA-002.txt')

    items = range(nitems)
    periods = range(planningHorizon)

    Dm = {}
    ss = {}
    Soc = S[0]
    for i in items:
        Dm[i] = 0
        ss[i] = s[i, 0]
        for t in periods:
            Dm[i] = Dm[i] + D[i, t]
        Dm[i] = Dm[i] / planningHorizon
    # Modelling optimization function

    def objective(x):
        T = x[nitems]
        Total_holding_cost = 0
        Total_setup_cost = 0
        for i in items:
            Total_holding_cost += x[i] * T * 0.5 * Dm[i] * h[i]
            Total_setup_cost += (1 / T) * (ss[i] / x[i])
        Total_setup_cost += Soc / T
        return Total_holding_cost + Total_setup_cost

    #def objective(x):
    #    return sum((x[0:items] * x[items] * Dm[0:items] * 0.5 * h[0:items]) \
    #              + (1 / x[items]) * (ss[0:items] / x[0:items])) + (1 / x[items]) * Soc

    def constraint_value(x):
        c_v = 0
        T = x[nitems]
        if (valueREQ == 1):
            for i in items:
                c_v = c_v + (x[i] * T * Dm[i] * v[i])
            c_v = c_v - minValue
        return c_v

    def constraint_weight(x):
        c_w = 0
        T = x[nitems]
        if (weightREQ == 1):
            for i in items:
                c_w = c_w + (x[i] * T * Dm[i] * w[i])
            c_w = c_w - minWeight
        return c_w

    # Define decision variables and initialize parameters
    # x[i]: the integer multiplier of T that a replenishment of item i will last; x[nitems]: T
    x0 = np.zeros(nitems + 1)
    for i in items:
        x0[i] = 1.0
    x0[nitems] = planningHorizon  # T

    # show initial objective
    print('Initial Objective: ' + str(objective(x0)))

    # Constraints
    bnds = []
    for i in range(nitems + 1):
        bnds.append([1.0, 5.0])

    con1 = {
        'type': 'ineq',
        'fun': constraint_value
    }  # for instance of the formula, a - b >= 0
    con2 = {'type': 'ineq', 'fun': constraint_weight}
    cons = ([con1, con2])
    ##
    start = time.time()
    # SLSQP is used because it handles the nonlinear constraints
    solution = minimize(objective,x0,method='SLSQP',\
                        bounds=bnds, constraints=cons)

    x = solution.x

    end = time.time()
    print('computational time = {}'.format(end - start))

    # show final objective
    print('Final Objective: ' + str(objective(x)))

    # print solution
    print('Solution')
    for i in items:
        #     print(str(x[i]))
        print('x{} = '.format(i) + str(x[i]))
    print('T = ' + str(x[nitems]))

    #Round values from the static solution

    Ts = int(round(x[nitems]))
    k = {}
    for i in items:
        k[i] = int(round(x[i]))
    TC_rounded = 0
    for i in items:
        TC_rounded = TC_rounded + k[i] * Ts * Dm[i] * 0.5 * h[i] + (1 / Ts) * (
            ss[i] / k[i])
    TC_rounded = (TC_rounded + Soc / Ts) * planningHorizon

    #Initialize z and y

    z = {}
    y = {}
    B = {}
    I = {}
    for i in items:
        for t in range(planningHorizon):
            z[t] = 0
            y[i, t] = 0
            B[i, t] = 0

    #Conversion from the static to the dynamic

    for i in items:
        Dm[i] = round(Dm[i])
        for t in range(0, planningHorizon, Ts * k[i]):
            B[i, t] = Dm[i] * Ts * k[i]
            z[t] = 1
            y[i, t] = 1

    for i in items:
        for t in range(planningHorizon):
            if t != 0:
                I[i, t] = I[i, t - 1] + B[i, t] - D[i, t]
            else:
                I[i, t] = B[i, t] - D[i, t]

    #Total cost with the dynamic objective function with the static solution.
    newTC = 0

    for t in range(planningHorizon):
        for i in items:
            newTC = newTC + s[i, t] * y[i, t] + h[i] * I[i, t]
        newTC = newTC + S[t] * z[t]

    print(TC_rounded)
    print(newTC)

    return z, y, B
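For reference, objective() implements the classic static joint-replenishment cost. With $k_i$ = x[i], $T$ = x[nitems], $\bar{D}_i$ = Dm[i] and $S$ = Soc:

$$TC(T, k) = \sum_i \left( \tfrac{1}{2}\, k_i T \bar{D}_i h_i + \frac{s_i}{k_i T} \right) + \frac{S}{T}$$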
Example #16
from gurobipy import *
import Read_Data
import Static_Model

#nitems, planningHorizon, MJC, DPP, DAVG, HLC, MNC, CNT_WEIGHT, CNT_VALUE, REQ_WEIGHT, REQ_VALUE, MIN_WEIGHT, MIN_VALUE = \
nitems, planningHorizon, S, D, DAVG, h, s, w, v, weightREQ, valueREQ, minWeight, minValue = \
    Read_Data.read_Data ('setA-002.txt')
zs, ys, Bs = Static_Model.computeInitialSolution()

# create periods and items to iterate through
periods = range(planningHorizon)
items = range(nitems)
M = {}
for i in items:
    M[i] = 50000

warehouseC = 0
budgetC = 0
minC = 0
truckC = 0
#%% Create patterns
import itertools
# Initialization
patterns = []
combinations = []
elements = [0] * nitems
patterns.append(tuple(elements))

#fill patterns with only initial solution pattern
for t in periods:
    if zs[t] == 1:
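The loop is cut off here; presumably each period with zs[t] == 1 contributes the initial solution's order pattern, along the lines of this sketch (an assumption, not recovered code):

        patterns.append(tuple(ys[i, t] for i in items))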
Example #17
from __future__ import print_function
from matplotlib import pyplot as plt
import pandas as pd
import numpy as np
from matplotlib import colors as mcolors
import Read_Data as RD
import seaborn as sns

colors = dict(mcolors.BASE_COLORS, **mcolors.CSS4_COLORS)

#file = 'shuttle-2_vs_5.dat'
file = 'abalone19.dat'
name = file.split('.')[0]
RD.Initialize_Data(file,
                   has_nominal=True,
                   nominal_index=[0],
                   nominal_value=['M', 'F', 'I'])
print('Number of Positive: ', RD.Num_positive)
print('Number of Negative: ', RD.Num_negative)

#df = pd.DataFrame(RD.Features, columns=['A1', 'A2', 'A3', 'A4', 'A5', 'A6', 'A7', 'A8', 'A9'])
df = pd.DataFrame(RD.Features,
                  columns=[
                      'Sex', 'Length', 'Diameter', 'Height', 'Whole_Weight',
                      'Shucked_Weight', 'Viscera_Weight', 'Shell_Weight'
                  ])
#df['Label'] = pd.Series(RD.Labels, index=df.index)
df['Label'] = RD.Labels

#sns.FacetGrid(df, hue='Label').map(plt.scatter, 'Sex', 'Length')
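With the Label column in place, the whole pairwise-scatter grid that the commented-out FacetGrid line hints at can be produced in one call:

sns.pairplot(df, hue='Label')
plt.show()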
Example #18
    print("Input data = "+species+" from " + data_dir)
    print("Total no. of particles =" +str(N_part_total))
    print("No. of particles used =" +str(N_part))

#Partition the total input file with N particles into mpi_size parts:
mpi_borders = np.linspace(0, N_part, mpi_size + 1).astype('int')

my_border_low = mpi_borders[mpi_rank]
my_border_high = mpi_borders[mpi_rank+1]

#Redefining N_part as number of particles analysed per processor
N_part = my_border_high - my_border_low
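# Worked example: with N_part = 10 and mpi_size = 4, np.linspace gives
# [0, 2.5, 5, 7.5, 10], truncated by astype('int') to [0, 2, 5, 7, 10],
# so the four ranks analyse 2, 3, 2 and 3 particles respectively.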


#Read Trajectory Data
time, U, E, B, Energy = Read_Data.Read_Trajectory_Data(my_border_low, my_border_high, data_dir, species)
#Comment: U is used to denote the proper relativistic velocity, which is U = gamma*V

N = len(time)

#Process Trajectory Data using chosen wavelet and edge mode
if mpi_rank==0:
    print('Processing Data for particles...')

#Transform the data and compute new quantities:
time, V, E, b, V_par, V_perp, vpar, vperp, Acc_mag, Acc_par, Acc_perp1, Acc_perp2 = Process_Data.Process_raw(time, U, E, B, Omega_c, q, m)
N_pad = len(vpar[0,:])

ref=int(np.ceil(N/4))

#vpar_Global=Diffusion(vpar,ref,N)
Example #19
        #        dir_train = "glass1-5-fold/glass1-5-" + str(j+1) + "tra.dat"
        #        dir_test = "segment0-5-fold/segment0-5-" + str(j+1) + "tst.dat"
        #        dir_train = "segment0-5-fold/segment0_SMOTE/result" + str(j) + "s0.tra"
        #        dir_test = "segment0-5-fold/segment0_SMOTE/result" + str(j) + "s0.tst"
        #        dir_train = "segment0-5-fold/segment0_SMOTE_TomekLinks/result" + str(j) + "s0.tra"
        #        dir_test = "segment0-5-fold/segment0_SMOTE_TomekLinks/result" + str(j) + "s0.tst"
        #         dir_train = "yeast4-5-fold/yeast4-5-" + str(j + 1) + "tra.dat"
        dir_test = "yeast4-5-fold/yeast4-5-" + str(j + 1) + "tst.dat"
        #        dir_train = "yeast4-5-fold/yeast4_SMOTE/result" + str(j) + "s0.tra"
        #        dir_test = "yeast4-5-fold/yeast4_SMOTE/result" + str(j) + "s0.tst"
        #        dir_train = "yeast4-5-fold/yeast4_SMOTE_ENN/result" + str(j) + "s0.tra"
        #        dir_test = "yeast4-5-fold/yeast4_SMOTE_ENN/result" + str(j) + "s0.tst
        dir_train = "yeast4-5-fold/yeast4_SMOTE_RSB/yeast4-5-" + str(
            j + 1) + "tra.dat"

        RD.Initialize_Data(dir_train)
        Train_Feature_o = RD.get_feature()
        Train_Label = RD.get_label()
        Train_Label = Train_Label.ravel()

        RD.Initialize_Data(dir_test)
        Test_Feature_o = RD.get_feature()
        Test_Label = RD.get_label()
        Test_Label = Test_Label.ravel()

        min_max_scaler = preprocessing.MinMaxScaler()
        all_set = np.concatenate((Train_Feature_o, Test_Feature_o))
        min_max_scaler.fit(all_set)
        Train_Feature = min_max_scaler.transform(Train_Feature_o)
        Test_Feature = min_max_scaler.transform(Test_Feature_o)
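Note that fitting the scaler on the concatenation of the training and test folds lets test-set statistics influence the scaling. The leakage-free variant fits on the training fold only:

        min_max_scaler = preprocessing.MinMaxScaler()
        min_max_scaler.fit(Train_Feature_o)
        Train_Feature = min_max_scaler.transform(Train_Feature_o)
        Test_Feature = min_max_scaler.transform(Test_Feature_o)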
Example #20
from __future__ import print_function
from matplotlib import pyplot as plt
from matplotlib import colors as mcolors
import numpy as np
import Read_Data as RD

colors = dict(mcolors.BASE_COLORS, **mcolors.CSS4_COLORS)

#dir = "wine-5-fold/wine-5-1tra.dat"
dir = "KSMOTE_IECON15_InputData.csv"

RD.Initialize_Data(dir)

for i in range(RD.Num_Features):
    for j in range(i + 1, RD.Num_Features):  # j > i, so each pair is plotted once
        fig = plt.figure()
        p1 = plt.scatter(RD.Stage_1_Feature[:, i],
                         RD.Stage_1_Feature[:, j],
                         marker='o',
                         color='#539caf',
                         label='1',
                         s=10,
                         alpha=0.4)
        p2 = plt.scatter(RD.Stage_2_Feature[:, i],
                         RD.Stage_2_Feature[:, j],
                         marker='+',
                         color=colors["forestgreen"],
                         label='2',
                         s=20,
                         alpha=0.6)
Example #21
import tensorflow as tf
import train_nn
import train_nn_TrainingAugmentation
import Read_Data

IMAGE_SIZE = 256
BATCH_SIZE = 6
with open('classes.txt') as f:
    con = f.read()
class_ = con.splitlines()
label = {}
for i in range(len(class_)):
    a, b = class_[i].split('#')
    label[int(a)] = b
print(label)
img_test, img_name = Read_Data.Read_Test_TFRecords(
    'Weed_InputData_Final_Test*', IMAGE_SIZE)
img_test_batch, img_name_batch = tf.train.batch([img_test, img_name],
                                                batch_size=BATCH_SIZE)
keep_prob = tf.placeholder(tf.float32)
#logits = train_nn_TrainingAugmentation.Model(img_test_batch, keep_prob)
logits = train_nn.Model(img_test_batch, keep_prob)
pred = tf.argmax(tf.nn.softmax(logits), 1)
saver = tf.train.Saver()
with tf.Session() as sess:
    sess.run(
        [tf.global_variables_initializer(),
         tf.local_variables_initializer()])
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    # results04
    saver = tf.train.import_meta_graph(
Example #22
from sklearn.tree import DecisionTreeClassifier
import numpy as np
import pickle

import Read_Data
import Score_Of_Algorithm

data = {}
for index in range(1, int(Read_Data.count_files("./folds")/2) + 1):
    data[f'X{index}_train'], data[f'y{index}_train']= Read_Data.read_data_in_folds_folder(f'./folds/patient{index}_train.csv')
    data[f'X{index}_test'] , data[f'y{index}_test'] = Read_Data.read_data_in_folds_folder(f'./folds/patient{index}_test.csv')

dtc = DecisionTreeClassifier()

accuracy_score, precision_score, recall_score, f1_score = [], [], [], []
for index in range(1, int(len(data)/4) + 1):
    dtc.fit(data[f'X{index}_train'], data[f'y{index}_train'])
    # Saving model to disk
    pickle.dump(dtc, open(f'./models/Decision_Tree_{index}.pkl', 'wb'))

    pred = dtc.predict(data[f'X{index}_test'])

    accuracy_score.append(Score_Of_Algorithm.accuracy_can_modify(data[f'y{index}_test'], pred))

    x, y = Score_Of_Algorithm.precision_recall_can_modify(data[f'y{index}_test'], pred)
    precision_score.append(x)
    recall_score.append(y)

    f1_score.append(Score_Of_Algorithm.f1_score_can_modify(data[f'y{index}_test'], pred))
    print("Accuracy score:", accuracy_score[index - 1])
    print("Precision and Recall score:", precision_score[index - 1], recall_score[index - 1])
Example #23
import tensorflow as tf

import Read_Data
import train_nn_TrainingAugmentation  # assumed import, matching Example #21

IMAGE_SIZE = 256
BATCH_SIZE = 20
'''
with open('classes.txt') as f:
	con = f.read()
class_ = con.splitlines()
label = {}
for i in range(len(class_)):
	a, b = class_[i].split('#')
	label[int(a)] = b
print(label)
'''
#img_valid, img_label = Read_Data.Read_TFRecords('Weed_InputData_Valid_120.tfre*')
img_valid, img_label = Read_Data.Read_TFRecords('Weed_InputData_Valid_Augmentation*')
img_p, label_p = Read_Data.Preprocess(img_valid, img_label, IMAGE_SIZE)
img_valid_batch, img_label_batch = tf.train.batch([img_p, label_p], batch_size = BATCH_SIZE)
keep_prob = tf.placeholder(tf.float32)
#logits = train_nn.Model(img_valid_batch, keep_prob)
#logits = train_nn_Training.Model(img_valid_batch, keep_prob)
logits = train_nn_TrainingAugmentation.Model(img_valid_batch, keep_prob)
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(img_label_batch, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
saver = tf.train.Saver()
with tf.Session() as sess:
	sess.run([tf.global_variables_initializer(), tf.local_variables_initializer()])
	coord = tf.train.Coordinator()
	threads = tf.train.start_queue_runners(sess = sess, coord = coord)
	# BEST Model_Saver06_Final_Augmentation
	saver = tf.train.import_meta_graph('./Model_Saver06_Final_Augmentation/model_save.ckpt-13.meta')
Example #24
def search():
	a = request.args.get('a', 0, type=str)
	res = Read_Data.work(a)
	return jsonify(result = res)
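Registered under a route such as @app.route('/search') (the decorator is not shown), this endpoint answers GET requests of the form /search?a=<query> with a JSON body {"result": [...]}.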
Example #25
import pandas as pd
import numpy as np
import Read_Data
import datetime
""" Defining BT period """
incep_date = pd.to_datetime('2011-01-05')  # inception date for the index is fixed
end_date = pd.to_datetime(
    input("Enter the backtest end date in YYYY-MM-DD format: ")
)  # taking the backtest end date from the user
next_opendate = pd.to_datetime(
    input("Enter the next open date in YYYY-MM-DD format: "))

print("Start and End Date defined")  # printing status on the console
"""Reading data"""
df_input_px, df_mapping, df_rebal_details, df_input_CD, df_input_CAo = Read_Data.read_data(
    incep_date, end_date)
"""FX conversion methodology"""
df_FX_conv_method = pd.DataFrame(
    columns=["FX", "Ticker", "Methodology", "Factor"])
df_FX_conv_method["FX"] = df_mapping["Currency"]
df_FX_conv_method["Ticker"] = df_mapping["Ccy_mapping"]
df_FX_conv_method.set_index("FX", inplace=True)
df_FX_conv_method.drop_duplicates(inplace=True)

for curncy in df_FX_conv_method.index:
    if curncy[-1].islower():
        df_FX_conv_method.loc[curncy, "Factor"] = 100
    else:
        df_FX_conv_method.loc[curncy, "Factor"] = 1
    if df_FX_conv_method.loc[curncy, "Ticker"][:3] == "USD":
        df_FX_conv_method.loc[curncy, "Methodology"] = "inverse"
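For example, a currency whose code ends in a lower-case letter, such as 'GBp' for pence-quoted UK stocks, gets Factor 100, and any mapping ticker that starts with 'USD' is flagged for inverse conversion.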