Example #1
import os

import numpy as np


def optimise(time=1.0, a_initial=0.0, a_final=1e-3, b_initial=0.0, b_final=1e-2, iters=10):
    a_step = (a_final - a_initial) / iters
    b_step = (b_final - b_initial) / iters

    data = np.load(os.path.join('noisy_data', str(time) + '.npy'))

    best_error = float('inf')
    best_result = None
    best_a = None
    best_b = None
    for i in range(iters):
        b = b_initial + i * b_step  # increment b
        for j in range(iters):
            a = a_initial + j * a_step  # increment a from minimum to maximum for each b value
            result = turing(a, b, time)  # the Turing pattern for this parameter pair and time
            error_new = calc_error(result, data)  # error between the observed data and the
                                                  # pattern for the given parameters
            print(a, b, error_new)
            if error_new < best_error:  # keep the parameters if the error is lower
                best_error = error_new
                best_result = result
                best_a = a
                best_b = b

    plot(data, time)  # plot the target data
    plot(best_result, time)  # plot the best-fitting simulated pattern for comparison
    print(best_a, best_b, best_error)
    return best_a, best_b
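# NOTE: `turing`, `calc_error`, and `plot` are project helpers not shown above.
# A minimal sketch of what `calc_error` might look like, assuming a plain
# mean-squared-error comparison (an assumption, not the project's definition):
def calc_error(result, data):
    return np.mean((np.asarray(result) - np.asarray(data)) ** 2)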
Example #2
import time

import numpy as np

import plot_data


def train():
    start = time.time()

    training_set = load_data()

    vocab_list = get_vocab_list(training_set)

    # X is an m x (n + 1) dimensional feature matrix
    X, y = extract_feature_matrix(vocab_list, training_set)

    # lambda, or the regularization constant
    reg_const = 1

    # parameters of the hypothesis
    theta = init_params(X.shape[1])

    # for declaring convergence
    epsilon = 10**-3

    # for determining how big each "step" of gradient descent is
    alpha = 0.06

    (theta, error, error_history,
     iterations) = gradient_descent(X, y, theta, reg_const, alpha, epsilon)

    np.savetxt('/results/params.txt', theta)
    np.savetxt('/results/vocab_list.txt', vocab_list, fmt='%s')

    end = time.time()

    print('final cost is {}'.format(error))
    print('gradient descent time is {}s'.format(int(end - start)))

    x_label = 'iterations'
    y_label = 'Cost (J)'
    title = 'Cost of hypothesis versus iterations of gradient descent'
    plot_data.plot(iterations, error_history, x_label, y_label, title)
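# `gradient_descent` is defined elsewhere in the project. A minimal sketch of
# one possible implementation, assuming a regularized logistic-regression cost
# and batch updates (the model and names below are assumptions, not the
# project's code):
def gradient_descent(X, y, theta, reg_const, alpha, epsilon, max_iters=100000):
    m = len(y)
    error_history = []
    prev_cost = float('inf')
    for i in range(max_iters):
        h = 1.0 / (1.0 + np.exp(-(X @ theta)))       # sigmoid hypothesis
        cost = (-(y @ np.log(h)) - ((1 - y) @ np.log(1 - h))) / m \
            + reg_const / (2 * m) * np.sum(theta[1:] ** 2)
        error_history.append(cost)
        grad = X.T @ (h - y) / m                     # unregularized gradient
        grad[1:] += reg_const / m * theta[1:]        # bias term is not penalized
        theta = theta - alpha * grad
        if prev_cost - cost < epsilon:               # declare convergence
            return theta, cost, error_history, i + 1
        prev_cost = cost
    return theta, cost, error_history, max_iters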
Example #3
import sys

sys.path.append('E:/Github/Machine-Learning-Algorithms/SimulateData')
import simulate_data
import plot_data
from logistic_regression import logistic_regression

data = simulate_data.simulate()

weights = logistic_regression(features=data[0],
                              target=data[1],
                              steps=300000,
                              learning_rate=5e-5,
                              add_intercept=True)

print(weights)

plot_data.plot(data[0], data[1])
Example #4
import time

# Generate initial solutions for tabu search and simulated annealing
solution1 = m_instance.Problem_solution(instance)
solution2 = m_instance.Problem_solution(instance)
# print(solution1)
# print(id(solution1))
# print(solution2)
# print(id(solution2))

# Solve the max-cut problem with each algorithm
# (time.clock() was removed in Python 3.8; time.perf_counter() replaces it)
time0 = time.perf_counter()
tabu_solution = ts.tabu_search(solution1, instance, 0.05, 1000)
time1 = time.perf_counter()
sa_solution = sa.simulated_annealing(solution2, instance)
time2 = time.perf_counter()
ga_solution = ga.genetic_algorithm(instance)
time3 = time.perf_counter()

# Compute each algorithm's solve time
elapse1 = time1 - time0
elapse2 = time2 - time1
elapse3 = time3 - time2

# Plot the results of each algorithm
plot('tabu6.txt')
plot('sa.txt')
plot('ga.txt')

# Print the solve times
print("Tabu search time used:", elapse1)
print("Simulated annealing time used:", elapse2)
print("Genetic algorithm time used:", elapse3)
Example #5
import cv2

import plot_data


def run(dens, size):
    grid = [[0 for _ in range(size)] for _ in range(size)]  # empty size x size grid
    grid, dists = search_area((size // 2, size // 2), grid, dens)  # search from the centre cell
    plot_data.plot(grid, dists)
    img = cv2.imread("/Users/2020shatgiskessell/Desktop/Missing_Child_Recognition/mapped_area.png")
    return img, dists
Example #6
# import the required modules
import matplotlib.pyplot as plt
import pid
import plot_data
import reference
import sample_generator

samples = sample_generator.generate(121)

(instantaneous_time, references,
 outputs) = pid.implement(0.5, 0.5, 0.006, reference.get_function, samples)

x_plots = [instantaneous_time, instantaneous_time]
y_plots = [references, outputs]
colors = ['green', 'yellow']
legend_labels = ['Expected', 'Actual']

plot_data.plot(x_plots, y_plots, legend_labels, 'Time(s)',
               'Motor Speed(rad/s)', colors,
               'PID Implementation for DC Motor\nK_p=0.5 K_i=0.5 K_d=0.006')
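# `pid.implement` is project code. A minimal sketch of the discrete PID loop
# it might contain; the fixed time step and the first-order motor model below
# are assumptions for illustration only:
def implement(k_p, k_i, k_d, reference_function, samples, dt=0.01):
    times, references, outputs = [], [], []
    output, integral, prev_error = 0.0, 0.0, 0.0
    for n in range(samples):
        t = n * dt
        setpoint = reference_function(t)      # desired motor speed
        error = setpoint - output
        integral += error * dt
        derivative = (error - prev_error) / dt
        control = k_p * error + k_i * integral + k_d * derivative
        output += dt * (control - output)     # hypothetical first-order plant
        prev_error = error
        times.append(t)
        references.append(setpoint)
        outputs.append(output)
    return times, references, outputs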
Example #7
import numpy

import Optimization
import plot_data

data = numpy.loadtxt(fname="../Data/ex1data1.txt", delimiter=",")

# checking and storing the size of the 'data' array
row = data.shape[0]
clm = data.shape[1]

# extracting data where x represents the population of the city and y represents the profits
x = data[0:row, 0:clm - 1]
y = data[0:row, clm - 1]

# plotting data
title = "Regression"
x_lab = "Population of the city in 10,000s"
y_lab = "profits in $10,000s"
plotsymbol = "+"
plot_data.plot(x, y, title, x_lab, y_lab, plotsymbol)

# initializing theta
theta = numpy.zeros(clm)
# add ones in X matrix
x1 = numpy.c_[numpy.ones(x.shape[0]), x]

# calculate initial cost
cost = Optimization.cal_cost(theta, x1, y)
print("Initial cost is ", cost)

# optimize gradients
alpha = 0.01
iteration = 1500
optimum_theta = Optimization.cal_grad(x1, y, theta, alpha, iteration)
print("optimum parameters are = ", optimum_theta)
Example #8
from simulate_data import simulate
from plot_data import plot

data = simulate()
plot(data[0], data[1])
Example #9
    # (truncated) remainder of a commented-out preprocessing loop:
    # process_log(path_to_process_model, path_to_log, file + '.xes', file + '_processed.xes')

    # Create CSV
    current_time = datetime.now()
    dt_string = current_time.strftime("%d%m%Y_%H%M%S")

    csv_name = 'results/' + dt_string + '.csv'
    # Give attribute labels
    with open(csv_name, 'a', newline='') as csvfile:
        fieldnames = [
            'errors_pr_mil', 'feature process', 'hidden neurons', 'alpha',
            'activation function', 'Accuracy', 'AUC', "F1"
        ]
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)

        writer.writeheader()

    # Run the LSTM
    # Define which logs the model is trained on; multiple can be combined in one list, e.g. ['80', '500']
    dif_logs = [['80'], ['100'], ['150'], ['200'], ['500'], ['750']]
    #dif_logs=[['80','500','200'],['100','80','500','750'],['80','100','150','200'],['80','100','150','200','500','750'],['500'],['200','500','750']]

    for logs in dif_logs:
        run_lstm(csv_name, path_to_log, logs)

    performance_metrics_to_plot = ["AUC", "F1", "Accuracy"]
    for metric in performance_metrics_to_plot:
        plot(csv_name, dif_logs, metric)

    plot_activation_hidden(csv_name, 'One hot encoding')
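# `run_lstm` is project code that presumably appends one result row per
# configuration to the CSV opened above. A hypothetical sketch of that append,
# reusing the fieldnames declared earlier:
def append_result(csv_name, fieldnames, row):
    with open(csv_name, 'a', newline='') as csvfile:
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
        writer.writerow(row)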
Example #10
import csv_reader
import dr_harsha_filter
import moving_average_filter
import plot_data

window_size = 100

# Defines file name to read data
file_name = 'assets/data_set.csv'

# Reads data from the csv file
raw_data = csv_reader.read_csv(file_name)

# x values of data set
x = raw_data[0]

# y values of data set
y = raw_data[1]

# Plots raw data
plot_data.plot(x, y, 'Raw Data')

# Filters data with Dr. Harsha's filter
filtered_data_from_dr_harsha_filter = dr_harsha_filter.filter_by_dr_harsha_filter(
    y, 0.05)

# Plots data received from Dr. Harsha's filter
plot_data.plot(x, filtered_data_from_dr_harsha_filter,
               'Dr. Harsha\'s Filter Data')

# Filters data from Moving average filter
window_average_y = moving_average_filter.filter_by_moving_average_filter(
    y, window_size)

# Sets corresponding x-axis for moving average filter data
window_average_x = x[window_size - 1:len(x)]
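# `moving_average_filter.filter_by_moving_average_filter` is project code. A
# minimal sketch, assuming a 'valid' convolution, which returns
# len(y) - window_size + 1 points and explains the x-axis offset above:
import numpy as np


def filter_by_moving_average_filter(y, window_size):
    window = np.ones(window_size) / window_size
    return np.convolve(y, window, mode='valid')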
Example #11
#spects=np.append(spects,params[:,:3],axis=1)
#params=params[:,3:]
print(f"size of spects {np.shape(spects)} and params {np.shape(params)}")
# whitening and saving of parameters and spectra
x_long = save_param_set(params)
print(np.shape(x_long))
y_long = save_spect_set(spects)
retrieved = y_to_obs(y_long)
retrieved2 = y_to_obs(y_long[0:5, :])
#y_long=np.append(y_long,params[:,:3],axis=1)
parameter_size, spectrum_size = np.shape(x_long)[1], np.shape(y_long)[1]
print(f"param_size: {parameter_size}, spect_size {spectrum_size}")
print(
    f"shape of whitened {np.shape(y_long)} and original spectrum {np.shape(spects)} and of the wavelength {np.shape(wavelenth)}"
)

#wparam=whightened_param(param)
#print(f"param: {param[0,:]}, wparam {wparam[0,:]}")
#print(mu_x)
#wspect=save_spect_set(spect)
#print(f"spect: {spect[0,:]}, wspect {wspect[0,:]}")

#parameter_size=30
#spectrum_size=1250

if plotting:
    import matplotlib
    matplotlib.use("TkAgg")
    import matplotlib.pyplot as plt
    plot_data.plot()
    plt.show()
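# `save_param_set` and `save_spect_set` are project code. A minimal sketch of
# the whitening step they presumably perform (standardising each column to
# zero mean and unit variance); the saving side is omitted:
def whiten(values):
    mu = values.mean(axis=0)
    sigma = values.std(axis=0)
    sigma[sigma == 0] = 1.0  # guard against constant columns
    return (values - mu) / sigma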