import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import Pipeline

from common_functions import load_data

if __name__ == '__main__':
    X, y = load_data('ex2data2.txt')

    x1, x2 = X.T
    f_y = y.ravel()
    plt.plot(x1[f_y == 0], x2[f_y == 0], 'yo')
    plt.plot(x1[f_y == 1], x2[f_y == 1], 'bx')

    plt.show()

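    # map the two features to every polynomial term up to degree 6, then
    # fit L2-regularized logistic regression (C is the inverse of the
    # regularization strength)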
    pf = PolynomialFeatures(degree=6)
    reg = LogisticRegression(C=10)
    pipeline = Pipeline([("polynomial_features", pf),
                         ("logistic_regression", reg)])

    pipeline.fit(X, f_y)
    # classify every point of a grid over the feature range; the 0/1
    # transition traces the learned decision boundary
    u = np.linspace(-1, 1.5, 50)
    v = np.linspace(-1, 1.5, 50)

    X1, X2 = np.meshgrid(u, v)
    Z = pipeline.predict(np.c_[X1.ravel(), X2.ravel()]).reshape(X1.shape)

    plt.plot(x1[f_y == 0], x2[f_y == 0], 'yo')
    plt.plot(x1[f_y == 1], x2[f_y == 1], 'bx')
    plt.contour(X1, X2, Z, levels=[0.5])
    plt.show()
Example #2
import matplotlib.pyplot as plt
import numpy as np
from numpy.linalg import pinv

from common_functions import load_data, J_liner_regression, add_zero_feature, gradient_descent, matrix_args, feature_normalize

if __name__ == '__main__':
    X, y = load_data('ex1data2.txt')

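    # scale the features to zero mean / unit variance, then prepend the
    # all-ones intercept column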
    mu, sigma, X = feature_normalize(X)
    X = add_zero_feature(X)

    iterations = 400
    alphas = [0.01, 0.1]
    f, axarr = plt.subplots(len(alphas), sharex=True)
    plt.xlabel('Number of Iterations')
    plt.ylabel('Cost J')
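    # run gradient descent once per learning rate and plot the cost
    # history to compare convergence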
    for i, alpha in enumerate(alphas):
        theta = np.zeros((X.shape[1], 1))
        theta, J_history = gradient_descent(J_liner_regression, X, y,
                                            iterations, theta, alpha)

        axarr[i].set_title('Alpha = {}'.format(alpha))
        axarr[i].plot(range(len(J_history)), J_history)

    plt.show()
    # Estimate the price of a 1650 sq-ft, 3 br house. The first column
    # of X is all ones and is not normalized. A sketch via the normal
    # equation, assuming feature_normalize returns per-column mu/sigma:
    theta = pinv(X.T.dot(X)).dot(X.T).dot(y)
    house = np.hstack(([1.0],
                       (np.array([1650.0, 3.0]) - np.ravel(mu)) / np.ravel(sigma)))
    print('Predicted price of a 1650 sq-ft, 3 br house: {}'.format(
        house.dot(theta).item()))
Example #3
print('elapsed time: %.1f s'
      % (time.time() - start))

# ===== combine and/or average data =====

info = cf.params_for_input(cell_type, 'clustered')
clus_info = info['clustered']

# collates data loaded from files
data_all = {}

for i, iteration in enumerate(iterations):
    round_data = {}
    for r in range(n_rounds):
        # load data
        name = '{}_{}-{}_validation'.format(cell_type, r, iteration)
        data = cf.load_data('{}/{}.json'.format(folder, name))
        round_data[r] = data
    # combine data
    data_all[i] = round_data

data = data_all

# collates data for each cell iteration
for i in range(len(iterations)):

    data[i]['all'] = {}

    for lab in clus_info['label']:

        data[i]['all'][lab] = {
            'vm': [],
            'dur': [],  # assumed fields, matching the duration and
            'amp': [],  # amplitude data used in the next example
        }
Example #4
import common_functions as cf
import scipy.stats as stats
import matplotlib.pyplot as plt

modulation = 1

cell_type = 'dspn'
mod_type = 'ACh'

if not modulation:

    # load data
    data = cf.load_data('Data/{}_HFI[0]+0_validation.json'.format(cell_type))
    clus_info = data['meta']['clustered']
    clus_labels = clus_info['label']

    # normality checking =====

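    # cf.norm_dist presumably tests each sample for normality
    # (an assumption about common_functions, which is not shown)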
    # gets stats
    data['stats'] = {}
    for clus_lab in clus_labels:

        data['stats'][clus_lab] = {'dur': {}, 'amp': {}}
        # duration data
        data['stats'][clus_lab]['dur'] = cf.norm_dist(
            data['all'][clus_lab]['dur'])
        # amplitude data
        data['stats'][clus_lab]['amp'] = cf.norm_dist(
            data['all'][clus_lab]['amp'])

    # plot histograms: a minimal sketch, assuming the collated duration
    # data can be histogrammed directly per cluster label
    for clus_lab in clus_labels:
        plt.hist(data['all'][clus_lab]['dur'], alpha=0.5, label=clus_lab)
    plt.xlabel('duration')
    plt.legend()
    plt.show()
Example #5
import matplotlib.pyplot as plt
import numpy as np

from scipy.optimize import minimize

from common_functions import load_data, add_zero_feature, lr_accuracy, cf_lr as cost_function, gf_lr as grad_function

if __name__ == '__main__':
    X, y = load_data('ex2data1.txt')

    x1, x2 = X.T
    f_y = y.ravel()
    plt.plot(x1[f_y == 0], x2[f_y == 0], 'yo')
    plt.plot(x1[f_y == 1], x2[f_y == 1], 'bx')

    plt.show()

    X = add_zero_feature(X)
    m, n = X.shape
    initial_theta = np.ones(n)

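    # minimize the logistic-regression cost with BFGS, supplying the
    # analytic gradient via jac; .x is the fitted parameter vector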
    theta = minimize(cost_function,
                     initial_theta,
                     method='BFGS',
                     jac=grad_function,
                     options={
                         'disp': False
                     },
                     args=(X, y)).x
    print(theta)
    print(cost_function(theta, X, y))
Example #6
import matplotlib.pyplot as plt

from sklearn.linear_model import LinearRegression

from common_functions import load_data

if __name__ == '__main__':
    X, y = load_data('ex1data1.txt')
    plt.xlabel('Population of City in 10,000s')
    plt.ylabel('Profit in $10,000s')

    plt.plot(X, y, 'rx')
    plt.show()

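    # fit a one-variable linear regression and overlay the fitted line
    # on the scatter plot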
    reg = LinearRegression()
    reg.fit(X, y)

    plt.xlabel('Population of City in 10,000s')
    plt.ylabel('Profit in $10,000s')
    plt.plot(X, reg.predict(X))
    plt.plot(X, y, 'rx')
    plt.show()
Example #7
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import LogisticRegression

from common_functions import load_data

if __name__ == '__main__':
    X, y = load_data('ex2data1.txt')

    x1, x2 = X.T
    f_y = y.ravel()
    plt.plot(x1[f_y == 0], x2[f_y == 0], 'yo')
    plt.plot(x1[f_y == 1], x2[f_y == 1], 'bx')

    plt.show()

    lr = LogisticRegression(C=100)
    lr.fit(X, f_y)
    theta = np.array([lr.intercept_[0], lr.coef_[0, 0], lr.coef_[0, 1]])

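    # the decision boundary is the line theta0 + theta1*x1 + theta2*x2 = 0,
    # i.e. x2 = -(theta1*x1 + theta0) / theta2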
    x1_boundary = np.array([np.min(x1) - 2, np.max(x1) + 2])
    x2_boundary = (-1 / theta[2]) * (theta[1] * x1_boundary + theta[0])

    plt.plot(x1[f_y == 0], x2[f_y == 0], 'yo')
    plt.plot(x1[f_y == 1], x2[f_y == 1], 'bx')
    plt.plot(x1_boundary, x2_boundary)
    plt.show()


    print('Train Accuracy: {}%'.format(lr.score(X, f_y) * 100))
Example #8
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import Pipeline

from common_functions import load_data

if __name__ == '__main__':
    X, y = load_data('ex1data2.txt')

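    # StandardScaler reproduces the mean/variance feature normalization
    # of the gradient-descent version before the least-squares fit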
    ss = StandardScaler()
    reg = LinearRegression()
    pipeline = Pipeline([("standard_scaler", ss),
                         ("linear_regression", reg)])

    pipeline.fit(X, y)
    # predict expects a 2-D array with one row per sample
    price = pipeline.predict(np.array([[1650.0, 3.0]]))
    print(price)
Example #9
import matplotlib.pyplot as plt
import common_functions as cf
import scipy.stats as stats

HFI = 0
cell_type = 'dspn'
mod_type = 'DA'
mod_tar = 'all'

if HFI == 0:

    if mod_tar == 'indiv':

        # load data
        data = cf.load_data(
            'Data/{}_HFI[0]+0_{}-modulation-{}.json'.format(
                cell_type, mod_type, mod_tar)
        )
        ctrl_data = cf.load_data(
            'Data/{}_HFI[0]+0_validation.json'.format(cell_type))

        # ===== organise data =====

        # clustered stimulation data
        stim_n = data['meta']['clustered']['params']['stim_n']
        pre_t = data['meta']['clustered']['params']['pre_t']
        isi = data['meta']['clustered']['params']['isi']
        clus_stim_t = data['meta']['clustered']['params']['stim_t']
        clus_stop_t = data['meta']['clustered']['params']['stop_t']
        clus_targets = data['meta']['clustered']['target']
        clus_labels = data['meta']['clustered']['label']
Example #10
'''
Plots voltage traces as well as potential duration and amplitude information.
    - Currently implemented for plotting basic plateau potentials
'''

import numpy as np
import matplotlib.pyplot as plt
import scipy.stats as stats
import common_functions as cf

HFI = 0

if not HFI:

    # load data
    data = cf.load_data('Data/ispn_HFI[0]+0_validation.json')

    # plotting =================

    clus_info = data['meta']['clustered']

    # simulation data
    stim_n = clus_info['params']['stim_n']
    stim_t = clus_info['params']['stim_t']
    stop_t = clus_info['params']['stop_t']
    pre_t = clus_info['params']['pre_t']
    isi = clus_info['params']['isi']
    cell_type = data['meta']['cell type']
    targets = clus_info['target']
    target_labels = clus_info['label']
    model_iterator = data['meta']['iterations']
Example #11
import common_functions as cf
import numpy as np

modulation = 1

cell_type = 'dspn'
mod_type = 'ACh'

if not modulation:

    delta = list(np.arange(0, 100 + 1, 20))
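    # offsets used in the HFI validation file names: 0, 20, 40, ..., 100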

    # load data
    spike_data = {}
    for d in delta:
        data = cf.load_data('Data/{}_HFI[1]+{}_validation.json'.format(
            cell_type, d))
        spike_data[d] = data['all']

    clus_info = data['meta']['clustered']
    clus_labels = clus_info['label']

    # collects iteration-wise spike data into single vector (control data)
    spiked = {}
    for delt in delta:
        spiked[delt] = {}
        for clus in clus_labels:
            spiked[delt][clus] = []
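            # flatten the per-iteration lists of spike flags into one vector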
            for i in range(len(spike_data[delt][clus]['spiked'])):
                for j in range(len(spike_data[delt][clus]['spiked'][i])):
                    spiked[delt][clus].append(
                        spike_data[delt][clus]['spiked'][i][j])