# Initialization
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

from ml import plotData, mapFeature

# Load Data
#  The first two columns contain the X values and the third column
#  contains the label (y).

data = pd.read_csv('ex2data2.txt', header=None, names=[1, 2, 3])
X = data[[1, 2]]
y = data[[3]]

plotData(X.values, y.values)

# Labels and Legend
plt.xlabel('Microchip Test 1')
plt.ylabel('Microchip Test 2')
plt.show()
input("Program paused. Press Enter to continue...")


# =========== Part 1: Regularized Logistic Regression ============

# Add Polynomial Features

# Note that mapFeature also adds a column of ones for us, so the intercept
# term is handled (ml.mapFeature is assumed to take the two feature columns
# and return the full polynomial feature matrix)
X = mapFeature(X[1].values, X[2].values)
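
# A minimal sketch of the polynomial mapping ml.mapFeature is assumed to
# implement: all terms x1^(i-j) * x2^j up to degree 6, plus a leading column
# of ones. Illustrative only, not the repo's code.
def map_feature_sketch(x1, x2, degree=6):
    """Map two feature vectors to all polynomial terms up to `degree`."""
    out = [np.ones(x1.shape[0])]          # intercept column of ones
    for i in range(1, degree + 1):
        for j in range(i + 1):
            out.append((x1 ** (i - j)) * (x2 ** j))
    return np.stack(out, axis=1)          # shape: (m, 28) for degree 6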

# Initialize fitting parameters
initial_theta = np.zeros(X.shape[1])

# Set regularization parameter lambda to 1
Lambda = 1.0
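
# A minimal sketch of the regularized cost and gradient this part minimizes
# (the exercise's costFunctionReg); the names below are illustrative
# stand-ins, not imports from the repo.
def sigmoid_sketch(z):
    """Logistic function 1 / (1 + e^-z)."""
    return 1.0 / (1.0 + np.exp(-z))

def cost_function_reg_sketch(theta, X, y, lam):
    """Regularized cost J(theta) and gradient; theta[0] is not regularized."""
    m = y.size
    h = sigmoid_sketch(X.dot(theta))
    reg = (lam / (2.0 * m)) * np.sum(theta[1:] ** 2)
    J = (-y.dot(np.log(h)) - (1 - y).dot(np.log(1 - h))) / m + reg
    grad = X.T.dot(h - y) / m
    grad[1:] += (lam / m) * theta[1:]
    return J, grad

# Example usage with the values defined above:
# cost, grad = cost_function_reg_sketch(initial_theta, X, y.values.ravel(), Lambda)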

from ml import plotData, plotDecisionBoundary
# Load Data
#  The first two columns contain the exam scores and the third column
#  contains the label.

data = np.loadtxt('ex2data1.txt', delimiter=',')
X = data[:, 0:2]
y = data[:, 2]

# ==================== Part 1: Plotting ====================

print(
    'Plotting data with + indicating (y = 1) examples and o indicating (y = 0) examples.'
)

plotData(X, y)
plt.legend(['Admitted', 'Not admitted'],
           loc='upper right',
           shadow=True,
           fontsize='x-large',
           numpoints=1)

plt.xlabel('Exam 1 score')
plt.ylabel('Exam 2 score')
plt.show()
input("Program paused. Press Enter to continue...")

# ============ Part 2: Compute Cost and Gradient ============
#  Setup the data matrix appropriately, and add ones for the intercept term
m, n = X.shape

# Add intercept term to X
X = np.concatenate((np.ones((m, 1)), X), axis=1)

# Initialize fitting parameters
initial_theta = np.zeros(n + 1)
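
# A hedged sketch of fitting theta with SciPy's BFGS optimizer (the next
# import block also pulls in fmin_bfgs); cost_fn/grad_fn stand in for the
# exercise's costFunction and gradient and are illustrative names.
from scipy.optimize import fmin_bfgs

def cost_fn(theta, X, y):
    """Unregularized logistic-regression cost."""
    h = 1.0 / (1.0 + np.exp(-X.dot(theta)))
    return (-y.dot(np.log(h)) - (1 - y).dot(np.log(1 - h))) / y.size

def grad_fn(theta, X, y):
    """Gradient of the cost with respect to theta."""
    h = 1.0 / (1.0 + np.exp(-X.dot(theta)))
    return X.T.dot(h - y) / y.size

# Example usage (assumes the X/y/initial_theta defined just above):
# theta = fmin_bfgs(cost_fn, initial_theta, fprime=grad_fn, args=(X, y))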
import numpy as np
from scipy.optimize import fmin_bfgs

from ml import plotData, mapFeature, plotDecisionBoundary
from predict import predict
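
# A minimal sketch of what the imported predict helper is assumed to do:
# threshold the sigmoid hypothesis at 0.5 (illustrative only; predict.py is
# what actually runs).
def predict_sketch(theta, X):
    """Predict 0/1 labels by thresholding the sigmoid hypothesis at 0.5."""
    return (1.0 / (1.0 + np.exp(-X.dot(theta))) >= 0.5).astype(float)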

## Load Data
#  The first two columns contain the microchip test scores and the third
#  column contains the label.

data = np.loadtxt('ex2data2.txt', delimiter=",")
X = data[:, :2]
y = data[:, 2]

plt, p1, p2 = plotData(X, y)

# Labels and Legend
plt.xlabel('Microchip Test 1')
plt.ylabel('Microchip Test 2')
plt.legend((p1, p2), ('y = 1', 'y = 0'), numpoints=1, handlelength=0)

plt.show(
    block=False
)  # prevents having to close the graph to move forward with ex2_reg.py

input('Program paused. Press enter to continue.\n')

## =========== Part 1: Regularized Logistic Regression ============
#  In this part, you are given a dataset with data points that are not
#  linearly separable. However, you would still like to use logistic