Example #1
 def test_print_result(self):
     self.assertEqual(Problem2.print_result('C', 0, 0),
                      'Area of circle: 0 Circumference of circle: 0')
     self.assertEqual(Problem2.print_result('S', 2, 8),
                      'Area of square: 2 Perimeter of square: 8')
     self.assertEqual(Problem2.print_result('R', 49, 28),
                      'Area of rectangle: 49 Perimeter of rectangle: 28')
Example #2
    def test_diagonal_sum(self):
        matrix1 = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]

        matrix2 = []

        matrix3 = [[11, 2, 4], [4, 5, 6], [10, 8, -12]]

        matrix4 = [[1.0, 30, 40], [0, 40.3, 23], [0, 0, 100]]

        case1 = Problem2.diagonalSum(matrix1)
        case2 = Problem2.diagonalSum(matrix2)
        case3 = Problem2.diagonalSum(matrix3)
        case4 = Problem2.diagonalSum(matrix4)

        # What "assert" functions do, is simple. If they DON'T get the value you expect, it RAISES an error :)
        self.assertIsNotNone(case1, "Your function is not returning a value")
        self.assertEqual(15, case1,
                         "Your function is not returning the correct value")
        self.assertEqual(0, case2,
                         "The Matrix is empty, the value should be 0")

        self.assertEqual(4, case3,
                         "Your function is not returning the correct value")
        self.assertEqual(141.3, case4,
                         "Your function is not returning the correct value")
def main():
  print "=======================Problem 1========================="
  Problem1.main()
  print "=======================Problem 2========================="
  Problem2.main()
  print "=======================Problem 3========================="
  Problem3.main()
  print "=======================Problem 4========================="
  Problem4.main()
Example #5
def problem2():
    import Problem2
    print "Solving Problem 2"
    answers = Problem2.solve()

    utilMap = answers[0]
    with open("generated/P2-output.txt", 'w') as f:
        f.write(str(utilMap) + "\n")

    distribution10 = answers[1]
    with open("generated/P2-data-10.txt", 'w') as f:
        for i in range(0, len(distribution10)):
            f.write(str(distribution10[i]) + "\n")

    distribution100 = answers[2]
    with open("generated/P2-data-100.txt", 'w') as f:
        for i in range(0, len(distribution100)):
            f.write(str(distribution100[i]) + "\n")

    distribution1000 = answers[3]
    with open("generated/P2-data-1000.txt", 'w') as f:
        for i in range(0, len(distribution1000)):
            f.write(str(distribution1000[i]) + "\n")

    print "Finished Solving Problem 2"
import Problem2 as p2
import cvxopt as opt
import numpy as np
from numpy import loadtxt  # loadtxt is used below to read the csv data files

# parameters
name = '2'
print '======Training======'
# load data from csv files
train = loadtxt('data/data'+name+'_train.csv')
# use deep copy here to make cvxopt happy
X = train[:, 0:2].copy()
Y = train[:, 2:3].copy()

# Define parameters
C = 0.1
K = p2.gaussian_gram(X, gamma = 2)

def column_kernel(SVM_X, x):
    '''
    Given an array of X values and a new x to predict,
    computes the vector whose i^th entry is k(SVM_X[i], x)
    '''
    def k(y):
        #return np.dot(x,y) # returns the identity kernel

        #return (1+np.dot(x,y)) # returns the linear basis kernel

        # Gaussian (RBF) kernel: exp(-gamma * ||x - y||^2)
        gamma = 2
        sqr_diff = np.linalg.norm(x - y)**2
        sqr_diff *= -1.0 * gamma
        return np.exp(sqr_diff)

    # apply k to every row of SVM_X and return the result as a column vector
    # (mirrors the linear-kernel version of column_kernel further below)
    return np.apply_along_axis(k, 1, SVM_X).reshape(-1, 1)
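# Minimal usage sketch of column_kernel with toy inputs (hypothetical data, not
# from the assignment): the i^th entry of the returned column is
# exp(-gamma * ||x - SVM_X[i]||^2).
_toy_X = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 0.0]])
_toy_x = np.array([[1.0, 0.0]])
print(column_kernel(_toy_X, _toy_x))  # a 3x1 column of Gaussian kernel values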
def wrapper_linear(name, C):
    print '======Training======'
    # load data from csv files
    train = loadtxt('data/data' + name + '_train.csv')
    # use deep copy here to make cvxopt happy
    X = train[:, 0:2].copy()
    Y = train[:, 2:3].copy()

    # Define parameters

    K = p2.linear_gram(X)

    def column_kernel(SVM_X, x):
        '''
        Given an array of X values and a new x to predict, 
        computes the  vector whose i^th entry is k(SVM_X[i],x)
        '''
        def k(y):
            #return np.dot(x,y) # returns the identity kernel

            return (1 + np.dot(x, y))  # returns the linear basis kernel

        return np.apply_along_axis(k, 1, SVM_X).reshape(-1, 1)

    # Carry out training, primal and/or dual
    a, SVM_alpha, SVM_X, SVM_Y, support = p2.dual_SVM(X, Y, C, K)

    def get_prediction_constants():
        ay = SVM_alpha * SVM_Y
        # get gram matrix for only support X values
        SVM_K = K[support]
        SVM_K = SVM_K.T[support]
        SVM_K = SVM_K.T

        # compute bias
        bias = np.nansum(
            [SVM_Y[i] - np.dot(ay.T, SVM_K[i])
             for i in range(len(SVM_Y))]) / len(SVM_Y)

        return ay, bias

    # Define the predictSVM(x) function, which uses the trained parameters (alpha, bias)

    ay, bias = get_prediction_constants()

    def predictSVM(x):
        '''
        The predicted value is given by h(x) = sign( sum_{support vectors} alpha_i y_i k(x_i,x) )
        '''
        debug = False

        x = x.reshape(1, -1)
        if debug: print 'Classify x: ', x

        # predict new Y output
        kernel = column_kernel(SVM_X, x)
        y = np.dot(ay.T, kernel)
        if debug:
            print 'New y ', y
        return y + bias

    def classification_error(X_train, Y_train):
        ''' Computes the error of the classifier on some training set'''
        n, d = X_train.shape
        incorrect = 0.0
        for i in range(n):
            if predictSVM(X_train[i]) * Y_train[i] < 0:
                incorrect += 1
        return incorrect / n

    train_err = classification_error(X, Y)
    # plot training results
    plotDecisionBoundary(X,
                         Y,
                         predictSVM, [-1, 0, 1],
                         title='SVM Train on dataset ' + str(name) +
                         ' with C = ' + str(C))
    pl.savefig('prob2linear_kernelSVMtrain_' + str(name) + '_with C=' +
               str(C) + '.png')

    print '======Validation======'
    # load data from csv files
    validate = loadtxt('data/data' + name + '_validate.csv')
    X = validate[:, 0:2]
    Y = validate[:, 2:3]

    validation_err = classification_error(X, Y)

    # plot validation results
    plotDecisionBoundary(X,
                         Y,
                         predictSVM, [-1, 0, 1],
                         title='SVM Validate on dataset ' + str(name) +
                         ' with C = ' + str(C))
    pl.savefig('prob2linear_kernelSVMvalidate_' + str(name) + '_with C=' +
               str(C) + '.png')

    f = open(
        'errors for linear kernel dataset ' + str(name) + ' with C = ' +
        str(C) + '.txt', 'w')
    f.write('Train err: ')
    f.write(str(train_err))
    f.write('\n')
    f.write('Validate err: ')
    f.write(str(validation_err))
    f.write('\n')
    f.write('Number of SVMs: ')
    f.write(str(len(SVM_Y)))
    f.close()

    print 'Done plotting...'
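# The predictSVM docstring above summarizes the dual SVM decision function,
#   h(x) = sign( sum_{i in support vectors} alpha_i * y_i * k(x_i, x) + b ).
# Below is a self-contained illustration of that formula with made-up numbers;
# it is independent of p2's API and of the trained values used above.
import numpy as np

def dual_decision_value(alphas, ys, kernel_column, bias):
    # alphas, ys and kernel_column are 1-D arrays aligned on the support vectors
    return float(np.dot(alphas * ys, kernel_column)) + bias

# e.g. three hypothetical support vectors whose kernel values k(x_i, x) are known:
value = dual_decision_value(np.array([0.5, 0.2, 0.3]),
                            np.array([1.0, -1.0, 1.0]),
                            np.array([0.9, 0.1, 0.4]),
                            bias=-0.1)
predicted_label = 1 if value > 0 else -1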
Example #8
 def test_circle(self):
     self.assertEqual(Problem2.circle(0), (0, 0))
     self.assertEqual(Problem2.circle(2), (12.57, 12.57))
     self.assertEqual(Problem2.circle(15), (706.86, 94.25))
Example #9
 def test_choose_shape(self):
     self.assertEqual(Problem2.choose_shape('D'), 'Not a valid shape')
     self.assertEqual(Problem2.choose_shape('asfasdf'), 'Not a valid shape')
Example #10
 def test_rectangle(self):
     self.assertEqual(Problem2.rectangle(0, 0), (0, 0))
     self.assertEqual(Problem2.rectangle(2, 0), (0, 4))
     self.assertEqual(Problem2.rectangle(3, 6), (18, 18))
Example #11
 def test_square(self):
     self.assertEqual(Problem2.square(0), (0, 0))
     self.assertEqual(Problem2.square(2), (4, 8))
     self.assertEqual(Problem2.square(7), (49, 28))
Example #12
def test_month_length():
    assert p2.month_length("January") == 31, "failed on January"
    assert p2.month_length("February") == 28, "failed on February"
    assert p2.month_length("February",leap_year=True) == 29, "failed on February(leap year)"
    assert p2.month_length("March") == 31, "failed on March"
    assert p2.month_length("April") == 30, "failed on April"
    assert p2.month_length("May") == 31, "failed on May"
    assert p2.month_length("June") == 30, "failed on June"
    assert p2.month_length("July") == 31, "failed on July"
    assert p2.month_length("August") == 31, "failed on August"
    assert p2.month_length("September") == 30, "failed on September"
    assert p2.month_length("October") == 31, "failed on Octuber"
    assert p2.month_length("November") == 30, "failed on November"
    assert p2.month_length("December") == 31, "failed on December"
    assert p2.month_length("abc") == None, "failed on invalid input"
Example #13
# -*- coding: utf-8 -*-
__author__ = 'Vietworm'
import Problem1
import Problem2

"""Problem 1:
If we list all the natural numbers below 10 that are multiples of 3 or 5, we get 3, 5, 6 and 9. The sum of these multiples is 23.
Find the sum of all the multiples of 3 or 5 below 1000.
"""

if Problem1.sumMultiples() == 233168:
    print "Problem 1 solved!"

"""Problem 2:
Each new term in the Fibonacci sequence is generated by adding the previous two terms. By starting with 1 and 2, the first 10 terms will be:
1, 2, 3, 5, 8, 13, 21, 34, 55, 89, ...
By considering the terms in the Fibonacci sequence whose values do not exceed four million, find the sum of the even-valued terms.
"""

if Problem2.sumFibonacci(4000000) == 4613732:
    print "Problem 2 solved!"
def containsError(sensorNetwork):
    '''Check a dataset, log any sensor errors, and return the filtered dataset.

    Accepts a dataset as a parameter and examines it for sensor errors.
    Identified errors are replaced with -1, a non-valid float value,
    and the cluster and sensor IDs are logged for repair.
    The filtered dataset is returned.'''

    # Loop through entire dataset, sensor by sensor
    for i in range(0, 32):
        for j in range(0, 16):

            # If the sensor reports an error
            if sensorNetwork[i][j] == 'err':
                # Filter the error to -1 (non-valid float)
                sensorNetwork[i][j] = -1
                # Log the failed sensor information to the log file for repair.
                logger.info("SENSOR ERROR: Cluster: " + str(i + 1) +
                            ", Sensor: " + str(j + 1))

    # Return the filtered dataset
    return sensorNetwork


# Main: a dedicated main() is not required for the assessment; this block demonstrates the functions
sensorData = dataGenerate.dataStreamGenerate()
corruptSensorData = corruptDataSet(sensorData)
dataStore.storeDataset(corruptSensorData)

filteredSensorData = containsError(corruptSensorData)
import Parameters as P
import Problem2 as Cls
import SupportTransientState as Support

# create multiple cohorts simulated with the fair coin
multiCohortFairCoin = Cls.MultiCohort(
    ids=range(P.NUM_SIM_COHORTS),  # [0, 1, 2 ..., NUM_SIM_COHORTS-1]
    pop_sizes=[P.REAL_POP_SIZE] *
    P.NUM_SIM_COHORTS,  # [REAL_POP_SIZE, REAL_POP_SIZE, ..., REAL_POP_SIZE]
    coin_probs=[P.FAIR_PROB] * P.NUM_SIM_COHORTS  # [p, p, ...]
)
# simulate all cohorts
multiCohortFairCoin.simulate(P.TIME_STEPS)

# create multiple cohorts simulated with the unfair coin
multiCohortUnfairCoin = Cls.MultiCohort(
    ids=range(
        P.NUM_SIM_COHORTS, 2 * P.NUM_SIM_COHORTS
    ),  # [NUM_SIM_COHORTS, NUM_SIM_COHORTS+1, NUM_SIM_COHORTS+2, ...]
    pop_sizes=[P.REAL_POP_SIZE] *
    P.NUM_SIM_COHORTS,  # [REAL_POP_SIZE, REAL_POP_SIZE, ..., REAL_POP_SIZE]
    mortality_probs=[P.FAIR_PROB * P.UNFAIR_PROB] * P.NUM_SIM_COHORTS)
# simulate all cohorts
multiCohortUnfairCoin.simulate(P.TIME_STEPS)

# print outcomes of each cohort
Support.print_outcomes(multiCohortFairCoin, 'When coin is fair:')
Support.print_outcomes(multiCohortUnfairCoin, 'When coin is unfair:')

# draw histograms of average survival time
Support.draw_histograms(multiCohortFairCoin, multiCohortUnfairCoin)
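# A toy sketch of the multi-cohort idea used above, independent of the course's
# MultiCohort/Parameters/Support classes (all names and numbers here are made up):
# each cohort flips a coin pop_size times and we record one average outcome per cohort.
import random

def simulate_cohorts_sketch(num_cohorts, pop_size, prob_head, seed=1):
    rng = random.Random(seed)
    cohort_means = []
    for _ in range(num_cohorts):
        heads = sum(1 for _ in range(pop_size) if rng.random() < prob_head)
        cohort_means.append(float(heads) / pop_size)
    return cohort_means  # summarize or histogram these per-cohort averages

# e.g. simulate_cohorts_sketch(num_cohorts=20, pop_size=1000, prob_head=0.5)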
        max_x = [0, 0, 0]  # Used to store the abscissas at which the maximum error occurs

        # Generating the ordinates and abscissas

        interval = [-5, 5]
        spacing = 10.0 / N  # float division: the interval [-5, 5] has length 10
        points = [interval[0] + spacing / 2 + spacing * m for m in range(N)]  # creates the abscissas (midpoints of N equal subintervals)
        values = [f(x) for x in points]  # creates the ordinates

        # Newton approximation:

        newton_poly = pb1.newton_interpolation(values, points)  # This gives us our newton_polynomial

        # Newton with Chebyshev zeros:

        cheb_zeros = pb2.chebyshev(-5, 5, N)  # Retrieves the Chebyshev zeros
        cheb_values = [f(x) for x in cheb_zeros]  # Retrieves the ordinates for the Chebyshev zeros
        cheb_newton_poly = pb1.newton_interpolation(cheb_values, cheb_zeros)  # Generates polynomial

        spline_coeff = pb3.SplineCoefficients(points, values)  # Returns our spline interpolation coefficients

        x = linspace(interval[0], interval[1], 1001)

        for i in range(1001):  # We loop through the 1001 points
            x_coord = x[i]
            # We evaluate all of the approximations at the current abscissa and store them in an array

            y = [pb1.get_value_of_polynomial(newton_poly, points, x_coord),
                 pb1.get_value_of_polynomial(cheb_newton_poly, cheb_zeros, x_coord),
                 pb3.evaluate_spline(spline_coeff, points, x_coord)]
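# For reference, a hedged sketch of what pb2.chebyshev(-5, 5, N) is assumed to
# return above: the N Chebyshev nodes (zeros of T_N) mapped from [-1, 1] onto
# [a, b]. The actual pb2 implementation may order or scale them differently.
from math import cos, pi

def chebyshev_nodes_sketch(a, b, n):
    # x_k = (a + b)/2 + (b - a)/2 * cos((2k + 1) * pi / (2n)),  k = 0, ..., n-1
    return [(a + b) / 2.0 + (b - a) / 2.0 * cos((2 * k + 1) * pi / (2 * n))
            for k in range(n)]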