示例#1
0
    print 'Cost at initial theta (zeros): %f' % cost
    print 'Gradient at initial theta (zeros):'
    print grad
    print ''

    ## ============= Part 2: Regularization and Accuracies =============
    #  Optional Exercise:
    #  In this part, you will get to try different values of lambda and
    #  see how regularization affects the decision boundary
    #
    #  Try the following values of lambda (0, 1, 10, 100).
    #
    #  How does the decision boundary change when you vary lambda? How does
    #  the training set accuracy vary?

    # Optimize
    print 'Start Optimize'
    result_Newton_CG = minimize(lambda t: costFunctionReg(t, X_long, y_long, lamb),
                                initial_theta, method='Newton-CG', jac=True)
    print 'End Optimize'
    theta = result_Newton_CG['x']

    # Plot Boundary
    f = plotData(X, y, theta)
    plt.title('lamb = %f' % lamb)

    # Compute accuracy on our training set
    p = predict(theta, X_long)

    print 'Train Accuracy: %f' % (np.mean(p == y) * 100)
])
#%%
# All splits share the same image directory; each split has its own CSV list.
_images_root = path + '/Dataset/images'

# Training and validation splits (labels available).
datasetTrain = ImageDataset(
    _images_root,
    path + '/Dataset/training_list.csv',
    transform=transform,
)
datasetValidation = ImageDataset(
    _images_root,
    path + '/Dataset/validation_list.csv',
    transform=transform,
)
#%%
# Blind test split: no targets, hence mode='test'.
datasetTest = ImageDataset(
    _images_root,
    path + '/Dataset/testing_list_blind.csv',
    transform=transform,
    mode='test',
)

#%%
# Load a pretrained classification network from a saved checkpoint
# (presumably a ResNet18, per the checkpoint file name), then replace its
# final fully-connected layer with a fresh Linear(num_ftrs, 512) so the
# network emits 512-d vectors instead of class scores.  The cells below use
# it as a feature extractor for the three dataset splits.
from models import getClassificationModel
model_name = 'ResNet18CrossEntropyReg10_1532170755.190167.pth'
model = getClassificationModel(previous_state_path=modelPath + model_name)
num_ftrs = model.fc.in_features
model.fc = nn.Linear(num_ftrs, 512)
#%%
# Run the feature-extractor model over each split.  `predict` presumably
# returns the model output for every sample, reading inputs under the
# 'image' key -- confirm in helperFunctions.
from helperFunctions import predict
CNNOutputTrain = predict(model, datasetTrain, 'image')
CNNOutputValidation = predict(model, datasetValidation, 'image')
CNNOutputTest = predict(model, datasetTest, 'image')
#%%
# Persist the extracted 512-d features so downstream training can start
# from precomputed features instead of re-running the CNN.
torch.save(CNNOutputTrain, modelPath + 'FeaturesResNet18Train512.pth')
torch.save(CNNOutputValidation,
           modelPath + 'FeaturesResNet18Validation512.pth')
torch.save(CNNOutputTest, modelPath + 'FeaturesResNet18Test512.pth')
    featureLoaderTrain,
    featureLoaderValidation,
    epochs=epochs,
    lr=lr,
)
# print(regressionLogs)

#%% save model
# Timestamp the checkpoint name so successive runs never overwrite each other.
import time
modelName = "RegressionNNDropoutAleBeta0.85%d_%f.pth" % (epochs, time.time())
torch.save(modelTrained.state_dict(), modelPath + modelName)

#%%
# Plot the logs collected during the training run above (presumably
# loss/error curves -- see helperFunctions.plot_logs_regression).
from helperFunctions import plot_logs_regression
plot_logs_regression(regressionLogs)

#%%
# Run the trained regressor over the validation set (inputs read from the
# 'features' key) and fetch the matching ground-truth targets.
from helperFunctions import predict, get_gt
import numpy as np
predictions = predict(modelTrained, ValidationDataset, 'features')
gt = get_gt(ValidationDataset, 'target')

#%%
# Compute localization error statistics; the prints below read
# errors[0..3] as mean/median location and mean/median orientation error.
from evaluate import evaluate_localization
errors = evaluate_localization(predictions, gt)
# Report the four localization metrics returned by evaluate_localization,
# in order: mean/median location error, mean/median orientation error.
print("Errors:")
_metric_names = ("Mean Location Error", "Median Location Error",
                 "Mean Orientation Error", "Median Orientation Error")
for _name, _value in zip(_metric_names, errors):
    print("%s: %0.4f" % (_name, _value))
示例#4
0
    print ''

    ## ============= Part 2: Regularization and Accuracies =============
    #  Optional Exercise:
    #  In this part, you will get to try different values of lambda and
    #  see how regularization affects the decision boundary
    #
    #  Try the following values of lambda (0, 1, 10, 100).
    #
    #  How does the decision boundary change when you vary lambda? How does
    #  the training set accuracy vary?

    # Optimize
    print 'Start Optimize'
    result_Newton_CG = minimize(
        lambda t: costFunctionReg(t, X_long, y_long, lamb),
        initial_theta,
        method='Newton-CG',
        jac=True)
    print 'End Optimize'
    theta = result_Newton_CG['x']

    # Plot Boundary
    f = plotData(X, y, theta)
    plt.title('lamb = %f' % lamb)

    # Compute accuracy on our training set
    p = predict(theta, X_long)

    print 'Train Accuracy: %f' % (np.mean(p == y) * 100)
示例#5
0
def main():
    """Train and evaluate a regression model of chiller efficiency (kW/Ton).

    Reads chiller telemetry from ``chiller_data.csv``, cleans it via
    ``dataCleaner``, fits a model (via ``train``) of KWperTon against
    Status, PER^2, PER and Tci on an 80% random split, plots the fitted
    surface against the held-out training points in 3D, and prints R^2,
    mean absolute error, and error standard deviation for the training,
    validation, and test splits.

    NOTE(review): this is Python 2 code (print statements) using APIs
    removed from modern libraries: pandas ``.ix`` and ``.as_matrix()``,
    matplotlib ``plt.hold``.  It will not run unmodified on current stacks.
    """
    ## ======== Settings ======================
    # Set rated tons (i.e. capacity) of chiller being analyzed
    ratedTons = 3700

    ## ========  Read, clean, and format data sets for machine learning =======
    # Import the data
    filename = "chiller_data.csv"
    chillerData = pandas.read_csv(filename, low_memory=False)

    # Clean the data (dataCleaner semantics are defined elsewhere;
    # row counts before/after are printed for a quick sanity check).
    print 'Length before cleaning data:'
    print len(chillerData.index)
    chillerData = dataCleaner(chillerData, ratedTons)
    print 'Length after cleaning data:'
    print len(chillerData.index)

    # Create X and y data sets (uses 'Status' for constant)
    # try: adding and removing variables like 'Pci' and 'Fcond' to see effect on model performance
    # e.g. X = chillerData.loc[:,['Status','PER^2','PER','Tci','Fcond']]
    # note: adding another variable will invalidate the 3D data visualization below
    X = chillerData.loc[:, ['Status', 'PER^2', 'PER', 'Tci']]
    y = chillerData.loc[:, ['KWperTon']]
    # Hold out 20% of rows; the remaining 80% are the training set.
    # NOTE(review): random.sample over a pandas Index works in old versions;
    # confirm on the target pandas/Python version.
    resample = int(0.2 * len(y))
    rows = random.sample(X.index, resample)
    X_train = X.drop(rows)
    y_train = y.drop(rows)

    # Keep X_train and y_train dataframes for visuals
    X_plot = X_train
    y_plot = y_train

    # Create validation and test sets by alternating rows of the hold-out.
    # NOTE(review): .ix is removed in modern pandas -- use .loc/.iloc.
    X = X.ix[rows]
    y = y.ix[rows]
    X_val = X.iloc[::2]
    y_val = y.iloc[::2]
    X_test = X.iloc[1::2]
    y_test = y.iloc[1::2]

    # Convert data to numpy format
    # NOTE(review): .as_matrix() is removed in modern pandas -- use .values
    # or .to_numpy().
    X_train = X_train.as_matrix()
    y_train = y_train.values
    X_val = X_val.as_matrix()
    y_val = y_val.values
    X_test = X_test.as_matrix()
    y_test = y_test.values

    # Option for regularization (not used when lamda = 0.0)
    lamda = 0.0

    # Train the model
    theta = train(X_train, y_train, lamda)

    ## ==========  Plot the data and model in 3D  ==============
    # Initialize a figure
    # NOTE(review): plt.hold was removed in matplotlib 3.0 and
    # fig.gca(projection=...) is deprecated in favor of add_subplot.
    fig = plt.figure()
    ax = fig.gca(projection='3d')
    plt.hold(True)

    # Create a surface plot from the trained model:
    # z = theta0 + theta1*PER^2 + theta2*PER + theta3*Tci
    # (x axis = PER in [0.25, 0.95), y axis = Tci in [45, 90)).
    x_surf = np.arange(0.25, 0.95, 0.05)
    y_surf = np.arange(45.0, 90.0, 2.5)
    x_surf, y_surf = np.meshgrid(x_surf, y_surf)
    z_surf = theta[0] + theta[1] * (x_surf**
                                    2) + theta[2] * x_surf + theta[3] * y_surf
    ax.plot_surface(x_surf,
                    y_surf,
                    z_surf,
                    cmap=plt.cm.jet,
                    cstride=1,
                    rstride=1)

    # Add the actual data as a scatter plot
    ax.scatter(X_plot['PER'],
               X_plot['Tci'],
               zs=y_plot['KWperTon'],
               s=10,
               c='#A0A0A0')
    plt.title(
        'Predicted vs. Actual Chiller Efficiency (rotate for better view')
    ax.set_xlabel('PER')
    ax.set_ylabel('T_ci')
    ax.set_zlabel('kW/Ton')

    plt.show()

    ## ============= Calculate training, validation, and test error ===========
    # Calculate Mean Absolute Error and R^2 for training, validation, and test data
    MAE_train, r2_train, stdev_train = predict(X_train, y_train, theta)
    MAE_val, r2_val, stdev_val = predict(X_val, y_val, theta)
    MAE_test, r2_test, stdev_test = predict(X_test, y_test, theta)

    # Print results
    print 'Theta:'
    print theta

    print 'R^2 (training data):'
    print r2_train

    print 'R^2 (validation data):'
    print r2_val

    print 'R^2 (test data):'
    print r2_test

    print 'Mean absolute error (training data):'
    print MAE_train

    print 'Mean absolute error (validation data):'
    print MAE_val

    print 'Mean absolute error (test data):'
    print MAE_test

    print 'Standard deviation of error (training data):'
    print stdev_train

    print 'Standard deviation of error (validation data):'
    print stdev_val

    print 'Standard deviation of error (test data):'
    print stdev_test
示例#6
0
#%%
# Standard ImageNet preprocessing: resize to 224x224, convert to tensor,
# normalize with the usual ImageNet channel statistics.
_imagenet_mean = [0.485, 0.456, 0.406]
_imagenet_std = [0.229, 0.224, 0.225]
transform = transforms.Compose([
    transforms.Resize([224, 224]),
    transforms.ToTensor(),
    transforms.Normalize(mean=_imagenet_mean, std=_imagenet_std),
])

# Blind test split for the classifier (no targets, hence mode='test').
classificationDataset = ImageDataset(
    path + '/Dataset/images',
    path + '/Dataset/testing_list_blind.csv',
    transform=transform,
    mode='test',
)
# Precomputed 512-d CNN features for the same split, with a singleton
# channel dimension inserted at axis 1.
imageFeatures = torch.Tensor(
    torch.load(modelPath + 'FeaturesResNet18Test512.pth')).unsqueeze(1)
regressionDataset = FeatureDataset(imageFeatures, None)

#%%
# Rebuild the two models used for the final predictions: the CNN classifier
# restored from its checkpoint, and the feature-based regressor.
from models import getClassificationModel, NNRegressorDropout
modelName = modelPath+'old models/ResNet18CrossEntropyReg10_1532170755.190167.pth'
classificationModel = getClassificationModel(previous_state_path=modelName)
# NOTE(review): no saved state is loaded into regressionModel here, so the
# predictions below would use freshly-initialized weights -- verify that a
# checkpoint load happens elsewhere (or is missing by mistake).
regressionModel = NNRegressorDropout(512)

#%%
# Produce the two outputs per test image: regression values from the
# precomputed features, and a class label from the raw images.
from helperFunctions import predict, predictLabel
predRegression = predict(regressionModel, regressionDataset, input_key='features')
predClassification = predictLabel(classificationModel, classificationDataset)
#%%
# Assemble one submission row per test image:
# [image path, regression outputs..., predicted class label],
# then stringify everything and dump it as CSV.
numElements = len(classificationDataset)
final_matrix = [
    [classificationDataset[i]['path'],
     *predRegression[i],
     predClassification[i]]
    for i in range(numElements)
]
final_matrix = np.stack(final_matrix).astype(str)
#%%
np.savetxt(modelPath+'outputTest.csv', final_matrix, delimiter=',', fmt="%s")