Example #1
import numpy as np
import pandas as pd
import torch

# func (utility helpers) and at (the AutoEncoder definition) are project-local
# modules; score, sem, stds, allCos and K are assumed to be prepared earlier.
currentCos = func.findCurrentCos(sem)
AutoEncoder = at.AutoEncoder  # expose the class so torch.load can unpickle it
autoencoder = torch.load('net2.pkl')

# start encode
print('Encoding')
encoded, _ = autoencoder(score)
encoded = encoded.detach().numpy()
# get similarity
print('Getting Similarity')
s_len = len(encoded)
similarity = np.zeros((s_len, s_len))
for a in range(s_len):
    for b in range(s_len):
        if a == b:
            similarity[a][b] = 0
        else:
            similarity[a][b] = func.getSimilarity(encoded[a], encoded[b])
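
# A minimal sketch of what func.getSimilarity might compute (an assumption:
# cosine similarity between two encoded vectors; the real implementation
# lives in the func module and is not shown here):
def cosine_similarity(u, v):
    denom = np.linalg.norm(u) * np.linalg.norm(v)
    return float(np.dot(u, v) / denom) if denom else 0.0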

# start predict
score = score.numpy()
pred = func.predict(similarity, score)
# Generate the top-K (here 50) highest-predicted courses that each student
# has not passed
suggest = func.generate(allCos, pred, 50)

# Filter the recommended courses down to the specified semester and K courses
result = func.parseCurrentCos(stds, suggest, sem, K)

print("Transfer to csv file . . .")
result = pd.DataFrame(result)
result.to_csv('RS_auto_2.csv', index=False)
Example #2
# Assumes numpy as np, matplotlib.pyplot as plt, scipy.io.loadmat, and the
# exercise helpers (displayData, nnCostFunction, sigmoidGradient,
# checkNNGradients, fmin_nn1, predict) are imported at module level.
def main():

    # Setup the parameters you will use for this exercise
    input_layer_size = 400  # 20x20 input images of digits
    hidden_layer_size = 25
    num_labels = 10

    ## Part 1: Loading and Visualizing Data
    print("Loading and Visualizing Data ...")
    dat = loadmat("./ex4data1.mat")
    X = dat['X']
    y = dat['y']
    m = X.shape[0]

    # Randomly select 100 data points to display
    rand_indices = np.random.permutation(m)
    sel = X[rand_indices[:100], :]

    displayData(sel)

    ## Part 2: Loading Parameters
    # Load the weights into variables Theta1 and Theta2
    dat1 = loadmat("./ex4weights.mat")
    Theta1 = dat1["Theta1"]
    Theta2 = dat1["Theta2"]

    # Unroll parameters
    nn_params = np.vstack([Theta1.reshape(-1, 1), Theta2.reshape(-1, 1)])
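
    # Sanity check: with Theta1 of shape (25, 401) and Theta2 of shape
    # (10, 26), the unrolled vector has 25 * 401 + 10 * 26 = 10285 entries.
    assert nn_params.shape == (10285, 1)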

    ## Part 3: Compute Cost (Feedforward)
    print("\nFeedforward Using Neural Network ...")

    # Weight regularization parameter
    lmbd = 0

    J, _ = nnCostFunction(nn_params, input_layer_size, hidden_layer_size,
                          num_labels, X, y, lmbd)

    print("Cost at parameters (loaded from ex4weights): {}\n\
          (this value should be about 0.2877629)".format(J))

    ## Part 4: Implement Regularization
    print("\nChecking Cost Function (w/ Regularization) ...")
    lmbd = 1

    J, _ = nnCostFunction(nn_params, input_layer_size, hidden_layer_size,
                          num_labels, X, y, lmbd)

    print("Cost at parameters (loaded from ex4weights): {}\n\
          (this value should be about 0.383770)".format(J))

    ## Part 5: Sigmoid Gradient
    print("\nEvaluationg sigmoid gradient...")

    g = sigmoidGradient(np.array([-1, -0.5, 0, 0.5, 1]))
    print("Sigmoid gradient evaluated at [-1, -0.5, 0, 0.5, 1]:")
    print(g)
    print("\n")

    ## Part 6: Initializing Parameters
    print("\nInitializing Neural Network Parameters ...")

    # initial_Theta1 = randInitializeWeights(input_layer_size, hidden_layer_size)
    # initial_Theta2 = randInitializeWeights(hidden_layer_size, num_labels)

    # Unroll parameters
    # initial_nn_params = np.vstack([initial_Theta1.reshape(-1, 1), initial_Theta2.reshape(-1, 1)])

    ## Part 7: Implement Backpropagation
    print("\nChecking Backpropagation...")

    checkNNGradients()
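
    # checkNNGradients compares the analytic backprop gradient against a
    # two-sided numerical estimate, (J(theta + eps) - J(theta - eps)) / (2 * eps)
    # for a small eps; the two should agree to many decimal places.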

    ## Part 8: Implement Regularization
    print("\nChecking Backpropagation (w/ Regularization) ...")

    # Check gradients by running checkNNGradients
    lmbd = 3
    checkNNGradients(lmbd)

    # Also output the costFunction debugging values
    debug_J, _ = nnCostFunction(nn_params, input_layer_size, hidden_layer_size,
                                num_labels, X, y, lmbd)

    print("\n\nCost at (fixed) debugging parameters (w/ lambda = {}): {}"\
    "\n(for lambda = 3, this value should be about 0.576051)\n".format(lmbd, debug_J))

    ## Part 9: Training NN
    print("\nTraining Neural Network...")

    lmbd = 1  # TODO: optimize() does not support regularization yet, so keep this at 1
    nn_params, _ = fmin_nn1(input_layer_size, hidden_layer_size, num_labels, X,
                            y, lmbd)
    Theta1 = nn_params[:hidden_layer_size * (input_layer_size + 1)].reshape(
        hidden_layer_size, (input_layer_size + 1))
    Theta2 = nn_params[hidden_layer_size * (input_layer_size + 1):].reshape(
        num_labels, (hidden_layer_size + 1))

    ## Part 10: Visualize Weights
    print("\nVisualizing Neural Network ...")

    displayData(Theta1[:, 1:])

    ## Part 11: Implement Predict
    pred = predict(Theta1, Theta2, X)
    pred[pred == 0] = 10  # the dataset labels digit 0 as class 10

    print("\nTraining Set Accuracy: {}".format(
        np.mean(np.double(pred == y.ravel())) * 100))

    plt.show()
Example #3
# Collect every student's grades as a list of lists
grades = func.findGrades(stds, cos)
print("Finished")

# Compute the similarity between every pair of students
print("Computing the similarity between students . . .")
s_len = len(stds)
similarity = np.zeros((s_len, s_len))
for a in range(s_len):
    for b in range(s_len):
        similarity[a][b] = func.getSimilarity(grades[a], grades[b])
print("Finished")

# Predict scores for the courses each student has not passed
print("Predicting course scores and generating recommendations . . .")
pred = func.predict(similarity, grades)
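
# A sketch of the usual similarity-weighted formulation, assuming func.predict
# follows standard collaborative filtering (the real implementation is not
# shown here):
#   pred[i][c] = sum_j similarity[i][j] * grades[j][c] / sum_j similarity[i][j]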

# Generate the top-K (here 30) highest-predicted courses that each student
# has not passed
suggest = func.generate(cos, pred, 30)

# Fill any empty suggestion lists based on K-Means clustering
func.fillEmpty(suggest, pred)
print("Finished")

# Filter the recommended courses down to the specified semester and K courses
print("Matching the course names . . .")
result = func.parseCurrentCos(stds, suggest, sem, K)
print("Finished")

print("Transfer to csv file . . .")
result = pd.DataFrame(result)
Example #4
import numpy as np
from func import compute_cost, gradient_descent, predict

data = np.loadtxt('ex1data2.txt', delimiter=',')  # np.matrix is deprecated; plain arrays suffice
X = data[:, 0:2]
X = np.c_[np.ones((X.shape[0], 1)), X]  # add the intercept column
y = data[:, 2].reshape(-1, 1)  # keep y as a column vector

# Task 9: closed-form fit via the normal equation, theta = pinv(X'X) X'y;
# pinv keeps the computation stable even when X'X is singular.
theta = np.linalg.pinv(X.T @ X) @ X.T @ y
print(theta)

cost = compute_cost(X, y, theta)
print(cost)

test = np.array([[1.0, 272000, 2],
                 [1.0, 314000, 3]])  # rows: [intercept, feature 1, feature 2]

print('prediction -> ' + str(predict(test, theta)))
Example #5
# Cost function
m = X.shape[0]
X_ones = np.c_[np.ones((m, 1)), X]
theta = np.array([[1.0], [2.0]])  # initial parameters as a column vector
print(compute_cost(X_ones, y, theta))

# Call method gradient_descent
theta, J_th = gradient_descent(X_ones, y, 0.02, 500)
print(theta)
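# For reference (an assumption about what gradient_descent implements): batch
# updates of the form theta := theta - (alpha / m) * X.T @ (X @ theta - y),
# run here with alpha = 0.02 for 500 iterations.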

# Cost decrease during gradient descent
plt.plot(np.arange(500), J_th, 'k-')
plt.title('Cost decrease during gradient descent')
plt.xlabel('Iteration')
plt.ylabel('Cost')
plt.grid()
plt.show()

# Call method predict
test = np.array([[1.0, 2.72],
                 [1.0, 3.14]])  # rows: [intercept, feature]
test_prediction = predict(test, theta)
print(test_prediction)

# Task 6: plot the fitted regression line over the data
x = np.arange(X.min(), X.max())
plt.plot(x, theta[1] * x + theta[0], 'g--')
plt.plot(X, y, 'b.')
plt.grid()
plt.show()
Example #6
        # Load the evaluation data
        labels_ev, imgfiles_ev, labeldict, dmy = read_image_list(IMAGE_LIST_EV,
                                                                 DATA_DIR,
                                                                 dic=labeldict)

        # Specify additional conditions
        conditions = {}
        conditions['in_channels'] = CHANNELS  # number of input channels: 1 (grayscale)

        # Evaluate recognition accuracy
        predict(
            device=dev,  # ID of the GPU to use; no need to change
            in_data=imgfiles_ev,  # input data; MNIST images in this case
            out_data=labels_ev,  # output (ground-truth) data; class labels here
            model=cnn,  # the trained network model
            loss_func=nn.CrossEntropyLoss(),  # evaluation loss; softmax cross-entropy here
            batchsize=batchsize,  # batch size
            additional_conditions=conditions  # the additional conditions set above
        )

    else:

        ### Run with an image file specified: show the recognition result for that image ###

        # Set the color mode of the input image
        color_mode = 0 if CHANNELS == 1 else 1

        # Load the input image
        img = load_single_image(in_filepath, mode=color_mode)
Example #7
def main():
    ## Load Data
    # The first two columns contains the exam scores and the third column
    # contains the label.

    data = np.loadtxt("./ex2data1.txt", delimiter=',')
    X = data[:, :2]
    y = data[:, 2]

    ## Part 1: Plotting
    print("""Plotting data with + indicating (y = 1) examples and o 
            indicating (y = 0) examples.""")
    plotData(X, y)
    plt.xlabel("Exam 1 score")
    plt.ylabel("Exam 2 score")
    plt.legend(["Admitted", "Not admitted"])

    ## Part 2: Compute Cost and Gradient

    # Setup the data matrix appropriately, and add ones for the intercept term
    m, n = X.shape

    # Add intercept term to x and X_test
    X = np.hstack([np.ones((m, 1)), X])

    # Initialize fitting parameters
    initial_theta = np.zeros((n + 1, 1))

    # Compute and display initial cost and gradient
    cost = costFunction(initial_theta, X, y)
    grad = gradient(initial_theta, X, y)

    print("\nCost at initial theta (zeros): {}".format(cost))
    print("Expected cost (approx): 0.693")
    print("Gradient at initial theta (zeros):\n{}".format(grad))
    print("Expected gradients (approx):\n -0.1000\n -12.0092\n -11.2628")

    # Compute and display cost and gradient with non-zero theta
    test_theta = np.array([-24, 0.2, 0.2])
    cost = costFunction(test_theta, X, y)
    grad = gradient(test_theta, X, y)

    print("\nCost at test theta: {}".format(cost))
    print("Expected cost (approx): 0.218")
    print("Gradient at test theta:\n{}".format(grad))
    print("Expected gradients (approx):\n 0.043\n 2.566\n 2.647")

    ## Part 3: Optimizing using fminunc
    options = {"maxiter": 400}
    ## Two implementations are available here; the SciPy one is left commented out
    # theta, cost = scipy_fminunc(costFunction, initial_theta, (X, y), options)
    theta, cost = tf_fmin(X, y, initial_theta)

    # Print theta to screen
    print("Cost at theta found by fminunc: {}".format(cost))
    print("Expected cost (approx): 0.203")
    print("theta: {}".format(theta))
    print("Expected theta (approx): \n-25.161\n 0.206\n 0.201")

    # Plot Boundary
    plotDecisionBoundary(theta, X, y)

    # Put some labels
    plt.xlabel("Exam 1 score")
    plt.ylabel("Exam 2 score")

    # Legend, specific for the exercise
    plt.legend(["Admitted", "Not admitted", "Decision Boundary"])
    plt.axis([30, 100, 30, 100])

    ## Part 4: Predict and Accuracies
    prob = sigmoid(np.array([1, 45, 85]) @ theta)
    print(
        "For a student with scores 45 and 85, we predict an admission probability of {}"
        .format(prob))
    print("Expected value: 0.775 +/- 0.002")

    # Compute accuracy on our training set
    p = predict(theta, X)

    print("Train Accuracy: {}".format(
        np.mean(np.float64(p == y.reshape(-1, 1))) * 100))
    print("Expected accuracy (approx): 89.0")

    plt.show()
Example #8
args = parser.parse_args()

image_path = args.image_path
save_dir = args.save_directory
mode = args.mode
topk = args.topk

cat_to_name_path = args.cat_name_path
with open(cat_to_name_path, 'r') as f:
    cat_to_name = json.load(f)

load_pretrained_model = args.model_arch
model = getattr(models, load_pretrained_model)(pretrained=True)
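# Note: torchvision 0.13+ deprecates pretrained=True in favor of the weights=
# argument, e.g. getattr(models, load_pretrained_model)(weights="DEFAULT").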

loaded_model = load_checkpoint(model, save_dir, mode)

processed_img = process_image(image_path)

probs_list, classes_list = predict(processed_img, loaded_model, topk)

print(probs_list)
print(classes_list)

names = [cat_to_name[i] for i in classes_list]

print('Most likely flower is {} with probability percentage: {}'.format(
    names[0], round(probs_list[0] * 100, 4)))
Example #9
# compute the initial cost
primary_cost = compute_cost(X_ones, y, theta)
print('initial cost -> ' + str(primary_cost))

# gradient descent
theta, J_th = gradient_descent(X_ones, y, 0.000000002, 1000)
plt.plot(np.arange(1000), J_th, 'k-')
plt.title('Cost decrease during gradient descent')
plt.xlabel('Iteration')
plt.ylabel('Cost')
plt.grid()
plt.show()
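
# Note: the learning rate above is tiny (2e-9) because the features are not
# normalized; values on the order of 10^5 would otherwise make the updates
# diverge. Mean normalization, e.g.
#   X_norm = (X - X.mean(axis=0)) / X.std(axis=0)
# would permit a much larger learning rate.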

# weights
print('weights:')
print(theta)

# compute the new cost
new_cost = compute_cost(X_ones, y, theta)
print('new cost -> ' + str(new_cost))

print('cost difference -> ' + str(primary_cost - new_cost))

test = np.array([[1.0, 272000, 2],
                 [1.0, 314000, 3]])  # rows: [intercept, feature 1, feature 2]
print('prediction -> ' + str(predict(test, theta)))
Example #10
    parser.add_argument("--category_names",
                        type=str,
                        help="Mapping of Categories to Real Names File Path")
    parser.add_argument('--gpu', action='store_true', help='Enable GPU')

    arguments = parser.parse_args()
    return arguments


if __name__ == "__main__":

    arguments = arg_parser()

    with open(arguments.category_names, 'r') as f:
        cat_to_name = json.load(f)

    image_dir = arguments.image_dir
    checkpoint_dir = arguments.checkpoint_dir
    topk = arguments.top_k
    gpu = arguments.gpu
    device = torch.device("cuda" if gpu else "cpu")
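    # Note: one could guard against requesting CUDA on a machine without it:
    # device = torch.device("cuda" if gpu and torch.cuda.is_available() else "cpu")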

    model, class_to_idx = load_checkpoint(checkpoint_dir, device)

    idx_to_class = {idx: label for label, idx in class_to_idx.items()}

    top_p, top_label = predict(image_dir, model, topk, device, idx_to_class)

    print("Prediction:")
    for prob, label in zip(top_p[0].tolist(), top_label):
        print(cat_to_name[label], "{:.4f}".format(prob))