Example #1
def main(path: str, verbose: bool = False, separator: str = ',', number_of_target_feature: int = -1,
         file_with_params: str = "params.json", learning_rate: float = 0.01, epsilon: float = 10e-10) -> None:
    # read_dataset and GradientDescent are the repo's own helpers; load the data,
    # then fit the gradient-descent model with the given hyperparameters
    feature_arrays, target_values, columns, target_column = read_dataset(
        path, verbose=verbose, separator=separator, number_of_target_feature=number_of_target_feature
    )
    gd = GradientDescent(verbose=verbose, columns=columns, target_column=target_column)
    gd.fit(
        feature_arrays, target_values, file_with_params=file_with_params, learning_rate=learning_rate, epsilon=epsilon
    )
    # print("R2 score1:", lr1_confidence)

Example #2

if model == "gradient_descent":  # earlier branches of the if/elif chain are omitted from this excerpt
    GOOGL_closing_data = features[:, 5].reshape(-1, 1)
    n = 3

    # Data processing: predict the GOOGL closing price n steps ahead
    data0 = features[:, 5]
    example0 = data0[:-n].reshape(-1, 1)  # inputs: closing price at time t
    target = GOOGL_closing_data[n:]       # targets: closing price at time t + n

    # Train/test split and evaluation
    train_features, train_targets, test_features, test_targets = metrics.train_test_split(
        example0, target, 0.8)
    gd = GradientDescent()
    gd.fit(train_features, train_targets)
    gd_confidence = gd.score(test_features, test_targets)
    print("R2 score:", gd_confidence)

elif model == "kmeans":
    # Need to make continuous for higher Mutual Info Score
    kmeans = KMeans(2)
    kmeans.fit(trainf)
    labels = kmeans.predict(testf)
    #acc = metrics.adjusted_mutual_info(testt.flatten(), labels)
    print(labels)

    cm = metrics.confusion_matrix(testt.flatten(), labels)
    a = metrics.accuracy(testt.flatten(), labels)
    p, r = metrics.precision_and_recall(testt.flatten(), labels)
    f = metrics.f1_measure(testt.flatten(), labels)
    print("Confusion matrix:\n", cm)
    print("Accuracy:", a, "Precision:", p, "Recall:", r, "F1:", f)
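
The metrics.* functions above are also the repo's own. For a two-cluster problem like this one, the same quantities can be derived from confusion-matrix counts; a minimal NumPy sketch, assuming label 1 is the positive class:

import numpy as np

def binary_metrics(y_true, y_pred):
    # confusion-matrix counts, treating label 1 as the positive class (an assumption)
    tp = np.sum((y_true == 1) & (y_pred == 1))
    tn = np.sum((y_true == 0) & (y_pred == 0))
    fp = np.sum((y_true == 0) & (y_pred == 1))
    fn = np.sum((y_true == 1) & (y_pred == 0))
    accuracy = (tp + tn) / len(y_true)
    precision = tp / (tp + fp) if tp + fp else 0.0
    recall = tp / (tp + fn) if tp + fn else 0.0
    f1 = 2 * precision * recall / (precision + recall) if precision + recall else 0.0
    return accuracy, precision, recall, f1

Note that KMeans assigns cluster indices arbitrarily, so the predicted labels may be a permutation of the true ones; permutation-invariant scores such as the commented-out adjusted mutual information sidestep this, while accuracy-style metrics may require relabeling the clusters first.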
Example #3
"""
GRADIENT DESCENT CHECK
"""

import numpy as np  # used throughout; GradientDescent is the repo's own class

# split training and testing data, normalize features
average_death_rate = np.reshape(average_death_rate, (-1, 1))  # targets as a column vector

# option 1: normalize features
#confirmed_features = normalize(confirmed_features, axis=1)

# option 2: binary inputs - replace each feature with the (negated) sign of its gradient
gradients = np.gradient(confirmed_features)  # per-axis gradients of the 2-D feature array
confirmed_features = -np.sign(gradients[0])  # keep only the direction of change along axis 0
print(confirmed_features.shape)

data = np.hstack((confirmed_features, average_death_rate))
np.random.shuffle(data)
training_data = data[0:212, :]  # hard-coded split index from the original example
testing_data = data[212:, :]
training_features = training_data[:, :-1]
training_targets = training_data[:, -1]
testing_features = testing_data[:, :-1]
testing_targets = testing_data[:, -1]

model = GradientDescent('squared', regularization='l1', reg_param=.5)
model.fit(training_features.astype(float), training_targets.astype(float))
prediction = model.confidence(testing_features.astype(float))

error = ((testing_targets - prediction)**2).mean()  # mean squared error on the test set
print("MSE:", error)

# np.correlate on 1-D arrays returns a raw inner product, not a correlation coefficient;
# np.corrcoef gives the Pearson r that the variable name suggests was intended
r = np.corrcoef(prediction, testing_targets)[0, 1]
print(r)
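
The GradientDescent class is configured above with a squared loss and an L1 penalty, but its implementation isn't shown. A minimal sketch of the update that configuration implies, i.e. (sub)gradient descent on (1/n)*||Xw + b - y||^2 + reg_param*||w||_1, offered as an illustration rather than the repo's actual code:

import numpy as np

def fit_squared_l1(X, y, reg_param=0.5, learning_rate=0.01, steps=1000):
    # squared loss with an L1 subgradient penalty on the weights
    n, d = X.shape
    w = np.zeros(d)
    b = 0.0
    for _ in range(steps):
        residual = X @ w + b - y                                    # prediction error
        grad_w = (2 / n) * X.T @ residual + reg_param * np.sign(w)  # loss gradient + L1 subgradient
        grad_b = (2 / n) * residual.sum()
        w -= learning_rate * grad_w
        b -= learning_rate * grad_b
    return w, b

# hypothetical usage against the arrays prepared above:
# w, b = fit_squared_l1(training_features.astype(float), training_targets.astype(float))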