def testKMeans(self):
    # Exercises the k-medoids clusterer (despite the name, this test runs KMediods)
    data = DataRetriever("../Datasets/metadata.json")
    data.retrieveData("computerHardware")
    kValue = 15

    t = Timer()
    t.start()
    mediods = KMediods(data.getDataSet(), data.getDataClass(), data.getDescreteAttributes(),
                       data.getContinuousAttributes(), data.getPredictionType(), kValue, 100)
    t.stop()

    print(f"Time: {t}")
    print(mediods)
    mediods.to_csv('kmedoids.csv', index=False)
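# --- Illustrative sketch (not from the project) ---
# A minimal, PAM-style picture of the update KMediods presumably performs:
# assign each point to its nearest medoid, then replace each medoid with the
# cluster member that minimizes total intra-cluster distance. The `distance`
# callable and the plain-list data layout are assumptions for illustration.
import random

def kmedoids_sketch(points, k, max_iter, distance):
    medoids = random.sample(points, k)
    for _ in range(max_iter):
        # Assignment step: attach every point to its nearest medoid
        clusters = {id(m): [] for m in medoids}
        for p in points:
            nearest = min(medoids, key=lambda m: distance(p, m))
            clusters[id(nearest)].append(p)
        # Update step: the member with the lowest total distance becomes the new medoid
        new_medoids = []
        for m in medoids:
            members = clusters[id(m)] or [m]
            best = min(members, key=lambda c: sum(distance(c, q) for q in members))
            new_medoids.append(best)
        if new_medoids == medoids:  # converged: no medoid changed
            break
        medoids = new_medoids
    return medoids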
maxItter = 100
kValue = 78

# These are only used for image segmentation and abalone
# frac = .25
# random_state = 69
# kValue = m.floor(frac * kValue)

dataSetUnNormalized = data.getDataSet()
# This is for Forest Fires
# dataSetUnNormalized[data.getDataClass()] = np.log(dataSetUnNormalized[data.getDataClass()] + 0.001)

sn = StandardNormalizer(dataSetUnNormalized[data.getContinuousAttributes()])
dataSetUnNormalized[data.getContinuousAttributes()] = sn.train_fit()
dataSetNormalized = dataSetUnNormalized

# dataSetNormalized = dataSetNormalized.sample(frac=frac, random_state=random_state)
# dataSetNormalized = dataSetNormalized.reset_index()
# dataSetNormalized = dataSetNormalized.drop(["idNumber"], axis=1)  # For Glass

medoids = KMediods(dataSetNormalized, data.getDataClass(), data.getDescreteAttributes(),
                   data.getContinuousAttributes(), data.getPredictionType(), kValue, maxItter)

medoids.to_csv('./CSVOutput/normalized' + dataSetName + 'MedoidsClustered.csv', index=False)
print(f"CSV for {dataSetName} has been created!")
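# --- Illustrative sketch (not from the project) ---
# The train/fit split StandardNormalizer appears to follow: learn the mean and
# standard deviation from the training columns in train_fit, then reuse those
# statistics on later data in fit. This class is an assumption for
# illustration, not the project's implementation.
import pandas as pd

class ZScoreNormalizerSketch:
    def __init__(self, train_cols: pd.DataFrame):
        self.train_cols = train_cols
        self.mean = None
        self.std = None

    def train_fit(self) -> pd.DataFrame:
        # Learn the statistics from the training data and normalize it in one step
        self.mean = self.train_cols.mean()
        self.std = self.train_cols.std()
        return (self.train_cols - self.mean) / self.std

    def fit(self, other_cols: pd.DataFrame) -> pd.DataFrame:
        # Apply the already-learned training statistics to unseen data
        return (other_cols - self.mean) / self.std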
train_set = train_set.reset_index(drop=True)

ohe = OneHotEncoder()
discrete_attr = dataRetriever.getDescreteAttributes()
if dataRetriever.getDataClass() in discrete_attr:
    discrete_attr.remove(dataRetriever.getDataClass())

train_set = ohe.train_fit(train_set, discrete_attr)
test_set = ohe.fit(test_set)

# Normalize Data
sn = StandardNormalizer(train_set[dataRetriever.getContinuousAttributes()])
train_set[dataRetriever.getContinuousAttributes()] = sn.train_fit()
test_set[dataRetriever.getContinuousAttributes()] = sn.fit(test_set[dataRetriever.getContinuousAttributes()])

# Train the network; adjust the architecture to the data set
nn = NeuralNetwork(train_set, 2, [6, 16], dataRetriever.getPredictionType(), dataRetriever.getDataClass())
fitness_matrix, average_fitness = nn._particle_swarm_optimize(70, max_iter=500)

predictions = nn._feed_forward(test_set.drop(dataRetriever.getDataClass(), axis=1), testing=True)
actual = test_set[dataRetriever.getDataClass()]

# `metrics` is assumed to have been accumulated over the preceding folds
metrics = np.asarray(metrics)

fig, ax = plt.subplots(3)
ax[0].plot(fitness_matrix[:, 0], label="1")
ax[0].plot(fitness_matrix[:, 1], label="34")
ax[0].plot(fitness_matrix[:, 2], label="68")
ax[0].plot(fitness_matrix[:, 3], label="Best")
ax[0].legend()

print(f"Average Accuracy: {metrics.mean()} ± {metrics.std()}")
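# --- Illustrative sketch (not from the project) ---
# The canonical particle swarm update _particle_swarm_optimize presumably
# applies to flattened weight vectors. The inertia/cognitive/social
# coefficients (w, c1, c2) are conventional defaults chosen for illustration.
import numpy as np

def pso_step(positions, velocities, personal_best, global_best,
             w=0.7, c1=1.5, c2=1.5, rng=np.random.default_rng(0)):
    r1 = rng.random(positions.shape)
    r2 = rng.random(positions.shape)
    # Velocity blends inertia, pull toward each particle's own best,
    # and pull toward the swarm-wide best
    velocities = (w * velocities
                  + c1 * r1 * (personal_best - positions)
                  + c2 * r2 * (global_best - positions))
    return positions + velocities, velocities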
import json

dataRetriever = DataRetriever("../Datasets/metadata.json")
dataRetriever.retrieveData("vote")

data = dataRetriever.getDataSet()
data = data.dropna()
data = data.sample(frac=1.0, random_state=93)
data = data.reset_index(drop=True)
# data = data.drop('idNumber', axis=1)

class_col = dataRetriever.getDataClass()
# data[class_col] = np.log(data[class_col] + 0.001)

contAttr = dataRetriever.getContinuousAttributes()
discAttr = dataRetriever.getDescreteAttributes()
predictionType = dataRetriever.getPredictionType()

output_json = {}
iter_num = 0
for test, train in KFolds(data, 5, stratisfied=True, class_col=class_col):
    # KFolds can't return a validation set directly, so K is set to the
    # desired k/2 and the validation set is taken as half of each test set
    sn = StandardNormalizer(train[contAttr])
    train[contAttr] = sn.train_fit()

    test1 = test.sample(frac=0.5, random_state=13)
    test2 = test.drop(test1.index)
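# --- Illustrative sketch (not from the project) ---
# The stratified split KFolds presumably performs: group rows by class and
# deal each group round-robin into k folds, so every fold keeps roughly the
# original class proportions. This generator is an assumption for
# illustration, not the project's KFolds.
import pandas as pd

def stratified_kfolds_sketch(df: pd.DataFrame, k: int, class_col: str):
    folds = [[] for _ in range(k)]
    for _, group in df.groupby(class_col):
        # Deal the indices of each class round-robin across the folds
        for i, idx in enumerate(group.index):
            folds[i % k].append(idx)
    for i in range(k):
        test = df.loc[folds[i]]
        train = df.drop(folds[i])
        yield test, train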
def network_tuner(*nodes_per_hidden_layer):
    """
    Calculate the optimal network architecture for a data set.

    Point this function at the data set you want to operate on and change the
    performance metric to match the prediction type, i.e. regression (MSE) or
    classification (accuracy).
    """
    MSEs = []
    bestNetwork = {}
    learning_rate = 0.0001
    maxItter = 500
    batch_size = .5

    dataRetriever = DataRetriever("../Datasets/metadata.json")
    dataRetriever.retrieveData("glass")
    dataset = dataRetriever.getDataSet().dropna()
    dataset = dataset.reset_index(drop=True)

    # This line is used to normalize the data for Forest Fires
    # dataset[dataRetriever.getDataClass()] = np.log(dataset[dataRetriever.getDataClass()] + 0.1)

    contAttr = dataRetriever.getContinuousAttributes()
    dataset[contAttr] = (dataset[contAttr] - dataset[contAttr].mean()) / dataset[contAttr].std()

    test_set = dataset.sample(frac=0.1, random_state=69)
    train_set = dataset.drop(test_set.index)
    test_set = test_set.reset_index(drop=True)
    train_set = train_set.reset_index(drop=True)

    ohe = OneHotEncoder()
    discrete_attr = dataRetriever.getDescreteAttributes()
    if dataRetriever.getDataClass() in discrete_attr:
        discrete_attr.remove(dataRetriever.getDataClass())

    # Encode with the class column removed from the discrete-attribute list
    datasetEncoded = ohe.train_fit(train_set, discrete_attr)
    testEncoded = ohe.fit(test_set)

    # ================ Baseline: a network with no hidden layers ================
    output = None
    nn = NeuralNetwork(datasetEncoded, 0, [], dataRetriever.getPredictionType(),
                       dataRetriever.getDataClass())
    for i in range(maxItter):
        # We don't call an initial feed-forward because backpropagate starts
        # with a feed-forward call; batch_size is the fraction of data points per batch
        output = nn._back_propagate(learning_rate=learning_rate, batch_size=batch_size)

    final = nn.test(testEncoded.drop(dataRetriever.getDataClass(), axis=1))
    output = nn._feed_forward(testEncoded.drop(dataRetriever.getDataClass(), axis=1), testing=True)
    actual = testEncoded[dataRetriever.getDataClass()]

    # ===================== Classification =================
    correct = 0
    for i, row in enumerate(final):
        if row == actual.iloc[i]:
            correct += 1
    acc = correct / len(test_set)

    # Regression alternative:
    # final = final.reshape(final.shape[0])
    # MSE = ((actual - final)**2).mean()
    # MSEs.append(MSE)

    bestNetwork['network'] = nn
    bestNetwork['acc'] = acc
    bestNetwork['arc'] = [0]

    # ============ Compare Acc to Most Common Class ============
    values = test_set[dataRetriever.getDataClass()].value_counts()

    # USED FOR CLASSIFICATION
    # print(f'Accuracy: {acc}')
    # print(f'Max Class Prior: {values.max()/values.sum()}')
    # print(f"Class Distribution:\n{values}")
    # print("Final: ", final)
    # print("Actual: ", list(actual))

    numOfLayer = len(nodes_per_hidden_layer)
    print("Number of Hidden Layers: ", numOfLayer)
    for layer in range(numOfLayer):
        print(f"Layer Number: {layer + 1}")
        combinations = list(itertools.product(*nodes_per_hidden_layer[:layer + 1]))

        for combo in combinations:
            output = None
            print("Node Combination: ", list(combo))
            # combo holds layer + 1 node counts, so the network gets layer + 1 hidden layers
            nn = NeuralNetwork(datasetEncoded, layer + 1, list(combo),
                               dataRetriever.getPredictionType(), dataRetriever.getDataClass())
            for i in range(maxItter):
                output = nn._back_propagate(learning_rate=learning_rate, batch_size=batch_size)

            final = nn.test(testEncoded.drop(dataRetriever.getDataClass(), axis=1))
            output = nn._feed_forward(testEncoded.drop(dataRetriever.getDataClass(), axis=1),
                                      testing=True)
            actual = testEncoded[dataRetriever.getDataClass()]

            # ===================== Classification =================
            correct = 0
            for i, row in enumerate(final):
                if row == actual.iloc[i]:
                    correct += 1
            acc = correct / len(test_set)

            # ============ Compare Acc to Most Common Class ============
            values = test_set[dataRetriever.getDataClass()].value_counts()
            # print(f'Accuracy: {acc}')
            # print(f'Max Class Prior: {values.max()/values.sum()}')
            # print(f"Class Distribution:\n{values}")
            # print("Final: ", final)
            # print("Actual: ", list(actual))

            if acc > bestNetwork['acc']:
                bestNetwork['network'] = nn
                bestNetwork['acc'] = acc
                bestNetwork['arc'] = combo

            # Regression alternative:
            # final = final.reshape(final.shape[0])
            # MSE = ((actual - final)**2).mean()
            # MSEs.append(MSE)
            # if MSE < bestNetwork['acc']:
            #     bestNetwork['network'] = nn
            #     bestNetwork['acc'] = MSE
            #     bestNetwork['arc'] = combo

    return bestNetwork  # , MSEs
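# Usage illustration for network_tuner: each positional argument lists the
# candidate node counts for one hidden layer, and itertools.product
# enumerates every architecture up to that depth. The candidate counts below
# are assumptions for illustration.
import itertools

layers = ([4, 8, 16], [4, 8])
for depth in range(len(layers)):
    print(list(itertools.product(*layers[:depth + 1])))
# depth 0 -> [(4,), (8,), (16,)]
# depth 1 -> [(4, 4), (4, 8), (8, 4), (8, 8), (16, 4), (16, 8)]
# e.g. bestNetwork = network_tuner([4, 8, 16], [4, 8])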
ohe = OneHotEncoder()
discrete_attr = dataRetriever.getDescreteAttributes()
if dataRetriever.getDataClass() in discrete_attr:
    discrete_attr.remove(dataRetriever.getDataClass())

train_set = ohe.train_fit(train_set, discrete_attr)
test_set = ohe.fit(test_set)

# Normalize Data
sn = StandardNormalizer(train_set[dataRetriever.getContinuousAttributes()])
train_set[dataRetriever.getContinuousAttributes()] = sn.train_fit()
test_set[dataRetriever.getContinuousAttributes()] = sn.fit(
    test_set[dataRetriever.getContinuousAttributes()])

# Train the network; adjust the architecture to the data set
nn = NeuralNetwork(train_set, 2, [2, 2], dataRetriever.getPredictionType(),
                   dataRetriever.getDataClass())
nn.train(maxIter, learning_rate, batch_size)

# predictions = nn.test(test_set.drop(dataRetriever.getDataClass(), axis=1))
# ca = ClassifierAnalyzer(test_set[dataRetriever.getDataClass()], predictions)
# correct = 0
# actual = test_set[dataRetriever.getDataClass()]
# for i, row in enumerate(predictions):
#     if row == actual.iloc[i]:
#         correct += 1
# metrics.append(correct/len(actual))

break  # only evaluate the first fold of the enclosing cross-validation loop

metrics = np.asarray(metrics)
prior = 1 / dataset[dataRetriever.getDataClass()].nunique()
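# --- Illustrative sketch (not from the project) ---
# The train/fit contract OneHotEncoder appears to share with
# StandardNormalizer: record the category levels seen per column during
# train_fit, then expand the same columns the same way in fit so train and
# test frames stay column-aligned. This class is an assumption for
# illustration.
import pandas as pd

class OneHotEncoderSketch:
    def __init__(self):
        self.categories = {}

    def train_fit(self, df: pd.DataFrame, columns) -> pd.DataFrame:
        # Remember the category levels observed in the training data
        for col in columns:
            self.categories[col] = sorted(df[col].unique())
        return self._expand(df)

    def fit(self, df: pd.DataFrame) -> pd.DataFrame:
        # Reuse the training categories so unseen levels don't add new columns
        return self._expand(df)

    def _expand(self, df: pd.DataFrame) -> pd.DataFrame:
        out = df.copy()
        for col, levels in self.categories.items():
            for level in levels:
                out[f"{col}_{level}"] = (out[col] == level).astype(int)
            out = out.drop(col, axis=1)
        return out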
train_set = train_set.reset_index(drop=True)

ohe = OneHotEncoder()
discrete_attr = dataRetriever.getDescreteAttributes()
if dataRetriever.getDataClass() in discrete_attr:
    discrete_attr.remove(dataRetriever.getDataClass())

datasetEncoded = ohe.train_fit(train_set, discrete_attr)
testEncoded = ohe.fit(test_set)

# ======================= Create Best Individual ================
print(title_text)
best = NeuralNetwork(datasetEncoded, 1, [25], dataRetriever.getPredictionType(),
                     dataRetriever.getDataClass())
fitnesses = best.genetic_algorithm(population_size, maxItter, batch_size,
                                   mutation_rate, 10, cost_func[current_data_set])

# ======================= Test Best Individual ================
final = best.test(testEncoded.drop(dataRetriever.getDataClass(), axis=1))
output = best._feed_forward(testEncoded.drop(dataRetriever.getDataClass(), axis=1), testing=True)
actual = testEncoded[dataRetriever.getDataClass()]

if dataRetriever.getPredictionType() == "classification":
    # ===================== Classification =================
    print("Best")
    correct = 0
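# --- Illustrative sketch (not from the project) ---
# One generation of the kind of genetic algorithm genetic_algorithm above
# presumably runs: tournament selection, uniform crossover, and Gaussian
# mutation over flattened weight vectors. Every parameter value and the
# `fitness` callable are assumptions for illustration.
import numpy as np

def ga_generation(population, fitness, mutation_rate=0.05,
                  rng=np.random.default_rng(0)):
    scores = np.array([fitness(ind) for ind in population])
    next_gen = []
    for _ in range(len(population)):
        # Tournament selection: the fitter of two random individuals becomes a parent
        a, b = rng.integers(len(population), size=2)
        c, d = rng.integers(len(population), size=2)
        p1 = population[a] if scores[a] > scores[b] else population[b]
        p2 = population[c] if scores[c] > scores[d] else population[d]
        # Uniform crossover: each weight is drawn from one parent at random
        mask = rng.random(p1.shape) < 0.5
        child = np.where(mask, p1, p2)
        # Gaussian mutation on a small fraction of weights
        mutate = rng.random(child.shape) < mutation_rate
        child = child + mutate * rng.normal(0.0, 0.1, child.shape)
        next_gen.append(child)
    return next_gen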