Example #1
def example():
    size = 10
    maxSize = 20
    inputGen = InputGenerator(size, maxSize * 2, maxSize * 2)
    inputs = inputGen.generate()

    bf = BruteForce(inputs)
    bfpath, bftotalDistance = bf.optimise()
    bf.plotPath(name="Brute_Force")

    nn = NearestNeighbor(inputs)
    nnpath, nntotalDistance = nn.optimise()
    nn.plotPath(name="NN")

    ni = NearestInsertion(inputs)
    nipath, nitotalDistance = ni.optimise()
    ni.plotPath(name="NI")

    to = Two_Opt(inputs)
    topath, tototalDistance = to.optimise()
    to.plotPath(name="2-opt")
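A small follow-up could print the four tour lengths side by side for a quick comparison; a hypothetical addition to the end of example(), reusing the variables above:

    # Hypothetical follow-up: compare the total tour distances of the four solvers
    for name, dist in [("Brute force", bftotalDistance),
                       ("Nearest neighbor", nntotalDistance),
                       ("Nearest insertion", nitotalDistance),
                       ("2-opt", tototalDistance)]:
        print("%-18s %.2f" % (name, dist))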
Example #2
    def findSigma(self):
        """
		Support method to find the sigma value by summing the distances from every point to its nearest neighbor,
		and dividing by the total number of points. This results in the same sigma for every prototype node.

		:return: List. distances calculated using 1NN.
		"""
        distance = []
        for index, row in self.prototypes.iterrows():
            modified_prototype_set = self.prototypes.drop(
                [index])  # Remove current point from data set
            distance.append(
                NearestNeighbor.oneNearestNeighbor(
                    row,
                    modified_prototype_set,
                    return_distance=True,
                    class_header=self.class_header))

        return distance
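For reference, the same per-prototype 1-NN distance computation can be sketched with plain numpy against a numeric coordinate matrix (a standalone illustration, not the repository's pandas-based NearestNeighbor helper):

import numpy as np

def one_nn_distances(points):
    # points: (n, d) array; distance from each point to its nearest other point
    diffs = points[:, None, :] - points[None, :, :]
    dists = np.sqrt((diffs ** 2).sum(axis=-1))
    np.fill_diagonal(dists, np.inf)  # a point is not its own neighbor
    return dists.min(axis=1)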
Example #3
from data_utils import load_CIFAR10
import matplotlib.pyplot as plt
import numpy as np  # needed for np.mean below
from NearestNeighbor import NearestNeighbor
from time import perf_counter  # time.clock was removed in Python 3.8

# Load the raw CIFAR-10 data.
cifar10_dir = '/home/wjb/cs231n/assignment/assignment1/cs231n/datasets/cifar-10-batches-py'

# use NN
Xtr, Ytr, Xte, Yte = load_CIFAR10(cifar10_dir)  # a magic function we provide
# flatten out all images to be one-dimensional
Xtr_rows = Xtr.reshape(Xtr.shape[0], 32 * 32 * 3) # Xtr_rows becomes 50000 x 3072
Xte_rows = Xte.reshape(Xte.shape[0], 32 * 32 * 3) # Xte_rows becomes 10000 x 3072


nn = NearestNeighbor() # create a Nearest Neighbor classifier class
nn.train(Xtr_rows, Ytr) # train the classifier on the training images and labels

start = perf_counter()
Yte_predict = nn.predict(Xte_rows) # predict labels on the test images
finish = perf_counter()
print(finish - start)  # seconds spent predicting

# and now print the classification accuracy, which is the fraction
# of test examples that are correctly predicted (i.e. label matches)
print('accuracy: %f' % np.mean(Yte_predict == Yte))


# use KNN and tune the hyperparameter k to get the best result
# assume we have Xtr_rows, Ytr, Xte_rows, Yte as before
# recall Xtr_rows is 50,000 x 3072 matrix
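The NearestNeighbor class itself is not shown in this example; a minimal sketch consistent with the train/predict interface above, assuming an L1 pixel distance as in the CS231n notes:

import numpy as np

class NearestNeighbor:
    def train(self, X, y):
        # Nearest neighbor has no real training step: just memorize the data.
        self.Xtr = X
        self.ytr = y

    def predict(self, X):
        # Label each test row with the label of its closest training row.
        Ypred = np.zeros(X.shape[0], dtype=self.ytr.dtype)
        for i in range(X.shape[0]):
            distances = np.abs(self.Xtr - X[i, :]).sum(axis=1)  # L1 distance
            Ypred[i] = self.ytr[np.argmin(distances)]
        return Ypred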
Example #4
                    if predicted == self.Y_test[i]:
                        success += 1
            logging.info('Classifier {}: Accuracy: {}'.format(
                classifier,
                float(success) / self.Y_test.shape[0]))  # divide by the number of test samples
            del classifier


if __name__ == "__main__":
    logging.info('Test started ...')
    # print(metrics.classification_report(expected, predicted))
    # confusion_matrix = metrics.confusion_matrix(expected, predicted)
    # print(confusion_matrix)
    # data_generator = generator

    # Tester(data_generator, [NearestNeighbor(k=1), NearestNeighbor(k=2), Parzen(r=0.1), Bayesian()]).run()
    train_path = '/home/mohammad/Downloads/POCS-persian-number-dataset/train'
    test_path = '/home/mohammad/Downloads/POCS-persian-number-dataset/test'
    classifiers = [
        NearestNeighbor(k=1),
        NearestNeighbor(k=3),
        Parzen(r=0.1),
        Bayesian()
    ]
    # classifiers = [Parzen(r=100000)]
    intensity = Intensity()
    for size in range(10, 200, 10):
        Tester(*load_data(train_path, intensity.extract, size),
               *load_data(test_path, intensity.extract, size),
               classifiers=classifiers).run()
Example #5
import numpy as np
from data_utils import load_CIFAR10  # same loader as in Example #3
from NearestNeighbor import NearestNeighbor

Xtr, Ytr, Xte, Yte = load_CIFAR10('data/cifar10/') # a magic function we provide
# flatten out all images to be one-dimensional
Xtr_rows = Xtr.reshape(Xtr.shape[0], 32 * 32 * 3) # Xtr_rows becomes 50000 x 3072
Xte_rows = Xte.reshape(Xte.shape[0], 32 * 32 * 3) # Xte_rows becomes 10000 x 3072

# assume we have Xtr_rows, Ytr, Xte_rows, Yte as before
# recall Xtr_rows is 50,000 x 3072 matrix
Xval_rows = Xtr_rows[:1000, :] # take first 1000 for validation
Yval = Ytr[:1000]
Xtr_rows = Xtr_rows[1000:, :] # keep last 49,000 for train
Ytr = Ytr[1000:]

# find hyperparameters that work best on the validation set
validation_accuracies = []
for k in [1, 3, 5, 10, 20, 50, 100]:

  # use a particular value of k and evaluate on the validation data
  nn = NearestNeighbor()
  nn.train(Xtr_rows, Ytr)
  # here we assume a modified NearestNeighbor class that can take a k as input
  Yval_predict = nn.predict(Xval_rows, k=k)
  acc = np.mean(Yval_predict == Yval)
  print('accuracy: %f' % (acc,))

  # keep track of what works on the validation set
  validation_accuracies.append((k, acc))
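Once the loop finishes, the best k can be read off the recorded (k, accuracy) pairs; a hypothetical follow-up:

best_k, best_acc = max(validation_accuracies, key=lambda pair: pair[1])
print('best k: %d (validation accuracy: %f)' % (best_k, best_acc))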

Example #6
def nn(test_points, train_points):
    dims = len(test_points[0])
    train_points = [x.tobytes() for x in train_points]  # numpy's tostring() was renamed tobytes()
    test_points = [x.tobytes() for x in test_points]
    nn = NearestNeighbor(dims, train_points)
    return [nn.nn(x) for x in test_points]
Example #7
from TSPsetup import TSPGrid
import plotMap as pm
from NearestNeighbor import NearestNeighbor
from GeneticAlgorithm import GeneticAlgorithm
import distanceHelpers as dh

problem_space = TSPGrid(10, 9)
setOfPoints = problem_space.citiesToVisit

# Genetic Algorithm
maxEpochs = 1000
populationSize = 5000
mutationPercent = 0.01
ga = GeneticAlgorithm(setOfPoints, maxEpochs, populationSize, mutationPercent)
pm.PlotRoute(setOfPoints, ga.path, "Genetic Algorithm",
             dh.getDistanceOfRoute(ga.path, setOfPoints))

# Nearest Neighbor Algorithm
nn = NearestNeighbor(setOfPoints)
pm.PlotRoute(setOfPoints, nn.path, "Nearest Neighbor",
             dh.getDistanceOfRoute(nn.path, setOfPoints))
Example #8
    def initialize(self):
        self.__samples, self.__labels = self.__featurespace.getSamples()
        if len(self.__labels) == 0:
            self.__clf = None
            return

        if self.__classifier == self.LogReg:
            maxIter = self.__parameters.getLogRegMaxNumIterations()
            learningRate = self.__parameters.getLogRegLearningRate()
            self.__clf = LinearLogisticRegression(learningRate=learningRate,
                                                  maxIterations=maxIter)
            self.__clf.fit(self.__samples, self.__labels)

        elif self.__classifier == self.Norm:
            norm = self.__parameters.getNormNorm()
            self.__clf = NormClassifier(norm)
            self.__clf.fit(self.__samples, self.__labels)

        elif self.__classifier == self.NaiveBayes:
            # self.__clf = naive_bayes.GaussianNB()
            # self.__clf.fit(self.__samples, self.__labels)
            self.__clf = GaussianClassifier(samplesIndependent=True)
            self.__clf.fit(self.__samples, self.__labels)

        elif self.__classifier == self.Gauss:
            self.__clf = GaussianClassifier(samplesIndependent=False)
            self.__clf.fit(self.__samples, self.__labels)

        elif self.__classifier == self.GMM:
            numComponents = self.__parameters.getGmmNumComponentsPerClass()
            maxIterations = self.__parameters.getGmmMaxNumIterations()
            self.__clf = GMMClassifier(numComponents, maxIterations)
            self.__clf.fit(self.__samples, self.__labels)

        elif self.__classifier == self.kNN:
            algo = self.__parameters.getKNNAlgorithm()
            k = self.__parameters.getKNNNumberOfNeighbors()
            w = self.__parameters.getKNNWeightFunction()
            if algo == 'scikit-learn':
                self.__clf = neighbors.KNeighborsClassifier(k, weights=w)
            else:  # 'own'
                if k == 1:
                    self.__clf = NearestNeighbor()
                else:
                    self.__clf = kNearestNeighbor(k)
            self.__clf.fit(self.__samples, self.__labels)

        elif self.__classifier == self.LinReg:
            lossFunc = self.__parameters.getLinRegLossFunction()
            a = self.__parameters.getLinRegLossFunctionParam()
            self.__clf = LinearRegression(lossFunc, a, True)
            self.__clf.fit(self.__samples, self.__labels)

        elif self.__classifier == self.Perceptron:
            maxIter = self.__parameters.getPerceptronMaxNumIterations()
            learningRate = self.__parameters.getPerceptronLearningRate()
            batchMode = self.__parameters.getPerceptronBatchMode()
            self.__clf = Perceptron(batchMode=batchMode,
                                    learningRate=learningRate,
                                    maxIterations=maxIter)
            self.__clf.fit(self.__samples, self.__labels)

        elif self.__classifier == self.MLP:
            layers = self.__parameters.getMLPHiddenLayers()
            act = self.__parameters.getMLPActivationFunction()
            algo = self.__parameters.getMLPOptimizationAlgorithm()
            alpha = self.__parameters.getMLPAlpha()
            rate = self.__parameters.getMLPLearningRate()
            self.__clf = sklearn.neural_network.MLPClassifier(
                hidden_layer_sizes=layers,
                activation=act,
                solver=algo,  # scikit-learn's released name; 'algorithm' in pre-0.18 dev builds
                alpha=alpha,
                learning_rate=rate)
            self.__clf.fit(self.__samples, self.__labels)

        elif self.__classifier == self.SVM:
            algorithm = self.__parameters.getSVMAlgorithm()
            kernel = self.__parameters.getSVMKernel()
            C = self.__parameters.getSVMC()
            gamma = self.__parameters.getSVMGamma()
            coef0 = self.__parameters.getSVMCoef0()
            degree = self.__parameters.getSVMDegree()
            if algorithm == 'LinearSVC':
                self.__clf = svm.LinearSVC(C=C)
            elif algorithm == 'SVC':
                self.__clf = svm.SVC(kernel=kernel,
                                     C=C,
                                     gamma=gamma,
                                     coef0=coef0,
                                     degree=degree)
            elif algorithm == 'HardMarginSVM':
                self.__clf = HardMarginSVM()
            elif algorithm == 'SoftMarginSVM':
                self.__clf = SoftMarginSVM(C=C)
            else:
                self.__clf = KernelSVM(C=C, gamma=gamma)
            self.__clf.fit(self.__samples, self.__labels)

        elif self.__classifier == self.DecisionTree:
            algorithm = self.__parameters.getDecisionTreeAlgorithm()
            criterion = self.__parameters.getDecisionTreeCriterion()
            splitter = self.__parameters.getDecisionTreeSplitter()
            maxDepth = self.__parameters.getDecisionTreeMaxDepth()
            minSamplesSplit = self.__parameters.getDecisionTreeMinSamplesSplit()
            minSamplesLeaf = self.__parameters.getDecisionTreeMinSamplesLeaf()
            minWeightedFractionLeaf = (
                self.__parameters.getDecisionTreeMinWeightedFractionLeaf())
            maxLeafNodes = self.__parameters.getDecisionTreeMaxLeafNodes()
            trials = self.__parameters.getDecisionTreeNumTrialsPerSplit()
            if algorithm == 'sklearn':
                self.__clf = tree.DecisionTreeClassifier(
                    criterion=criterion,
                    splitter=splitter,
                    max_features=2,
                    max_depth=maxDepth,
                    min_samples_split=minSamplesSplit,
                    min_samples_leaf=minSamplesLeaf,
                    min_weight_fraction_leaf=minWeightedFractionLeaf,
                    max_leaf_nodes=maxLeafNodes)
            else:
                self.__clf = DecisionTree(maxDepth, minSamplesLeaf, trials)
            self.__clf.fit(self.__samples, self.__labels)

        elif self.__classifier == self.RandomForest:
            algorithm = self.__parameters.getRandomForestAlgorithm()
            numTrees = self.__parameters.getRandomForestNumTrees()
            criterion = self.__parameters.getRandomForestCriterion()
            maxDepth = self.__parameters.getRandomForestMaxDepth()
            minSamplesSplit = self.__parameters.getRandomForestMinSamplesSplit()
            minSamplesLeaf = self.__parameters.getRandomForestMinSamplesLeaf()
            minWeightedFractionLeaf = (
                self.__parameters.getRandomForestMinWeightedFractionLeaf())
            maxLeafNodes = self.__parameters.getRandomForestMaxLeafNodes()
            trials = self.__parameters.getRandomForestNumTrialsPerSplit()
            # print('Num trees: {0}'.format(numTrees))
            # print('Max depth: {0}'.format(maxDepth))
            # print('Min samples split: {0}'.format(minSamplesSplit))
            # print('Min samples leaf: {0}'.format(minSamplesLeaf))
            # print('Min weighted fraction leaf: {0}'.format(minWeightedFractionLeaf))
            # print('Max leaf nodes: {0}'.format(maxLeafNodes))
            # print('Num trials per node: {0}'.format(trials))
            if algorithm == 'sklearn':
                self.__clf = ensemble.RandomForestClassifier(
                    n_estimators=numTrees,
                    criterion=criterion,
                    max_features=2,
                    max_depth=maxDepth,
                    min_samples_split=minSamplesSplit,
                    min_samples_leaf=minSamplesLeaf,
                    min_weight_fraction_leaf=minWeightedFractionLeaf,
                    max_leaf_nodes=maxLeafNodes)
            else:
                self.__clf = RandomForest(numTrees, maxDepth, minSamplesLeaf,
                                          trials)
            self.__clf.fit(self.__samples, self.__labels)

        else:
            print("unsupported classifier")
Example #9
    def createNeurons(self, verbose=False):
        """
		Method which dictates calculating prototypes and sigma based on set_reduction_method.
		Also creates neurons and runs other methods for calculations regarding neurons.
		"""
        # Depending on the set_reduction_method, we use different algorithms to calculate prototypes
        if self.set_reduction_method == "means":
            print("Calculating centers for Gaussian function by means...")
            self.prototypes = Cluster.byMeans(
                self.training_set,
                number_of_clusters=self.cluster_count,
                class_header=self.class_header,
                verbosity=0)
        elif self.set_reduction_method == "medoids":
            print("Calculating centers for Gaussian function by medoids...")
            self.prototypes = Cluster.byMedoids(self.training_set,
                                                self.cluster_count,
                                                self.class_header,
                                                verbosity=0)

        elif self.set_reduction_method == "condensed":
            print(
                "Calculating centers for Gaussian function using condensed nearest neighbor..."
            )
            self.prototypes = NearestNeighbor.condensedNearestNeighbor(
                self.training_set, self.class_header)

        else:
            print(
                "'%s' is an invalid set reduction method, please check it and try again."
                % self.set_reduction_method)
            sys.exit()

        if not self.regression:
            if verbose:
                print("Generating output layer of size %d with sigmoid activation functions..."
                      % self.output_count)
            self.output_layer = FFNetwork(len(self.prototypes),
                                          [self.output_count, 'sigmoid'],
                                          self.training_set,
                                          class_header=self.class_header,
                                          learning_rate=self.learning_rate,
                                          use_momentum=self.use_momentum,
                                          regression=self.regression)
        else:
            if verbose:
                print("Generating output layer with a single linear activation function for regression...")
            self.output_layer = FFNetwork(len(self.prototypes),
                                          [self.output_count, 'linear'],
                                          self.training_set,
                                          class_header=self.class_header,
                                          learning_rate=self.learning_rate,
                                          use_momentum=self.use_momentum,
                                          regression=self.regression)

        if verbose:
            print("Generating widths for basis functions using nearest neighbor proximity...")
        sigma_list = self.findSigma()

        # For every point in the prototype list, create a neuron and store that point and sigma in it
        if verbose:
            print("Generating layer of Gaussian basis functions of size %d..."
                  % len(self.prototypes))
        for i in range(len(self.prototypes)):
            self.function_layer.append(
                RBFNeuron(self.prototypes.iloc[i], sigma_list[i],
                          self.class_header))

        print("\nTRAINING NEURONS ON TRAINING DATA OF %d ENTRIES" %
              len(self.training_set)) if verbose else None
        self.training_set.apply(lambda row: self.train(row), axis=1)
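RBFNeuron itself is not shown; each neuron presumably evaluates a Gaussian basis function centered on its prototype with the width returned by findSigma. A minimal sketch of that activation, assuming the standard Gaussian RBF form (a hypothetical helper, not the repository's class):

import numpy as np

def gaussian_basis(x, center, sigma):
    # Standard Gaussian RBF: exp(-||x - c||^2 / (2 * sigma^2))
    diff = np.asarray(x, dtype=float) - np.asarray(center, dtype=float)
    return np.exp(-np.dot(diff, diff) / (2.0 * sigma ** 2))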
Example #10
import numpy as np
from load_full_CIFAR10 import load_CIFAR10
from NearestNeighbor import NearestNeighbor

Xtr, Ytr, Xte, Yte = load_CIFAR10('data/cifar10/')
Xtr_rows = Xtr.reshape(Xtr.shape[0], 32 * 32 * 3)
Xte_rows = Xte.reshape(Xte.shape[0], 32 * 32 * 3)

# recall Xtr_rows is 50,000 x 3072 matrix
Xval_rows = Xtr_rows[:1000, :]  # take first 1000 for validation
Yval = Ytr[:1000]
Xtr_rows = Xtr_rows[1000:, :]  # keep last 49,000 for train
Ytr = Ytr[1000:]

# Iterate over hyperparameter selections for k-values
print "beginning hyperparameter search... \n"
validation_accuraccies = []
for k in [5, 10, 20, 50, 100]:

    nn = NearestNeighbor()
    nn.train(Xtr_rows, Ytr)
    Y_predictions = nn.predict(Xval_rows, k)  # evaluate on the validation split, not the test set

    # NOTE: adjust this once the output signature of the nn.predict method
    # changes; for now the predictions are compared directly against the
    # validation labels.
    acc = np.mean(Y_predictions == Yval)
    print('accuracy: %f' % (acc,))

    validation_accuracies.append((k, acc))
Example #11
def analyze(bfFlag=False):
    runTimeStat = {"nn": [], "bf": [], "ni": [], "to": []}

    solutionStat = {"nn": [], "bf": [], "ni": [], "to": []}

    maxSize = 500
    if bfFlag:
        maxSize = 12
    '''
    Running experiments ...
    '''
    for size in range(3, maxSize):
        inputGen = InputGenerator(size, maxSize * 2, maxSize * 2)
        inputs = inputGen.generate()

        nn = NearestNeighbor(inputs)
        startTime = time.time()
        nnpath, nntotalDistance = nn.optimise()
        runTime = time.time() - startTime
        runTimeStat["nn"].append(runTime)
        solutionStat["nn"].append(nntotalDistance)

        if bfFlag:
            bf = BruteForce(inputs)
            startTime = time.time()
            bfpath, bftotalDistance = bf.optimise()
            runTime = time.time() - startTime
            runTimeStat["bf"].append(runTime)
            solutionStat["bf"].append(bftotalDistance)

        ni = NearestInsertion(inputs)
        startTime = time.time()
        nipath, nitotalDistance = ni.optimise()
        runTime = time.time() - startTime
        runTimeStat["ni"].append(runTime)
        solutionStat["ni"].append(nitotalDistance)

        to = Two_Opt(inputs)
        startTime = time.time()
        topath, tototalDistance = to.optimise()
        runTime = time.time() - startTime
        runTimeStat["to"].append(runTime)
        solutionStat["to"].append(tototalDistance)
    '''
    Plotting the results
    '''
    x = np.arange(3, maxSize)
    if not bfFlag:
        fig, ax = plt.subplots(nrows=2,
                               ncols=3,
                               figsize=(12, 12),
                               squeeze=False)

        ax[0][0].plot(x, runTimeStat["nn"], '-b', label="NN runtime")
        ax[0][0].plot(x,
                      fitParobola(x, runTimeStat["nn"])(x),
                      '--m',
                      label="Fitting parabola")
        ax[0][0].set_title(
            title("Runtime of Nearest Neighbor vs fitting parabola"))
        ax[0][0].set_xlabel("Number of nodes")
        ax[0][0].set_ylabel("seconds")
        ax[0][0].legend()

        ax[0][1].plot(x, runTimeStat["ni"], '-r', label="NN runtime")
        ax[0][1].plot(x,
                      fitParobola(x, runTimeStat["ni"])(x),
                      '--m',
                      label="Fitting parabola")
        ax[0][1].set_title(
            title("Runtime of Nearest Insertion vs fitting parabola"))
        ax[0][1].set_xlabel("Number of nodes")
        ax[0][1].set_ylabel("seconds")
        ax[0][1].legend()

        ax[0][2].plot(x, runTimeStat["to"], '-g', label="2-Opt runtime")
        ax[0][2].plot(x,
                      fitParobola(x, runTimeStat["to"])(x),
                      '--m',
                      label="Fitting parabola")
        ax[0][2].set_title(title("Runtime of 2-Opt vs fitting parabola"))
        ax[0][2].set_xlabel("Number of nodes")
        ax[0][2].set_ylabel("seconds")
        ax[0][2].legend()

        ax[1][1].plot(x, runTimeStat["nn"], '-b', label="NN runtime")
        ax[1][1].plot(x, runTimeStat["ni"], '-r', label="NI runtime")
        ax[1][1].plot(x, runTimeStat["to"], '-g', label="2-opt runtime")

        ax[1][1].set_title(title("Runtime Comparison"))
        ax[1][1].set_xlabel("Number of nodes")
        ax[1][1].set_ylabel("seconds")
        ax[1][1].legend()

        fig.delaxes(ax.flatten()[5])
        fig.delaxes(ax.flatten()[3])
        plt.tight_layout()
        plt.savefig("runtime_woBF.png", dpi=800)
        plt.close()
    else:
        fig, ax = plt.subplots()
        ax.plot(x, runTimeStat["nn"], '-b', label="NN runtime")
        ax.plot(x, runTimeStat["ni"], '-r', label="NI runtime")
        ax.plot(x, runTimeStat["to"], '-g', label="2-opt runtime")
        ax.plot(x, runTimeStat["bf"], '--k', label="Brute Force runtime")

        ax.set_title(title("Runtime Comparison"))
        ax.set_xlabel("Number of nodes")
        ax.set_ylabel("seconds")
        ax.legend()
        plt.tight_layout()
        plt.savefig("runtime_wBF.png", dpi=800)
        plt.close()
    fig, ax = plt.subplots()
    ax.plot(x, solutionStat["nn"], '-b', label="NN solution")
    ax.plot(x, solutionStat["ni"], '-r', label="NI solution")
    ax.plot(x, solutionStat["to"], '-g', label="2-opt solution")
    if bfFlag:
        ax.plot(x, solutionStat["bf"], '--k', label="Brute Force solution")

    ax.set_title("Total Distance Comparison")
    ax.set_xlabel("Number of nodes")
    ax.set_ylabel("Distance")
    ax.legend()
    plt.tight_layout()
    if bfFlag:
        plt.savefig("solution_wBF.png", dpi=800)
    else:
        plt.savefig("solution_woBF.png", dpi=800)
    plt.close()
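fitParobola (spelling as in the source) is not defined in this snippet; a plausible implementation fits a degree-2 polynomial to the recorded runtimes and returns it as a callable, e.g. with numpy (a sketch, not necessarily the author's version):

import numpy as np

def fitParobola(x, y):
    # Fit a degree-2 polynomial to (x, y); np.poly1d makes the fit callable
    return np.poly1d(np.polyfit(x, y, 2))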
Example #12
    def optimizedOrder(self, order):
        self.checkInvalid(order)

        # Remove duplicates from order
        order = list(dict.fromkeys(order)) 

        # Corner Case
        if len(order) == 0:
            return []

        order       = [0] + order + [-1]
        numNodes    = len(order)
        adj         = [[0 for i in range(numNodes)] for j in range(numNodes)]
        adjList     = {}
        d           = Distance(self.xMax, self.yMax, self.grid)

        for i, j in enumerate(order):
            adjList[i] = j

        # Build the pairwise distance matrix between stops
        for i, x in enumerate(order):
            for j, y in enumerate(order):
                if x == y:
                    adj[i][j] = INF
                else:
                    adj[i][j] = d.findRoute(self.orderBook[x], self.orderBook[y])[0]

        # Calculate the original cost and effort
        origCost, origEffort = 0, 0
        weight = 0
        for i in range(len(adj)-1):
            origCost += adj[i][i+1]
            weight += self.weightInfo[adjList[i]][3]
            origEffort += weight * adj[i][i+1]
        

        # Calculate lower bound
        #lowerBound = LowerBound(adj).lbound()
        #print("lower bound: {}".format(lowerBound))

        #---------------------------------------#
        #           Branch and Bound            #
        #---------------------------------------#
        if self.algorithm == 'bnb':
            b = BNB(adj, self.weightInfo, adjList, adj)
            cost, path, effort = b.optimalPath()


        #---------------------------------------#
        #           Nearest Neighbor            #
        #---------------------------------------#
        elif self.algorithm == 'nn': #elif len(order)-2 > THRESHOLD:
            n = NearestNeighbor(np.array(adj), self.weightInfo, adjList)
            cost, path, effort = n.findMinPath()

        
        #---------------------------------------#
        #               Held-Karp               #
        #---------------------------------------#
        else:
            cost, path = self.cost(adj)

            weight = 0
            effort = 0
            for i in range(len(path)-1):
                weight += self.weightInfo[adjList[path[i]]][3]
                effort += weight * adj[path[i]][path[i+1]]

            weight += self.weightInfo[adjList[path[-1]]][3]
            effort += weight * adj[path[-1]][len(adj)-1]


        if self.invalidItem:
            print("Note: The effort output is missing some information")

        
        path = [adjList[i] for i in path]
        return [origCost, origEffort, cost, path, effort]
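The cost/effort bookkeeping above accumulates the weight carried so far and multiplies it by each leg's distance. A tiny standalone illustration with made-up numbers:

# Hypothetical 3-stop illustration of the cost/effort accumulation
adj_demo = [[0, 2, 5],
            [2, 0, 3],
            [5, 3, 0]]
weights_demo = [4, 1, 2]  # weight picked up at each stop
cost = effort = carried = 0
for i in range(len(adj_demo) - 1):
    cost += adj_demo[i][i + 1]      # leg distance
    carried += weights_demo[i]      # weight accumulated so far
    effort += carried * adj_demo[i][i + 1]
print(cost, effort)  # 5 23  (cost = 2 + 3; effort = 4*2 + 5*3)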