Example #1
File: UI.py Project: SabaAlex/AI
    # Helper on the UI class (no self, so presumably a @staticmethod in the original).
    # Assumes `from copy import deepcopy`, `import matplotlib.pyplot`, and the
    # project-local ProblemData and ANN classes are imported in UI.py.
    def __process(nrOfIterations, learningRate, hiddenNeuronsNumber, aConst):
        dataset = ProblemData("resources/data.data")
        trainX, trainY, testX, testY = dataset.splitData()

        neuralNetwork = ANN(deepcopy(trainX), deepcopy(trainY), learningRate,
                            hiddenNeuronsNumber, aConst)

        iterations = []
        for i in range(nrOfIterations):
            neuralNetwork.feedForward()
            neuralNetwork.backProp()
            iterations.append(i)

        for i in range(len(testX)):
            predictedOut = neuralNetwork.getOutput(testX[i])
            print("Predicted output: {0}\nReal value: {1}".format(
                predictedOut, testY[i]))

        matplotlib.pyplot.plot(iterations,
                               neuralNetwork.getLoss(),
                               label='loss value vs iteration')
        matplotlib.pyplot.xlabel('Iterations')
        matplotlib.pyplot.ylabel('Loss function')
        matplotlib.pyplot.legend()
        matplotlib.pyplot.show()
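From the calls above, the ProblemData interface this example relies on can be inferred: a constructor taking a file path, and a splitData() method returning train/test features and labels. A minimal sketch under those assumptions (the comma-separated file layout, the 80/20 ratio, and the shuffling are guesses, not taken from the source):

import random

class ProblemData:
    """Hypothetical stand-in matching the interface used in the example."""

    def __init__(self, path):
        # assume comma-separated rows: feature columns followed by one label column
        with open(path) as f:
            rows = [line.strip().split(',') for line in f if line.strip()]
        self.X = [[float(v) for v in row[:-1]] for row in rows]
        self.Y = [float(row[-1]) for row in rows]

    def splitData(self, ratio=0.8, seed=0):
        # shuffled split into trainX, trainY, testX, testY
        idx = list(range(len(self.X)))
        random.Random(seed).shuffle(idx)
        cut = int(len(idx) * ratio)
        train, test = idx[:cut], idx[cut:]
        return ([self.X[i] for i in train], [self.Y[i] for i in train],
                [self.X[i] for i in test], [self.Y[i] for i in test])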
Example #2
def main():
    """ Solve riddle. """
    house_number = 5
    riddle = ProblemData(house_number, build_assertions())

    for rounds in range(15):
        # deduce() returns the updated riddle plus a flag: was progress made this round?
        riddle, progress_made = deduce(riddle)
        if not progress_made:
            print("\t\tNo progress made in round #" + str(rounds + 1) + ". Breaking loop.")
            print("\t\tAssertions used: " + str(riddle.assertions_used.count(True)) + "/" 
                  + str(len(riddle.assertions_used))) 
            break

    riddle.house_state()
    riddle.element_state()
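deduce() is evidently expected to return the (possibly updated) riddle together with a flag saying whether any assertion fired during the round, and riddle.assertions_used to be a list of booleans. A minimal sketch of that contract, where apply_assertion is a hypothetical helper that returns True when it narrows the riddle's state:

def deduce(riddle):
    # one deduction round: try every assertion that has not been used yet
    progress_made = False
    for i, used in enumerate(riddle.assertions_used):
        if not used and apply_assertion(riddle, i):  # hypothetical helper
            riddle.assertions_used[i] = True
            progress_made = True
    return riddle, progress_made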
Example #3
def main():
    # Assumes `import sys` and `import time` at module level, plus the project-local
    # ProblemData class and the three algorithm functions called below.
    if len(sys.argv) < 2:
        sys.stderr.write("Error: Not enough arguments.\n")
        sys.stderr.write("usage: " + sys.argv[0] + " <input_file>\n")
        return 1

    input_file = sys.argv[1]
    try:
        ProblemData.readFile(input_file)
    except Exception:
        sys.stderr.write("Error: file parsing failed: invalid input file.\n")
        return 2

    print("--------------------------------------")
    print("---  Timetable Scheduling Problem  ---")
    print("--------------------------------------\n")
    print("1. Genetic Algorithm")
    print("2. Hill Climbing")
    print("3. Simulated Annealing\n")

    while True:
        try:
            option = int(input("Choose the desired algorithm: "))
            if option in [1, 2, 3]:
                break
        except ValueError:
            continue
    
    t1 = time.time()

    if option == 1:
        geneticAlgorithm()
    elif option == 2:
        hillClimbing()
    elif option == 3:
        simulatedAnnealing()

    t2 = time.time()

    print("Execution time: {:.4f} seconds".format(t2-t1))
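A side note on the timing: time.time() reads the wall clock, which can jump if the system clock is adjusted, so time.perf_counter() is the usual monotonic choice for measuring elapsed time. A minimal variant of the timing code above:

import time

t1 = time.perf_counter()
result = sum(range(1_000_000))  # stand-in for any of the three algorithm calls above
t2 = time.perf_counter()
print("Execution time: {:.4f} seconds".format(t2 - t1))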
Example #4
# Third-party imports used by this example; the project-local ProblemData,
# DataPartition and RunClassifier classes, and the configuration variables
# referenced below (defaultSignalValue, numNodes, useStoredData, ...), are
# assumed to be defined elsewhere.
import matplotlib.pyplot as plt
from sklearn import svm
from sklearn.decomposition import PCA
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.neighbors import KNeighborsClassifier

def main():

    # load the problem data as prblmData
    prblmData = ProblemData(defaultSignalValue=defaultSignalValue,
                            numNodes=numNodes)
    prblmData = prblmData.loadData(useStoredData=useStoredData,
                                   inputFileName=inputFileName,
                                   storeReadData=storeReadData,
                                   storeDataName=storeDataName,
                                   rowReadUntil=readDataUntilRow)

    # partition the data into train/test sets and matching labels, as dataPar
    dataPar = DataPartition()
    dataPar = dataPar.makeTrainTest(prblmData=prblmData,
                                    readSampleSize=sampleSize,
                                    testPartitionSize=testPartitionSize,
                                    randomState=0,
                                    doNormalize=False,
                                    useSubSample=useSubSample,
                                    storeSubSample=True,
                                    subSamplePcklName=subSamplePcklName)

    # the same partition, but normalized, as dataParNormal
    dataParNormal = DataPartition()
    dataParNormal = dataParNormal.makeTrainTest(
        prblmData=prblmData,
        readSampleSize=sampleSize,
        testPartitionSize=testPartitionSize,
        randomState=0,
        doNormalize=True,
        useSubSample=useSubSample,
        storeSubSample=True,
        subSamplePcklName=subSamplePcklName)

    # Feature reduction of the normalized data by PCA (pcaNcomponent dimensions) as dataParPca
    pcaNcomponent = 10
    pcaObj = PCA(n_components=pcaNcomponent)
    dataParPca = DataPartition()
    dataParPca.fVecTrain = pcaObj.fit_transform(dataParNormal.fVecTrain)
    # project the test set with the PCA fitted on the training set; refitting on
    # the test data (as the original fit_transform call did) would yield mismatched components
    dataParPca.fVecTest = pcaObj.transform(dataParNormal.fVecTest)
    dataParPca.labelTrain = dataParNormal.labelTrain
    dataParPca.labelTest = dataParNormal.labelTest
    dataParPca.isNormalized = True

    # plot the data along the first and second principal components of the fitted PCA
    plt.plot(dataParPca.fVecTrain[:, 0], dataParPca.fVecTrain[:, 1], 'b.')
    plt.title('2D PCA')
    plt.show()
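
    # Not in the original: the fraction of variance retained by the 10 components
    # can be inspected via the fitted PCA's explained_variance_ratio_ attribute.
    print('PCA retained variance:', pcaObj.explained_variance_ratio_.sum())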

    # Three classifiers (random forest, KNN, SVM) are defined below; their
    # hyperparameters are tuned by cross-validation:
    # - cv is the number of folds used for the hyperparameter-tuning cross-validation
    # - n_jobs=-1 runs the work on all available cores

    # Random Forest:
    # n_estimators: number of trees in the forest
    # criterion: measures the quality of a split; "gini" for Gini impurity, "entropy" for information gain
    # max_features: number of features to consider when looking for the best split
    paramGrid_rf = {
        'n_estimators': [5, 10, 17, 30],
        'criterion': ['gini', 'entropy'],
        'max_features': ['auto', 0.01, 0.1, 0.9],
        'n_jobs': [-1]
    }
    #paramGrid_rf = {'n_estimators': [30] ,'criterion': ['gini'] ,'n_jobs': [-1]} # n_jobs => runs in parallel
    clf_rf = GridSearchCV(RandomForestClassifier(), paramGrid_rf,
                          cv=3)  # ,scoring='%s_macro' % score

    # KNN:
    # n_neighbors: number of neighbours to consider
    # weights: how neighbour labels are weighted: 'uniform', or 'distance' (inverse of the neighbour's distance)
    # metric: distance measure: 'minkowski', 'euclidean' or 'manhattan'
    paramGrid_knn = {
        'n_neighbors': [3, 5, 9, 15],
        'weights': ['uniform', 'distance'],
        'metric': ['minkowski', 'euclidean', 'manhattan']
    }
    #paramGrid_knn = {'n_neighbors': [5, 9], 'weights': ['distance'],'metric': ['manhattan']}
    clf_knn = GridSearchCV(KNeighborsClassifier(algorithm='kd_tree',
                                                n_jobs=-1),
                           paramGrid_knn,
                           cv=3)  #, verbose=10  # ,scoring='%s_macro' % score

    # SVM:
    # C: penalty parameter for misclassification
    # kernel: 'linear', 'poly' or 'rbf'; rbf is more time-consuming but seems the best match for this problem
    # gamma: kernel coefficient; how far the influence of a single training sample reaches (low: far / high: close)
    param_grid_svm = {'C': [0.1], 'kernel': ['rbf'], 'gamma': [0.01]}
    # param_grid_svm = {'C': [0.01, 0.1, 1, 10], 'kernel': ['linear', 'poly', 'rbf'], 'gamma': [0.001, 0.01, 0.1, 1]} # rbf is time consuming comparing to others
    clf_svm = GridSearchCV(
        svm.SVC(), param_grid_svm,
        cv=3)  # , verbose=10: write the result of each epoc of cv

    clfNames = ['random_forest', 'knn', 'svm']
    dataTypes = ['original', 'normalized', 'normalized_PCA']

    for idx, clf in enumerate([clf_rf, clf_knn, clf_svm]):
        for idx2, datap in enumerate([dataPar, dataParNormal, dataParPca]):
            runCl = RunClassifier()
            prediction, accuracy, conf_matrix, bestParams = runCl.doClassification(
                clf,
                datap.fVecTrain,
                datap.fVecTest,
                datap.labelTrain,
                datap.labelTest,
                showPlot=True,
                savePickleModel=savePickleModel,
                clfName=clfNames[idx],
                dataType=dataTypes[idx2])
            print('\n+++++++++++++++++++\n')
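For reference, the accuracy and confusion matrix returned by the helper above correspond to sklearn's standard metrics, which can also be computed directly; a self-contained example with made-up labels:

from sklearn.metrics import accuracy_score, confusion_matrix

y_true = [0, 1, 1, 0, 1]
y_pred = [0, 1, 0, 0, 1]
print(accuracy_score(y_true, y_pred))    # 0.8
print(confusion_matrix(y_true, y_pred))  # [[2 0]
                                         #  [1 2]]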
Example #5
#I = array([0,3,1,0])
#J = array([0,3,1,2])
#V = array([4,5,7,9])
#A = sparse.coo_matrix((V,(I,J)),shape=(4,4))



# We define the data for the problem, i.e.,
#  * the cross sections for each node;
#  * the length of every node;
#  * the boundary conditions;
#  * and the local refinement of every node.
#

prob_data = ProblemData()
#                     D               Sigma_a  nu   Sigma_f  Delta_x
prob_data.set_xs('0',[1/(3*0.416667), 0.334,   1.0, 0.000,   2.7]) #moderator
prob_data.set_xs('1',[1/(3*0.416667), 0.334,   1.0, 0.178,   2.4]) #fuel
prob_data.set_composition(['0', '1', '0', '1', '0', '1', '0'])
prob_data.set_bc(0,0)
prob_data.set_refinements(20)

prob_data.setup()


state = State(prob_data)

matrices = SpatialD(state)
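The commented-out fragment at the top of this example shows the scipy.sparse.coo_matrix pattern such spatial operators are typically assembled with; a runnable version of that exact fragment:

from numpy import array
from scipy import sparse

I = array([0, 3, 1, 0])
J = array([0, 3, 1, 2])
V = array([4, 5, 7, 9])
A = sparse.coo_matrix((V, (I, J)), shape=(4, 4))
print(A.toarray())
# [[4 0 9 0]
#  [0 7 0 0]
#  [0 0 0 0]
#  [0 0 0 5]]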