Example no. 1
0
# Imports are not shown in this excerpt; the calls below rely on numpy, scipy and a
# project-local reader/writer module "rd" (plus the helpers normalize, exp_x, uncertainty).
import numpy as np
from scipy.interpolate import KroghInterpolator, interp1d
from scipy.linalg import eigh_tridiagonal

def calculate_and_save_results(inputpath, outputpath):
    """
    Docstring
    """
    rd.load_data(inputpath)
    mass = rd.particle_mass()
    x_min = rd.x_minimum()
    x_max = rd.x_maximum()
    npoint = rd.n_point()
    fval = rd.first_eigenvalue()
    lval = rd.last_eigenvalue()
    inter_type = rd.interpolation_type()
    x_pot = rd.x_potential()
    y_pot = rd.y_potential()
    delta = (x_max-x_min)/npoint
    const_a = 1/(mass*delta**2)
    x_axis = np.linspace(x_min, x_max, npoint)

    #interpolating
    y_pot_inter = []
    if inter_type == "polynomial":
        y_pot_inter = KroghInterpolator(x_pot, y_pot)
    else:
        y_pot_inter = interp1d(x_pot, y_pot, kind=inter_type)

    #calculating
    expected_x = np.array([])
    uncertainties = np.array([])
    energies = np.array([])
    norm_eigenvecs = np.array([])
    maindiag = y_pot_inter(x_axis)+const_a
    seconddiag = np.full(npoint-1, -1/2*const_a)
    selectrange = (fval-1, lval-1)
    ev = eigh_tridiagonal(maindiag, seconddiag, select="i", select_range=selectrange)
    (energies, eigenvecs) = ev
    norm_eigenvecs = np.array([normalize(eigenvec, delta) for eigenvec in eigenvecs.T])
    expected_x = np.array([exp_x(eigenvec, delta, x_axis) for eigenvec in norm_eigenvecs])
    uncertaintylist = [uncertainty(eigenvec, delta, x_axis) for eigenvec in norm_eigenvecs]
    uncertainties = np.array(uncertaintylist)

    rd.save_xyformat(outputpath + "/potential.dat", x_axis, y_pot_inter(x_axis))
    rd.save_nxyformat(outputpath + "/wavefuncs.dat", x_axis, norm_eigenvecs.T)
    rd.save_xyformat(outputpath + "/energies.dat", energies, ["" for _ in energies])
    rd.save_xyformat(outputpath + "/expvalues.dat", expected_x, uncertainties)
    layers = [length, 2 * n_imfs, 3, n_imfs]
elif net_mode == 4:
    from htt_coe_cnn_model_new import *
    layers = [length, 3, 1, n_imfs]
elif net_mode == 5:
    from cnn_kernel_test_model import *
    layers = [length, 2 * n_imfs, 3, n_imfs]

#######################################################

for linenum in linenum_all:
    print(linenum, " trainging test")
    curdir = os.path.abspath(os.curdir)
    filename = os.path.join(curdir, './data/' + linenum + writefile + suffix)

    data, label, name = load_data(filename, length, kinds, fault_type)
    label = label[0::length]
    data = np.reshape(data, [-1, length, 3])

    #################### whether using HHT and its coe or not ###############
    if net_mode == 1:
        ### data for CNN only
        pass
    # else:
    res = multicore(data, 3, kinds)
    res = np.array(res)
    print(res.shape)
    data2 = np.zeros((kinds, length, n_imfs * 2, 3))
    for i in range(kinds):
        data2[i, :, :, :] = res[i, :, 2:, :]
    for i in range(0, 2):
        prob_face[i] = prob_face[i] * prior_prob[i]
    return prob_face.index(max(prob_face))
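
# The classification step above multiplies class-conditional probabilities by the class
# priors and returns the argmax. A common variant (a sketch with illustrative names, not
# this project's code) works in log space to avoid floating-point underflow when many
# per-pixel probabilities are multiplied together:
import math

def classify_log_space(log_likelihoods, prior_prob):
    # log P(class | features) = log P(features | class) + log P(class) + const
    log_posteriors = [log_likelihoods[i] + math.log(prior_prob[i])
                      for i in range(len(prior_prob))]
    return log_posteriors.index(max(log_posteriors))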


#source_train_images = "D:/project2 AI/facedata/facedatatrain"
#source_train_labels = "D:/project2 AI/facedata/facedatatrainlabels"
#source_test_images = "D:/project2 AI/facedata/facedatatest"
#source_test_labels = "D:/project2 AI/facedata/facedatatestlabels"

source_train_images = "/Users/jainipatel/Downloads/data/facedata/facedatatrain"
source_train_labels = "/Users/jainipatel/Downloads/data/facedata/facedatatrainlabels"
source_test_images = "/Users/jainipatel/Downloads/data/facedata/facedatatest"
source_test_labels = "/Users/jainipatel/Downloads/data/facedata/facedatatestlabels"

fetch_data_train = rd.load_data(source_train_images, 451, 70, 60)
fetch_data_test = rd.load_data(source_test_images, 150, 70, 60)
Y_train_labels = labels = rd.load_label(source_train_labels)
X_train = rd.matrix_transformation(fetch_data_train, 70, 60)
X_test = rd.matrix_transformation(fetch_data_test, 70, 60)
Y_test_labels = rd.load_label(source_test_labels)

tem = 0.99
accuracy_array = []
percent_training = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
total_training_time = 0
start1 = time.time()

for i in range(0, 10):
    start = time.time()
    tem -= 0.10
Example no. 4
0
def calculate_and_save_results(inputpath, outputpath):
    """
    This function does three things:
        - interpolates the given points of the potential defined by the user
        - solves the Schrödinger equation and calculates the eigenstates,
          eigenvalues, expectation values and uncertainties
        - saves the results in .dat files

    :type inputpath: string
    :param inputpath: path of the schrodinger.inp file containing the input information

    :type outputpath: string
    :param outputpath: path of the directory the .dat files will be stored in
    """
    rd.load_data(inputpath)
    mass = rd.particle_mass()
    x_min = rd.x_minimum()
    x_max = rd.x_maximum()
    npoint = rd.n_point()
    fval = rd.first_eigenvalue()
    lval = rd.last_eigenvalue()
    inter_type = rd.interpolation_type()
    x_pot = rd.x_potential()
    y_pot = rd.y_potential()
    delta = (x_max - x_min) / npoint
    const_a = 1 / (mass * delta**2)
    x_axis = np.linspace(x_min, x_max, npoint)
    y_pot_inter = []
    expected_x = np.array([])
    uncertainties = np.array([])
    energies = np.array([])
    norm_eigenvecs = np.array([])

    #interpolating
    if inter_type == "polynomial":
        y_pot_inter = KroghInterpolator(x_pot, y_pot)
    else:
        y_pot_inter = interp1d(x_pot, y_pot, kind=inter_type)

    #calculating
    maindiag = y_pot_inter(x_axis) + const_a
    seconddiag = np.full(npoint - 1, -1 / 2 * const_a)
    selectrange = (fval - 1, lval - 1)
    ev = eigh_tridiagonal(maindiag,
                          seconddiag,
                          select="i",
                          select_range=selectrange)
    (energies, eigenvecs) = ev
    norm_eigenvecs = np.array(
        [normalize(eigenvec, delta) for eigenvec in eigenvecs.T])
    expected_x = np.array(
        [exp_x(eigenvec, delta, x_axis) for eigenvec in norm_eigenvecs])
    uncertaintylist = [
        uncertainty(eigenvec, delta, x_axis) for eigenvec in norm_eigenvecs
    ]
    uncertainties = np.array(uncertaintylist)

    #saving
    if not outputpath.endswith("/"):
        outputpath = outputpath + "/"
    rd.save_xyformat(outputpath + "potential.dat", x_axis, y_pot_inter(x_axis))
    rd.save_nxyformat(outputpath + "wavefuncs.dat", x_axis, norm_eigenvecs.T)
    rd.save_xyformat(outputpath + "energies.dat", energies,
                     ["" for _ in energies])
    rd.save_xyformat(outputpath + "expvalues.dat", expected_x, uncertainties)

    print("The results have been saved succesfully into the folder \"" +
          outputpath + "\".")
Example no. 5
0
    layers = [length,2*n_imfs,3, n_imfs]
else:
    layers = [length,2*n_imfs,3, n_imfs]

'''
read the filename
'''


for linenum in linenum_all:
    print(linenum," trainging test")
    curdir = os.path.abspath(os.curdir)
    filename = os.path.join(curdir,'./data/'+linenum+writefile+suffix)


    X_train, y_train = load_data(filename,length,kinds)
    y_train = y_train[0::length]
    X_train = np.reshape(X_train,[-1,length,3])

    #############shuffle the data #######################################
    index = [i for i in range(X_train.shape[0])]
    np.random.shuffle(index)
    data1 = X_train[index,:,:]
    label1 = y_train[index,:]
    print(data1.shape)
    
    for snr in SNR:
        
        #####################calculate the noise ########################
        print('SNR is '+str(snr))
        data_noise = np.zeros([kinds, length, 3])
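
# The excerpt breaks off right after the noise buffer is allocated; a minimal sketch of
# one common way to scale additive white Gaussian noise to a target SNR in dB (an
# assumption about the omitted step, not the original code):
import numpy as np

def add_awgn(signal, snr_db):
    # scale white Gaussian noise so that 10*log10(P_signal / P_noise) == snr_db
    signal_power = np.mean(signal ** 2)
    noise_power = signal_power / (10 ** (snr_db / 10))
    noise = np.random.normal(0.0, np.sqrt(noise_power), signal.shape)
    return signal + noise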
Example no. 6
0
    layers = [length, 2 * n_imfs, 3, n_imfs]
elif net_mode == 4:
    from htt_coe_cnn_model import *
    layers = [length, 3, 1, n_imfs]
elif net_mode == 5:
    from cnn_kernel_test_model import *
    layers = [length, 2 * n_imfs, 3, n_imfs]

#######################################################

for linenum in linenum_all:
    print(linenum, " trainging test")
    curdir = os.path.abspath(os.curdir)
    filename = os.path.join(curdir, './data/' + linenum + writefile + suffix)

    data, label, name = load_data(filename, length, kinds)
    label = label[0::length]
    data = np.reshape(data, [-1, length, 3])

    #################### whether using HHT and its coe or not ###############
    if net_mode == 1:
        ### data for CNN only
        pass
    # else:

    # res = multicore(data, n_imfs, kinds)

    print("HHT process is over")
    data2 = np.zeros((kinds, length, n_imfs * 2, 3))
    for i in tqdm(range(kinds)):
        # data2[i, :, :, :] = res[i]
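
# The HHT step itself (multicore) is not shown in this excerpt; a minimal single-channel
# sketch of a Hilbert-Huang style decomposition, assuming the PyEMD package for the
# empirical mode decomposition (illustrative only, not the project's multicore code):
import numpy as np
from scipy.signal import hilbert
from PyEMD import EMD

def hht_features(signal, n_imfs):
    # decompose into at most n_imfs intrinsic mode functions
    imfs = EMD().emd(signal, max_imf=n_imfs)
    # the Hilbert transform of each IMF gives instantaneous amplitude and phase
    analytic = hilbert(imfs, axis=-1)
    amplitude = np.abs(analytic)
    phase = np.unwrap(np.angle(analytic), axis=-1)
    return imfs, amplitude, phase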
Example no. 7
0
    for i in range(0, 10):
        prob_digit[i] = prob_digit[i] * prior_prob[i]
    return prob_digit.index(max(prob_digit))


source_train_images = "/Users/jainipatel/Downloads/data/digitdata/trainingimages"
source_train_labels = "/Users/jainipatel/Downloads/data/digitdata/traininglabels"
source_test_images = "/Users/jainipatel/Downloads/data/digitdata/testimages"
source_test_labels = "/Users/jainipatel/Downloads/data/digitdata/testlabels"

# source_train_images = "D:/project2 AI/digitdata/trainingimages"
# source_train_labels = "D:/project2 AI/digitdata/traininglabels"
# source_test_images = "D:/project2 AI/digitdata/testimages"
# source_test_labels = "D:/project2 AI/digitdata/testlabels"

fetch_data_train = rd.load_data(source_train_images, 5000, 28, 28)
fetch_data_test = rd.load_data(source_test_images, 1000, 28, 28)
Y_train_labels = labels = rd.load_label(source_train_labels)
X_train = rd.matrix_transformation(fetch_data_train, 28, 28)
X_test = rd.matrix_transformation(fetch_data_test, 28, 28)
Y_test_labels = rd.load_label(source_test_labels)
# print(len(Y1))

tem = 0.99
accuracy_array = []
percent_training = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
total_training_time = 0
start1 = time.time()

for i in range(0, 10):
    start = time.time()
def start():
    number_of_tries = int(ent_number_of_tries.get())
    number_of_individuals = int(ent_number_of_individuals.get())
    increase_of_individuals = int(ent_increase_of_individuals.get())
    max_generation = int(ent_max_generation.get())
    increase_of_max_generation = int(ent_increase_of_max_generation.get()) 
    number_of_genes = int(ent_number_of_genes.get())
    number_of_color = int(ent_number_of_color.get())
    mutation_rate = float(ent_mutation_rate.get())
    method_algorthim = combo_method_algorthim.get()
    population_method = radio_population_method.get()
    fitness_method = radio_fitness_method.get()
    crossover_method = radio_crossover_method.get()
    mutation_method = radio_mutation_method.get()
    if method_algorthim == 'random':
        # Random adjacency matrix: random 0/1 entries; the diagonal and upper triangle
        # are then cleared so there are no self-loops and each edge is stored only once
        adjacency_matrix = [[random.randint(0, 1) for x in range(number_of_genes)] for y in range(number_of_genes)]
        for i in range(number_of_genes):
            for j in range(number_of_genes):
                if j <= i and adjacency_matrix[j][i] == 1:
                    adjacency_matrix[j][i] = 0
    elif method_algorthim == 'custom':
        adjacency_matrix = load_data(path, number_of_genes)
    ## We can choose between two methods: 'random' creates random edges between the nodes,
    ## while 'custom' lets us define our own edges in adjacency-matrix form.
    ## In the random method the fourth parameter returned by GAlgorthim will be the
    ## adjacency matrix that was created randomly.
    with open('Result.txt', 'w') as result:
        result.write('max_generation,population,number_of_solutions,number_of_color,fitness,time\n')
        list_number_of_tries = [x for x in range(1, number_of_tries + 1)]
        list_max_generation = []
        list_number_of_individuals = []
        list_number_sol_color = []
        list_fitness_solution = []
        list_time = []
        list_first_solution = []
        
        for x in range(number_of_tries):
            start_time = time.time()
            solution_list, fitness_solution, number_sol_color=GAlgorthim(population_method, fitness_method, crossover_method,mutation_method,
                                                        number_of_individuals, max_generation, number_of_genes, number_of_color, mutation_rate, adjacency_matrix)
                     
            time_algorthim = "{:.2f}".format(time.time() - start_time)
            
            text.insert(END, "try number "+str(x+1)+" ==========\n" )
            text.insert(END, "The time of running the algorithm is %s seconds\n" % time_algorthim)
            text.insert(END, "The populition = " + str(number_of_individuals))
            text.insert(END, "\nThe max genertion = " + str(max_generation))
            text.insert(END, '\nThe algorithm found '+str( len(solution_list))+ ' possible solutions\nMinimum number of colors = '+ str(number_sol_color)+
                  '\nThe fitness = '+ str(fitness_solution)+'\nNumber of edges = '+str(sum([sum(x) for x in adjacency_matrix]))+'\n')
            print("The time of running the algorithm is %s seconds " % time_algorthim)
            print('solution_list',solution_list)
            print("The populition is = " , number_of_individuals)
            print('The algorithm found ', len(solution_list), 'possible solutions\nMinimum number of colors = ', number_sol_color,
                  '\nThe fitness = ', fitness_solution,'\nNumber of edges = ',sum([sum(x) for x in adjacency_matrix]))
            print('solution_list',solution_list)
            print('The first possible solution is : ', solution_list[0])
            print('====================================================')
            result.write(str(max_generation)+','+str(number_of_individuals)+','+ str(len(solution_list))+','+ str(number_sol_color)+','+
                         str(fitness_solution)+','+ str(time_algorthim)+'\n')      

            list_max_generation.append(max_generation)
            list_number_of_individuals.append(number_of_individuals)
            list_number_sol_color.append(number_sol_color)
            list_fitness_solution.append(fitness_solution)
            list_time.append(time_algorthim)
            list_first_solution.append(solution_list[0])

            number_of_individuals += increase_of_individuals
            max_generation += increase_of_max_generation
    final_solution = optimal_solution(list_first_solution, list_fitness_solution)
    text.insert(END, "\nthe list of the final solution is "+str(final_solution) )
    print('the optimal solutions after all tries ',final_solution)      
    plot_result(list_number_of_tries, list_max_generation, list_number_of_individuals, list_number_sol_color, list_fitness_solution, list_time)
    #Drawing the graph

    global idx_solution
    global number_of_solution
    idx_solution = 0
    number_of_solution = len(final_solution)
    def next_solution(number_of_genes, adjacency_matrix, number_of_color, show_solution):
        global idx_solution
        global number_of_solution
        idx_solution += 1
        if idx_solution == number_of_solution:
            Button_solution = Button(root, text="Quit ...", width= 15, command=root.destroy)
            Button_solution.grid(row=13, column=2, padx=5, pady=5)
        else:
            Button_solution = Button(root, text="next solution", width= 15, command=lambda: next_solution(number_of_genes, adjacency_matrix, number_of_color, final_solution[idx_solution]))
            Button_solution.grid(row=13, column=2, padx=5, pady=5)
        draw_graph(number_of_genes, adjacency_matrix, number_of_color, show_solution)

    Button_solution = Button(root, text="display solution", width= 15, command=lambda: next_solution(number_of_genes, adjacency_matrix, number_of_color, final_solution[idx_solution]))
    Button_solution.grid(row=13, column=2, padx=5, pady=5)
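
# GAlgorthim and its fitness evaluation are defined elsewhere; a minimal sketch of the
# conflict count a graph-coloring fitness is typically built on (an assumption, not the
# project's actual code):
def count_color_conflicts(solution, adjacency_matrix):
    # number of edges whose two endpoints share a color; 0 means the coloring is valid
    conflicts = 0
    n = len(adjacency_matrix)
    for i in range(n):
        for j in range(n):
            if adjacency_matrix[i][j] == 1 and solution[i] == solution[j]:
                conflicts += 1
    return conflicts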