Example #1
def num_dims_vs_time(o_algorithm, settings, o_function, num_dims, plot=False):
    algorithm_name = o_algorithm.get_name()
    func_name = o_function.__globals__['__name__'].replace('_', ' ').title()

    times = []
    accuracy = []

    optimize_settings(settings)

    for test in num_dims:
        # create bounds
        bounds = []
        for i in range(0, test):
            bounds.append((-10, 10))
        settings['number_of_dimensions'] = test
        settings['bounds'] = bounds

        algorithm = o_algorithm(settings, o_function)

        algorithm.start_timer()
        algorithm.run()
        algorithm.stop_timer()

        times.append(algorithm.get_time())
        accuracy.append(algorithm.get_best_x().get_fval())

    if plot:
        # timing plot
        time_fig = plt.figure()
        time_ax = time_fig.add_subplot(111)
        plt.title("%s Number of Dimensions vs Time on %s" %
                  (algorithm_name, func_name))
        time_ax.set_xlabel("Number of Dimensions")
        time_ax.set_ylabel("Time (seconds)")
        time_ax.plot(num_dims, times, 'g-')
        # accuracy plot
        acc_fig = plt.figure()
        acc_ax = acc_fig.add_subplot(111)
        plt.title("%s Number of Dimensions vs Accuracy on %s" %
                  (algorithm_name, func_name))
        acc_ax.set_xlabel("Number of Dimensions")
        acc_ax.set_ylabel("Objective Function Value")
        acc_ax.plot(num_dims, accuracy, 'g-')

        plt.show()

    return (times, accuracy)
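
For context, a call to num_dims_vs_time might look like the sketch below. This is a hypothetical usage sketch: the ParticleSwarm class, the ackley_function module, and the settings defaults are assumed names for illustration, not a confirmed API; only the call signature and the settings keys the function itself fills in come from the code above.

# Hypothetical usage sketch -- ParticleSwarm and ackley_function are assumed
# names. num_dims_vs_time fills in 'number_of_dimensions' and 'bounds' itself
# for every entry of num_dims.
from particle_swarm import ParticleSwarm        # assumed algorithm class
import ackley_function                          # assumed objective module

settings = {
    'population_size': 50,     # assumed defaults; optimize_settings() may
    'num_iterations': 100,     # override or extend these
}

times, accuracy = num_dims_vs_time(ParticleSwarm,
                                   settings,
                                   ackley_function.objective_function,
                                   num_dims=[2, 4, 8, 16],
                                   plot=True)
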
Example #2
def cmp_num_parts_vs_time(o_algorithm1, o_algorithm2, \
                      settings1, settings2, o_function, num_particles):
    func_name = o_function.__globals__['__name__'].replace('_', ' ').title()
    times1 = []
    times2 = []
    accuracy1 = []
    accuracy2 = []

    optimize_settings(settings1)
    optimize_settings(settings2)

    for test in num_particles:

        # num_parts_vs_time returns one-element lists here; unwrap them so the
        # plots and return values hold plain scalars
        t, acc = num_parts_vs_time(o_algorithm1, settings1, o_function, [test],
                                   False)
        times1.append(t[0])
        accuracy1.append(acc[0])

        t, acc = num_parts_vs_time(o_algorithm2, settings2, o_function, [test],
                                   False)
        times2.append(t[0])
        accuracy2.append(acc[0])

    # timing plot
    time_fig = plt.figure()
    time_ax = time_fig.add_subplot(111)
    time_ax.set_title("GA vs. PSO timing on %s" % func_name)
    time_ax.set_xlabel("Number of Particles")
    time_ax.set_ylabel("Time (seconds)")
    time_ax.plot(num_particles, times1, 'g-')
    time_ax.plot(num_particles, times2, 'r-')
    time_ax.legend(['GA', 'PSO'])
    # accuracy plot
    acc_fig = plt.figure()
    acc_ax = acc_fig.add_subplot(111)
    acc_ax.set_title("GA vs. PSO accuracy on %s" % func_name)
    acc_ax.set_xlabel("Number of Particles")
    acc_ax.set_ylabel("Objective Function Value")
    acc_ax.plot(num_particles, accuracy1, 'g-')
    acc_ax.plot(num_particles, accuracy2, 'r-')
    acc_ax.legend(['GA', 'PSO'])

    plt.ylim(0, 1)
    plt.show()

    return (times1, times2)
Example #3
def num_parts_vs_time(o_algorithm,
                      settings,
                      o_function,
                      num_particles,
                      plot=False):
    algorithm_name = o_algorithm.get_name()
    func_name = o_function.__globals__['__name__'].replace('_', ' ').title()

    times = []
    accuracy = []

    optimize_settings(settings)
    settings['num_iterations'] = 100

    for test in num_particles:
        settings['population_size'] = test

        algorithm = o_algorithm(settings, o_function)

        algorithm.start_timer()
        algorithm.run()
        algorithm.stop_timer()

        times.append(algorithm.get_time())
        accuracy.append(algorithm.get_best_x().get_fval())

    if plot:
        plt.title("%s Number of Particles vs Time on %s" %
                  (algorithm_name, func_name))
        plt.xlabel("Number of Particles")
        plt.ylabel("Time (Seconds)")

        plt.plot(num_particles, times, 'r-')
        plt.show()

    return (times, accuracy)
Example #4
def get_pso_two_d_accuracy(o_algorithm, o_settings, o_function, \
                           x1_start, x1_step, x1_end, \
                           x2_start, x2_step, x2_end, \
                           x1_name, x2_name, \
                           population_size=50, num_tests_per_point=10, plot=True, \
                           save_histograms=True, response_surface=True, debug=False):
    if response_surface:
        plot = True

    # turn off settings that slow us down
    o_settings = optimize_settings(o_settings)

    func_name = o_function.__globals__['__name__']
    tests = {}
    hist_num_bins = 150

    o_settings['population_size'] = population_size
    num_tests_per = num_tests_per_point
    x1_srt = x1_start
    x1_e = x1_end
    x1_stp = x1_step
    x2_srt = x2_start
    x2_e = x2_end
    x2_stp = x2_step
    num_tests = int(( (int(100*x1_e)-int(100*x1_srt))/int(100*x1_stp) + 1 )* \
                    ( (int(100*x2_e)-int(100*x2_srt))/int(100*x2_stp) + 1 ))
    X = np.ones(shape=(num_tests, 6))
    y = np.zeros(shape=(num_tests, 1))

    n = 0

    if plot:
        fig = plt.figure()
        ax1 = fig.add_subplot(111, projection='3d')
        if o_function != griewank_function.objective_function:
            ax1.set_zlim(0, 1)

    for i in np.arange(x1_srt, x1_e + x1_stp, x1_stp):
        for j in np.arange(x2_srt, x2_e + x2_stp, x2_stp):
            # set settings for this test
            o_settings[x1_name] = i
            o_settings[x2_name] = j

            # initial variables
            values = []
            euclid_distance = []
            test_name = 'cp' + '(' + str(o_settings['cp']) + ')' + ',' + \
                        'cg' + '(' + str(o_settings['cg']) + ')'

            print("Running test %s on %s" % (test_name, func_name))

            # create histogram plot if true
            if save_histograms:
                hist_fig = plt.figure()
                hist_ax = hist_fig.add_subplot(111)

            # run optimization algorithm
            for k in range(0, num_tests_per):
                algorithm = o_algorithm(o_settings, o_function)
                algorithm.run()
                # save the final objective value from this run
                values.append(algorithm.get_best_x().get_fval())
                # euclidean distance
                squares = 0
                for pos in algorithm.get_best_x().pos:
                    if o_function == rosenbrock_function.objective_function:
                        squares += (pos - 1)**2
                    elif o_function == easom_function.objective_function:
                        squares += (pos - np.pi)**2
                    else:
                        squares += pos**2
                euclid_distance.append(np.sqrt(squares))

            # save histogram if true
            if save_histograms:
                hist_ax.hist(euclid_distance,
                             hist_num_bins,
                             range=(0, 9),
                             density=True)
                hist_fig.savefig(test_name + '.png')
                plt.close(hist_fig)

            # find average and save data
            #avg = sum(values)/len(values)
            #avg = median(values)
            #avg = sum(euclid_distance)/len(euclid_distance)
            avg = np.median(euclid_distance)
            tests[test_name] = avg
            if plot:
                ax1.scatter(i, j, avg)

            X[n][1] = i
            X[n][2] = j
            X[n][3] = i * i
            X[n][4] = i * j
            X[n][5] = j * j
            y[n] = avg

            # increment test number
            n += 1

#    fname = gen_filename(x1_name, x2_name, func_name)
#    write_xy_data(X, y, fname)

    if debug:
        print("\n*** DATA ***")
        print("X")
        print(X)
        print("\ny")
        print(y)
        print("\ntests")
        print(tests)

    if response_surface:
        # get regression coefficients
        b = regression_utils.get_regression_coef(X, y)

        pltx = np.arange(x1_start, x1_end + x1_step, x1_step)
        plty = np.arange(x2_start, x2_end + x2_step, x2_step)
        pltX, pltY = np.meshgrid(pltx, plty)
        F = (b[0] + b[1] * pltX + b[2] * pltY +
             b[3] * pltX * pltX + b[4] * pltX * pltY + b[5] * pltY * pltY)
        ax1.plot_wireframe(pltX, pltY, F)

    if plot:
        print("\nPlotting ...")
        x1_name = x1_name[0].upper() + x1_name[1:]
        x2_name = x2_name[0].upper() + x2_name[1:]
        ax1.set_xlabel(x1_name)
        ax1.set_ylabel(x2_name)
        ax1.set_zlabel('Average Euclidean Distance from Global Minimum')
        plt.show()

    return (X, y)
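
The response-surface step above relies on regression_utils.get_regression_coef, which is not shown here. As a point of reference, the same quadratic surface can be fit with an ordinary least-squares solve; the sketch below is a stand-in under that assumption, not the repository's actual implementation. It uses the same design-matrix layout as X above (columns: 1, x1, x2, x1^2, x1*x2, x2^2).

import numpy as np

# Least-squares stand-in (assumed equivalent) for
# regression_utils.get_regression_coef: fit
#   f(x1, x2) = b0 + b1*x1 + b2*x2 + b3*x1^2 + b4*x1*x2 + b5*x2^2
def fit_quadratic_surface(X, y):
    b, *_ = np.linalg.lstsq(X, y, rcond=None)
    return b.ravel()

# Tiny synthetic check: recover the coefficients of a known surface.
rng = np.random.default_rng(0)
x1 = rng.uniform(0.0, 1.0, 25)
x2 = rng.uniform(0.0, 1.0, 25)
X = np.column_stack([np.ones_like(x1), x1, x2, x1**2, x1*x2, x2**2])
y = X @ np.array([1.0, -2.0, 0.5, 3.0, 0.0, -1.0])
print(fit_quadratic_surface(X, y))   # approximately [ 1. -2.  0.5  3.  0. -1.]
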
Example #5
def get_3d_accuracy(o_algorithm, o_settings, o_function, \
                         x1_info, x2_info, x3_info, \
                         population_size=50, num_tests_per_point=10, \
                         save_histograms=True, debug=False):
    # turn off settings that slow us down
    o_settings = optimize_settings(o_settings)

    func_name = o_function.__globals__['__name__']
    tests = {}
    hist_num_bins = 150

    o_settings['population_size'] = population_size
    num_tests_per = num_tests_per_point

    x1_name = x1_info[NAME]
    x2_name = x2_info[NAME]
    x3_name = x3_info[NAME]
    x1_s = x1_info[START]
    x1_e = x1_info[END]
    x1_stp = x1_info[STEP]
    x2_s = x2_info[START]
    x2_e = x2_info[END]
    x2_stp = x2_info[STEP]
    x3_s = x3_info[START]
    x3_e = x3_info[END]
    x3_stp = x3_info[STEP]

    num_tests = int(( (int(100*x1_e)-int(100*x1_s))/int(100*x1_stp) + 1 )* \
                    ( (int(100*x2_e)-int(100*x2_s))/int(100*x2_stp) + 1 )* \
                    ( (int(100*x3_e)-int(100*x3_s))/int(100*x3_stp) + 1 ) )
    X = np.ones(shape=(num_tests, 10))
    y = np.zeros(shape=(num_tests, 1))

    n = 0

    for i in np.arange(x1_s, x1_e + x1_stp, x1_stp):
        for j in np.arange(x2_s, x2_e + x2_stp, x2_stp):
            for k in np.arange(x3_s, x3_e + x3_stp, x3_stp):
                # set settings for this test
                o_settings[x1_name] = i
                o_settings[x2_name] = j
                o_settings[x3_name] = k

                # initial variables
                values = []
                euclid_distance = []
                test_name = x1_name + '(' + str(i) + ')' + ',' + \
                            x2_name + '(' + str(j) + ')' + ',' + \
                            x3_name + '(' + str(k) + ')'

                print("Running test %s" % test_name)

                # create histogram plot if true
                if save_histograms:
                    hist_fig = plt.figure()
                    hist_ax = hist_fig.add_subplot(111)

                # run optimization algorithm
                for t in range(0, num_tests_per):
                    algorithm = o_algorithm(o_settings, o_function)
                    algorithm.run()
                    # save the final objective value from this run
                    values.append(algorithm.get_best_x().get_fval())
                    # euclidean distance
                    squares = 0
                    for pos in algorithm.get_best_x().pos:
                        if o_function == rosenbrock_function.objective_function:
                            squares += (pos - 1)**2
                        elif o_function == easom_function.objective_function:
                            squares += (pos - np.pi)**2
                        else:
                            squares += pos**2
                    euclid_distance.append(np.sqrt(squares))

                # save histogram if true
                if save_histograms:
                    hist_ax.hist(values,
                                 hist_num_bins,
                                 range=(0, 1.5),
                                 density=True)
                    hist_fig.savefig(test_name + '.png')
                    plt.close(hist_fig)

                # find average and save data
                #avg = sum(values)/len(values)
                #avg = median(values)
                #avg = sum(euclid_distance)/len(euclid_distance)
                avg = np.median(euclid_distance)
                tests[test_name] = avg

                X[n][1] = i
                X[n][2] = j
                X[n][3] = k
                X[n][4] = i * i
                X[n][5] = i * j
                X[n][6] = i * k
                X[n][7] = j * j
                X[n][8] = j * k
                X[n][9] = k * k
                y[n] = avg

                # increment test number
                n += 1

    fname = gen_filename(x1_name, x2_name, func_name)
    write_xy_data(X, y, fname)

    if debug:
        print("\n*** DATA ***")
        print("X")
        print(X)
        print("\ny")
        print(y)
        print("\ntests")
        print(tests)

    return (X, y)
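
Both get_pso_two_d_accuracy and get_3d_accuracy score each run by the Euclidean distance of the best position found from the known global minimum: all ones for the Rosenbrock function, (pi, ..., pi) for the Easom function, and the origin otherwise. A minimal, self-contained sketch of that metric (the helper name and sample values are illustrative only):

import numpy as np

# Euclidean distance of a best-found position from a known optimum.
def distance_from_optimum(pos, optimum):
    pos = np.asarray(pos, dtype=float)
    return float(np.sqrt(np.sum((pos - optimum) ** 2)))

best_pos = [0.98, 1.03]                       # e.g. best_x.pos from one run
print(distance_from_optimum(best_pos, 1.0))   # Rosenbrock: optimum at all ones
print(distance_from_optimum(best_pos, np.pi)) # Easom: optimum at (pi, pi)
print(distance_from_optimum(best_pos, 0.0))   # default: distance from origin
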
Example #6
def cmp_func_val_over_iterations(o_algorithm, settings, o_function):
    x1_start = 0.3
    x1_step = 0.1
    x1_end = 0.6
    x2_start = 0.25
    x2_step = 0.25
    x2_end = 0.75

    x1_name = "selection_cutoff"
    x2_name = "mutation_rate"
    population_size = [50]

    optimize_settings(settings)

    fig, (ax1, ax2, ax3) = plt.subplots(3, sharex=True, sharey=True)

    tests1 = []
    tests2 = []
    tests3 = []

    for test in population_size:
        for i, x1 in enumerate(np.arange(x1_start, x1_end + x1_step, x1_step)):
            for j, x2 in enumerate(
                    np.arange(x2_start, x2_end + x2_step, x2_step)):
                settings[x1_name] = x1
                settings[x2_name] = x2
                f = []

                settings['population_size'] = test

                algorithm = o_algorithm(settings, o_function)

                while settings['num_iterations'] > algorithm.num_generations:
                    f.append(algorithm.get_best_f())
                    algorithm.do_loop()

                if j == 0:
                    tests1.append(
                        "Selection Cutoff %4.2f Mutation Rate %4.2f" %
                        (x1, x2))
                    ax1.plot(range(1, len(f) + 1), f)
                elif j == 1:
                    tests2.append(
                        "Selection Cutoff %4.2f Mutation Rate %4.2f" %
                        (x1, x2))
                    ax2.plot(range(1, len(f) + 1), f)
                elif j == 2:
                    tests3.append(
                        "Selection Cutoff %4.2f Mutation Rate %4.2f" %
                        (x1, x2))
                    ax3.plot(range(1, len(f) + 1), f)

    ax1.legend(tests1)
    ax2.legend(tests2)
    ax3.legend(tests3)
    ax1.set_title(
        'GA Comparison of Selection Cutoff & Mutation Rate on Ackley Function (50 particles)'
    )
    ax1.set_xlabel('Number of Iterations')
    ax2.set_xlabel('Number of Iterations')
    ax3.set_xlabel('Number of Iterations')
    ax1.set_ylabel('Objective Function Value')
    ax2.set_ylabel('Objective Function Value')
    ax3.set_ylabel('Objective Function Value')
    #ax2.ylabel('Objective Function Value')
    #ax3.ylabel('Objective Function Value')
    #plt.legend(tests)
    plt.show()
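
The convergence curves above are produced by stepping the algorithm manually: call do_loop() once per generation and record get_best_f() each time, instead of calling run(). The self-contained sketch below reproduces that pattern with a trivial stand-in (DummyAlgorithm and its fake improvement schedule are purely illustrative); the interface it mimics (num_generations, do_loop, get_best_f) is the one used in the loop above.

import matplotlib.pyplot as plt

# Trivial stand-in exposing the same interface the loop above relies on.
class DummyAlgorithm:
    def __init__(self):
        self.num_generations = 0
        self.best_f = 10.0

    def do_loop(self):
        self.best_f *= 0.9          # pretend each generation improves best f
        self.num_generations += 1

    def get_best_f(self):
        return self.best_f

num_iterations = 50
algorithm = DummyAlgorithm()
f = []
while num_iterations > algorithm.num_generations:
    f.append(algorithm.get_best_f())
    algorithm.do_loop()

plt.plot(range(1, len(f) + 1), f)
plt.xlabel('Number of Iterations')
plt.ylabel('Objective Function Value')
plt.show()
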
Example #7
def cmp_func_val_over_iterations(o_algorithm, settings, o_function):
    x1_start = 0.25
    x1_step = 0.25
    x1_end = 0.75
    x2_start = 0.25
    x2_step = 0.25
    x2_end = 0.75
    x1_name = "cp"
    x2_name = "cg"
    variant = 'normal'
    population_size = [50]

    optimize_settings(settings)

    fig, (ax1, ax2, ax3) = plt.subplots(3, sharex=True, sharey=True)

    settings['velocity_type'] = variant

    tests1 = []
    tests2 = []
    tests3 = []

    for test in population_size:
        for i, x1 in enumerate(np.arange(x1_start, x1_end+x1_step, x1_step)):
            for j, x2 in enumerate(np.arange(x2_start, x2_end+x2_step, x2_step)):
                settings[x1_name] = x1
                settings[x2_name] = x2
                f = []

                settings['population_size'] = test

                algorithm = o_algorithm(settings, o_function)

                while settings['num_iterations'] > algorithm.num_iterations:
                    f.append(algorithm.get_best_f())
                    algorithm.do_loop()

                if j == 0:
                    tests1.append("Cp %4.2f Cg %4.2f" % (x1, x2))
                    ax1.plot(range(1,len(f)+1), f)
                elif j == 1:
                    tests2.append("Cp %4.2f Cg %4.2f" % (x1, x2))
                    ax2.plot(range(1,len(f)+1), f)
                elif j == 2:
                    tests3.append("Cp %4.2f Cg %4.2f" % (x1, x2))
                    ax3.plot(range(1,len(f)+1), f)

    ax1.legend(tests1)
    ax2.legend(tests2)
    ax3.legend(tests3)
    variant = variant[0].upper() + variant[1:]
    ax1.set_title(variant + ' PSO Comparison of Cp & Cg on Easom Function (50 particles)')
    ax1.set_xlabel('Number of Iterations')
    ax2.set_xlabel('Number of Iterations')
    ax3.set_xlabel('Number of Iterations')
    ax1.set_ylabel('Objective Function Value')
    ax2.set_ylabel('Objective Function Value')
    ax3.set_ylabel('Objective Function Value')
    #ax2.ylabel('Objective Function Value')
    #ax3.ylabel('Objective Function Value')
    #plt.legend(tests)
    plt.show()