Example #1
import argparse
import os

import plotter  # assumed project-local plotting helpers (provides get_datasets/plot_data)
from experiment import experiment  # assumed project-local training entry point


def main():
    parser = argparse.ArgumentParser(description='Train it :3')
    parser.add_argument('--ckpt-path', default=None,
                        help='relative path to a saved model checkpoint')
    args = parser.parse_args()

    # Keeping everything in a single function to make later CLI argument parsing easy
    experiment_params = dict(
        exp_name='32-SimpleModel-gainx10-longrun',
        # env_name='Pendulum-v0',
        # env_name='DroneZero-v0',
        # env_name='PendrogoneFlagrun-v0',
        env_name='WindTurbine-v2',
        num_iterations=600,
        sample_horizon=8192,
        # Learning hyperparameters
        epochs=10, batch_size=128, learning_rate=1e-4,
        # GAE params
        gamma=0.95, lam=0.95,
        # PPO-specific clipping hyperparameter, not gonna change this :v
        epsilon=0.2,
        ckpt_path=args.ckpt_path
    )
    exp_dir = experiment(n_experiments=4, **experiment_params)
    data = plotter.get_datasets(exp_dir)
    plotter.plot_data(data, os.path.join(exp_dir, 'plot4this.png'))


if __name__ == '__main__':
    main()
Example #2
import os

import plotter  # assumed project-local plotting helpers, as in Example #1
from experiment import experiment  # assumed project-local training entry point


def main():
    # Keeping everything in a single function to make later CLI argument parsing easy
    experiment_params = dict(
        exp_name='06-exponential',
        # env_name='Pendulum-v0',
        # env_name='DroneZero-v0',
        env_name='PendrogoneZero-v0',
        num_iterations=350,
        sample_horizon=2048,
        # Learning hyperparameters
        epochs=10, batch_size=64, learning_rate=1e-4,
        # GAE params
        gamma=0.99, lam=0.95,
        # PPO-specific clipping hyperparameter, not gonna change this :v
        epsilon=0.2,
    )
    exp_dir = experiment(n_experiments=4, **experiment_params)
    data = plotter.get_datasets(exp_dir)
    plotter.plot_data(data, os.path.join(exp_dir, 'plot4this.png'))


if __name__ == '__main__':
    main()
Example #3
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from exam import hours_studied, passed_exam
from plotter import plot_data

# Create logistic regression model
model = LogisticRegression()
model.fit(hours_studied, passed_exam)

# Plug sample data into fitted model
sample_x = np.linspace(-16.65, 33.35, 300).reshape(-1,1)
probability = model.predict_proba(sample_x)[:,1]

# Plot the exam data and the fitted logistic regression curve
plot_data(model)

# Show the plot
plt.show()

# Lowest and highest probabilities a logistic model can output
lowest = 0
highest = 1

'''
LOGISTIC REGRESSION
Log-Odds
In Linear Regression, we multiply the coefficients of our features by their respective feature values and add the intercept; the result is our prediction, which can range from -∞ to +∞. In Logistic Regression, we perform the same multiplication of feature coefficients and feature values and add the intercept, but instead of a prediction we get what is called the log-odds.
'''
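As a quick sketch of that relationship (assuming the fitted model and sample_x
from Example #3 above; the names log_odds and probability are illustrative),
the log-odds can be computed by hand from the learned coefficients, or
equivalently via scikit-learn's decision_function, and pushed through the
sigmoid to recover predict_proba's output:

import numpy as np

# Manual log-odds: features dot coefficients, plus intercept
log_odds = np.dot(sample_x, model.coef_.T) + model.intercept_

# decision_function returns the same log-odds for a fitted LogisticRegression
assert np.allclose(log_odds.ravel(), model.decision_function(sample_x))

# The sigmoid maps log-odds to probabilities, matching predict_proba's class-1 column
probability = 1 / (1 + np.exp(-log_odds))
assert np.allclose(probability.ravel(), model.predict_proba(sample_x)[:, 1])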
Example #4
        if new_path_distance < old_path_distance:
            # Always accept a shorter path
            path = new_path
            if new_path_distance < min_distance:
                min_distance = new_path_distance
        # Metropolis criterion: accept a longer path with probability
        # exp(-(distance increase) / temperature), so uphill moves become
        # rarer as the temperature cools
        elif math.exp(-(new_path_distance - old_path_distance) /
                      temperature) > rand.uniform(0, 1):
            path = new_path

        temperatures.append(temperature)
        temperature *= decay_rate

    distances_plot_data = (copy.copy(iters), dists)
    temperatures_plot_data = (iters, temperatures)
    return first_path, best_path, distances_plot_data, temperatures_plot_data


# Driver script (assumes math, copy, a random generator bound to rand, and the
# project-local plotter module are imported at the top of the file)
n = 200
iterations = 500000
temperature = 1000
decay_rate = 0.99995
swap_type = "consecutive"
low = 0
high = 1
distribution = "uniform"

first_path, best_path, distances_plot_data, temperatures_plot_data = travelling_salesman_problem(
    n, iterations, temperature, decay_rate, swap_type, low, high, distribution)

plotter.plot_data(first_path, best_path, distances_plot_data,
                  temperatures_plot_data)
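For a sense of scale on this cooling schedule (a quick check, assuming the
geometric decay temperature *= decay_rate runs once per iteration as in the
loop above), the temperature is effectively zero by the end, so the search
finishes as pure hill climbing:

import math

# T_final = T_0 * decay_rate ** iterations
t_final = 1000 * 0.99995 ** 500000
print(t_final)  # ~1.39e-08

# Same result via the approximation decay_rate ** k = exp(k * ln(decay_rate))
print(1000 * math.exp(-(1 - 0.99995) * 500000))  # ~1.39e-08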