Example #1
# Imports needed by this snippet; get_random_state, State, BOARD_ROWS, and
# BOARD_COLS come from the surrounding project.
import sklearn.pipeline
import sklearn.preprocessing
from sklearn.kernel_approximation import RBFSampler
from sklearn.linear_model import SGDRegressor

def __init__(self):
    # Standardize observations using a sample of random states.
    observations = [get_random_state().data.reshape(-1) for _ in range(10000)]
    self.scaler = sklearn.preprocessing.StandardScaler()
    self.scaler.fit(observations)

    # Approximate RBF kernels at several bandwidths and concatenate the features.
    self.featurizer = sklearn.pipeline.FeatureUnion([
        ("rbf1", RBFSampler(gamma=5.0, n_components=150)),
        ("rbf2", RBFSampler(gamma=2.0, n_components=150)),
        ("rbf3", RBFSampler(gamma=1.0, n_components=150)),
        ("rbf4", RBFSampler(gamma=0.5, n_components=150))
    ], n_jobs=1)
    self.featurizer.fit(self.scaler.transform(observations))

    # One linear model per board cell, primed with a dummy update so that
    # predict() works before the first real partial_fit call.
    self.models = dict()
    for i in range(BOARD_ROWS):
        for j in range(BOARD_COLS):
            model = SGDRegressor(learning_rate="constant")
            model.partial_fit([self.featurize_state(State())], [0])
            self.models[(i, j)] = model
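
The constructor calls self.featurize_state, which is not shown in this excerpt. A minimal sketch of what it plausibly does, assuming it scales a raw state vector and maps it through the fitted FeatureUnion (the body is an assumption inferred from the call site, not the original code):

def featurize_state(self, state):
    # Hypothetical body: both transformers expect a 2-D array, so wrap the
    # flattened state and unwrap the single transformed row.
    scaled = self.scaler.transform([state.data.reshape(-1)])
    return self.featurizer.transform(scaled)[0]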
Example #2
# Assumes Flask's render_template, the login_session (Flask session proxy),
# and the OAuth client IDs are imported in the surrounding module.
def login():
    """
    View function to render the default page to the user.
    Enables the user to log in using the Google or Facebook OAuth provider.
    :return: the rendered index.html template
    """
    state = utils.get_random_state()
    login_session['state'] = state

    # Get all categories and their item count
    all_categories = _get_categories()
    # Get most recently added items
    latest_items = _get_latest_category_items()
    return render_template('index.html',
                           GOOGLE_CLIENT_ID=GOOGLE_CLIENT_ID,
                           FB_APP_ID=FB_APP_ID,
                           STATE=login_session['state'],
                           LOGIN_SESSION=login_session,
                           ACTIVE_CATEGORY=LATEST_CATEGORY,
                           ALL_CATEGORIES=all_categories,
                           CATEGORY_ITEMS=latest_items)
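
utils.get_random_state supplies the anti-forgery state token stored in the session for the OAuth flow. A minimal sketch of such a helper, assuming it returns a random string of uppercase letters and digits (the actual implementation is not shown in this example):

import random
import string

def get_random_state(length=32):
    # Token echoed back by the OAuth provider to guard against CSRF.
    return ''.join(random.choice(string.ascii_uppercase + string.digits)
                   for _ in range(length))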
Example #3

# Assumed context: this snippet uses matplotlib.pyplot as plt, the sklearn
# digits dataset, and utils as uls, as imported in Example #7 below.
# Plot a 5x5 grid of sample digits with their labels.
n_images = 25
plt.figure(figsize=(10, 10))
for i in range(n_images):
    plt.subplot(5, 5, i + 1)
    plt.xticks([])
    plt.yticks([])
    plt.grid(False)
    plt.imshow(digits.images[i], cmap=plt.cm.binary)
    plt.xlabel("Value: %d" % digits.target_names[digits.target[i]],
               fontsize=12)
plt.suptitle('Example of the training data', fontsize=30)
plt.show()

# setup random state
seed = 687
random_state = uls.get_random_state(seed)

# split data
X_train, X_test, y_train, y_test = train_test_split(flat_images,
                                                    digits.target,
                                                    test_size=0.33,
                                                    random_state=random_state)

# ++++++++++++++++++++++++++
# THE ANN
# restrictions:
# - 2 hidden layers with sigmoid activation
# - softmax activation at the output layer
# - 20% for validation
# ++++++++++++++++++++++++++
# the ANN's ingredients (see the sketch below)
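
The restrictions above pin down the architecture. A minimal numpy sketch of such a forward pass, with illustrative layer names; this is an assumption, not the course's ANN class:

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def softmax(z):
    e = np.exp(z - z.max(axis=1, keepdims=True))
    return e / e.sum(axis=1, keepdims=True)

def forward(X, W1, b1, W2, b2, W3, b3):
    # Two sigmoid hidden layers followed by a softmax output layer,
    # matching the restrictions listed above.
    h1 = sigmoid(X @ W1 + b1)
    h2 = sigmoid(h1 @ W2 + b2)
    return softmax(h2 @ W3 + b3)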
Example #4
from utils import get_random_state
from utils import compare
# Assumed: DecisionTreeClassifier is sklearn's; get_dataset and
# plot_boundary are project-local helpers not shown here.
from sklearn.tree import DecisionTreeClassifier

if __name__ == "__main__":

    # (Question 1) dt.py: Decision tree
    SAMPLE_NUMBER = 2000
    TRAIN_SET_SAMPLE_NUM = 150

    X, y = get_dataset(SAMPLE_NUMBER)

    X_train, y_train = X[:TRAIN_SET_SAMPLE_NUM], y[:TRAIN_SET_SAMPLE_NUM]
    X_test, y_test = X[TRAIN_SET_SAMPLE_NUM:], y[TRAIN_SET_SAMPLE_NUM:]

    # 1.
    decisionTreeClassifier = DecisionTreeClassifier(random_state=get_random_state())
    decisionTreeClassifier.fit(X_train, y_train)
    y_dtc = decisionTreeClassifier.predict(X_test)

    # Plot
    plot_boundary("1-1-Ground-Truth", decisionTreeClassifier, X_test, y_test, title="Ground Truth data")
    plot_boundary("1-1-Prediction", decisionTreeClassifier, X_test, y_dtc, title="Prediction data")

    # 2.
    max_depths = list(range(1, 20))
    training_scores = []
    for max_depth in max_depths:
        decisionTreeClassifier = DecisionTreeClassifier(random_state=get_random_state(), max_depth=max_depth)
        decisionTreeClassifier.fit(X_train, y_train)
        y_dtc = decisionTreeClassifier.predict(X_test)
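
The loop is cut off in the original example. A hedged sketch of how it might be finished, scoring each depth on the test set (accuracy_score and the plot are my assumptions, reusing the snippet's variables):

from sklearn.metrics import accuracy_score
import matplotlib.pyplot as plt

test_scores = []
for max_depth in max_depths:
    clf = DecisionTreeClassifier(random_state=get_random_state(), max_depth=max_depth)
    clf.fit(X_train, y_train)
    test_scores.append(accuracy_score(y_test, clf.predict(X_test)))

# Visualize under- and overfitting as depth grows.
plt.plot(max_depths, test_scores)
plt.xlabel("max_depth")
plt.ylabel("test accuracy")
plt.show()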
Example #5
# -*- coding: utf-8 -*-
"""
Created on Fri Nov  6 14:07:50 2015

"""

import numpy as np
from matplotlib import pyplot as plt
from utils import get_random_state
from sklearn.linear_model import LinearRegression, Ridge
from sklearn.neighbors import KNeighborsRegressor


random_state = get_random_state()


def make_data(x, n_samples=1, noise=1):
    """Draw noisy samples of the target function at a given input.

    Input:  - x: the input value
            - n_samples: number of samples drawn at this input
            - noise: scale of the additive noise
    Output: - y: the sampled data
    """
    y = np.zeros(n_samples)

    for i in range(n_samples):
        # The loop body is truncated in the original snippet; as an assumed
        # completion, evaluate a hypothetical target and add Gaussian noise.
        y[i] = np.sin(x) + noise * random_state.normal()

    return y
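
LinearRegression, KNeighborsRegressor, and Ridge are imported above but unused in the visible part of the snippet. A hedged sketch of how they might be compared on data from make_data (the grid and hyperparameters are illustrative assumptions):

X = np.linspace(-3, 3, 50)
for model in (LinearRegression(), KNeighborsRegressor(n_neighbors=5), Ridge(alpha=1.0)):
    # One noisy sample per input point, then fit and plot the prediction.
    y = np.array([make_data(x, n_samples=1, noise=1)[0] for x in X])
    model.fit(X.reshape(-1, 1), y)
    plt.plot(X, model.predict(X.reshape(-1, 1)), label=type(model).__name__)
plt.legend()
plt.show()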
Example #6

# Assumed imports for this snippet; uls (utils), init, flight_Constraints,
# ThreeDSpace, and GeneticAlgorithm are project-local modules.
import logging
from time import gmtime, strftime
import arcpy
from arcpy import env
import utils as uls

def main():

    idw = arcpy.env.workspace + "\\Idw_Projected_30"
    noise_map = arcpy.env.workspace + "\\Transportation_Noise"
    line_for_initialization = arcpy.env.workspace + "\\example_route"
    geofences_restricted_airspace = arcpy.env.workspace + "\\Restricted_Airspace"
    geofence_point_boundary = arcpy.env.workspace + "\\Restricted_Airspace_Point_Boundary"
    output_new_line = r'threedline'
    # set up flight constraints
    # legal constraint parameters
    maximum_speed_legal = 27.7777777778  # in m/s (100 km/h)
    # air-taxi-specific parameters

    # Lilium Jet flight characteristics: aircraft = "Lilium"; for EHang: aircraft = "EHANG"
    aircraft = "Lilium"
    (type_aircraft, weight_aircraft, wing_area, CD_from_drag_polar,
     maximum_speed_air_taxi, acceleration_speed, acceleration_energy,
     deceleration_speed, deceleration_energy, minimal_cruise_energy,
     take_off_and_landing_energy, hover_energy, noise_pressure_acceleration,
     noise_pressure_deceleration, noise_at_cruise,
     noise_at_hover) = init.aircraft_specs(aircraft)

    # flight comfort constraint
    maximum_angular_speed = 1  # in radians/second
    # environment-dependent settings
    air_density = 1.225  # in kg/m³, standard air density at sea level
    speed_of_sound = 343  # in m/s, at 20 degrees Celsius
    gravity = 9.81  # in m/s²

    flight_constraints = flight_Constraints(
        type_aircraft, weight_aircraft, wing_area, CD_from_drag_polar,
        maximum_speed_legal, maximum_speed_air_taxi, acceleration_speed,
        acceleration_energy, deceleration_speed, deceleration_energy,
        minimal_cruise_energy, take_off_and_landing_energy, hover_energy,
        noise_pressure_acceleration, noise_pressure_deceleration,
        noise_at_cruise, noise_at_hover, maximum_angular_speed, air_density,
        speed_of_sound, gravity)

    # uncomment to delete feature classes from previous runs
    #uls.delete_old_objects_from_gdb("threed")

    # setup problem
    hypercube = [(-5, 5), (-5, 5)]
    rs = uls.get_random_state(1)
    problem_instance = ThreeDSpace(
        search_space=hypercube,
        fitness_function=uls.multi_objective_NSGA_fitness_evaluation(),
        IDW=idw,
        noisemap=noise_map,
        x_y_limits=900,
        z_sigma=5,
        work_space=env.workspace,
        random_state=rs,
        init_network=line_for_initialization,
        sample_point_distance="400 Meters",
        restricted_airspace=geofences_restricted_airspace,
        flight_constraints=flight_constraints,
        geofence_point_boundary=geofence_point_boundary)
    # evaluate least-cost path
    # from solutions import solution
    # import utils
    #
    # linefc = "least_cost_path_3d"
    # pointfc = "least_cost_path_3d_p"
    # zpointfc = "least_cost_path_3d_p_z"
    # #utils.lineToPoints(linefc, pointfc, 10)
    # #utils.extractValuesRaster(pointfc, idw, zpointfc)
    #
    # repr = utils.point3d_fc_to_np_array(zpointfc, additional_fields = None)
    #
    # least_cost_path_solution = solution.Solution(repr)
    # least_cost_path_solution.PointFCName = pointfc
    # least_cost_path_solution.LineFCName = linefc
    # problem_instance.evaluate(least_cost_path_solution)

    # setup Genetic Algorithm
    p_c = 0.9
    p_m = 0.5
    n_iterations = 25
    population_size = 16
    n_crossover_points = 4
    selection_pressure = 0.4
    # mutation parameters
    percentage_disturbed_chromosomes = 0.2
    max_disturbance_distance = 200
    percentage_inserted_and_deleted_chromosomes = 0.25
    mutation_group_size = 3

    for seed in range(1):
        # setup random state
        random_state = uls.get_random_state(seed)
        # execute Genetic Algorithm
        ga1 = GeneticAlgorithm(
            problem_instance=problem_instance,
            random_state=random_state,
            population_size=population_size,
            selection=uls.nsga_parametrized_tournament_selection(
                selection_pressure),
            crossover=uls.n_point_crossover(n_crossover_points),
            p_c=p_c,
            mutation=uls.parametrized_point_mutation(
                percentage_disturbed_chromosomes=
                percentage_disturbed_chromosomes,
                max_disturbance_distance=max_disturbance_distance,
                percentage_inserted_and_deleted_chromosomes=
                percentage_inserted_and_deleted_chromosomes,
                group_size=mutation_group_size),
            p_m=p_m,
            aimed_point_amount_factor=2)
        ga1.initialize()
        # set up logging
        t = strftime("%Y-%m-%d_%H_%M_%S", gmtime())
        lgr = logging.getLogger(t)
        lgr.setLevel(logging.DEBUG)  # log all escalated at and above DEBUG
        # add a file handler
        fh = logging.FileHandler(r'baseline_routing/log_files/' + t +
                                 '_new.csv')
        fh.setLevel(logging.DEBUG)
        frmt = logging.Formatter(
            '%(asctime)s,%(name)s,%(levelname)s,%(message)s')
        fh.setFormatter(frmt)
        lgr.addHandler(fh)
        log_event = [
            "rs", seed, __name__, "population_size", population_size,
            "x_y_limits", problem_instance.x_y_limits, "z_sigma",
            problem_instance.z_sigma, "sample_point_distance",
            problem_instance.sample_point_distance, "selection_pressure",
            selection_pressure, "pc", p_c, "pm", p_m, "aimed_point_factor",
            ga1.aimed_point_amount_factor, "n_crossover", n_crossover_points,
            "mutation_max_disturbance_distance", max_disturbance_distance,
            "mutation_group_size", mutation_group_size,
            "percentage_inserted_and_deleted",
            percentage_inserted_and_deleted_chromosomes,
            "percentage_disturbed", percentage_disturbed_chromosomes
        ]
        lgr.info(','.join(list(map(str, log_event))))
        ga1.search(n_iterations=n_iterations,
                   lgr=lgr,
                   report=True,
                   log=True,
                   dplot=None)
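
The snippet defines main() but never calls it; following the guard used in Example #4, it would be run with:

if __name__ == "__main__":
    main()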
Example #7
import os
import datetime
import logging
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
import utils as uls
from problems.ANNOP import ANNOP
from ANN.ANN import ANN, softmax, sigmoid
from algorithms.genetic_algorithm_Elitism import GeneticAlgorithm
# setup logger
now = datetime.datetime.now()
file_path = os.path.join(
    os.path.dirname(os.path.realpath(__file__)),
    "LogFiles/%s-%d_%d_log.csv" % (now.date(), now.hour, now.minute))
logging.basicConfig(filename=file_path,
                    level=logging.DEBUG,
                    format='%(name)s,%(message)s')
#++++++++++++++++++++++++++
# THE DATA
# restrictions:
# - MNIST digits (8*8)
# - 33% for testing
# - flattened input images
#++++++++++++++++++++++++++
# import data
digits = datasets.load_digits()
flat_images = np.array([image.flatten() for image in digits.images])
print(flat_images.shape)
print(digits.target_names)
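
The restriction block above calls for a 33% test split of the flattened images; a minimal sketch of that step, following the train_test_split pattern in Example #3 (the seed value here is a hypothetical choice):

seed = 0  # hypothetical seed
random_state = uls.get_random_state(seed)
X_train, X_test, y_train, y_test = train_test_split(
    flat_images, digits.target, test_size=0.33, random_state=random_state)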

Example #8

# Assumed imports for this snippet; read_dictionary, read_tweets_labelled,
# and words_from_tweets are project-local helpers not shown here.
import numpy as np
from sklearn import svm
from sklearn.feature_extraction.text import CountVectorizer
from utils import get_random_state

######################### Script

DICTIONARY_FILE = '../WataProject/sentiment-dict.txt'
TWEETS_LABELLED_FILE = '../WataProject/training_data_file.csv'

N_FOLDS = 10

words_from_dict = read_dictionary(DICTIONARY_FILE)
tweets, classes = read_tweets_labelled(TWEETS_LABELLED_FILE)

vectorizer = CountVectorizer(min_df=1, vocabulary=set(words_from_dict).union(words_from_tweets(tweets)), lowercase=True)

# Select classifier
clf = svm.LinearSVC(random_state=get_random_state())
parameters = {'C': np.arange(0.01, 1.05, 0.05), 'loss': ['hinge', 'squared_hinge']}

# clf = tree.DecisionTreeClassifier(random_state=get_random_state())
# parameters = {'max_depth': np.arange(1, 2000, 50)}

# clf = svm.SVC(random_state=get_random_state())
# parameters = {'C': np.arange(0.1, 1.1, 0.1), 'kernel': ['poly', 'rbf'], 'gamma': np.arange(0.00005, 0.00006, 0.000001)}

# clf = ensemble.ExtraTreesClassifier(random_state=get_random_state())
# parameters = {'criterion': ['gini']}


new_tweets = ["New episode is awesome #GoTSeason6", "Best show ever #GoTSeason6 #ValarMorghulis", "Not that bad #GoTSeason6", "So disappointed, so slow #GoTSeason6", "Stopping watching the show, nothing like the books #GoTSeason6"]

############ Model evaluation
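
The example ends at its "Model evaluation" header. A hedged sketch of how the parameters grid and N_FOLDS defined above might be used; GridSearchCV is my assumption, not code from the original:

from sklearn.model_selection import GridSearchCV

# Vectorize the labelled tweets and grid-search the selected classifier
# with N_FOLDS-fold cross-validation.
X = vectorizer.fit_transform(tweets)
search = GridSearchCV(clf, parameters, cv=N_FOLDS)
search.fit(X, classes)
print(search.best_params_, search.best_score_)

# Classify the unseen tweets with the best estimator found.
print(search.best_estimator_.predict(vectorizer.transform(new_tweets)))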