Example #1
File: op2.py Project: joshcc3/PyOP2
def init(**kwargs):
    """Initialise OP2: select the backend and potentially other configuration
    options.

    :arg backend: Set the hardware-specific backend. Current choices
     are ``"sequential"``, ``"openmp"``, ``"opencl"`` and ``"cuda"``.
    :arg debug: The level of debugging output.
    :arg comm: The MPI communicator to use for parallel communication,
               defaults to `MPI_COMM_WORLD`

    .. note::
       Calling ``init`` again with a different backend raises an exception.
       Changing the backend is not possible. Calling ``init`` again with the
       same backend or not specifying a backend will update the configuration.
       Calling ``init`` after ``exit`` has been called is an error and will
       raise an exception.
    """
    backend = backends.get_backend()
    if backend == 'pyop2.finalised':
        raise RuntimeError("Calling init() after exit() is illegal.")
    if 'backend' in kwargs and backend not in ('pyop2.void', 'pyop2.' + kwargs['backend']):
        raise RuntimeError("Changing the backend is not possible once set.")
    cfg.configure(**kwargs)
    set_log_level(cfg['log_level'])
    if backend == 'pyop2.void':
        backends.set_backend(cfg.backend)
        backends._BackendSelector._backend._setup()
        if 'comm' in kwargs:
            backends._BackendSelector._backend.MPI.comm = kwargs['comm']
        global MPI
        MPI = backends._BackendSelector._backend.MPI  # noqa: backend override
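A minimal usage sketch for the init call above (hedged: it assumes PyOP2 is installed and importable as pyop2.op2; the argument values are illustrative only):

from pyop2 import op2

op2.init(backend="sequential", debug=0)   # first call fixes the backend
op2.init(debug=1)                         # later calls only update the configuration
# op2.init(backend="cuda")                # would raise RuntimeError: the backend cannot change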
Example #2
def save_reports(filenames):
    import csv
    
    import persistence as p
    from configuration import configure
    
    #open DB connection
    config = configure()
    conn = p.open_connection(config)
    
    cursor = conn.cursor()
        
    #iterate through the reports
    for f in filenames:
        print(f)
        reader = csv.DictReader(open(f), delimiter='\t')
        for row in reader:
            print(row)

            myrow = {'sku': row['SKU'],
                     'end_date': row['End Date'],
                     'customer_currency': row['Customer Currency'],
                     'begin_date': row['Begin Date'],
                     'developer_proceeds': row['Developer Proceeds'],
                     'promo_code': row['Promo Code'],
                     'title': row['Title'],
                     'parent_identifier': row['Parent Identifier'],
                     'customer_price': row['Customer Price'],
                     'period': row['Period'],
                     'currency_of_proceeds': row['Currency of Proceeds'],
                     'type_identifier': row['Product Type Identifier'],
                     'provider_country': row['Provider Country'],
                     'country_code': row['Country Code'],
                     'apple_identifier': row['Apple Identifier'],
                     'version': row['Version'],
                     'provider': row['Provider'],
                     'units': row['Units'],
                     'subscription': row['Subscription'],
                     'developer': row['Developer'],
                     'publisher_name': get_publisher_name(row['Title']),
                     'publisher_id': get_publisher_id(row['Title'])}
            
            
            p.insert(cursor, 'itunes', myrow.keys(), myrow)
            conn.commit()
            
    cursor.close()
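A hedged usage sketch for save_reports; the reports/ directory and the .txt extension are assumptions, the only requirement visible above being tab-separated files with the iTunes report column headers:

import glob

save_reports(glob.glob("reports/*.txt"))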
Example #3
import numpy as np
from itertools import cycle
from sklearn.metrics import roc_curve, auc
from sklearn.preprocessing import label_binarize
from scipy import interp
from sklearn.model_selection import train_test_split
from configuration import configure
from matplotlib import pyplot as plt
from evalMetrics import evalMetric
from sklearn.multiclass import OneVsRestClassifier

# importing the dataset and pre-processing it

datasetName = 'winequality-white.csv'

cnfg = configure(datasetName)

dataset = cnfg.binaryClassConversion('quality', 3)

X = dataset[[
    'fixed acidity', 'volatile acidity', 'citric acid', 'residual sugar',
    'chlorides', 'free sulfur dioxide', 'total sulfur dioxide', 'density',
    'pH', 'sulphates', 'alcohol'
]]
y = dataset['taste']
y = label_binarize(y, classes=[-1, 0, 1])
n_classes = y.shape[1]
X_train, X_test, y_train, y_test = train_test_split(X,
                                                    y,
                                                    test_size=0.3,
                                                    random_state=0)
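The imports above (roc_curve, auc, cycle, OneVsRestClassifier) suggest that a per-class ROC analysis follows. A hedged continuation sketch; the LogisticRegression base estimator is an assumption, not taken from the source:

from sklearn.linear_model import LogisticRegression

# Fit one binary classifier per class and score the held-out split
clf = OneVsRestClassifier(LogisticRegression(max_iter=1000))
y_score = clf.fit(X_train, y_train).decision_function(X_test)

# ROC curve and AUC for each binarized class
fpr, tpr, roc_auc = {}, {}, {}
for i in range(n_classes):
    fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_score[:, i])
    roc_auc[i] = auc(fpr[i], tpr[i])

colors = cycle(['aqua', 'darkorange', 'cornflowerblue'])
for i, color in zip(range(n_classes), colors):
    plt.plot(fpr[i], tpr[i], color=color,
             label='class %d (AUC = %0.2f)' % (i, roc_auc[i]))
plt.plot([0, 1], [0, 1], 'k--')
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.legend(loc='lower right')
plt.show()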
Example #4
def phase2_main():
    q0, qh, map_size, obstacles, num_obs = [], [], [], [], 0
    manual_input = int(
        input(
            "Would you like to use an input file? (Choose 0) or manual input mode? (Choose 1): "
        ))
    while manual_input != 0 and manual_input != 1:
        print("Invalid input,  please try again...")
        manual_input = int(
            input(
                "Would you like to use an input file? (Choose 0) or manual input mode? (Choose 1): "
            ))

    if not manual_input:
        filepath = input("Please enter your file name ('sample.txt'): ")
        map_sz, obstacles = readInputFile(filepath)
        q0 = [0, 0, 0]
        qh = [map_sz[0], map_sz[1], map_sz[2]]
        map_size = [q0[0], q0[1], q0[2], qh[0], qh[1], qh[2]]
        num_obs = len(obstacles)
    elif manual_input:
        map_size, obstacles, q0, qh = user_input()
        num_obs = len(obstacles)
    # takeoff_h = math.ceil(map_size[2]/4)

    algorithm = int(
        input(
            "Would you like to use an A-star algorithm? (Choose 0) or Rapidly Exploring Random Tree Star(aka RRT*) ? (Choose 1): "
        ))
    tStart = time.time()

    print("Map Size (x, y, z, w, l, h):")
    print(map_size)
    print("obstacles (x, y, z, w, l, h):")
    print(obstacles)
    print("Initial Position (q0):")
    print(q0)
    print("Final Position (qh)")
    print(qh)

    # Normalizing for the quadrotor size ------------------------------------------------------------------------------
    norm_quad = 0.029
    for i in range(len(map_size)):
        map_size[i] = math.ceil(map_size[i] /
                                norm_quad)  # normalize for robot size
    for i in range(len(obstacles)):
        for j in range(len(obstacles[i])):
            obstacles[i][j] = math.ceil(obstacles[i][j] /
                                        norm_quad)  # normalize for robot size
    qh[0], qh[1], qh[2] = math.ceil(qh[0] / norm_quad), math.ceil(
        qh[1] / norm_quad), math.ceil(qh[2] / norm_quad)

    takeoff = []
    takeoff.append([0, 0, 0])

    q0 = [q0[0], q0[1], q0[0] + int(map_size[5] / 5)]
    takeoff.append(q0)

    # -----------------------------------------------------------------------------------------------------------------
    if (algorithm == 0):
        # Solve for CSpace and find a feasible path -----------------------------------------------------------------------
        config_space, cTime = configure(map_size, obstacles)
        print("CONFIG DONE")
        print("Mapping A-Star...")
        unsamp_waypoints, astar_time = motion_planner(
            q0, qh, config_space)  # A* Algorithm
        # waypoints = motion(q0,  qh,  map_size,  obstacles)   # Probability Tree
        print("length")
        print(len(unsamp_waypoints))

        sample_rate = 10  # Sample waypoints to smooth path
        way_len = math.ceil(len(unsamp_waypoints) / sample_rate)
        waypoints = [[0, 0, 0] for i in range(way_len)]
        count = 0
        for i in range(len(unsamp_waypoints)):
            if i % sample_rate == 0:
                waypoints[count] = unsamp_waypoints[i]
                count += 1
        waypoints.append([qh[0], qh[1], qh[2]])

    else:
        print("Mapping RRT-Star...")
        rrt_time, cTime, waypoints = motion(q0, qh, map_size, obstacles)
        waypoints = waypoints.tolist()

    # -----------------------------------------------------------------------------------------------------------------

    poly = path3D(waypoints, norm_quad, False, False)
    tFinal = time.time()
    totalTime = tFinal - tStart
    land = []
    land.append(waypoints[-1])  # land copter at the end of the path
    land.append([waypoints[-1][0], waypoints[-1][1], 0])

    takeoff_points = path3D(takeoff, norm_quad, True, False)
    landed = path3D(land, norm_quad, False, True)
    poly_takeoff = takeoff_points.copy()

    takeoff_points.extend(waypoints)
    waypoints = takeoff_points.copy()
    waypoints.extend(landed)

    poly_takeoff.extend(poly)
    poly = poly_takeoff.copy()
    poly.extend(landed)

    # Times for each portion of code
    if (algorithm == 0):
        print("C_space Time: %.4f" % cTime)
        print("Algorithm Time: %.4f" % astar_time)

    else:
        print("C_space Time: %.4f" % cTime)
        print("Algorithm Time: %.4f" % rrt_time)
    print("Program Time: %.4f" % totalTime)

    # BEGIN PLOTTING --------------------------------------------------------------------------------------------------

    fig = plt.figure()
    ax = fig.add_subplot(projection='3d')  # fig.gca(projection=...) is no longer supported in recent Matplotlib
    mx = [map_size[0], map_size[0] + map_size[3]]
    my = [map_size[1], map_size[1] + map_size[4]]
    mz = [map_size[2], map_size[2] + map_size[5]]
    drawRect(ax, mx, my, mz, 'cyan', 'b', 0.08)

    # Plotting Obstacles
    for i in range(num_obs):
        ob_x = [obstacles[i][0], obstacles[i][3] + obstacles[i][0]]
        ob_y = [obstacles[i][1], obstacles[i][4] + obstacles[i][1]]
        ob_z = [obstacles[i][2], obstacles[i][5] + obstacles[i][2]]
        drawRect(ax, ob_x, ob_y, ob_z, 'maroon', 'r', 0.5)

    # Plotting Chosen Path
    wx = np.zeros(len(waypoints))
    wy = np.zeros(len(waypoints))
    wz = np.zeros(len(waypoints))
    cx = np.zeros(len(poly))
    cy = np.zeros(len(poly))
    cz = np.zeros(len(poly))
    for i in range(len(waypoints)):
        wx[i] = waypoints[i][0]
        wy[i] = waypoints[i][1]
        wz[i] = waypoints[i][2]
        # ax.scatter3D(wx[i], wy[i], wz[i], c='b')
    ax.plot3D(wx, wy, wz, 'black', label='waypoints ')
    for i in range(len(poly)):
        cx[i] = poly[i][0]
        cy[i] = poly[i][1]
        cz[i] = poly[i][2]
    ax.plot3D(cx, cy, cz, 'green', label='quadrotor path')
    #ax.plot3D(dx, dy, dz, c='black',label='desired')

    ax.legend()

    ax.set_xlabel('X')
    ax.set_ylabel('Y')
    ax.set_zlabel('Z')
    # Show Final Plot
    plt.show()
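drawRect is not defined in this excerpt. A hypothetical sketch of a cuboid drawer with the same call signature (axes, x-range, y-range, z-range, face colour, edge colour, alpha); this is an assumption about what the helper does, not the project's implementation:

import numpy as np
from mpl_toolkits.mplot3d.art3d import Poly3DCollection

def drawRect(ax, x, y, z, facecolor, edgecolor, alpha):
    # Eight corners of the axis-aligned box spanned by the two x, y and z values.
    corners = np.array([[xi, yi, zi] for xi in x for yi in y for zi in z])
    # Six faces, each listed as four corner indices in cyclic order.
    faces_idx = [(0, 1, 3, 2), (4, 5, 7, 6), (0, 1, 5, 4),
                 (2, 3, 7, 6), (0, 2, 6, 4), (1, 3, 7, 5)]
    faces = [[corners[i] for i in idx] for idx in faces_idx]
    box = Poly3DCollection(faces, facecolor=facecolor,
                           edgecolor=edgecolor, alpha=alpha)
    ax.add_collection3d(box)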
Example #5
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from pkl import load_training
from pkl import load_testing
from pkl import write_predictions
import numpy as np

from CNN import cnn_model
from configuration import configure

config = configure()

#_________________________________________MAIN_________________________________________
def main(unused_argv):

	if config.runCNN["make_test_predictions"]:

		x_test = load_testing("../../data")
		models = [
		{
			"name": "final","checkpoints": ["model.ckpt-10201","model.ckpt-10301","model.ckpt-10401","model.ckpt-10500","model.ckpt-10501"]
		},
		{
			"name": "final2","checkpoints": ["model.ckpt-9601","model.ckpt-9701","model.ckpt-9801","model.ckpt-9901","model.ckpt-10000"]
		},
		{
			"name": "final3","checkpoints": ["model.ckpt-9601","model.ckpt-9701","model.ckpt-9801","model.ckpt-9901","model.ckpt-10000"]
		},
		{
Example #6
def main(unused_argv):

    x_train, y_train = load_training("../../data", True)
    config = configure()

    split = config.runCNN["split"]
    size = len(x_train)
    training_size = int(size - size * split)

    x_train, y_train = randomize(x_train, y_train)

    x_valid = x_train[training_size:size]
    y_valid = y_train[training_size:size]
    x_train = x_train[0:training_size]
    y_train = y_train[0:training_size]

    mnist_classifier = tf.estimator.Estimator(
        model_fn=cnn_model,
        model_dir=config.runCNN["model_dir"],
        config=tf.contrib.learn.RunConfig(save_checkpoints_steps=1000))

    if config.runCNN["training"]:

        epoch = config.runCNN["epochs"]

        for i in range(epoch):

            # Build the training input function for this epoch (the Estimator was created above)
            train_input_fn = tf.estimator.inputs.numpy_input_fn(
                x={"x": x_train},
                y=y_train,
                batch_size=config.runCNN["batch_size"],
                num_epochs=None,
                shuffle=True)
            mnist_classifier.train(
                input_fn=train_input_fn,
                steps=config.runCNN["steps"],
            )
            valid_input_fn = tf.estimator.inputs.numpy_input_fn(
                x={"x": x_valid}, y=y_valid, num_epochs=1, shuffle=False)

            mnist_classifier.evaluate(input_fn=valid_input_fn)

    elif config.runCNN["evaluate"]:
        valid_input_fn = tf.estimator.inputs.numpy_input_fn(x={"x": x_valid},
                                                            y=y_valid,
                                                            num_epochs=1,
                                                            shuffle=False)

        mnist_classifier.evaluate(input_fn=valid_input_fn)

    if config.runCNN["make_test_predictions"]:
        # Load test data
        x_test = load_testing("../../data")

        # Make Predictions
        predict_input_fn = tf.estimator.inputs.numpy_input_fn(x={"x": x_test},
                                                              num_epochs=1,
                                                              shuffle=False)
        predictions = list(mnist_classifier.predict(input_fn=predict_input_fn))
        predicted_classes = [p["classes"] for p in predictions]

        # Print them to a file
        write_predictions("../../output", predicted_classes)
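The randomize helper used above is not shown in the excerpt. A hypothetical sketch, assuming x and y are NumPy arrays of equal length (as the numpy_input_fn calls require):

import numpy as np

def randomize(x, y):
    # Shuffle features and labels in unison so the validation split is random.
    perm = np.random.permutation(len(x))
    return x[perm], y[perm]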
Example #7
import matplotlib.pyplot as plt
from configuration import configure
import numpy as np

cfg = configure("winequality-white.csv")

ds = cfg.getdataset()

#box plot
ds.plot(kind='box',
        subplots=False,
        layout=(5, 3),
        legend=False,
        figsize=(30, 15),
        table=ds.describe())
ax1 = plt.axes()
x_axis = ax1.axes.get_xaxis()
x_axis.set_visible(False)
x_label = x_axis.get_label()
x_label.set_visible(False)
plt.savefig("Charts/" + "BoxwithTable" + "_plot_scores.jpg")  # save before show, or the saved file is blank
plt.show()

#correlation matrix
ds_corr = ds.corr()
fig = plt.figure(figsize=(15, 15))
ax = fig.add_subplot(111)
cax = ax.matshow(ds_corr)
fig.colorbar(cax)
ticks = np.arange(0, 12, 1)
ax.set_xticks(ticks)
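# Hedged continuation sketch: the excerpt stops here. Labelling the ticks with the
# dataset's column names is an assumption; the output filename is illustrative.
ax.set_yticks(ticks)
ax.set_xticklabels(ds.columns, rotation=90)
ax.set_yticklabels(ds.columns)
plt.savefig("Charts/correlation_matrix.jpg")
plt.show()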
Example #8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
import tensorflow as tf
from configuration import configure

config = configure().CNN


def cnn_model(features, labels, mode):

    # Input Layer
    input_layer = tf.reshape(features["x"], [-1, 64, 64, 1])

    numConvLayers = len(config["conv_layers"])

    conv = []
    pool = []
    normal = []

    for i in range(numConvLayers):
        inp = None
        layer = config["conv_layers"][i]

        if i == 0:
            inp = input_layer
        else:
            inp = normal[i - 1]
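        # Hedged continuation sketch: the excerpt stops here. Each iteration plausibly
        # appends a convolution, a pooling and a batch-norm layer; the "filters" and
        # "kernel_size" keys of `layer` are assumptions, not taken from the source.
        conv.append(tf.layers.conv2d(
            inputs=inp,
            filters=layer["filters"],
            kernel_size=layer["kernel_size"],
            padding="same",
            activation=tf.nn.relu))
        pool.append(tf.layers.max_pooling2d(
            inputs=conv[i], pool_size=2, strides=2))
        normal.append(tf.layers.batch_normalization(pool[i]))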
Example #9
import pandas as pd
from torch.utils.data import DataLoader
from data_loading import create_datasets, SeriesDataset
from configuration import configure
from TrainESRNN import TrainESRNN
from ESRNN_model import ESRNN_model
import time
from dataset import DatasetTS, data_generator

from IPython import embed

print('loading config')
config = configure('Hourly')

data = pd.read_csv('Dataset/actuals_08_16_to_08_18.csv')
# extract the label column and the corresponding time-series values
labels = data.iloc[:, 1].values
series = data.iloc[:, 2].values

ind = 0
curr_label = labels[0]
ts = []
for i, label in enumerate(labels):
    if label != curr_label:
        ts.append(series[ind:i])
        ind = i
        curr_label = label
ts.append(series[ind:])  # keep the final series after the last label change

data = []
for i, series in enumerate(ts):