Example #1
def index(request):
    if request.method == 'POST':
        username = request.POST['username']
        select = request.POST['select']
        if select == 'starcraft':
            id = request.POST['id']
            data = main.getData(username, select, id=id)
            return profile_view(request, username, data, select)
        elif select == 'wow':
            realm = request.POST['realm']
            data = main.getData(username, select, realm=realm)
            return profile_view(request, username, data, select)
        elif select in ('steam', 'crysis'):
            data = main.getData(username, select)
            return profile_view(request, username, data, select)
    return render_to_response("ggapi/index.html",
                              context_instance=RequestContext(request))
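main.getData is not shown in this listing; here is a minimal stub of the dispatcher signature the view above relies on. The returned profile dict and its keys are assumptions, not the project's real fetch logic.

def getData(username, select, id=None, realm=None):
    # Hypothetical stand-in for main.getData: the view only requires that the
    # extra keyword arguments match the chosen platform.
    profile = {'username': username, 'platform': select}
    if select == 'starcraft':
        profile['character_id'] = id      # assumed key name
    elif select == 'wow':
        profile['realm'] = realm
    # 'steam' and 'crysis' need no extra parameters in the view above
    return profile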
def data():
    if request.method == "POST":
        global abc
        global DP, Gr, CW
        result = request.form
        abc = result
        Distress_time = result['Distress_time']
        Distress_position = result['Distress_position']
        if Distress_position and Distress_time:
            # both fields supplied: getData returns a datum point only
            Datum_point, Grid, Cell_Width = main.getData(result)
        else:
            # one of the fields is missing: getData also returns a datum line
            Datum_point, Datum_line, Grid, Cell_Width = main.getData(result)
        DP = Datum_point
        Gr = Grid
        CW = Cell_Width
        print(Datum_point, Grid, Cell_Width)
        return dash(result)
    if request.method == "GET":
        return render_template("dataCollect.html")
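The view above hands its results to dash() through module-level globals (abc, DP, Gr, CW), which is fragile under concurrent requests. Below is a sketch of the same handoff through flask.session instead, assuming the values are JSON-serialisable; main.getData and dash are the same names used above, everything else is illustrative.

from flask import Flask, request, session, render_template

app = Flask(__name__)
app.secret_key = 'change-me'  # required for session storage

@app.route('/data', methods=['GET', 'POST'])
def data_with_session():
    if request.method == 'POST':
        result = request.form
        # assumed: both fields supplied, so getData returns three values
        datum_point, grid, cell_width = main.getData(result)
        session['datum_point'] = datum_point
        session['grid'] = grid
        session['cell_width'] = cell_width
        return dash(result)
    return render_template("dataCollect.html")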
def test_pad_list_of_lists__same_size(self):
    from main import get_rdf_data as getData
    from main import pad_list_of_lists as testMethod

    data = getData('rdfData/gfo-1.0.json')
    padded = testMethod(data['supports'])

    # every inner list within each padded group should have the same length
    for group in padded:
        for j in range(len(group) - 1):
            self.assertEqual(len(group[j]), len(group[j + 1]))
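pad_list_of_lists itself is not shown; the test only requires that, within each padded group, all inner lists end up the same length. A sketch of an implementation that would satisfy it; the fill value and the per-group padding behaviour are assumptions.

def pad_list_of_lists(groups, fill=None):
    # Hypothetical implementation: right-pad every inner list of each group to
    # the length of the longest list in that group.
    padded = []
    for group in groups:
        longest = max((len(inner) for inner in group), default=0)
        padded.append([inner + [fill] * (longest - len(inner)) for inner in group])
    return padded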
Example #4
def initialTrain():
    state = main.getData()
    main.startGame()
    # Take random actions until the episode ends
    while not main.getDone():

        # Random action
        action = random.choice([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0],
                                [0, 0, 0, 1]])

        # Get the rewards
        reward = main.getReward(action)
        # Check whether the episode is finished
        done = main.getDone()

        # If we're dead
        if done:
            # We finished the episode
            next_state = np.zeros([5])

            # Add experience to memory
            memory.add((state, action, reward, next_state, done))

            # First we need a state
            state = main.getData()

        else:
            # Get the next state
            next_state = main.getData()

            # Add experience to memory
            memory.add((state, action, reward, next_state, done))

            # Our state is now the next_state
            state = next_state
    main.initialize()

    train()
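The memory object used by initialTrain() above (and by train() in Example #12) is not defined anywhere in this listing; here is a minimal deque-based replay buffer with the same add/sample interface, as an assumption of what the project uses.

import random
from collections import deque

class Memory:
    # Hypothetical replay buffer: stores (state, action, reward, next_state, done)
    # tuples and returns a random mini-batch on demand. The capacity is an assumption.
    def __init__(self, max_size=100000):
        self.buffer = deque(maxlen=max_size)

    def add(self, experience):
        self.buffer.append(experience)

    def sample(self, batch_size):
        # sample without replacement, capped at the current buffer size
        return random.sample(self.buffer, min(batch_size, len(self.buffer)))

memory = Memory()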
def getGaitEvents(self):
    # If negative minima is found,
    # then first positive maxima is HS

    # negative minima
    if (not self.foundNegMinima and self.previousShankAngVelYDifference < 0
            and self.shankAngVelYDifference > 0
            and self.previousShankAngVelY < 0):
        self.foundNegMinima = True

    # positive maxima for shank ang vel X
    if (not self.foundPosMaxima and self.previousShankAngVelXDifference > 0
            and self.shankAngVelXDifference < 0
            and self.previousShankAngVelX > 0):
        self.foundPosMaxima = True

    # positive maxima for shank ang vel Y: record a heel strike (HS)
    if (self.foundNegMinima and self.foundPosMaxima
            and self.previousShankAngVelYDifference > 0
            and self.shankAngVelYDifference < 0
            and self.previousShankAngVelY > 0):

        self.gaitData['HS']['Row'].append(self.row)
        self.gaitData['HS']['Shank Accel Z'].append(self.shankAccelZ)
        self.foundNegMinima = False
        self.foundPosMaxima = False

        HSstartRow = self.row

        while (self.row - HSstartRow < main.convertMilliSecToRow(
                self.frequency, 300) and self.row < self.lastRow):

            self.row += 1
            main.getData(self)
            main.setPreviousData(self)

    else:

        main.setPreviousData(self)
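main.convertMilliSecToRow is not shown; its use above (skipping 300 ms of samples) suggests a simple duration-to-row-count conversion at the sensor's sampling frequency. A sketch under that assumption; at 100 Hz, convertMilliSecToRow(100, 300) would be 30 rows.

def convertMilliSecToRow(frequency, milliseconds):
    # Hypothetical helper: number of data rows spanned by `milliseconds` at a
    # sampling frequency given in Hz.
    return int(frequency * milliseconds / 1000)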
Example #6
def map():
    formData = {
        "origin": None,
        "destination": None,
        "soc": None,
    }
    returnData = {}
    if request.method == 'POST':
        # access the submitted form fields
        formData['origin'] = request.form.get('origin')
        formData['destination'] = request.form.get('destination')
        formData['soc'] = request.form.get('soc')
        returnData = getData(formData['origin'], formData['destination'],
                             formData['soc'])
        if (returnData['energyA'] and returnData['energy']
                and returnData['energyA'] < returnData['energy']):
            returnData['waypoints'] = 'MH SH 79, Pali T. Waredi, Maharashtra'
    return render_template('map.html',
                           active='map',
                           data=formData,
                           returnData=returnData)
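getData is the only undefined name in the route above; the view assumes it returns a dict with at least 'energy' and 'energyA' keys. A placeholder stub that makes those assumptions explicit; the meanings of the keys are guesses, not the project's documentation.

def getData(origin, destination, soc):
    # Hypothetical stub: the real function presumably plans a route and
    # estimates energy use; the keys below are the ones the view reads.
    return {
        'origin': origin,
        'destination': destination,
        'soc': soc,
        'energy': 0.0,    # assumed: energy estimate for the direct route
        'energyA': 0.0,   # assumed: energy estimate for an alternative route
    }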
def getGaitEventsWithThresholdAnd300MSPositive(self):

    # Once more than 5 HS events have been found, use the average of the first
    # five shank ang vel Y peaks as the threshold for the next positive maxima
    if (len(self.gaitData['HS']['Shank Ang Vel Y']) > 5):
        if (len(self.gaitData['HS']['Shank Ang Vel Y']) == 6):
            subArray = self.gaitData['HS']['Shank Ang Vel Y'][:5]
            self.averageMax = sum(subArray) / 5
            # print("ANG VEL Y VALUES = ", self.gaitData['HS']['Row'])
            # print("ANG VEL Y VALUES = ", self.gaitData['HS']['Shank Ang Vel Y'])
            # print("AVERAGE MAX = ", self.averageMax)

        # negative minima
        if (not self.foundNegMinima and self.previousShankAngVelYDifference < 0
                and self.shankAngVelYDifference > 0
                and self.previousShankAngVelY < 0):
            self.foundNegMinima = True

        # positive maxima above the averaged threshold: heel-strike candidate
        if (self.foundNegMinima and self.previousShankAngVelYDifference > 0
                and self.shankAngVelYDifference < 0
                and self.previousShankAngVelY > self.averageMax):

            self.foundNegMinima = False
            self.foundPosMaxima = False

            HSstartRow = self.row

            shankAngVelYPositive = True

            # check if shankAngVelY is positive for 300 ms
            while (shankAngVelYPositive
                   and self.row - HSstartRow < main.convertMilliSecToRow(
                       self.frequency, 300) and self.row < self.lastRow):

                self.row += 1
                main.getData(self)
                main.setPreviousData(self)

                if (self.shankAngVelY < 0):
                    shankAngVelYPositive = False

            # if shankAngVelY stayed positive for 300 ms, then add HS event
            if shankAngVelYPositive:

                self.gaitData['HS']['Row'].append(self.row)
                self.gaitData['HS']['Shank Ang Vel Y'].append(
                    self.previousShankAngVelY)

        else:

            main.setPreviousData(self)

    else:

        # negative minima
        if (not self.foundNegMinima and self.previousShankAngVelYDifference < 0
                and self.shankAngVelYDifference > 0
                and self.previousShankAngVelY < 0):
            self.foundNegMinima = True

        # positive maxima for shank ang vel X
        if (not self.foundPosMaxima and self.previousShankAngVelXDifference > 0
                and self.shankAngVelXDifference < 0
                and self.previousShankAngVelX > 0):
            self.foundPosMaxima = True

        # positive maxima for shank ang vel Y: record a heel strike (HS)
        if (self.foundNegMinima and self.foundPosMaxima
                and self.previousShankAngVelYDifference > 0
                and self.shankAngVelYDifference < 0
                and self.previousShankAngVelY > 0):

            self.gaitData['HS']['Row'].append(self.row)
            self.gaitData['HS']['Shank Ang Vel Y'].append(self.shankAngVelY)
            self.foundNegMinima = False
            self.foundPosMaxima = False

            HSstartRow = self.row

            while (self.row - HSstartRow < main.convertMilliSecToRow(
                    self.frequency, 300) and self.row < self.lastRow):

                self.row += 1
                main.getData(self)
                main.setPreviousData(self)

        else:

            main.setPreviousData(self)
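The crossing tests above depend on current, previous, and difference values that main.getData(self) and main.setPreviousData(self) are expected to maintain. A sketch of that bookkeeping for the Y axis; the field names mirror the snippet, while the sample storage and the X-axis handling are assumptions.

def getData(self):
    # Hypothetical: read the current row and refresh the sample-to-sample difference.
    sample = self.samples[self.row]                     # assumed storage
    self.shankAngVelY = sample['shankAngVelY']
    self.shankAngVelYDifference = self.shankAngVelY - self.previousShankAngVelY
    # ...analogous updates for shankAngVelX and shankAccelZ

def setPreviousData(self):
    # Hypothetical: shift the current values into the 'previous' slots.
    self.previousShankAngVelY = self.shankAngVelY
    self.previousShankAngVelYDifference = self.shankAngVelYDifference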
Example #8
def searchBestForest(params, client):
    c = client
    print(c)
    data = getDataForTraining(getData())
    data.to_csv('../ignore/dataPrepared.csv')
    data = dd.read_csv('../ignore/dataPrepared.csv').drop(columns='Unnamed: 0')
    X_train, X_test, y_train, y_test = train_test_split(
        data.drop(columns='price'), data.price, test_size=0.2)
    # materialise the dask collections before handing them to scikit-learn
    X_train, X_test, y_train, y_test = [
        ele.compute() for ele in [X_train, X_test, y_train, y_test]
    ]

    with joblib.parallel_backend('dask'):
        model = RandomForestRegressor(bootstrap=True,
                                      criterion='mse',
                                      max_depth=10,
                                      max_features='auto',
                                      max_leaf_nodes=None,
                                      min_impurity_decrease=0.0,
                                      min_impurity_split=None,
                                      min_samples_leaf=1,
                                      min_samples_split=2,
                                      min_weight_fraction_leaf=0.0,
                                      n_estimators=200,
                                      n_jobs=None,
                                      oob_score=False,
                                      random_state=None,
                                      verbose=0,
                                      warm_start=False)
        model.fit(X_train, y_train)
        y_pred = model.predict(X_test)
        bestMod = {'model': model, 'R2_score': r2_score(y_test, y_pred)}
        contador = 1
        print(bestMod)
        for estimators in params['n_estimators']:
            for features in params['max_features']:
                for dep in params['max_depth']:
                    for samples in params['min_samples_split']:
                        for samplesL in params['min_samples_leaf']:
                            for boot in params['bootstrap']:
                                model = RandomForestRegressor(
                                    bootstrap=boot,
                                    criterion='mse',
                                    max_depth=dep,
                                    max_features=features,
                                    max_leaf_nodes=None,
                                    min_impurity_decrease=0.0,
                                    min_impurity_split=None,
                                    min_samples_leaf=samplesL,
                                    min_samples_split=samples,
                                    min_weight_fraction_leaf=0.0,
                                    n_estimators=estimators,
                                    n_jobs=None,
                                    oob_score=False,
                                    random_state=None,
                                    verbose=0,
                                    warm_start=False)
                                model.fit(X_train, y_train)
                                y_pred = model.predict(X_test)
                                r2 = r2_score(y_test, y_pred)
                                if r2 > bestMod['R2_score']:
                                    bestMod = {'model': model, 'R2_score': r2}
                                    print(bestMod)
                                del model
    return bestMod
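The six nested loops above are a hand-rolled grid search over params; the same sweep can be expressed with sklearn.model_selection.ParameterGrid. This is an alternative sketch, not the project's code; it assumes the same params dict and the train/test splits prepared above.

from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import r2_score
from sklearn.model_selection import ParameterGrid

def search_best_forest(params, X_train, X_test, y_train, y_test):
    best = {'model': None, 'R2_score': float('-inf')}
    # ParameterGrid enumerates every combination of the listed hyperparameters
    for combo in ParameterGrid(params):
        model = RandomForestRegressor(**combo)
        model.fit(X_train, y_train)
        r2 = r2_score(y_test, model.predict(X_test))
        if r2 > best['R2_score']:
            best = {'model': model, 'R2_score': r2}
    return best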
Example #9
import numpy as np
import matplotlib.pyplot as plt
import main as func
import pandas as pd

url1 = 'https://www.bps.go.id/dynamictable/2017/05/04/1243/indeks-kebahagiaan-menurut-provinsi-2014-2017.html'
soup1 = func.getData(url1)

# find results within table
result_wilayah = soup1.find('table', attrs={'id': 'tableLeftBottom'})
result_value = soup1.find('table', attrs={'id': 'tableRightBottom'})
rows_wilayah = result_wilayah.find_all('tr')
rows_value = result_value.find_all('tr')

data_ipm = {}

for idx, r in enumerate(rows_wilayah[:-1]):
    # find all columns per result
    data_result = r.find_all('td', attrs={'id': 'th4'})
    data_value = rows_value[idx].find_all('td', attrs={'class': 'datas'})
    # check that columns have data
    if len(data_result) == 0:
        continue

    wilayah = data_result[0].getText()
    wilayah = wilayah.replace('\n', '')
    nilai = data_value[-1].getText()
    # Remove the decimal point and cast to an integer
    nilai = int(nilai.replace('.', ''))
    # store the value per region
    data_ipm[wilayah] = nilai
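func.getData(url) is assumed to return a parsed BeautifulSoup document in both scraping scripts of this example; here is a minimal sketch of such a helper. The use of requests and the parser choice are assumptions.

import requests
from bs4 import BeautifulSoup

def getData(url):
    # Hypothetical helper: fetch the page and return a parsed document.
    response = requests.get(url, timeout=30)
    response.raise_for_status()
    return BeautifulSoup(response.text, 'html.parser')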
# Import Libraries
import main as function
import numpy as np
import matplotlib.pyplot as plt

# Specify the URL
url = 'https://kawalpemilu.org/#pilpres:0'
soup = function.getData(url)

# Find results within table
results = soup.find('table', {'class': 'table'})
rows = results.find_all('tr', {'class': 'row'})
list_wilayah = []
jokowi = []
prabowo = []

# Print(rows)
for r in rows:
    # Find columns per result
    data = r.find_all('td')
    # check that columns have data
    if len(data) == 0:
        continue
    # Write columns to variables
    wilayah = data[1].find('a').getText()
    capres1 = data[2].find('span', attrs={'class': 'abs'}).getText()
    capres2 = data[3].find('span', attrs={'class': 'abs'}).getText()

    # Remove the '.' separators and cast the vote counts to integers
    capres1 = int(capres1.replace('.', ''))
    capres2 = int(capres2.replace('.', ''))

    list_wilayah.append(wilayah)
    jokowi.append(capres1)
    prabowo.append(capres2)
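The listing stops before numpy and matplotlib are used; one way the collected lists could be plotted, purely as an illustrative sketch. The chart layout is an assumption, not part of the original script.

x = np.arange(len(list_wilayah))
width = 0.4
plt.bar(x - width / 2, jokowi, width, label='Jokowi')
plt.bar(x + width / 2, prabowo, width, label='Prabowo')
plt.xticks(x, list_wilayah, rotation=90)
plt.ylabel('Votes')
plt.legend()
plt.tight_layout()
plt.show()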
Example #11
from __future__ import print_function
import numpy as np
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.optimizers import RMSprop
import main

batch_size = 128
# num_classes = 10
epochs = 20

# load the data from main.getData() and split it 60 / 20 / 20 into
# training, validation and test sets
X, y = main.getData()
X = np.array(X)
y = np.array(y)
l = len(X)
x_train = X[:int(0.6 * l)]
y_train = y[:int(0.6 * l)]
x_val = X[int(0.6 * l):int(0.8 * l)]
y_val = y[int(0.6 * l):int(0.8 * l)]
x_test = X[int(0.8 * l):]
y_test = y[int(0.8 * l):]

# x_train = x_train.reshape(l, 4)
# x_test = x_test.reshape(10000, 784)
# x_train = x_train.astype('float32')
# x_test = x_test.astype('float32')
# x_train /= 255
# x_test /= 255
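The snippet ends after the split, but the unused imports (Sequential, Dense, Dropout, RMSprop) and the commented-out num_classes = 10 suggest a small fully connected classifier. A hedged sketch of how it might continue; the layer sizes, the 4-feature input taken from the commented-out reshape, and the loss choice are assumptions.

model = Sequential()
model.add(Dense(64, activation='relu', input_shape=(4,)))  # assumed 4 input features
model.add(Dropout(0.2))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(10, activation='softmax'))  # assumed 10 classes

model.compile(loss='sparse_categorical_crossentropy',  # assumes integer labels in y
              optimizer=RMSprop(),
              metrics=['accuracy'])

model.fit(x_train, y_train,
          batch_size=batch_size,
          epochs=epochs,
          validation_data=(x_val, y_val))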
Example #12
def train():
    global randomMoves, totalMoves, gamma
    with tf.Session() as sess:
        # Initialize the variables
        sess.run(tf.global_variables_initializer())

        # Initialize the decay step (used to reduce epsilon over time)
        decay_step = 0
        if main.model is not None:
            print("oh no")
            saver.restore(sess, main.model[0:-6])
            decay_step = 1000000
        # Init the game
        instance = 0
        while (training):
            # Set step to 0
            instance += 1

            # Initialize the rewards of the episode
            episode_rewards = []

            # Make a new episode and observe the first state
            main.startGame()
            if not main.getPlaying():
                main.setPlaying(True)
            state = main.getData()
            randomMoves = 0
            totalMoves = 0
            while main.getPlaying():
                if main.ticks != 1000:
                    main.pygame.time.Clock().tick(main.ticks)

                # Increase decay_step
                decay_step += 1

                # Predict the action to take and take it
                action, explore_probability = predict_action(
                    explore_start, explore_stop, decay_rate, decay_step, state,
                    [[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]],
                    sess)

                # Do the action
                reward = main.getReward(action)

                # Check whether the episode is finished
                done = not main.getPlaying()

                # Add the reward to the total reward
                episode_rewards.append(reward)

                # If the game is finished
                if done:
                    # the episode ends so no next state
                    next_state = np.zeros(5, dtype=int)

                    # Get the total reward of the episode
                    total_reward = np.sum(episode_rewards)

                    print('Episode: {}'.format(main.getGen()),
                          'Total reward: {:.4f}'.format(total_reward),
                          'Explore P: {:.4f}'.format(explore_probability),
                          'Moves: R:', randomMoves, ' T:', totalMoves)

                    memory.add((state, action, reward, next_state, done))

                else:
                    # Get the next state
                    next_state = main.getData()

                    # Add experience to memory
                    memory.add((state, action, reward, next_state, done))

                    # st+1 is now our current state
                    state = next_state

                ### LEARNING PART
                # Obtain random mini-batch from memory
                batch = memory.sample(batch_size)
                states_mb = np.array([each[0] for each in batch], ndmin=2)
                actions_mb = np.array([each[1] for each in batch])
                rewards_mb = np.array([each[2] for each in batch])
                next_states_mb = np.array([each[3] for each in batch], ndmin=2)
                dones_mb = np.array([each[4] for each in batch])

                target_Qs_batch = []
                # Get Q values for next_state
                Qs_next_state = sess.run(
                    DQNetwork.output,
                    feed_dict={DQNetwork.inputs_: next_states_mb})

                # Set Q_target = r if the episode ends at s+1, otherwise set Q_target = r + gamma*maxQ(s', a')
                for i in range(0, len(batch)):
                    terminal = dones_mb[i]

                    # If we are in a terminal state, only equals reward
                    if terminal:
                        target_Qs_batch.append(rewards_mb[i])

                    else:
                        target = rewards_mb[i] + gamma * np.max(
                            Qs_next_state[i])
                        target_Qs_batch.append(target)

                targets_mb = np.array(target_Qs_batch)

                loss, _ = sess.run(
                    [DQNetwork.loss, DQNetwork.optimizer],
                    feed_dict={
                        DQNetwork.inputs_: states_mb,
                        DQNetwork.target_Q: targets_mb,
                        DQNetwork.actions_: actions_mb
                    })

                # Write TF Summaries
                summary = sess.run(write_op,
                                   feed_dict={
                                       DQNetwork.inputs_: states_mb,
                                       DQNetwork.target_Q: targets_mb,
                                       DQNetwork.actions_: actions_mb
                                   })
                writer.add_summary(summary, instance)
                writer.flush()

            # Save the model every 100 episodes
            if instance % 100 == 0:
                save_path = saver.save(
                    sess, dir_path + "/models/model" + str(instance) + ".ckpt")
                print("Model Saved")