Example #1
def dispatch(requestBody, eventsData):
    helpers.init()
    funcDict = {
        'billPayment': billPayment.payBill,
        'getBiller_billPayment': billPayment.getBiller_billPayment,
        'getAccount_billPayment': billPayment.getAccount_billPayment,
        'getAccount_fundsTransfer': fundsTransfer.getAccount_fundsTransfer,
        'getAmount_billPayment': billPayment.getAmount_billPayment,
        'getAmount_fundsTransfer': fundsTransfer.getAmount_fundsTransfer,
        'confirm_fundsTransfer': fundsTransfer.confirm_fundsTransfer,
        'confirm_billPayment': billPayment.confirm_billPayment,
        'fundsTransfer': fundsTransfer.transferFunds,
        'getRecipient_fundsTransfer': fundsTransfer.getRecipient_fundsTransfer,
        'getBalance': getBalance.getBalance
    }
    if requestBody is None:
        return None
    requestBody = json.loads(requestBody)
    intent = validate.try_ex(
        lambda: requestBody['queryResult']['intent']['displayName'])
    result = (
        validate.try_ex(lambda: funcDict[intent](requestBody, eventsData)))
    if result is None:
        raise Exception('Intent with name ' + intent + ' not supported')
    return result
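The dispatcher above leans on a validate.try_ex helper that is not shown. From the way its result is compared against None when an intent is missing from funcDict, it presumably calls the supplied lambda and swallows lookup errors; a minimal sketch under that assumption (not the project's actual helper):

def try_ex(func):
    """Hypothetical sketch: call func() and return its result, or None if a lookup fails."""
    try:
        return func()
    except (KeyError, TypeError):
        return None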
Example #2
def main():
    h.init()
    global cfg
    cfg = h.cfg
    
    # configure and start the actual app
    global routing
    application = webapp.WSGIApplication(routing,
                                         debug=True)
    util.run_wsgi_app(application)
Example #3
def def_first():
	"""Game for matching word to definiton"""
	content = helpers.init()
	print('\nYou can exit at anytime by entering \'menu\'.\n') 
	random.shuffle(content)
	count = len(content)
	correct = 0
	incorrect = 0
	for word in content:
		print('\n{}. {}'.format(operator.indexOf(content, word) + 1, word['definition']))
		res = input('\nWhat word does this definition belong to?\n\t=>').lower()

		if res == word['key'].lower():
			count -= 1
			correct += 1
			if count > 0:
				print('\nGood Job! next word!')		
		elif res == 'menu':
			main.menu()
		else:
			incorrect += 1
			count -= 1
			if count > 1:
				print('\nIncorrect. Next word.')
				
	print('\nYou got {} correct and {} incorrect'.format(correct, incorrect))
	helpers.callback(def_first)
Example #4
def scrambled():
	"""Unscramble the word"""

	content = helpers.init()
	print('\nYou can exit at any time by entering \'menu\'.\n')

	random.shuffle(content)
	count = len(content)
	correct = 0
	incorrect = 0
	i = 0
	shuffled = []

	for word in content:
		scrmbld = helpers.scramble_string(word['key'])
		shuffled.append(scrmbld)

	for scr in shuffled:
		res = input('\n{}. {}.\n\t=>'.format(i + 1, scr)).lower()

		if res == content[i]['key'].lower():
			count -= 1
			correct += 1
			if count > 0:
				print('\nGood Job! next word!')		
		elif res == 'menu':
			main.menu()
		else:
			incorrect += 1
			count -= 1
			if count > 1:
				print('\nIncorrect. Next word.')
		i += 1
	print('\nYou got {} correct and {} incorrect'.format(correct, incorrect))
	helpers.callback(scrambled)
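helpers.scramble_string is not shown in this example. Example #16 below inlines the same idea (shuffle the letters of the key), so a minimal sketch along those lines (an assumption, not the project's actual helper):

import random

def scramble_string(word):
    # Hypothetical sketch: return the letters of word in a random order.
    letters = list(word.lower().strip())
    random.shuffle(letters)
    return ''.join(letters)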
Example #6
def def_first():
    """Game for matching word to definiton"""
    content = helpers.init()
    print("\nYou can exit at anytime by entering 'exit'.\n")
    random.shuffle(content)
    count = len(content)
    correct = 0
    incorrect = 0
    for word in content:
        print("\n{}. {}".format(operator.indexOf(content, word) + 1, word["definition"]))
        res = input("\nWhat word does this definition belong to?\n\t=>").lower()

        if res == word["key"].lower():
            count -= 1
            correct += 1
            if count > 0:
                print("\nGood Job! next word!")
        elif res == "exit":
            return
        else:
            incorrect += 1
            count -= 1
            if count > 1:
                print("\nIncorrect. Next word.")

    print("\nYou got {} correct and {} incorrect".format(correct, incorrect))
    helpers.callback(def_first)
Example #7
def scrambled():
    """Unscramble the word"""

    content = helpers.init()
    print("\nYou can exit at anytime by entering 'exit'.\n")

    random.shuffle(content)
    count = len(content)
    correct = 0
    incorrect = 0
    i = 0
    shuffled = []

    for word in content:
        scrmbld = helpers.scramble_string(word["key"])
        shuffled.append(scrmbld)

    for scr in shuffled:
        res = input("\n{}. {}.\n\t=>".format(i + 1, scr)).lower()

        if res == content[i]["key"].lower():
            count -= 1
            correct += 1
            if count > 0:
                print("\nGood Job! next word!")
        elif res == "exit":
            return
        else:
            incorrect += 1
            count -= 1
            if count > 1:
                print("\nIncorrect. Next word.")
        i += 1
    print("\nYou got {} correct and {} incorrect".format(correct, incorrect))
    helpers.callback(scrambled)
Example #8
def get_events_breakdown_data(col, player_name, start, end):
    pipeline = helpers.init(player_name, start, end)
    pipeline[1]["$project"]["element_type"] = 1
    projection = [{"$group": {"_id": None, "goals": {"$push": "$fixture_history.goals"},
                  "assists": {"$push": "$fixture_history.assists"},
                  "clean_sheets": {"$push": "$fixture_history.clean_sheet"},
                  "points": {"$push": "$fixture_history.points"},
                  "gameweeks": {"$push": "$fixture_history.gameweek"},
                  "player_type": {"$first": "$element_type"}}}]
    pipeline.extend(projection)
    breakdown_object = col.aggregate(pipeline).next()
    
    goal_points = get_goal_points(breakdown_object["goals"], breakdown_object["player_type"])
    assist_points = map(lambda x: 3*x, breakdown_object["assists"])
    others_points = (np.array(breakdown_object["points"]) - np.array(goal_points)
                     - np.array(assist_points))
    data = [{"goals": pairs_to_lists(zip(breakdown_object["gameweeks"],goal_points))},
            {"assists": pairs_to_lists(zip(breakdown_object["gameweeks"],assist_points))}]
    if breakdown_object["player_type"] == 3 or breakdown_object["player_type"] == 4: # Forwards/midfields
        data.append({"others": pairs_to_lists(zip(breakdown_object["gameweeks"],others_points.tolist()))})
    else: # Defenders/GKs
        cs_points = map(lambda x: 4*x, breakdown_object["clean_sheets"])
        others_points -= np.array(cs_points)
        data.extend([{"cleanSheets": pairs_to_lists(zip(breakdown_object["gameweeks"],cs_points))},
                     {"others": pairs_to_lists(zip(breakdown_object["gameweeks"],others_points.tolist()))}])
    return data
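This example and the several that follow build on a helpers.init(player_name, start, end) call that is not shown. Judging from how the result is used (callers index pipeline[1]["$project"] and then append $group stages before running col.aggregate), it apparently builds a MongoDB aggregation pipeline; a minimal sketch under that assumption, with guessed field names:

def init(player_name, start, end):
    # Hypothetical sketch of the shared pipeline builder; the real stages
    # and field names may differ.
    return [
        {"$match": {"name": player_name}},
        {"$project": {"fixture_history": 1}},
        {"$unwind": "$fixture_history"},
        {"$match": {"fixture_history.gameweek": {"$gte": start, "$lte": end}}},
    ]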
Example #9
def get_consistency_data(col, player_name, start, end, attr):
    # Five-number summary (min, Q1, median, Q3, max) of the attribute over the selected gameweeks.
    pipeline = helpers.init(player_name, start, end)
    projection = [{"$group": {"_id": None, attr: {"$push": "$fixture_history."+attribute.INTERNAL_ATTR_MAP[attr]}}}]
    pipeline.extend(projection)
    attr_vals = np.array(col.aggregate(pipeline).next()[attr])
    data = [attr_vals.min(), np.percentile(attr_vals, 25, interpolation="midpoint"), np.median(attr_vals),
            np.percentile(attr_vals, 75, interpolation="midpoint"), attr_vals.max()]
    return [data]
Example #10
def get_over_time_data(col, player_name, start, end, attr):
    # Per-gameweek values of the attribute, scaled down by a factor of 10.
    pipeline = helpers.init(player_name, start, end)
    projection = [{"$group": {"_id": None, "gameweeks": {"$push": "$fixture_history.gameweek"},
                  attr: {"$push": "$fixture_history."+attribute.INTERNAL_ATTR_MAP[attr]},
                  "results": {"$push": "$fixture_history.opponent_result"}}}]
    pipeline.extend(projection)
    data = col.aggregate(pipeline).next()
    data = helpers.pairs_to_lists(zip(data["gameweeks"], data[attr]))
    return map(lambda p: [p[0],p[1]/10.0], data)
Example #11
def get_home_vs_away_data(col, player_name, start, end, attr):
    pipeline = helpers.init(player_name, start, end)
    projection = [{"$group": {"_id": "$fixture_history.ground",
                            "y": {"$sum": "$fixture_history."+INTERNAL_ATTR_MAP[attr]}}}]
    pipeline.extend(projection)
    data = [d for d in col.aggregate(pipeline)]
    for d in data:
        d["_id"] = "Home" if d["_id"] == "H" else "Away"
        d["name"] = d.pop("_id")
    return data
Example #12
def dispatch(requestBody, eventsData):
    helpers.init()
    funcDict = {
        'booking': booking.bookingcom,
        'getarrival_booking': booking.getarrival_booking,
        'getdeparture_booking': booking.getdeparture_booking,
        'getadult_booking': booking.getadult_booking,
        'getchild_booking': booking.getchild_booking,
        'Confirm_booking': booking.Confirm_booking
    }
    if requestBody is None:
        return None
    requestBody = json.loads(requestBody)
    intent = validate.try_ex(
        lambda: requestBody['queryResult']['intent']['displayName'])
    result = (
        validate.try_ex(lambda: funcDict[intent](requestBody, eventsData)))
    if result is None:
        raise Exception('Intent with name ' + intent + ' not supported')
    return result
Example #13
def get_cumulative_total_data(col, player_name, start, end, attr):
    pipeline = helpers.init(player_name, start, end)
    
    projection = [{"$group": {"_id": None, "gameweeks": {"$push": "$fixture_history.gameweek"},
                  attr: {"$push": "$fixture_history."+INTERNAL_ATTR_MAP[attr]},
                  "results": {"$push": "$fixture_history.opponent_result"}}}]
    pipeline.extend(projection)
    data = col.aggregate(pipeline).next()
    cum_sums = np.cumsum(data[attr])
    data = map(list, zip(data["gameweeks"], cum_sums))
    return data
Example #14
def get_over_time_data(col, player_name, start, end, attr):
    pipeline = helpers.init(player_name, start, end)
    projection = [{"$group": {"_id": None, "gameweeks": {"$push": "$fixture_history.gameweek"},
                    attr: {"$push": "$fixture_history."+INTERNAL_ATTR_MAP[attr]},
                    "results": {"$push": "$fixture_history.opponent_result"}}}]
    pipeline.extend(projection)
    data = col.aggregate(pipeline).next()
    res_length = len(data["gameweeks"])
    data = [{"x": data["gameweeks"][i], "y": data[attr][i], "name": data["results"][i]}
            for i in xrange(res_length)]
    return data
Example #15
    def __init__(self, argv):
        # Authenticate and construct service.
        self.service, self.arguments = init(
            argv,
            'calendar',
            'v3',
            __doc__,
            __file__,
            parents=[self._create_argument_parser()],
            scope='https://www.googleapis.com/auth/calendar.readonly')

        # user info
        self.user = None
        self.title = None
        with open("user.json", 'r', encoding='utf-8') as fp:
            user_data = json.load(fp)
            self.user = user_data["user"]
            self.title = user_data["title"]

        # projects database
        self.db = CalendarDb(self.arguments.database)
        self.projects = dict()
        for p in self.db.get_projects():
            self.projects[p.key] = p

        # start and end date
        if self.arguments.start_date is None and \
                self.arguments.end_date is None and \
                self.arguments.period is None:
            self.arguments.period = "last-month"

        self.start_date = date.today().replace(day=1)
        self.end_date = date.today() + timedelta(days=1)

        if self.arguments.period is not None:
            if self.arguments.period == "last-month":
                self.end_date = self.start_date
                self.start_date = (self.end_date -
                                   timedelta(days=1)).replace(day=1)
            else:
                raise ValueError("Unknown period: %s" % self.arguments.period)
            if self.arguments.start_date is not None or \
                    self.arguments.end_date is not None:
                raise ValueError(
                    "You cannot combine --period and explicit date")
        else:
            if self.arguments.start_date is not None:
                y, m, d, *r = humanfriendly.parse_date(
                    self.arguments.start_date)
                self.start_date = date(y, m, d)
            if self.arguments.end_date is not None:
                y, m, d, *r = humanfriendly.parse_date(self.arguments.end_date)
                self.end_date = date(y, m, d)
Example #16
def scrambled():
	"""Unscramble the word"""

	content = helpers.init()
	print('\nYou can exit at any time by entering \'menu\'.\n')

	random.shuffle(content)
	count = len(content)
	correct = 0
	incorrect = 0
	i = 0
	shuffled = []

	for word in content:
		to_scramble = list(word['key'].lower().strip())
		random.shuffle(to_scramble)
		shuffled.append(to_scramble)

	for scr in shuffled:
		res = input('\n{}. {}.\n\t=>'.format(i + 1, ''.join(scr))).lower()

		if res == content[i]['key'].lower():
			count -= 1
			correct += 1
			if count > 0:
				print('\nGood Job! next word!')		
		elif res == 'menu':
			main.menu()
		else:
			incorrect += 1
			count -= 1
			if count > 1:
				print('\nIncorrect. Next word.')
		i += 1
	print('\nYou got {} correct and {} incorrect'.format(correct, incorrect))
	helpers.callback(scrambled)
Example #17
import win32gui
from pywinauto.findwindows import find_window
from pywinauto.win32functions import SetForegroundWindow
import ImageGrab
import cv2
import helpers
import numpy as np
import random

thing_id = 'bob30'
helpers.init()


def get_rect(hwnd):
    rect = win32gui.GetWindowRect(hwnd)
    return rect


def screen_grab():
    box = (x1, y1, x2, y2)
    im = ImageGrab.grab(box)
    return im


window = find_window(title="bob_robot_sim")
SetForegroundWindow(window)
x1, y1, x2, y2 = get_rect(window)


class RandomNoise:
Example #18
import helpers
helpers.init(globals())

from structures.Possession import PossessionManager
PossessionManager().persist()
Example #19
import torchvision
import torch.nn as nn
from torch.autograd import Variable
import torch
import numpy as np

import dataset
import helpers
import models
from config import *
assert (file_name == 'vae-gan')
writer, save_dir = helpers.init(gpu, file_name, experiment_name)

use_ganloss = True  # TODO can I use that here?
use_vaeloss = True
use_instancenoise = False
iter_decay = 1000.  # iterations after which instance noise is at 1/e

# models  # TODO to one model?
netD = models.Discriminator().cuda()
netG = models.Generator().cuda()
netE = models.Encoder().cuda()

# weight initialization
netD.apply(models.init_weights)  # xavier init
netE.apply(models.init_weights)  # xavier init
netG.apply(models.init_weights)  # xavier init

criterion = nn.BCELoss().cuda()
optD = torch.optim.Adam(netD.parameters(), lr=lr)
optG = torch.optim.Adam(netG.parameters(), lr=lr)
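The iter_decay comment above implies an exponential schedule for the instance-noise strength. The training loop is not part of this snippet, but a minimal sketch of such a schedule might look like this (an assumption, not necessarily how this project implements it):

import math

def instance_noise_std(iteration, iter_decay=1000.0, initial_std=1.0):
    # Noise strength falls to 1/e of its initial value after iter_decay iterations.
    return initial_std * math.exp(-iteration / iter_decay)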
Example #20
This module uses K-Means to initialize the mean and covariance. The transition matrix A and the initial state distribution are initialized randomly. All estimated parameters start with an underscore, followed by the same parameter names used previously.
"""

# K-Means to estimate _mean and _cov
_mean, assign = KMeans(X, numComponent)
_cov = helpers.estimateCov(X, _mean, assign)

# show clustering result
plt.figure()
plt.scatter(X[:, 0], X[:, 1], c = assign)
plt.scatter(_mean[:, 0], _mean[:, 1], s = 40, color = 'y', marker = 'D')
plt.title("Clustering Result by K-Means")

#%% Train HMM
# initialize parameters
_A, _pi = helpers.init(numComponent)

for nIter in range(1, 21):
    # E Step
    gamma, epsilon = helpers.E_Step(X, _mean, _cov, _A, _pi)

    # M Step
    _mean, _cov, _A, _pi = helpers.M_Step(X, gamma, epsilon)

    # plot intermediate result
    if(nIter % 5 == 0):
        plt.figure()
        plt.scatter(X[:, 0], X[:, 1], c = gamma)
        plt.scatter(_mean[:, 0], _mean[:, 1], s = 40, color = 'y', marker = 'D')
        plt.title("Result return by HMM after " + str(nIter) + " iterations")
        plt.show()
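helpers.init(numComponent) is not shown; given the description above (the transition matrix A and the initial state distribution are initialized randomly), a minimal sketch of a random row-stochastic initialization (assumed, not the author's actual helper):

import numpy as np

def init(numComponent):
    # Hypothetical sketch: random row-stochastic transition matrix and initial distribution.
    A = np.random.rand(numComponent, numComponent)
    A /= A.sum(axis=1, keepdims=True)   # each row sums to 1
    pi = np.random.rand(numComponent)
    pi /= pi.sum()                      # sums to 1
    return A, pi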
Example #21
	def __init__(self,input_size,output_size):
		super(Linear,self).__init__()
		self.param('weights', init(input_size,output_size))
		self.param('bias', np.zeros(output_size))
Example #22
def run_multi_engine_query(config):
    """ Based on the specified named query, collect all engines, and run the query against
    all engines and put the output in a single .csv file.

    High-level logic:
    1. Create a Portal object instance
    2. Get the list of Engines from the Portal
    3. Create the output file
    For each Engine:
        4. Run the query against that engine
        5. Append the results to the output file

    Arguments:
    config: Initialized MultiEngineQueryConfig object
    """
    func_name = inspect.currentframe().f_code.co_name

    # Create the logger, and enable Class-level debugging
    logger = init(config)

    start_time = time.time()
    if config.verbose:
        logger.info('{} - Starting'.format(func_name))

    # 1. Create a Portal object instance
    portal = PortalAppliance.create(config)

    # 2. Get the list of Connected Engines from the Portal
    portal_start_time = time.time()
    engine_list = portal.get_engine_list(config.engine_port,
                                         only_connected=True)
    portal_end_time = time.time()
    msg = 'Retrieved {0} connected Engine{1} from Portal "{2}" in {3}.'.format(
        len(engine_list), 's' if len(engine_list) != 1 else '', portal.name,
        timer(portal_start_time, portal_end_time))
    config.add_to_email(msg)
    print(msg)
    if config.verbose: logger.info("{0} - {1}".format(func_name, msg))
    # If there are no Engine Appliances, just note the fact and exit.
    if len(engine_list) == 0:
        msg = 'No Engine Appliances are connected or available, so exiting.'
        print(msg)
        config.add_to_email(msg)
        return finish_process(func_name,
                              logger,
                              config,
                              start_time,
                              finished=True)

    # For each query
    for query in config.queries:
        query_start_time = time.time()

        if config.verbose:
            msg = 'Processing Query "{0}".'.format(query.name)
            print(msg)
            config.add_to_email(msg)
            logger.info('{} - {}'.format(func_name, msg))

        # 3. Create the output file
        output_fname, output_created = create_output_file(
            logger, config, query)
        if not output_created:
            msg = 'Unable to create the output file ("{0}") for Query "{1}", so exiting.'.format(
                output_fname, query.name)
            print(msg)
            config.add_to_email(msg)
            return finish_process(func_name,
                                  logger,
                                  config,
                                  start_time,
                                  finished=True)

        # For each Engine
        first_engine = True
        for engine in engine_list:
            eng_name = '[{0} ({1})]'.format(engine.name, engine.hostname_fqdn)

            # 4. Run the query
            engine_objects = run_query_on_engine(logger, config, engine, query)
            msg = '{0} Retrieved {1} Object{2} to save from this Engine for Query "{3}".'.format(
                eng_name, len(engine_objects),
                's' if len(engine_objects) != 1 else '', query.name)
            config.add_to_email(msg)
            print(msg)
            if config.debug_engine:
                logger.debug('{0} - {1}'.format(func_name, msg))

            # 5. Save the query results to the output file
            if len(engine_objects) > 0:
                if first_engine:
                    field_names = engine_objects[0].keys()
                written_ok = write_to_output_file(logger, config, query,
                                                  output_fname, engine_objects,
                                                  field_names, first_engine)
                first_engine = False
            elif config.debug_engine:
                logger.debug(
                    '{0} - Skipping write of output file for Engine "{1}", no rows were returned for Query "{2}".'
                    .format(func_name, eng_name, query.name))

        query_end_time = time.time()
        if config.verbose:
            msg = 'Completed collecting and writing results for "{0}" to "{1}" in {2}.'.format(
                query.name, output_fname,
                timer(query_start_time, query_end_time))
            logger.info('{0} - {1}'.format(func_name, msg))
            print(msg)

    return finish_process(func_name, logger, config, start_time)