Example #1
0
def start(args):
    """
    Create all required interfaces and start the application.

    :param args: parsed command-line arguments (an argparse.Namespace);
                 converted to a plain dict internally.
    """

    args = vars(args)
    log.debug('Command line arguments: %s' % args)
    init(**args)

    # One-shot mode: create a user account, then exit without starting services.
    if args.get('create_user'):
        import hashlib  # local import: the old `sha` module was removed in Python 3
        db.users.insert({
            'username': args.get('create_user'),
            # sha.sha() was SHA-1, so hashlib.sha1 keeps stored hashes
            # compatible; the password must be encoded to bytes on Python 3.
            # SECURITY NOTE: unsalted SHA-1 is unsuitable for passwords —
            # consider hashlib.pbkdf2_hmac or similar.
            'hash': hashlib.sha1(getpass.getpass().encode('utf-8')).hexdigest()
        })
        return

    log.info('Starting alfred {0}'.format(__version__))
    signal.signal(signal.SIGINT, signalHandler)

    # Start the core services.
    manager.start()
    # persistence.start()
    ruleHandler.loadRules(os.path.join(os.path.dirname(__file__), 'rules'))
    ruleHandler.start()

    # Periodic heartbeat driven by SIGALRM; fire one immediately at startup.
    sys.startTime = time.asctime()
    signal.signal(signal.SIGALRM, heartbeat)
    heartbeat(signal.SIGALRM, None)

    # Bring up the web interface last.
    webserver.start(args.get('client_path'))
Example #2
0
def start(args):
    """
    Create all required interfaces and start the application.

    :param args: parsed command-line arguments (an argparse.Namespace);
                 converted to a plain dict internally.
    """

    args = vars(args)
    log.debug('Command line arguments: %s' % args)
    init(**args)

    # One-shot mode: create a user account, then exit without starting services.
    if args.get('create_user'):
        import hashlib  # local import: the old `sha` module was removed in Python 3
        # sha.sha() was SHA-1; hashlib.sha1 keeps stored hashes compatible,
        # and the password is encoded to bytes as Python 3 requires.
        # SECURITY NOTE: unsalted SHA-1 is unsuitable for password storage.
        db.users.insert({'username': args.get('create_user'),
                         'hash': hashlib.sha1(
                             getpass.getpass().encode('utf-8')).hexdigest()})
        return

    log.info('Starting alfred {0}'.format(__version__))
    signal.signal(signal.SIGINT, signalHandler)

    # Start the core services.
    manager.start()
    # persistence.start()
    ruleHandler.loadRules(os.path.join(os.path.dirname(__file__), 'rules'))
    ruleHandler.start()

    # Periodic heartbeat driven by SIGALRM; fire one immediately at startup.
    sys.startTime = time.asctime()
    signal.signal(signal.SIGALRM, heartbeat)
    heartbeat(signal.SIGALRM, None)

    # Bring up the web interface last.
    webserver.start(args.get('client_path'))
Example #3
0
# Minimal launcher: import the application modules and hand control to the manager.
import manager
import item  # NOTE(review): not referenced below — presumably imported for its side effects; confirm

manager.start()
Example #4
0
"""
Supervised Learning on generated training data.
"""
import multiprocessing as mp
import os
import sys
import keras.backend as K

_PATH_ = os.path.dirname(os.path.dirname(__file__))

import memory_saving_gradients
"""
Uncomment the following line if your model doesn't fit in your gpu memory.
Training will be a bit slower but you will be able to use bigger networks.
To learn more about : https://medium.com/tensorflow/fitting-larger-networks-into-memory-583e3c758ff9
"""
K.__dict__["gradients"] = memory_saving_gradients.gradients_memory

if _PATH_ not in sys.path:
    sys.path.append(_PATH_)

if __name__ == "__main__":
    mp.set_start_method('spawn')
    sys.setrecursionlimit(10000)
    import manager

    manager.start(worker="opt", config_type="normal")
Example #5
0
from selenium import webdriver
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
import manager
import os

if __name__ == '__main__':
    # Fail fast: check the configuration BEFORE launching a browser, so a
    # missing settings.py does not needlessly spawn and tear down Firefox.
    if not os.path.isfile('settings.py'):
        raise Exception('Not found settings!')
    binary = FirefoxBinary(
        r'C:\Program Files (x86)\Mozilla Firefox\firefox.exe')
    driver = webdriver.Firefox(firefox_binary=binary)
    try:
        manager.start(driver)
    finally:
        # Always shut the browser down, even if manager.start() raises.
        driver.quit()
Example #6
0
"""
Make to networks compete against each other.
"""
import multiprocessing as mp
import os
import sys

_PATH_ = os.path.dirname(os.path.dirname(__file__))

if _PATH_ not in sys.path:
    sys.path.append(_PATH_)

if __name__ == "__main__":
    mp.set_start_method('spawn')
    sys.setrecursionlimit(10000)
    import manager

    white_model_path = "data/model/last"
    black_model_path = "data/model/old"
    # model_1 is white, model_2 plays as black player
    manager.start(worker="duel",
                  config_type="normal",
                  model_1_path=white_model_path,
                  model_2_path=black_model_path,
                  deterministic=False)
Example #7
0
# Reinforcement-learning driver: endlessly alternate self-play, training
# and evaluation phases via manager.start().
import multiprocessing as mp
import os
import sys
import keras.backend as K
import memory_saving_gradients

# Project root: two directory levels up from this file.
_PATH_ = os.path.dirname(os.path.dirname(__file__))

if _PATH_ not in sys.path:
    sys.path.append(_PATH_)

if __name__ == "__main__":
    # 'spawn' keeps worker processes free of inherited interpreter/GPU state.
    mp.set_start_method('spawn')
    sys.setrecursionlimit(10000)
    import manager

    # Alternating self-play, training and evaluation phases
    while True:

        # Generates pgn files by competing best network against itself
        manager.start(worker="self", config_type="normal")

        # Swap in memory-saving (checkpointed) gradients for the training
        # phase only; the default implementation is restored afterwards.
        tmp_gradients = K.__dict__["gradients"]
        K.__dict__["gradients"] = memory_saving_gradients.gradients_memory
        for _ in range(2):
            # Continue training network on most recent pgn files
            manager.start(worker="opt", config_type="normal", rl=True)
        K.__dict__["gradients"] = tmp_gradients
        # Evaluates last network against the best and replace it if stronger
        manager.start(worker="eval", config_type="normal")
Example #8
0
import os
import sys
from dotenv import load_dotenv, find_dotenv

# Load environment overrides from a .env file if one exists. find_dotenv()
# walks the directory tree looking for the file, so call it only once.
_dotenv_path = find_dotenv()
if _dotenv_path:
    load_dotenv(_dotenv_path)

# Project root: two directory levels up from this file.
_PATH_ = os.path.dirname(os.path.dirname(__file__))

if _PATH_ not in sys.path:
    sys.path.append(_PATH_)

if __name__ == "__main__":
    import manager
    # Propagate manager.start()'s return value as the process exit code.
    sys.exit(manager.start())
Example #9
0
# Thin launcher: delegate straight to the manager's start routine.
from manager import start
start(True)  # NOTE(review): meaning of the bare True flag is not visible here — confirm against start()'s signature