Example #1
    def __init__(self, stateCnt, actionCnt, instance_name='', weights=None):
        """Class to manage an agent to train it's Brain

        Args:
            stateCnt (int): The size of the input state vector
            actionCnt (int): The size of the output action vector
            weights_h5 (string): Filename of h5 file to load from,
                None if to not load from any file.

        """
        self.lg = setup_custom_logger('Agent: ' + instance_name, instance_name)
        self.lg.debug('Agent Initializing...')
        self.instance_dir = 'data/instances/' + instance_name + '/'

        self.stateCnt = stateCnt
        self.actionCnt = actionCnt

        self.steps = 0
        self.epsilon = MAX_EPSILON

        self.brain = Brain(self.stateCnt, self.actionCnt, instance_name,
                           weights)
        self.memory = Memory(MEMORY_CAPACITY, instance_name)

        self.lg.debug('Agent Initialized.')
Example #2
def init(oid, threshold, pport, sport):
    global log, sk, addrs, brokerAddr
    log = logger.setup_custom_logger(str(oid))
    members = {}
    oids = []

    for line in open("examples/tcp-utils/members.txt").readlines():
        lines = line.split(' ')
        id = int(lines[0])
        addr = lines[1].rstrip("\n")
        oids.append(id)
        if not brokerAddr:
            brokerAddr = addr
        addrs[id] = addr

    ctx = zmq.Context()
    sub = ctx.socket(zmq.SUB)
    sub.connect("tcp://%s:%s" % (brokerAddr, sport))
    sub.setsockopt(zmq.SUBSCRIBE, "")  # subscribe to all topics

    pub = ctx.socket(zmq.PUB)
    pub.connect("tcp://%s:%s" % (brokerAddr, pport))

    socket = ctx.socket(zmq.REP)
    socket.bind("tcp://*:%s" % oid)

    poller = zmq.Poller()
    poller.register(socket, zmq.POLLIN)
    poller.register(sub, zmq.POLLIN)

    end = False

    log.info("%d: Started communications..." % oid)
    while not end:
        socks = dict(poller.poll())
        if sub in socks and socks[sub] == zmq.POLLIN:
            msg = sub.recv()
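            # Dispatch on the message prefix (the token before the first "_").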
            type = msg.split("_", 1)[0]
            if type == "setup":
                setup(members, oid, oids, threshold)
            elif type == "consensus" and sk is not None:
                pub.send(genNewSig(oid))
            elif type == "sig":
                handleSig(json.loads(msg.split("_", 1)[1]), threshold, members)

        if socket in socks and socks[socket] == zmq.POLLIN:
            msg = json.loads(socket.recv())
            topic = msg["topic"]
            if topic == "contrib":
                receiveContribution(msg, members, oid)
            # A REP socket must send exactly one reply per request received.
            socket.send("OK")
Example #3
    def __init__(self, dp, instance_name=''):
        """Class to manage preprocessed data
        """

        self.lg = setup_custom_logger('DataManager: ' + instance_name,
                                      instance_name)
        self.lg.info('DataManager Initializing...')

        self.dp = dp
        self.data = dp.clean_df
        self.state = dp.state
        self.pairs = self.data.pair.unique()
        self.training_columns = self.get_training_columns()

        self.lg.info('DataManager Initialized!')
Example #4
    def __init__(self, stateCnt, actionCnt, instance_name='', weights_h5=None):
        """Class to create and train a neural network for prediction.

        Args:
            stateCnt (int): The size of the input state vector
            actionCnt (int): The size of the output action vector
            weights_h5 (string): Filename of the h5 weights file to load from,
                or None to not load from any file.

        """

        self.lg = setup_custom_logger('Brain: ' + instance_name, instance_name)

        self.stateCnt = stateCnt
        self.actionCnt = actionCnt

        self.model = self._createModel(weights_h5)
Example #5
    def __init__(self, data, ds, instance_name='', render_figures=True):
        """Class to manage an agent to train it's Brain

        Args:
            df (DataFrame): the training data

        """

        self.lg = setup_custom_logger('Environment: ' + instance_name,
                                      instance_name)
        self.lg.debug('Environment initializing...')
        self.instance_dir = 'data/instances/' + instance_name + '/'

        self.actions = {0: 'pos_short', 1: 'pos_neutral', 2: 'pos_long'}
        self.state_variables = ds.columns

        df = data.join(ds)

        df = df[~df.isnull().any(axis=1)]  # drop rows with any NaN values
        # init portfolio
        df['portfolio'] = STARTING_PORTFOLIO
        df.pos_short = 0
        df.pos_neutral = 0
        df.pos_long = 0

        self.close_col = df.columns.get_loc('close')
        self.open_col = df.columns.get_loc('open')
        self.next_col = df.columns.get_loc('next_open')
        self.pf_col = df.columns.get_loc('portfolio')

        self.state = np.asarray(df.iloc[0][self.state_variables])

        self.df = df
        self.current_datetime = self.df.index[0]
        self.canvas = {'portfolio': [], 'next_open': [], 'index': []}

        self.render_figures = render_figures
        self.lg.debug('Rendering Figures: ' + str(self.render_figures))
        if self.render_figures:
            plt.ion()
            self.fig, self.ax = plt.subplots()

        self.lg.debug('Environment initialized.')
Example #6
    def __init__(self, apikey, agent_id, frames_per_state=1, host=None):

        # PPO agent seems to learn that it needs to speed around the environment to collect rewards
        self._agent = PPOAgent(
            states_spec=dict(type='float', shape=(frames_per_state * 25, )),
            actions_spec=dict(type='float',
                              shape=(3, ),
                              min_value=np.float32(-1.0),
                              max_value=np.float32(1.0)),
            network_spec=[
                dict(type='dense', activation='relu', size=128),
                dict(type='dense', activation='relu', size=128),
            ],
            optimization_steps=5,
            # Model
            scope='ppo',
            discount=0.99,
            # DistributionModel
            distributions_spec=None,
            entropy_regularization=0.01,
            # PGModel
            baseline_mode=None,
            baseline=None,
            baseline_optimizer=None,
            gae_lambda=None,
            # PGLRModel
            likelihood_ratio_clipping=0.2,
            summary_spec=None,
            distributed_spec=None,
            batch_size=2048,
            step_optimizer=dict(type='adam', learning_rate=1e-4))

        self._logger = setup_custom_logger("Controller")

        self._frame_count_per_episode = 0
        self._total_frames = 1
        self._frames_per_state = frames_per_state

        self._client = AsyncClient(apikey, agent_id,
                                   self._train_state_callback, host)

        self._state_stack = StateStack(self._frames_per_state)
Example #7
    def __init__(self, instance_name='', prepare_data=True):
        """Class to create, store, and fetch data
        """
        self.lg = setup_custom_logger('DataProcessor: ' + instance_name,
                                      instance_name)
        self.lg.info('DataProcessor Initializing...')

        if prepare_data:
            if os.path.isfile('data/master.pickle'):
                self.lg.info('Loading Master Data')
                self.clean_df = self.load_data('data/master.pickle')
            else:
                self.lg.error('data/master.pickle is required')
                raise FileNotFoundError('data/master.pickle is required')

            if os.path.isfile('data/state_space.pickle'):
                self.lg.info('Loading State Data')
                self.state = self.load_data('data/state_space.pickle')
            else:
                self.lg.info('Generating State Data')
                self.state = self.prepare_state_space()

        self.lg.info("DataProcessing Initialized!")
Example #8
    )  # this line makes sure the file exists; if it doesn't, it creates the file.
    with open(filename, 'r') as f:
        raw_data = f.readlines()

    # readlines() keeps the trailing newline character ('\n') on each line.
    # Wuwuro does not treat a string with a newline and one without as equal,
    # so strip the newline from every item in the list.
    processed_data = []
    for line in raw_data:
        processed_data.append(line.replace('\n', ''))
    return processed_data


''' Start of the program '''
app_version = '2.1.0'
logger = setup_custom_logger(app_version)
bot_name = 'Wuwuro Bot %s' % app_version  # This will appear as our User-Agent

print(bot_name + '\n')
''' These are the changeable variables '''
save_path = ''  # !!! MUST create a folder with the novel's name
novel_url = ''  # !!! MUST copy the whole URL here, except the chapter number
backup_url = ''  # The URL format sometimes changes, so a backup URL is important
start_at_chap = 0  # * Change this based on where you want to start
end_at_chap = 0  # * Change this based on where you want to END
''' END of changeable variables '''

to_downloads_amount = end_at_chap - start_at_chap
downloaded_amount = 0
''' Starting the scraper '''
Example #9
from picamera.array import PiRGBArray
#from pyimagesearch.tempimage import TempImage
from logger import setup_custom_logger

#####################################################################################

# Construct the argument parser and parse the arguments.
ap = argparse.ArgumentParser()
ap.add_argument("-c",
                "--conf",
                required=True,
                help="path to the configuration file")
args = vars(ap.parse_args())

# Initialize the logger.
logger = setup_custom_logger("main", "logs")

# Filter warnings, load the configuration and initialize the <EXTERNAL FILE STORAGE>.
warnings.filterwarnings("ignore")
conf = json.load(open(args['conf']))
client = None
if conf['use_dropbox']:
    client = dropbox.Dropbox(conf['dropbox_access_token'])
    logger.info("SUCCES - dropbox account linked")

# Initialize the camera and grab a reference to the raw camera capture.
camera = PiCamera()
camera.resolution = tuple(conf['resolution'])
camera.framerate = conf['fps']
raw_capture = PiRGBArray(camera, size=tuple(conf['resolution']))
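
# Invocation sketch (assumed from the argparse setup above; this excerpt does
# not show the script's filename):
#   python <script>.py --conf conf.json
# where conf.json supplies the keys read above, e.g. (values hypothetical):
#   {"use_dropbox": false, "resolution": [640, 480], "fps": 16}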
Example #10
File: run.py Project: embod/andromeda-tf
import argparse
from logger import setup_custom_logger
from Controller import Controller

if __name__ == "__main__":

    parser = argparse.ArgumentParser(
        description='Mbot control using Deep Reinforcement Learning.')
    parser.add_argument('-p',
                        required=True,
                        dest='apikey',
                        help='Your embod.ai API key')
    parser.add_argument('-a',
                        required=True,
                        dest='agent_id',
                        help='The id of the agent you want to control')
    parser.add_argument('-H',
                        default="wss://api.embod.ai",
                        dest='host',
                        help="The websocket host for the environment")

    args = parser.parse_args()
    setup_custom_logger("root")

    controller = Controller(args.apikey, args.agent_id, 5, args.host)

    controller.train(2000000)
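
# Invocation sketch (assumed from the argparse definitions above):
#   python run.py -p <your-api-key> -a <agent-id> [-H wss://api.embod.ai]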
Example #11
import atexit
from selenium import webdriver
from logger import setup_custom_logger

logger = setup_custom_logger('driverbuilder')


def _enable_download_in_headless_chrome(driver: webdriver.Chrome, download_dir: str):
    """
    Headless mode does not allow file downloads: https://bugs.chromium.org/p/chromium/issues/detail?id=696481

    By default, ChromeDriver does not support downloads in headless mode, to
    guard against abuse by crawlers; a command must be sent first to enable
    them. See the link above for details.

    :param driver: Chrome driver instance
    :param download_dir: file download path
    """
    driver.command_executor._commands["send_command"] = (
        "POST", '/session/$sessionId/chromium/send_command')

    params = {
        'cmd': 'Page.setDownloadBehavior',
        'params': {
            'behavior': 'allow',
            'downloadPath': download_dir
        }
    }
    driver.execute("send_command", params)


def _close_chrome(chrome: webdriver.Chrome):
    """
Example #12
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 18 14:30:49 2019

@author: jordip
"""

import sys
import hashlib
import consensus
import utils, logger

logger.setup_custom_logger('Consensus')

DKG_NUMBER_PARTICIPANTS = 100

# Make the random selection
random_no = utils.compress_random_no_to_int(
    '0xfab206a4186845ff0f0192fd06be977971a7dedbf9c22173cc38d23625aac2a7', 16)
random_no_string = '0xfab206a4186845ff0f0192fd06be977971a7dedbf9c22173cc38d23625aac2a7'

# Load master addresses
# List all addresses currently in the chain
all_addresses = []
try:
    all_addr = open('dataset/master-all-addrs-db-order.txt', 'r')
except Exception as e:
    print e
    sys.exit(1)
Example #13
import sys
import os
import time
import zipfile
import glob
import shutil
from datetime import datetime
from selenium.webdriver.common.keys import Keys

from logger import setup_custom_logger
from chromedriver import generate_chrome
from xlsxhandler import get_dir_update_info, get_file_diff_info_list
from slackhandler import Slack, gen_total_file_update_info_text, gen_diff_row_info_text

logger = setup_custom_logger('main.py')
logger.debug('Run crawler!!!!')
PROJECT_DIR = str(os.path.dirname(os.path.abspath(__file__)))
DOWNLOAD_DIR = f'{PROJECT_DIR}/download'

driver_path = f'{PROJECT_DIR}/lib/webDriver/'
platform = sys.platform
if platform == 'darwin':
    logger.debug('System platform : Darwin')
    driver_path += 'chromedriverMac'
elif platform == 'linux':
    logger.debug('System platform : Linux')
    driver_path += 'chromedriverLinux'
elif platform == 'win32':
    logger.debug('System platform : Windows')
    driver_path += 'chromedriverWindow'
else:
Example #14
APP_NAME = "startup_mailer"
LOG_DIRECTORY = Path("./logs")
LOG_DIRECTORY.mkdir(parents=True, exist_ok=True)

GMAIL_USERNAME = os.getenv("GMAIL_USERNAME")
GMAIL_PASSWORD = os.getenv("GMAIL_PASSWORD")
RECIPIENTS = os.getenv("STARTUP_RECIPIENTS").replace(" ", "").split(",")
DEFAULT_RECIPIENTS = ["*****@*****.**"]

if os.path.exists(LOG_DIRECTORY):
    initial_message = f"{APP_NAME} started."
else:
    initial_message = f"{APP_NAME} started, but the logging directory, " \
                      f"{LOG_DIRECTORY}, does not exist."
    LOG_DIRECTORY = Path(__file__).resolve()
LOGGER = setup_custom_logger(APP_NAME, LOG_DIRECTORY)
LOGGER.info(initial_message)


def get_ip() -> str:
    """
    Returns the IP address for the Raspberry Pi.
    :return: IP address string.
    """
    # Connecting a UDP socket sends no packets, but it makes the OS pick the
    # outbound interface, whose local address getsockname() then reports.
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    s.connect(("8.8.8.8", 53))
    return s.getsockname()[0]


def system_status_information_as_html() -> str:
    """
Example #15
from azure.storage.queue import QueueService
import sys
import json
import datetime
import socket
import pprint

import logger

log = logger.setup_custom_logger('consumer')

try:
    with open('config.json') as config_file:
        config = json.load(config_file)
except FileNotFoundError:
    print("No config.json set")
    exit(1)

print("Getting meta from: " + config['queueName'])

queue_service = QueueService(account_name=config['accountName'],
                             account_key=config['accountKey'])

metadata = queue_service.get_queue_metadata(config['queueName'])

log.info(metadata.approximate_message_count)
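
# A matching config.json would look like this (values hypothetical):
# {
#     "queueName": "my-queue",
#     "accountName": "mystorageaccount",
#     "accountKey": "<base64-account-key>"
# }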
Example #16
import time
from transactions import Transaction
import copy
from config import Env
from chain import Chain
from genesis_helpers import mk_genesis_data
from apply import validate_transaction, apply_transaction
from keystore import Keystore
import rlp
import netaddr
from netaddr import IPNetwork, IPAddress, IPSet
from utils import address, normalize_address
from chain_service import ChainService
import logger

logger.setup_custom_logger('Database')

print "Loading the environment..."
db = LevelDB("./chain")
env = Env(db)

print "Loading chain..."
chain = ChainService(env)

print "Loading keystores..."

add1 = "094a2c9f5b46416b9b9bd9f1efa1f3a73d46cec2"
add2 = "7719818983cb546d1badee634621dad4214cba25"
add3 = "a3e04410f475b813c01ca77ff12cb277991e62d2"

ks1 = Keystore.load("./keystore/094a2c9f5b46416b9b9bd9f1efa1f3a73d46cec2",
Example #17
import pandas as pd
from models import FileDiffInfo
from logger import setup_custom_logger

logger = setup_custom_logger('xlsxhandler')


def _compare_file_list(compare_list: list, compare_target_list: list) -> list:
    """
    Compares two file lists and, if any entries differ, returns the
    names of those files in a list.

    :param compare_list: list to compare
    :param compare_target_list: list to compare against
    :return: list of differing items
    """
    result_list = []
    for name in compare_list:
        if name not in compare_target_list:
            result_list.append(name)

    return result_list
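
# Worked example (not in the original module):
#   _compare_file_list(['a.xlsx', 'b.xlsx'], ['b.xlsx']) -> ['a.xlsx']
# i.e. the entries of the first list that are missing from the second.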


def get_dir_update_info(before_xlsx_path_list: list, after_xlsx_path_list: list) -> (list, list):
    """
    이전 파일 리스트와 현재 파일리스트를 비교하여
    삭제된 파일과 추가된 파일을 파악하여 반환

    :param before_xlsx_path_list: 이전 파일경로 리스트
    :param after_xlsx_path_list: 업데이트 후 파일경로 리스트
Example #18
    def __init__(self, capacity, instance_name=''):
        self.capacity = capacity

        self.lg = setup_custom_logger('Memory: ' + instance_name,
                                      instance_name)
        self.lg.debug('Memory initializing...')
Example #19
def init_logger():
    logger.setup_custom_logger('Main')
    logger.setup_custom_logger('Database')
    logger.setup_custom_logger('P2P')
    logger.setup_custom_logger('OOR')
    logger.setup_custom_logger('Consensus')
    logger.setup_custom_logger('Parser')
Example #20
            log.info("COMMANDS:" + content)
            if (actions is not None and actions['data'] is not None
                    and actions['data']['actionRequests'] is not None):
                log.info("actionRequests: " +
                         json.dumps(actions['data']['actionRequests']))
                for i in actions['data']['actionRequests']:
                    command_processor.commandQueue.put(i)

    except Exception:
        traceback.print_exc()


if __name__ == '__main__':

    log = logger.setup_custom_logger('main')

    log.info("STARTING MAIN MODULE")

    httplib2.debuglevel = 0
    http = httplib2.Http(timeout=HTTP_TIMEOUT)
    content_type_header = "application/json"

    #parse args

    parser = argparse.ArgumentParser()
    parser.add_argument("--config", default="/home/pi/main.cfg")
    args = parser.parse_args()

    cfg = args.config
Example #21
from oauth2client import tools
from oauth2client.file import Storage
import json
import datetime
from datetime import timedelta
from bs4 import BeautifulSoup
import requests
from dateutil import parser
import pytz
try:
    import argparse
    flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()
except ImportError:
    flags = None

logger = l.setup_custom_logger(__name__)

SCOPES = 'https://www.googleapis.com/auth/calendar'
CLIENT_SECRET_FILE = 'configs/client_secret.json'
APPLICATION_NAME = 'Google Calendar API Python Quickstart'


def get_credentials():
    home_dir = os.path.expanduser('~')
    credential_dir = os.path.join(home_dir, '.credentials')
    if not os.path.exists(credential_dir):
        os.makedirs(credential_dir)
    credential_path = os.path.join(credential_dir,
                                   'calendar-python-quickstart.json')

    store = Storage(credential_path)
Example #22
import os
import numpy as np
import glob
from logger import setup_custom_logger
from data import Data
from image_encoder import ImageEncoder
import boto3
from botocore.exceptions import ClientError
import zlib

log = setup_custom_logger('similarity')
LOCAL_PATH = 'live_products/'
TOTAL_SIMILARITIES_PER_PRODUCT = 1500
MIN_SIMILARITIES_EXPECTED = 100

STORAGE_SESSION = boto3.session.Session()
STORAGE_CLIENT = STORAGE_SESSION.client(
    's3',
    region_name=os.environ.get('AWS_REGION_NAME'),
    endpoint_url=os.environ.get('AWS_S3_ENDPOINT_URL'),
    aws_access_key_id=os.environ.get('AWS_ACCESS_KEY_ID'),
    aws_secret_access_key=os.environ.get('AWS_SECRET_ACCESS_KEY'))
SIMILARITY_BUCKET = os.environ.get('AWS_SIMILARITY_BUCKET_NAME')

def exists(file):
    return os.path.isfile(LOCAL_PATH + os.path.basename(file))

def download_from_s3(file, path_to_local_file):
    STORAGE_CLIENT.download_file(SIMILARITY_BUCKET, file, path_to_local_file)    

def download(file):