def __init__(self, config_file='/config/switch.txt'):
     config_loader = ConfigurationLoader(config_file)
     configs = config_loader.load_configuration('mqtt_broker', 'mqtt_topic',
                                                'mqtt_id', 'switch_pin',
                                                'switch_update_period')
     self.config_file = config_file
     self.switch_update_period = int(configs['switch_update_period'])
     self.mqtt_client = MQTTClient(configs['mqtt_id'],
                                   configs['mqtt_broker'])
     self.mqtt_client.DEBUG = True
     self.mqtt_topic = configs['mqtt_topic']
     self.switch_pin_num = int(configs['switch_pin'])
     self.switch_pin = Pin(self.switch_pin_num, Pin.IN)
     self.id = configs['mqtt_id']
     self.mqtt_broker = configs['mqtt_broker']
     self.logger = MyLogger(False)
     self.logger.log('DEBUG', self.id,
                     'Connecting to {}...'.format(self.mqtt_broker))
     try:
         self.mqtt_client.connect()
         self.logger.log('INFO', self.id,
                         'Reconnected to {}'.format(self.mqtt_broker))
     except:
         self.logger.log(
             'ERROR', self.id,
             'Connection failure to {}'.format(self.mqtt_broker))
     self.last_switch_position = self.switch_pin.value()
     self.mqtt_messages_sent = 0
     self.debounce_time = 0.5
     self.timer = None
     self.init_timer()
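The constructor ends by calling init_timer(), which is not shown in this example. Below is a minimal, hypothetical sketch of what such a method could do on MicroPython, assuming a periodic machine.Timer poll, that switch_update_period is in seconds, and the umqtt-style publish() call; the _check_switch helper name is an invention for illustration.

# Hypothetical sketch only: poll the switch pin periodically and publish a
# message whenever its position changes.
from machine import Timer

def init_timer(self):
    # Virtual (software) timer; period taken from the loaded configuration,
    # assumed to be expressed in seconds.
    self.timer = Timer(-1)
    self.timer.init(period=self.switch_update_period * 1000,
                    mode=Timer.PERIODIC,
                    callback=self._check_switch)

def _check_switch(self, _timer):
    position = self.switch_pin.value()
    if position != self.last_switch_position:
        self.last_switch_position = position
        self.mqtt_client.publish(self.mqtt_topic, str(position))
        self.mqtt_messages_sent += 1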
Example #2
 def __init__(self):
     self.log = MyLogger('ADB', LOG_LEVEL=logging.INFO)
     client = Client(host='127.0.0.1', port=5037)
     self.log.debug(client.version())
     devices = client.devices()
     if len(devices) == 0:
         self.log.debug("no devices")
         quit()
     self.device = devices[0]
     self.log.debug(f'updating info for {self.device}')
     number = 5
     touch_id = 0
     lines = self.device.shell('getevent -p').split("\n")
     for line in lines:
         if "/dev/input" in line:
             number = line[-1]
         if "Touch" in line:
             touch_id = number
             self.touch = f"sendevent /dev/input/event{number}"
         if "max" in line and "ABS" in line and number == touch_id:
             values = line.split(', ')
             for value in values:
                 if "max" in value:
                     self.max = int(value[4:])
                     self.log.debug(f"found max: {self.max}")
Example #3
    def __init__(self):

        self.global_config = parse_json(default_config_path)
        self.total_stock = self.global_config["target_stock"]
        self.total_index = self.global_config["target_index"]
        self.total_mao = self.global_config["target_mao"]
        self.logger = MyLogger("monitor.py - Stock Monitor").get_logger()
Example #4
 def __init__(self, device, tasklist):
     self.log = MyLogger('Crops', LOG_LEVEL=logging.INFO)
     self.device = device
     self.tasklist = tasklist
     self.data = []
     self.settings = []
     self.updateListData()
Example #5
    def __init__(self, map):
        self.log = MyLogger('Game')
        self.player_x = 0
        self.player_y = 0
        self.map = map
        self.current_room = self._get_room(0, 0)

        self._look_at(self.current_room)
Example #6
 def __init__(self, device, tasklist):
     HD.__init__(self, device, tasklist, 'money')
     self.log=MyLogger('shop', LOG_LEVEL=logging.DEBUG)
     self.tasklist.addtask(20, f"checking for items to sell", self.image, self.checkItems)
     self.shoplist=Shoplist(self)
     self.position=HD.getPos([-15,0])
     self.slots=4
     self.max_slots=4
     self.atshop=False
     self.temp_shop=self.device.loadTemplates('shop','')
Example #7
	def __init__(self, loggerfile, modelfile=False):
		self.logger = MyLogger(loggerfile)
		self.trafficLights = ni.trafficLights
		self.linkEdges = ni.linkEdges
		if modelfile == False:
			self.model = self.createModel()
		else:
			self.logger.debug(' load model : ' + modelfile)
			self.model = torch.load(modelfile)
		self.actionMap = self.createActionMap()
Example #8
 def __init__(self, config_file):
     self.config_loader = ConfigurationLoader(config_file)
     configs = self.config_loader.load_configuration(
         'check_delay', 'broker', 'topic', 'mqtt_id',
         'installed_version_file', 'mqtt_logger_conf')
     self.check_delay = int(configs['check_delay'])
     self.logger = MyLogger(True, configs['mqtt_logger_conf'])
     self.mqtt_client = MQTTClient(configs['mqtt_id'], configs['broker'])
     self.mqtt_client.DEBUG = True
     self.mqtt_client.set_callback(self.read_update)
     self.mqtt_topic = configs['topic']
Example #9
 def __init__(self, device, tasklist, item):
     self.log = MyLogger('HD', LOG_LEVEL=logging.INFO)
     self.device = device
     self.tasklist = tasklist
     self.scheduled = False
     file = path.join('images', 'products', f'{item}.png')
     self.image = file if path.isfile(file) else path.join(
         'images', 'no_image.png')
     self.jobs = 0
     self.waiting = 0
     self.loadImages()
Example #10
 def __init__(self,
              image_filename,
              conf_data,
              genedict=None,
              conflict_color='green',
              mode='base64',
              logger=None):
     self.image_filename = image_filename
     self.conf_data = conf_data
     self.conflict_color = conflict_color
     self.mode = mode
     self.genedict = genedict or {}
     self.logger = logger or MyLogger(name='SVG')
Example #11
 def __init__(self, x, y, name, description, exits):
     """
     :param x: room x
     :param y: room y
     :param name: room name
     :param description: room description
     """
     self.x = x
     self.y = y
     self.name = name
     self.description = description
     self.exits = exits
     self.log = MyLogger('Room')
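For reference, constructing a room with the parameters documented above could look like the snippet below; the shape of the exits mapping is an assumption, since the docstring does not describe it.

# Hypothetical usage of the Room constructor above.
entrance = Room(0, 0, 'Entrance',
                'A dusty hall with a single door to the north.',
                {'north': (0, 1)})  # assumed exits format: direction -> coordinates
print(entrance.name, entrance.exits)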
Example #12
	def __init__(self, config_file, networks_file):
		self.config_loader = ConfigurationLoader(config_file)
		self.wlan = network.WLAN(network.STA_IF)
		self.wlan.active(False)
		sleep(0.5)
		self.wlan.active(True)
		sleep(0.5)
		self.wlan.disconnect()
		configs = self.config_loader.load_configuration('check_delay', 'mqtt_conf_file')
		self.check_delay = int(configs['check_delay'])
		mqtt_conf_file = configs['mqtt_conf_file']
		self.logger = MyLogger(mqtt=False, mqtt_conf=mqtt_conf_file)
		self.networks_file = networks_file
Example #13
 def __init__(self):
     # Browser request headers
     self.headers = {
         'content-type':
         'application/json',
         'User-Agent':
         'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:22.0) Gecko/20100101 Firefox/22.0'
     }
     self.pattern = r'^jsonpgz\((.*)\)'
     self.total_fund = None
     self.last_update_time = None
     self.global_config = parse_json(default_config_path)
     self.total_fund_file = self.global_config["total_fund_path"]
     self.target_fund = self.global_config["target_fund"]
     self.logger = MyLogger("monitor.py - Fund Monitor").get_logger()
Example #14
def get_download_options():
    """ Prepares download options for the dl library
    the default config is mp3 but can be changed
    :return:
    """
    return {
        'format':
        'bestaudio/best',
        'postprocessors': [{
            'key': 'FFmpegExtractAudio',
            'preferredcodec': 'mp3',
            'preferredquality': '192',
        }],
        'logger':
        MyLogger(),
        'progress_hooks': [my_hook],
    }
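The docstring's "dl library" is presumably youtube-dl (or its yt-dlp fork), whose option keys match the ones above. A minimal sketch of how the returned options could be consumed; download_as_mp3 is a hypothetical helper, not part of the original module.

import youtube_dl  # or: import yt_dlp as youtube_dl

def download_as_mp3(url):
    # YoutubeDL consumes the options dict, including the custom logger
    # and the progress hooks defined in this module.
    with youtube_dl.YoutubeDL(get_download_options()) as ydl:
        ydl.download([url])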
Example #15
 def __init__(self, *args, **kwargs):
     try:
         self.logger = {}
         for name in ["MAIN", "POSSIBILITY", "SOLVER", "COMBO", "OUTPUT"]:
             self.logger[name] = MyLogger(name, logging.INFO)
         self.numRounds, self.numTables, self.tableSize = self.shape
         self.numPlayers = self.numTables * self.tableSize
         self.lowest = self.numPlayers
         self.blocks = {}
         self.scores = {}
         self.timers = {}
         self.count = 0
         self.lastcount = 0
         self.lasttime = 0
         self.level = 0
         self.start_time = 0
         self.running = False
         self.done = False
         self.show_function = self.show_default
         numBlocks = self.numPlayers - (1 + self.numRounds * 3)
         if numBlocks:
             for n in range(1, self.numPlayers + 1):
                 if numBlocks == 1:
                     blocked_player = (n + self.numPlayers // 2 -
                                       1) % self.numPlayers + 1
                     self.blocks[n] = [blocked_player]
                 else:
                     self.blocks[n] = [
                         (n + 3) % self.numPlayers + 1,
                         (n + self.numPlayers - 5) % self.numPlayers + 1
                     ]  #plus four and minus four
         self.resetScores(self.tableSize + 2)
         self.load_data()
     except Exception as e:
         print('ERROR bij het maken van Grid')
         print(e)
         self.numRounds = 0
         self.numTables = 0
         self.tableSize = 0
         self.numPlayers = 0
     finally:
         print('rounds: {}'.format(self.numRounds))
         print('tables: {}'.format(self.numTables))
         print('tablesize: {}'.format(self.tableSize))
         print('players: {}'.format(self.numPlayers))
Example #16
 def __init__(self, device, tasklist):
     HD.__init__(self, device, tasklist, 'board')
     self.device = device
     self.tasklist = tasklist
     self.log = MyLogger('Board', LOG_LEVEL=logging.INFO)
     self.nextcheck = 0.1
     self.image = path.join('images', 'board', 'car_button_C.png')
     self.base_template = HD.loadTemplates('board', 'base')
     self.complete_templates = HD.loadTemplates('board', 'check')
     # self.card_template=HD.loadTemplates('board','pins')
     self.product_templates = {}
     self.car = [1335, 775]
     self.bin = [1175, 780]
     self.cards = []
     for location in [[290, 290], [535, 290], [775, 290], [290, 520],
                      [535, 520], [775, 520], [290, 730], [535, 730],
                      [775, 730]]:
         self.cards.append(Card(tasklist, location))
     self.product_images = []
     self.checkImages()
     self.tasklist.addtask(self.nextcheck, 'board', self.image, self.check)
Example #17
import os
import random
import string
from random import randrange
from datetime import datetime, timedelta

import yfinance as yf
from elasticsearch import Elasticsearch
from elasticsearch import helpers

from logger import MyLogger

LOGGER = MyLogger(log_file="", name=__file__)
STOCKS_LIST = str(os.getenv("STOCKS", "AAPL,TSLA,MSFT")).split(",")
INDEX_1_MIN = os.getenv("INDEX_1_MIN", "us_tickers_1m")
INDEX_1_DAY = os.getenv("INDEX_1_DAY", "us_tickers_1d")
ES_HOST = os.getenv("ES_HOST", "localhost")
ES_PORT = os.getenv("ES_PORT", "9200")
ES_USERNAME = os.getenv("ES_USERNAME", "elastic")
ES_PASSWORD = os.getenv("ES_PASSWORD", "SOME_RANDOM_PASS")


def random_id_generator():
    id_size = randrange(5, 10)
    return "".join(
        random.choices(string.ascii_uppercase + string.digits, k=id_size))


def doc_generator(df, ticker_name, date_time_field, es_index):
    df_iter = df.iterrows()
    for df_idx, document in df_iter:
Example #18
 def setUp(self):  # This method runs before each test
     self.__parse_config('./config.ini')
     self.driver = webdriver.Chrome()
     self.driver.get(self.__app_link)
     self.calculator = WebCalculator(self.driver)
     self.log = MyLogger('TestCaseLog').logger()
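The matching tearDown is not part of this example; conventionally it would close the browser after each test, for instance:

def tearDown(self):  # sketch, not from the original suite: runs after each test
    self.driver.quit()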
Example #19
    s = self.spacer * (self.spacer_increment*(self.depth+1))
    label_line = '{s}{ti} {label}'.format(s=s, ti=self.terminator_in, label=self.label[self.depth])
    if self.put_stdout:
      print(label_line)
    if self.callback is not None:
      self.callback(label_line)
    return label_line
  def __impl_out(self, label=None):
    s = self.spacer * (self.spacer_increment*(self.depth+1))
    label_line = '{to}{s} {label}'.format(s=s, to=self.terminator_out, label=self.label[self.depth])
    if self.put_stdout:
      print(label_line)
    if self.callback is not None:
      self.callback(label_line)
    self.__decrease()
    return label_line
  def __enter__(self):
    self.__impl_in()
    return self
  def __exit__(self, ex_type, ex_value, trace):
    if ex_type is None:
      self.__impl_out()

from logger import MyLogger
if __name__ == '__main__':
  lg = MyLogger()
  with InOutProgressor('level1', callback=lg.log_inf) as pg:
    with pg.phase('test') as pg2:
      pg2.enter('aaaa')
      print('!!!!')
      pg2.exit()
Example #20

class adict(dict):
    ''' Attribute dictionary - a convenience data structure, similar to SimpleNamespace in python 3.3
        One can use attributes to read/write dictionary content.
    '''
    def __init__(self, *av, **kav):
        dict.__init__(self, *av, **kav)
        self.__dict__ = self
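As a quick illustration of the attribute-dictionary pattern (not part of the original notebook):

# adict keys are readable and writable as attributes.
cfg = adict(hidden_dim=256, dropout=0.2)
assert cfg.hidden_dim == cfg['hidden_dim'] == 256
cfg.batch_size = 32          # attribute write is also a dict write
assert cfg['batch_size'] == 32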


FLAGS = adict(dic)

# In[10]:

CM_logger = MyLogger("CMTest")
load_data_fast()

# In[11]:

rdm_model = RDM_Model(768, 300, 256, 0.2).cuda()
cm_model = CM_Model(300, 256, 2).cuda()
rdm_classifier = nn.Linear(256, 2).cuda()
cm_log_dir = "CMBertTrain"

# senti_save_as = '/home/hadoop/ERD/%s/sentiModel_epoch%03d.pkl' % ("BERTSubjObj/", 0)

# checkpoint = torch.load(senti_save_as)

# bert.load_state_dict(checkpoint['bert'])
Example #21
                                                                            sum_loss, sum_acc,
                                                                            ))
                    writer.add_scalar('Valid Loss', loss, step%1000)
                    writer.add_scalar('Valid Accuracy', acc, step%1000)
                    save_as = '/home/hadoop/ERD/BERTTwitter/epoch%03d_%.4f.pkl' % (step%1000, sum_acc)
                    torch.save(senti_model.state_dict(), save_as)
                    sum_acc = 0.0
                    sum_loss = 0.0
                    


# In[64]:


from logger import MyLogger
logger = MyLogger("BERTTwitter")
Train(bb, train_reader, valid_reader, test_reader, 768, logger, "BERTTwitter")


# In[ ]:


# # Each architecture is provided with several class for fine-tuning on down-stream tasks, e.g.
# BERT_MODEL_CLASSES = [BertModel, BertForPreTraining, BertForMaskedLM, BertForNextSentencePrediction,
#                       BertForSequenceClassification, BertForMultipleChoice, BertForTokenClassification,
#                       BertForQuestionAnswering]

# # All the classes for an architecture can be initiated from pretrained weights for this architecture
# # Note that additional weights added for fine-tuning are only initialized
# # and need to be trained on the down-stream task
# tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
Example #22
	def __init__(self, logfile):
		self.log = MyLogger(logfile)
		self.createModel()
Example #23
# coding: utf-8
from collections import deque
from ERDModel import RL_GRU2
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), '../'))
from dataUtils import *
from logger import MyLogger
os.chdir("/home/hadoop/ERD")

tf.logging.set_verbosity(tf.logging.ERROR)

logger = MyLogger("ERDMain")


def df_train(sess, summary_writer, mm, t_acc, t_steps, new_data_len=[]):
    sum_loss = 0.0
    sum_acc = 0.0
    ret_acc = 0.0
    init_states = np.zeros([FLAGS.batch_size, FLAGS.hidden_dim],
                           dtype=np.float32)

    for i in range(t_steps):
        if len(new_data_len) > 0:
            x, x_len, y = get_df_batch(i, new_data_len)
        else:
            x, x_len, y = get_df_batch(i)
        feed_dic = {
            mm.input_x: x,
            mm.x_len: x_len,
            mm.input_y: y,
Example #24
def test_tensorboard():
    from logger import MyLogger
    mylogger = MyLogger('./log')
    input1 = tf.constant([1.0, 2.0, 3.0], name='input1')
    input2 = tf.Variable(tf.random_uniform([3]), name='input2')
    output = tf.add_n([input1, input2], name='add')
Example #25
def test_discriminator():
    from logger import MyLogger
    mylogger = MyLogger('./log')

    configs = config.configs['gail']
    print(tf.get_default_session())
    with tf.Session() as sess:
        d = Discriminator(arch_params=configs.discriminator_params,
                          stddev=0.02)
        s1 = np.array([[1, 2, 3, 4], [1, 2, 3, 4]]).reshape([-1, 4])
        a1 = np.array([[1, -1], [1, -1]]).reshape([-1, 2])

        s2 = np.array([8, -6, 5, 7]).reshape([-1, 4])
        a2 = np.array([1, 1]).reshape([-1, 2])

        is_training = tf.placeholder(tf.bool)
        e_s = tf.placeholder(dtype=tf.float32,
                             shape=list(s1.shape),
                             name='e_s')
        e_a = tf.placeholder(dtype=tf.float32,
                             shape=list(a1.shape),
                             name='e_a')
        g_s = tf.placeholder(dtype=tf.float32,
                             shape=list(s2.shape),
                             name='g_s')
        g_a = tf.placeholder(dtype=tf.float32,
                             shape=list(a2.shape),
                             name='g_a')
        # print([None]+list(s1.shape))

        e_output = d(state=e_s, action=e_a, is_training=is_training)
        g_output = d(state=g_s,
                     action=g_a,
                     is_training=is_training,
                     reuse=True)

        discriminator_loss = -tf.reduce_mean(
            tf.log(e_output + configs.epsilon) +
            tf.log(1 - g_output + configs.epsilon))

        # # tf.GraphKeys.UPDATE_OPS ,tf.GraphKeys.TRAINABLE_VARIABLES
        # with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
        #     discriminator_train_step = tf.train.AdamOptimizer(configs.learning_rate, configs.beta1,
        #                                                       configs.beta2).minimize(discriminator_loss)

        discriminator_train_step = tf.train.AdamOptimizer(
            configs.learning_rate, configs.beta1, configs.beta2).minimize(
                discriminator_loss,
                var_list=tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                           scope='discriminator'))

        tf.global_variables_initializer().run(session=sess)

        for i in range(100):
            eoutput, goutput, _, dloss = sess.run([
                e_output, g_output, discriminator_train_step,
                discriminator_loss
            ],
                                                  feed_dict={
                                                      e_s: s1,
                                                      e_a: a1,
                                                      g_s: s2,
                                                      g_a: a2,
                                                      is_training: False
                                                  })
            s1 += 0
            a1 += 0
            mylogger.write_summary_scalar(iteration=i,
                                          tag='dloss',
                                          value=dloss)
            mylogger.write_summary_scalar(iteration=i, tag='loss', value=dloss)
Example #26
import random
import string
import datetime
import logging
import colorlog

from UI_controller import UI_controller
from init import main as init
from uiauto.manifest import Manifest
from pymodel import pmt
from util import start_android_emulator, revoke_at, term_by_port,\
                fetch_uiaction, launch_appium, launch_mitmdump
from logger import MyLogger
import tools

running_logger = MyLogger('Tester').get_logger()
apkfile = ''


def set_state(appium_port,
              proxy_port,
              system_port,
              current_user,
              package,
              launcher_activity,
              version,
              idp_name,
              ui_support,
              reset=False,
              emulator_name=None,
              snapshot_tag=None):
Example #27
#!/usr/bin/env python
# coding: utf-8

# In[8]:

import dataloader

# In[4]:
from logger import MyLogger

import time
from SentCNN import *

logger = MyLogger("SentTrain")


def get_curtime():
    return time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))


sent_mm = SentCNN(300, 256, 41, 3)

# In[5]:

import tensorflow as tf
sent_global_step = tf.Variable(0, name="global_step", trainable=False)
sent_train_op = tf.train.AdagradOptimizer(0.01).minimize(
    sent_mm.loss, sent_global_step)

# In[1]:
Example #28
from logger import MyLogger

logger = MyLogger().logger


def test(msg=None):  # accepts the optional ROS message so it also works as a subscriber callback
    logger.info('test here')


if __name__ == '__main__':
    import rospy
    from std_msgs.msg import Int32, Empty

    rospy.init_node('build_classifier')
    rospy.Subscriber('/exercise/mode', Int32, test)

    print "Classifier launched. Listening to message..."
    logger.info('Classifier launched. Listening to message...')

    logger.info('main log')
    test()
    rospy.spin()
Example #29
                    "VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s",
                    line)
            self.mydb.commit()
        logger.info("Finished processing {}".format(filename))
        self.update_set(filename)

    def process_folder(self):
        for file in os.listdir(self.watchedFilePath):
            file = os.path.join(self.watchedFilePath, file)
            if file not in self.list_of_processed:
                self.process_file(file)
        self.mydb.cursor().close()


if __name__ == '__main__':
    logger_obj = MyLogger()
    logger = logger_obj.get_logger()
    sys.excepthook = logger_obj.handle_exception
    parser = arp.ArgumentParser(prog='csvWatcher')
    parser.add_argument('-d',
                        '--dir-path',
                        help='Path to watched dir',
                        required=True)
    parser.add_argument('-c',
                        '--creds-path',
                        help='Path to config file',
                        required=True)
    args: arp.Namespace = parser.parse_args()
    watcher_props = CsvWatcherProps(args.dir_path, args.creds_path)
    watcher_props.process_folder()
Example #30
if __name__ == '__main__':
    model_number = None

    episodes = 1000
    max_step_per_episode = 100

    date = ''
    # create env and agent
    env = Myindex()  # the environment

    state_dim = 3 * len(env.columns)
    action_dim = 3 * len(env.columns)

    agent = DQN(state_dim, action_dim,
                MyLogger(path='../data/qindex_train_dqn_log' + date + '.txt'),
                model_number)

    agent.logger.write(
        ['state_dim = ', state_dim, ', action_dim = ', action_dim, '\n'])

    print('start=-=-=-=-=-=-=')

    rewards_exp = list()

    for episode in range(episodes):
        print("episode:", episode)
        episode_start_time = time.time()

        # Reset the environment at the start of each episode and initialize this episode's bookkeeping
        state = env.reset()  # For episode 0 all tables must be dropped manually: reset() cannot detect how many tables actually exist in the database, it only tracks how many have been inserted