Example #2
def handle(self):
    data = DataProvider()
    while True:
        try:
            self.request.send(data.getData())
        except OSError:
            # Client closed/lost connection
            break
        time.sleep(1.0 / 15.0)  # throttle to ~15 updates per second

def main():
    # Pulse GPIO pin 4 low for three seconds (presumably a hardware reset line).
    GPIO.setwarnings(False)
    GPIO.setmode(GPIO.BCM)
    GPIO.setup(4, GPIO.OUT)
    GPIO.output(4, GPIO.LOW)
    time.sleep(3)
    GPIO.cleanup()

    data_provider = DataProvider(DB_PATH)

    configuration = WeightProcessorConfiguration(MAX_PAUSE_BETWEEN_MORNING_CHECKS_IN_DAYS,
                                                 MAX_WEIGHT_DIFF_BETWEEN_MORNING_CHECKS,
                                                 USERS_MAX_W_DIFF,
                                                 MORNING_HOURS)

    user_provider = UserProvider(USERS)
    weight_processor = WeightProcessor(data_provider,
                                       configuration,
                                       user_provider,
                                       FitbitConnector(FITBIT_CLIENT_ID, FITBIT_CLIENT_SECRET, user_provider))

    events_processor = EventProcessor(weight_processor)
    board = Wiiboard(events_processor)

    if len(sys.argv) == 1:
        logging.debug("Discovering board...")
        address = board.discover()
    else:
        address = sys.argv[1]

    logging.debug("Trying to connect...")
    board.connect(address)  # The wii board must be in sync mode at this time
    board.set_light(False)

    while True:
        events_processor.reset()
        board.receive()

        weight = events_processor.weight + 2  # constant offset applied to the raw board reading

        today = datetime.today()
        weight_record = WeightRecord({'year': today.year,
                                      'month': today.month,
                                      'day': today.day,
                                      'w': weight})

        user = weight_processor.get_user_by_weight(weight)
        display.render(str(weight), WHITE, safe_text(user))
        weight_processor.process(weight_record)
        display.render_graph(data_provider.all_mornings(user))

        board.set_light(False)
        logging.debug('Ready for next job')
Example #5
def main():
    model = DeepDP(training['model_cfg']['input_dim'],
                   training['model_cfg']['hidden_size'],
                   training['model_cfg']['num_factors'],
                   training['model_cfg']['latent_dim'])

    dataprovider = DataProvider(training['sample_size'],
                                training['model_cfg']['input_dim'])
    optimizer = get_optimizer(model)
    for i in range(training['epochs']):
        run_epoch(model, optimizer, dataprovider, i)
    meta_file_loc = os.path.join(training['result_dir'], 'metafile.pkl')
    with open(meta_file_loc, 'wb') as f:  # pickle requires a binary-mode file
        pickle.dump(training, f)
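
The snippet reads a module-level training dict. A hypothetical shape for it, inferred purely from the keys referenced above (all values are placeholders):

# Hypothetical config; only the keys are taken from the code above.
training = {
    'sample_size': 10000,     # passed to DataProvider
    'epochs': 20,             # number of run_epoch iterations
    'result_dir': 'results',  # where metafile.pkl is written
    'model_cfg': {            # passed field-by-field to DeepDP
        'input_dim': 784,
        'hidden_size': 256,
        'num_factors': 10,
        'latent_dim': 32,
    },
}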
Example #6
from typing import List


class MarketPlace:
    def __init__(self, robots: List['Robot'], ticker: str):
        self.data_provider = DataProvider()
        self.robots = robots
        self.accountant = Accountant(robots)
        self.order_book = OrderBook(ticker)

    def training(self, data: 'pd.DataFrame'):
        for robot in self.robots:
            robot.train(data)

    def trading(self):
        orders = []
        data_from_order_book = self.order_book.get_data()
        robots_orders = self.order_book.get_robots_orders()
        for robot in self.robots:
            data_from_accountant = self.accountant.get_data(robot.username)
            robot.update(data_from_order_book, robots_orders[robot.username],
                         data_from_accountant)
            robot.on_tick()
            orders += robot.gather_new_orders()
        self.order_book.register_orders(orders)
        trades = self.order_book.get_last_trades()
        self.accountant.register_trades(trades)

    def start(self, path: str):
        # Pre-trade training is disabled; re-enable when training data is available.
        # training_data = self.data_provider.get_training_data()
        # self.training(training_data)
        self.data_provider.prepare_for_trading(path)
        i = 0
        total_orders = 0
        for orders in self.data_provider:
            i += 1
            total_orders += len(orders)
            self.order_book.register_orders(orders)
            print(i)  # progress indicator
        print(self.order_book.total_deals)
        print(total_orders)
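
A minimal driver for the class, where MyRobot is any hypothetical class exposing the interface MarketPlace relies on (username, train, update, on_tick, gather_new_orders):

# Hypothetical usage sketch; MyRobot and the data path are assumptions.
robots = [MyRobot(username='r1'), MyRobot(username='r2')]
market = MarketPlace(robots, ticker='AAPL')
market.start('data/orders.csv')  # consumed by DataProvider.prepare_for_trading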
Example #7
def main(path, is_training, is_predicting, model_weights_file,
         submission_file):
    print('Starting train_statefarm.py')
    print('* using path: {0}'.format(path))
    print('* training: {0}, predicting: {1}'.format(is_training,
                                                    is_predicting))

    batch_size = 64
    data_provider = DataProvider(os.path.join(path, 'partition'), batch_size)
    feature_provider = FeatureProvider(data_provider)
    training_data_provider = TrainingDataProvider(data_provider,
                                                  feature_provider)

    builder = ModelBuilder(training_data_provider,
                           dropout=0.6,
                           batch_size=batch_size)

    if is_training:
        print('Train last layer of dense model with batch normalization.')
        builder.train_last_layer()
        print('Train dense layers of model with batch normalization.')
        builder.train_dense_layers()

    model = builder.build(data_provider)

    if not is_training:
        print('Loading model weights from {0}'.format(model_weights_file))
        model.load_weights(
            data_provider.get_weight_filepath(model_weights_file))
    else:
        model.train()
        print('Writing model weights to {0}'.format(model_weights_file))
        model.save_weights(
            data_provider.get_weight_filepath(model_weights_file))

    if is_predicting:
        print('Writing predictions to {0}'.format(submission_file))
        batch_size = 2
        data_provider = DataProvider(path, batch_size)
        predict_states(model, data_provider, batch_size, submission_file)
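
A hypothetical invocation, assuming the real script parses these arguments from the command line elsewhere:

if __name__ == '__main__':
    # Placeholder arguments; the paths and filenames here are assumptions.
    main(path='data/statefarm',
         is_training=True,
         is_predicting=True,
         model_weights_file='statefarm_weights.h5',
         submission_file='submission.csv')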
Example #8
class DatabaseBot(object):
    '''IPC command server exposing station data stored by DataProvider.'''
    def __init__(self, _config):
        self.logger = logger
        self.logger.critical('Object up: %s', self)

        self.__data_provider = DataProvider(_config['database_path'])
        self.__cmd_server = IPC_Server(_config['socket_file_path'])
        self.__cmd_server.register_cmd('GET_DESC', self.cmd_desc)
        self.__cmd_server.register_cmd('GET_MONTHLY', self.cmd_monthly_data)
        self.__cmd_server.register_cmd('GET_YEARLY', self.cmd_yearly_data)
        self.__cmd_server.register_cmd('GET_DAILY', self.cmd_daily_data)
        self.__cmd_server.register_cmd('GET_DATATREE', self.cmd_datatree)
        self.__cmd_server.register_cmd('GET_NEWEST', self.cmd_newest)
        self.__cmd_server.register_cmd('SET_SAMPLE', self.cmd_store_sample)
        self.__cmd_server.register_cmd('SET_DESC', self.cmd_station_register)

        # Commands registered, start the server
        self.__cmd_server.start()

        self.dataCatch = []
        for broker in _config['mqtt']:
            catcher = MqttCatcher(_config['socket_file_path'],
                                  broker[0],
                                  broker[1],
                                  _topic_prefix=broker[2],
                                  _protocol=broker[3])
            catcher.start()
            self.dataCatch.append(catcher)

    def cmd_station_register(self, _json_param):
        logger.info('cmd_station_register()')
        param = json.loads(_json_param)
        self.logger.info('Got register data: %s', param)
        self.__data_provider.stationRegister(param['uid'], param['desc'])
        return json.dumps('OK')

    def cmd_store_sample(self, _json_param):
        logger.info('cmd_store_sample()')
        param = json.loads(_json_param)
        self.logger.info('Got sample data: %s', param)
        self.__data_provider.stationSampleStore(param['sample'])
        return json.dumps('OK')

    def cmd_monthly_data(self, _json_param):
        logger.info('cmd_monthly_data()')
        param = json.loads(_json_param)
        req_data = self.__data_provider.getSerieMonthlyData(
            param["uid"], param["serie"], param["date"])
        return json.dumps(req_data)

    def cmd_yearly_data(self, _json_param):
        logger.info('cmd_yearly_data()')
        param = json.loads(_json_param)
        req_data = self.__data_provider.getSerieYearlyData(
            param["uid"], param["serie"], param["date"])
        return json.dumps(req_data)

    def cmd_daily_data(self, _json_param):
        logger.info('cmd_daily_data()')
        param = json.loads(_json_param)
        req_data = self.__data_provider.getSerieDayData(param["uid"],
                                                        param["serie"],
                                                        param["date"])
        return json.dumps(req_data)

    def cmd_datatree(self, _json_param):
        logger.info('cmd_datatree()')
        req_data = self.__data_provider.getStationsDateTree()
        return json.dumps(req_data)

    def cmd_newest(self, _json_param):
        logger.info('cmd_newest()')
        req_data = self.__data_provider.getStationsNewest()
        return json.dumps(req_data)

    def cmd_desc(self, _json_param):
        logger.info('cmd_desc()')
        req_data = self.__data_provider.getStationsDesc()
        return json.dumps(req_data)

    def proc(self):
        logger.info('proc()')
        while True:
            time.sleep(1)  # idle; all work happens in the server and catcher threads
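
A sketch of the _config dict the constructor expects; the keys come straight from __init__, while the values here are hypothetical. Each mqtt entry unpacks as (host, port, topic prefix, protocol):

# Hypothetical configuration; only the keys are taken from the code above.
config = {
    'database_path': '/var/lib/stations/stations.db',
    'socket_file_path': '/tmp/databasebot.sock',
    'mqtt': [
        ('localhost', 1883, 'sensors/', 'MQTTv311'),
    ],
}
bot = DatabaseBot(config)
bot.proc()  # blocks forever; commands are served on the IPC socket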
Example #9
import tensorflow as tf
from architectures.pretrained_encoder import encoder_model
from architectures.latent_space import latent_space
from architectures.decoder import decoder_model
from dataprovider import DataProvider
import os

# hyperparameters
epochs = 50
batch_size = 20
latent_units = 200
l_rate = 0.0001

# data
data_provider = DataProvider(batch_size, root_folder='../data')
train_num_batches, val_num_batches = data_provider.get_num_batches()

training_dataset_init, val_dataset_init, images, labels = data_provider.get_data()

# model
encoder = encoder_model(images)
latent_vector, mean, stddev = latent_space(encoder, latent_units)
predictions = decoder_model(latent_vector)

# losses
# squared reconstruction error, reduced over the image axes
generative_loss = tf.reduce_mean(
    tf.reduce_sum(tf.squared_difference(predictions, labels), axis=1), axis=1)
# KL divergence between the approximate posterior and a unit Gaussian
latent_loss = tf.reduce_mean(0.5 * tf.reduce_sum(
    tf.square(mean) + tf.square(stddev)
    - tf.log(1e-8 + tf.square(stddev)) - 1, 1))
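
The excerpt stops at the two loss terms. A typical VAE objective (an assumption here, not shown in the source) sums them and minimizes with the learning rate defined above, using the same TF1-style API as the rest of the snippet:

# Assumed continuation: combine the losses and build a train op.
total_loss = tf.reduce_mean(generative_loss) + latent_loss
train_op = tf.train.AdamOptimizer(l_rate).minimize(total_loss)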
Example #10
#!/usr/bin/env python3

from pygame.locals import *
from dataprovider import WeightRecord, DataProvider

data = DataProvider("/home/pi/weight_db")
all_records = data.all("Alex")
print(all_records)

count = sum(1 for _ in all_records)
print(count)
Example #12
class ProDj(Thread):
  def __init__(self):
    super().__init__()
    self.cl = ClientList(self)
    self.data = DataProvider(self)
    self.vcdj = Vcdj(self)
    self.nfs = NfsClient(self)
    self.keepalive_ip = "0.0.0.0"
    self.keepalive_port = 50000
    self.beat_ip = "0.0.0.0"
    self.beat_port = 50001
    self.status_ip = "0.0.0.0"
    self.status_port = 50002
    self.need_own_ip = OwnIpStatus.notNeeded
    self.own_ip = None

  def start(self):
    self.keepalive_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    self.keepalive_sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
    self.keepalive_sock.bind((self.keepalive_ip, self.keepalive_port))
    logging.info("Listening on {}:{} for keepalive packets".format(self.keepalive_ip, self.keepalive_port))
    self.beat_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    self.beat_sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
    self.beat_sock.bind((self.beat_ip, self.beat_port))
    logging.info("Listening on {}:{} for beat packets".format(self.beat_ip, self.beat_port))
    self.status_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    self.status_sock.bind((self.status_ip, self.status_port))
    logging.info("Listening on {}:{} for status packets".format(self.status_ip, self.status_port))
    self.socks = [self.keepalive_sock, self.beat_sock, self.status_sock]
    self.keep_running = True
    self.data.start()
    super().start()

  def stop(self):
    self.keep_running = False
    self.nfs.stop()
    self.data.stop()
    self.vcdj_disable()
    self.join()
    # close all three listening sockets
    self.keepalive_sock.close()
    self.beat_sock.close()
    self.status_sock.close()

  def vcdj_set_player_number(self, vcdj_player_number=5):
    logging.info("Player number set to {}".format(vcdj_player_number))
    self.vcdj.player_number = vcdj_player_number
    #self.data.dbc.own_player_number = vcdj_player_number

  def vcdj_enable(self):
    self.vcdj_set_iface()
    self.vcdj.start()

  def vcdj_disable(self):
    self.vcdj.stop()
    self.vcdj.join()

  def vcdj_set_iface(self):
    if self.own_ip is not None:
      self.vcdj.set_interface_data(*self.own_ip[1:4])

  def run(self):
    logging.debug("ProDj: starting main loop")
    while self.keep_running:
      rdy = select(self.socks, [], [], 1)[0]  # wait up to 1s for readable sockets
      for sock in rdy:
        if sock == self.keepalive_sock:
          data, addr = self.keepalive_sock.recvfrom(128)
          self.handle_keepalive_packet(data, addr)
        elif sock == self.beat_sock:
          data, addr = self.beat_sock.recvfrom(128)
          self.handle_beat_packet(data, addr)
        elif sock == self.status_sock:
          data, addr = self.status_sock.recvfrom(256)
          self.handle_status_packet(data, addr)
      self.cl.gc()
    logging.debug("ProDj: main loop finished")

  def handle_keepalive_packet(self, data, addr):
    #logging.debug("Broadcast keepalive packet from {}".format(addr))
    try:
      packet = packets.KeepAlivePacket.parse(data)
    except Exception as e:
      logging.warning("Failed to parse keepalive packet from {}, {} bytes: {}".format(addr, len(data), e))
      dump_packet_raw(data)
      return
    # all of these packet types give us enough information to store the client
    if packet["type"] in ["type_ip", "type_status", "type_change"]:
      self.cl.eatKeepalive(packet)
    if self.own_ip is None and len(self.cl.getClientIps()) > 0:
      self.own_ip = guess_own_iface(self.cl.getClientIps())
      if self.own_ip is not None:
        logging.info("Guessed own interface {} ip {} mask {} mac {}".format(*self.own_ip))
        self.vcdj_set_iface()
    dump_keepalive_packet(packet)

  def handle_beat_packet(self, data, addr):
    #logging.debug("Broadcast beat packet from {}".format(addr))
    try:
      packet = packets.BeatPacket.parse(data)
    except Exception as e:
      logging.warning("Failed to parse beat packet from {}, {} bytes: {}".format(addr, len(data), e))
      dump_packet_raw(data)
      return
    if packet["type"] in ["type_beat", "type_mixer"]:
      self.cl.eatBeat(packet)
    dump_beat_packet(packet)

  def handle_status_packet(self, data, addr):
    #logging.debug("Broadcast status packet from {}".format(addr))
    try:
      packet = packets.StatusPacket.parse(data)
    except Exception as e:
      logging.warning("Failed to parse status packet from {}, {} bytes: {}".format(addr, len(data), e))
      dump_packet_raw(data)
      return
    self.cl.eatStatus(packet)
    dump_status_packet(packet)

  # called whenever a keepalive packet is received
  # arguments of cb: this clientlist object, player number of changed client
  def set_client_keepalive_callback(self, cb=None):
    self.cl.client_keepalive_callback = cb

  # called whenever a status update of a known client is received
  # arguments of cb: this clientlist object, player number of changed client
  def set_client_change_callback(self, cb=None):
    self.cl.client_change_callback = cb

  # called when status update of a known master is received or the master changes
  # arguments of cb: this clientlist object, player number of changed master
  def set_master_change_callback(self, cb=None):
    self.cl.master_change_callback = cb

  # called when a player media changes
  # arguments of cb: this clientlist object, player_number, changed slot
  def set_media_change_callback(self, cb=None):
    self.cl.media_change_callback = cb
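
A minimal usage sketch for the callback hooks above; the handler body and log format are assumptions:

# Hypothetical driver: start the listener and log player changes.
def on_client_change(clientlist, player_number):
  logging.info("player %s changed", player_number)

p = ProDj()
p.set_client_change_callback(on_client_change)
p.start()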