Exemple #1
0
    def __init__(self):
        """Initialize the core: read config, set up logging, load services."""
        self.cnf = cnf.get("core")
        self.logger = Logger("Core")
        self.logger.debug("Loading services")

        # Instantiate every service listed in the core config.
        self._services = [
            Loader.by_id('services', name)
            for name in self.cnf.get("services")
        ]
Exemple #2
0
    def __init__(self, thread, id, root=cnf):
        """Set up the service: storage backend, run-state flags and logger."""
        super().__init__(id, root)

        # Storage backend is selected by the local config section.
        self._data = Loader.by_id('storage', self.lcnf.get("storage"))

        # Thread-management state (the worker thread is created lazily).
        self._running = False
        self._run_thread = None
        self._thread_to_run = thread
        self._stop_timeout = 10
        self._logger = Logger('Service')

        self._init()
Exemple #3
0
class TestApp(App):
    """Kivy app that drives the pattern animator on a fixed clock interval."""

    def build(self):
        # Wire up the display widget, the animator, and the pattern loader.
        self.display = PatternDisplay()
        self.animator = Animator()
        self.loader = Loader(self.animator, Path(__file__).parent / 'patterns')
        self.animator.pattern = self.loader.loadPattern(args.pattern)
        return self.display

    def on_start(self):
        # Begin ticking the animation once the app is running.
        Clock.schedule_interval(self.animate, INTERVAL)

    def animate(self, ms):
        # Advance the animation by the elapsed time and repaint the frame.
        self.animator.advance(ms)
        self.display.repaint(self.animator.output)
Exemple #4
0
    def __run(self):
        """Main executor loop.

        Each iteration: for every enabled pipeline step, reap finished Redis
        jobs back into local storage, then (queue size permitting) pull a new
        batch of items — first from the local pool, then from the pipeline's
        source storage — and enqueue them as a task job.

        Fixes vs. previous version: finished jobs are reaped over a snapshot
        of ``jobs`` (removing from a list while iterating it skips elements),
        and the local ``filter`` no longer shadows the builtin.
        """
        redis_conn = Redis(host=self.lcnf.get('redis').get('host'))
        jobs = []            # ids of jobs currently enqueued or running
        known_sources = {}   # source_id -> storage, cached across iterations

        while self._running:
            sleep(self.lcnf.get('delay', 0.5))
            try:
                for pn, pipeline in self.cnf.get("pipelines").items():
                    # Only run pipelines enabled in the core config.
                    if pn not in self.cnf.get('core').get('pipelines'):
                        continue
                    for step in pipeline['steps']:
                        q = Queue(step.get('priority', 'normal'),
                                  connection=redis_conn)
                        # Reap finished jobs. Iterate a snapshot so that
                        # jobs.remove() cannot skip the next element.
                        for job_id in list(jobs):
                            job = q.fetch_job(job_id)
                            if job:
                                if job.result is not None:
                                    self._logger.debug("%s|%s", job_id,
                                                       job._status)
                                    self._data.update(job.result)
                                    job.cleanup()
                                    jobs.remove(job_id)
                        # Back-pressure: don't enqueue past the queue size.
                        if len(jobs) + 1 > self.lcnf.get('qsize', 200):
                            continue
                        # Select items that haven't run this step yet, plus
                        # any step-specific "if" conditions from the config.
                        query = {
                            "steps.%s" % step['task']: {
                                '$exists': False
                            }
                        }
                        query.update(step.get("if", {}))
                        count = step.get('parallel', 1)
                        # get as much as possible from own pool
                        items = self._data.get(block=False,
                                               count=count,
                                               filter=query)
                        # obtain everything else from source
                        if len(items) < count:
                            source_id = pipeline.get('source')
                            if source_id in known_sources:
                                source = known_sources[source_id]
                            else:
                                source = Loader.by_id('storage', source_id)
                                known_sources[source_id] = source
                            new_items = source.get(block=False,
                                                   count=(count - len(items)),
                                                   filter=query)
                            items.extend(new_items)
                            source.remove(new_items)

                        if items:
                            # Mark the step as pending on each item so the
                            # query above won't pick them up again.
                            for i in items:
                                i['steps'][step['task']] = None
                            self._data.update(items)
                            job = q.enqueue("lib.exec.Task.run", step['task'],
                                            items)
                            self._logger.info("%s|%s|%s|%s", job.id,
                                              step.get('priority', 'normal'),
                                              step['task'], len(items))
                            jobs.append(job.id)
            except Exception as e:
                # Broad catch on purpose: keep the executor alive no matter
                # which step failed; the error is logged and the loop retries.
                self._logger.error("Error in executor main thread: %s", e)
Exemple #5
0
def run(task_name, items):
    """Look up the task registered under *task_name* and execute it on *items*."""
    task = Loader.by_id('tasks', task_name)
    return task.run(items)
Exemple #6
0
                          LED_BRIGHTNESS, LED_CHANNEL, LED_STRIP)

# Initialize the library (must be called once before other functions).
strip.begin()

# setup option parser
argParser = argparse.ArgumentParser(
    description='''Simulate NeoPixel animations.\n''')
argParser.add_argument('pattern',
                       help="Pattern to display.",
                       default='carousel',
                       nargs='?')
args = argParser.parse_args()

# Build the animation pipeline: animator plus pattern loader.
animator = Animator()
loader = Loader(animator, Path(__file__).parent / 'patterns')
animator.pattern = loader.loadPattern(args.pattern)

UPDATE_INTERVAL = 1.0 / 60.0  # 60 fps (seconds per frame)

try:
    # Render loop: advance the animation, push each pixel's color (scaled
    # from 0..1 floats to 0..255 ints) to the strip, then sleep one frame.
    while True:
        animator.advance(UPDATE_INTERVAL)
        for i in range(0, LED_COUNT):
            r, g, b = animator.output[i]
            strip.setPixelColor(
                i, Color(int(r * 255.0), int(g * 255.0), int(b * 255.0)))
        strip.show()
        time.sleep(UPDATE_INTERVAL)
except KeyboardInterrupt:
    print('Exiting...')
Exemple #7
0
 def build(self):
     """Create the display, animator and loader; return the root widget."""
     display = PatternDisplay()
     animator = Animator()
     self.display = display
     self.animator = animator
     # Patterns live next to this file, under ./patterns.
     self.loader = Loader(animator, Path(__file__).parent / 'patterns')
     self.animator.pattern = self.loader.loadPattern(args.pattern)
     return display
Exemple #8
0
# Load Data
# NOTE(review): Python 2 print statements below — this script targets Python 2.
data = Data(data_file=args.data, input_horizon=s.INPUT_HORIZON,
            n_stations=args.n_stations, train_ratio=s.TRAIN_RATIO,
            val_ratio=s.VAL_RATIO, debug=args.debug)

# Load Model
model = Model(args.n_stations, s.MOVING_HORIZON, s.ACTIVATION, s.CRITERION, usegpu=args.usegpu)

# Train First RNN
# Splits come back as [X, y] pairs for train / validation / test.
[X_train, y_train], [X_val, y_val], [X_test, y_test] = data.load_data_lstm_1()

rnn_model_num = 1
print '#' * 10 + ' RNN 1 ' + '#' * 10

# Wrap the numpy splits in the project's Loader dataset for torch batching.
train_loader = torch.utils.data.DataLoader(Loader((X_train, y_train)), batch_size=args.batch_size, shuffle=True,
                                           num_workers=args.n_workers, pin_memory=pin_memory)

val_loader = torch.utils.data.DataLoader(Loader((X_val, y_val)), batch_size=args.batch_size, shuffle=False,
                                         num_workers=args.n_workers, pin_memory=pin_memory)

# Epoch count is indexed per RNN stage (rnn_model_num is 1-based).
model.fit(rnn_model_num, s.LEARNING_RATE, s.WEIGHT_DECAY, s.CLIP_GRAD_NORM, s.LR_DROP_FACTOR, s.LR_DROP_PATIENCE, s.PATIENCE, 
          s.OPTIMIZER, s.N_EPOCHS[rnn_model_num - 1],
          train_loader, val_loader, model_save_path.format(rnn_model_num))

# Train Other RNNs
for rnn_model_num in range(2, s.MOVING_HORIZON + 1):
    X_train, y_train = data.load_data(X_train, y_train, model, rnn_model_num - 1)
    X_val, y_val = data.load_data(X_val, y_val, model, rnn_model_num - 1)
    print '#' * 10 + ' RNN {} '.format(rnn_model_num) + '#' * 10
    train_loader = torch.utils.data.DataLoader(Loader((X_train, y_train)), batch_size=args.batch_size, shuffle=True,