Example #1
    async def import_data(self, request):
        data = await request.post()

        if 'payload' not in data:
            return web.Response(status=500, text='no payload found')

        self.logger.debug("received import request {0}".format(
            data['payload']))

        try:
            imports = json.loads(data['payload'])
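            # each entry needs a cron pattern, a command and an enabled flag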
            for line in imports:
                if 'pattern' in line and 'command' in line and 'enabled' in line:
                    cron_item = CronItem(command=line['command'])
                    cron_item.set_all(line['pattern'])
                    cron_item.enable(line['enabled'])
                    self.logger.debug(
                        "received new job from import {0}, broadcasting it.".
                        format(cron_item))
                    broadcast(self.udp_port,
                              UdpSerializer.dump(cron_item, self.hash_key))
                else:
                    self.logger.error(
                        "import element invalid: {0}".format(line))
            return web.HTTPOk()
        except ValueError as e:
            self.logger.error(e)
            return web.HTTPClientError(text='invalid json received')
Example #2
    async def add_job(self, request):
        data = await request.post()

        self.logger.debug("received add request {0}".format(data))

        if ('command' not in data or 'minute' not in data
                or 'hour' not in data or 'dom' not in data
                or 'month' not in data or 'dow' not in data):
            return web.Response(
                status=500, text='not all mandatory fields submitted via form')

        cron_item = self.generate_cron_item(data)

        if 'disabled' in data:
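            # an optional 'disabled' field creates the job disabled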
            cron_item.enable(False)

        if cron_item in self.storage.cluster_jobs:
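            # identical jobs may only exist once in the cluster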
            raise web.HTTPConflict(text='job already exists')

        self.logger.debug("broadcasting add result")

        broadcast(self.udp_port, UdpSerializer.dump(cron_item, self.hash_key))

        raise web.HTTPCreated()
Example #3
    async def toggle_job(self, request):
        data = await request.post()

        self.logger.debug("received toggle request {0}".format(data))

        if ('command' not in data or 'minute' not in data
                or 'hour' not in data or 'dom' not in data
                or 'month' not in data or 'dow' not in data):
            return web.Response(status=500,
                                text='not all mandatory fields submitted')

        cron_item = self.generate_cron_item(data)

        if cron_item not in self.storage.cluster_jobs:
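            # only jobs already known to the cluster can be toggled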
            raise web.HTTPConflict(text='job not found on cluster')

        self.logger.debug("broadcasting run result")

        broadcast(self.udp_port,
                  UdpSerializer.dump(Toggle(cron_item), self.hash_key))

        raise web.HTTPAccepted()
Example #4
    def timed_broadcast():
        """
        Periodically broadcast system status and known jobs.
        """
        while running:
            # announce this node's ip address and current load to the cluster
            broadcast(
                args.udp_communication_port,
                UdpSerializer.dump(Status(get_ip(), get_load()), hash_key))
            for job in storage.cluster_jobs:
                # jobs assigned to this node get their process state refreshed
                if job.assigned_to == get_ip():
                    job.pid = check_process(job.command)
                for packet in UdpSerializer.dump(job, hash_key):
                    client(args.udp_communication_port, packet)
            time.sleep(args.broadcast_interval)
Example #5
    async def run(self, run, uuid):
        self.logger.debug("got full run in buffer {0}".format(run.job))
        job = next(
            iter([j for j in self.storage.cluster_jobs if j == run.job]),
            None)
        if job and job.assigned_to == get_ip():
            self.logger.info("am owner for job {0}".format(job))
            run.timestamp = datetime.now()
            process = subprocess.Popen(
                run.job.command,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                shell=True)
            self.logger.info(
                "{0} has been defined, going to execute".format(job.command))
            # communicate() blocks until the command has finished
            std_out, std_err = process.communicate()
            exit_code = process.wait()
            if std_err:
                self.logger.warning(
                    "error during execution of {0}: {1}".format(
                        run.job.command, std_err))
            self.logger.info("output of {0} with code {1}: {2}".format(
                job.command, exit_code, std_out))
            job.append_log(
                "{0:%b %d %H:%M:%S} localhost CRON[{1}] exit code: {2}, "
                "out: {3}, err: {4}".format(
                    datetime.now(), process.pid, exit_code, std_out, std_err))
            # share the updated job (log entry) with the rest of the cluster
            broadcast(self.udp_port, UdpSerializer.dump(job, self.hash_key))
            self.clean_buffer(uuid)
Example #6
    async def re_balance(self, request):
        self.logger.debug("rebalance request received")

        self.scheduler.re_balance()

        jobs = self.storage.cluster_jobs.copy()

        broadcast(
            self.udp_port,
            UdpSerializer.dump(ReBalance(timestamp=datetime.now()),
                               self.hash_key))

        time.sleep(5)
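        # re-send every known job so the re-balanced cluster picks them up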
        for job in jobs:
            broadcast(self.udp_port, UdpSerializer.dump(job, self.hash_key))

        raise web.HTTPAccepted()