Example #1
def done_thread():
    """Tally progress messages from the re-index workers and log a summary."""
    global bucket_error
    map_count = {}
    t_count = 0
    e_count = 0
    t0 = time.time()
    t_last = t0
    done_count = 0
    while True:
        # Block until a worker reports progress on the done queue.
        _, data = queue.select(done_queue)
        if data.get("is_done", False):
            # A worker finished; the loop exits once all have reported in.
            done_count += 1
        else:
            if data.get('success', False):
                t_count += 1

                bucket_name = data['bucket_name']

                map_count[bucket_name] = map_count.get(bucket_name, 0) + 1

                if t_count % COUNT_INCREMENT == 0:
                    new_t = time.time()
                    log.info("%s (%s at %s keys/sec) Q:%s ==> %s" %
                             (t_count, new_t - t_last,
                              int(COUNT_INCREMENT / (new_t - t_last)),
                              reindex_queue.length(), map_count))
                    t_last = new_t
            else:
                e_count += 1

        if done_count == PROCESSES_COUNT:
            break

    summary = ""
    summary += "Re-Index DONE! (%s keys re-indexed - %s errors - %s secs)\n" % (
        t_count, e_count, time.time() - t0)
    summary += "\n############################################\n"
    summary += "########## RE-INDEX SUMMARY ################\n"
    summary += "############################################\n\n"

    for k, v in map_count.iteritems():
        summary += "\t%15s: %s\n" % (k.upper(), v)
    if bucket_error:
        summary += "\nThese buckets failed to index completely: %s\n" % bucket_error
    log.info(summary)
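Example #4 below shows the worker side of this protocol: each worker pushes one progress message per key and a final {"is_done": True} sentinel, which is what drives done_count here.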
Example #2
def process_alerts():
    """Consume the alert queue for one interval, retrying failed messages."""
    global running  # pylint: disable=W0603

    consecutive_errors = 0

    end_t = now(interval)  # End of this processing interval.
    while running:
        if now() > end_t:
            logger.info("Finished interval (%ds). Restarting...", interval)
            running = False
            break

        event = queue.select(alertq, commandq, timeout=1)
        if not event:
            continue

        q_name, message = event
        if q_name == alertq_name:
            counts.increment('alert.received')
            try:
                create_alert(counts, datastore, logger, message)
                consecutive_errors = 0
            except Exception as ex:  # pylint: disable=W0703
                consecutive_errors += 1
                retries = message['retries'] = message.get('retries', 0) + 1
                if retries > max_retries:
                    logger.exception('Max retries exceeded for: %s',
                                     str(message))
                else:
                    # Requeue for another attempt; only log the exception if
                    # it is not the expected retryable condition.
                    alertq.push(message)
                    if 'Submission not finalized' not in str(ex):
                        logger.exception('Unhandled exception processing: %s',
                                         str(message))

                # Certain error strings are fatal: push the consecutive error
                # count past the limit so the check below stops the loop.
                for x in exit_msgs:
                    if x in str(ex):
                        consecutive_errors = max_consecutive_errors + 1
                        break

            if consecutive_errors > max_consecutive_errors:
                break
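Two forms of queue.select appear in these examples: the blocking call in Example #1, which waits indefinitely and returns a (queue_name, payload) pair, and the polling form above, where timeout=1 makes it return a falsy value when nothing arrives within a second. A minimal sketch of that contract, assuming the same queue module and two placeholder queue objects someq and otherq:

    # Sketch only: someq and otherq stand in for any queue objects.
    event = queue.select(someq, otherq, timeout=1)
    if not event:
        pass  # Timed out empty-handed; callers just loop and retry.
    else:
        q_name, message = event  # Which queue fired, and its payload.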
Example #3
def main():
    """Pop keys from the delete queues and remove them from the datastore."""
    ds = forge.get_datastore()
    fs = forge.get_filestore()
    submission_queue = queue.NamedQueue('d-submission', db=DATABASE_NUM)
    result_queue = queue.NamedQueue('d-result', db=DATABASE_NUM)
    file_queue = queue.NamedQueue('d-file', db=DATABASE_NUM)
    error_queue = queue.NamedQueue('d-error', db=DATABASE_NUM)
    dynamic_queue = queue.NamedQueue('d-dynamic', db=DATABASE_NUM)
    alert_queue = queue.NamedQueue('d-alert', db=DATABASE_NUM)
    filescore_queue = queue.NamedQueue('d-filescore', db=DATABASE_NUM)
    emptyresult_queue = queue.NamedQueue('d-emptyresult', db=DATABASE_NUM)

    log.info("Ready!")
    queues = [
        submission_queue, result_queue, file_queue, error_queue, dynamic_queue,
        alert_queue, filescore_queue, emptyresult_queue
    ]
    while True:
        queue_name, key = queue.select(*queues)

        try:
            rewrite = False
            expiry = None
            if isinstance(key, (tuple, list)):
                key, rewrite, expiry = key

            if rewrite:
                # Re-save the item with an updated expiry timestamp.
                # noinspection PyProtectedMember
                ds._save_bucket_item(ds.get_bucket(queue_name[2:]), key,
                                     {"__expiry_ts__": expiry})

            if queue_name == "d-submission":
                ds.delete_submission(key)
                log.debug("Submission %s (DELETED)" % key)
            elif queue_name == "d-result":
                ds.delete_result(key)
                log.debug("Result %s (DELETED)" % key)
            elif queue_name == "d-error":
                ds.delete_error(key)
                log.debug("Error %s (DELETED)" % key)
            elif queue_name == "d-file":
                ds.delete_file(key)
                if config.core.expiry.delete_storage and fs.exists(
                        key, location='far'):
                    fs.delete(key, location='far')
                log.debug("File %s (DELETED)" % key)
            elif queue_name == "d-alert":
                ds.delete_alert(key)
                log.debug("Alert %s (DELETED)" % key)
            elif queue_name == "d-filescore":
                ds.delete_filescore(key)
                log.debug("FileScore %s (DELETED)" % key)
            elif queue_name == "d-emptyresult":
                ds.delete_result(key)
                log.debug("EmptyResult %s (DELETED)" % key)
            else:
                log.warning("Unknown message: %s (%s)" % (key, queue_name))
        except Exception:  # pylint: disable=W0703
            log.exception("Failed deleting key %s from bucket %s:", key,
                          queue_name)

        queues = queues[1:] + queues[0:1]
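The last line rotates the queue list so the head moves to the tail on every pass. Presumably queue.select(*queues) favors the earlier arguments when several queues hold data, so the rotation keeps a single busy queue from starving the others.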
Example #4
def do_reindex(bucket_name, key):
    # NOTE: the original excerpt begins mid-function; the line that loads
    # `data` for this key is not shown.
    try:
        data = ds.sanitize(bucket_name, data, key)
        ds._save_bucket_item(ds.get_bucket(bucket_name), key, data)
    except:  # pylint: disable=W0702
        done_queue.push({
            "is_done": False,
            "success": False,
            "bucket_name": bucket_name,
            "key": key
        })
    else:
        # Report success only if the save did not raise.
        done_queue.push({
            "is_done": False,
            "success": True,
            "bucket_name": bucket_name,
            "key": key
        })


if __name__ == "__main__":
    print "\n** Re-Index worker starting! **\n"
    while True:
        _, data = queue.select(reindex_queue)
        if isinstance(data, list):
            data = data[0]
        if data.get('is_done', False):
            break
        do_reindex(data['bucket_name'], data['key'])

    done_queue.push({"is_done": True})
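This worker pairs with the monitor in Example #1: the dispatcher presumably pushes one is_done message onto reindex_queue per worker, each worker then breaks out of its loop and pushes its own {"is_done": True} sentinel onto done_queue, and the monitor stops once done_count reaches PROCESSES_COUNT.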
Example #5
    def _done_thread(self, done_type):
        """Tally worker progress and print a backup/restore summary."""
        # Init
        map_count = {}
        missing_map_count = {}
        t_count = 0
        e_count = 0
        t0 = time.time()
        t_last = t0
        done_count = 0

        # Initialise by type
        if done_type == TYPE_BACKUP:
            title = "Backup"
            done_queue = self.backup_done_queue
        else:
            title = "Restore"
            done_queue = self.restore_done_queue

        while True:
            msg = queue.select(done_queue, timeout=1)
            if not msg:
                continue

            _, data = msg
            if data.get("is_done", False):
                done_count += 1
            else:
                if data.get('success', False):
                    t_count += 1

                    bucket_name = data['bucket_name']

                    if data.get("missing", False):
                        if bucket_name not in missing_map_count:
                            missing_map_count[bucket_name] = 0

                        missing_map_count[bucket_name] += 1
                    else:
                        if bucket_name not in map_count:
                            map_count[bucket_name] = 0

                        map_count[bucket_name] += 1

                    if t_count % COUNT_INCREMENT == 0:
                        new_t = time.time()
                        print "%s (%s at %s keys/sec) ==> %s" % (
                            t_count, new_t - t_last,
                            int(COUNT_INCREMENT / (new_t - t_last)), map_count)
                        t_last = new_t
                else:
                    e_count += 1

            if done_count == self.worker_count:
                break

        # Cleanup
        self.hash_queue.delete()

        summary = ""
        summary += "%s DONE! (%s keys backed up - %s errors - %s secs)\n" % \
                   (title, t_count, e_count, time.time() - t0)
        summary += "\n############################################\n"
        summary += "########## %08s SUMMARY ################\n" % title.upper()
        summary += "############################################\n\n"

        for k, v in map_count.iteritems():
            summary += "\t%15s: %s\n" % (k.upper(), v)

        if missing_map_count:
            summary += "\n\nMissing data:\n\n"
            for k, v in missing_map_count.iteritems():
                summary += "\t%15s: %s\n" % (k.upper(), v)

        if self.bucket_error:
            summary += "\nThese buckets failed to %s completely: %s\n" % (
                title.lower(), self.bucket_error)
        print summary
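This is the same tally-and-summary pattern as Example #1 with two changes: select polls with a one-second timeout instead of blocking indefinitely, and keys flagged as missing are tallied separately in missing_map_count and reported in their own section of the summary.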