Example #1
0
    def notify_success(self, source, hostname, filename, stats):
        """Push metrics for a successful backup run to a Prometheus pushgateway.

        Args:
            source: Backup source; ``source.id`` is used as the pushgateway job name.
            hostname: Host the backup ran on (unused here; part of the notifier interface).
            filename: Name of the produced backup file (unused here; part of the notifier interface).
            stats: Object exposing ``size``, ``dumptime``, ``uploadtime`` and
                ``retained_copies`` for the completed backup.
        """
        registry = CollectorRegistry()

        # One gauge per statistic, all on a throwaway registry so each push
        # replaces the complete metric group for this job.
        for metric_name, help_text, value in (
            ('backup_size', 'Size of backup file in bytes', stats.size),
            ('backup_dumptime', 'Time taken to dump and compress/encrypt backup in seconds', stats.dumptime),
            ('backup_uploadtime', 'Time taken to upload backup in seconds', stats.uploadtime),
            ('backup_retained_copies', 'Number of retained backups found on destination', stats.retained_copies),
        ):
            Gauge(metric_name, help_text, registry=registry).set(value)

        g = Gauge('backup_timestamp', 'Time backup completed as seconds-since-the-epoch', registry=registry)
        g.set_to_current_time()

        push_to_gateway(self.url, job=source.id, registry=registry, handler=http_basic_auth_handler, handler_args=self.auth_args)

        # Lazy %-style args: the message is only formatted when INFO is enabled.
        logging.info("Pushed metrics for job '%s' to gateway (%s)", source.id, self.url)
Example #2
0
    def notify_success(self, source, hostname, filename, stats):
        """Record backup statistics in a fresh registry and push them to the gateway."""
        registry = CollectorRegistry()

        # Timing/size statistics are recorded as single-observation summaries.
        observations = (
            ('backup_size', 'Size of backup file in bytes', stats.size),
            ('backup_dumptime', 'Time taken to dump and compress/encrypt backup in seconds', stats.dumptime),
            ('backup_uploadtime', 'Time taken to upload backup in seconds', stats.uploadtime),
        )
        for metric_name, description, value in observations:
            Summary(metric_name, description, registry=registry).observe(value)

        # The retained-copy count is optional; export it only when available.
        if stats.retained_copies is not None:
            Gauge('backup_retained_copies', 'Number of retained backups found on destination', registry=registry).set(stats.retained_copies)

        Gauge('backup_timestamp', 'Time backup completed as seconds-since-the-epoch', registry=registry).set_to_current_time()

        def auth_handler(url, method, timeout, headers, data):
            # Bind this notifier's credentials onto the stock basic-auth handler.
            return basic_auth_handler(url, method, timeout, headers, data, self.username, self.password)

        push_to_gateway(self.url, job=source.id, registry=registry, handler=auth_handler)

        logging.info("Pushed metrics for job '%s' to gateway (%s)" % (source.id, self.url))
            record['Contacts_LeadType_S_Data_Status1'] = 'CAMPAIGN DETAIL ERROR'
        else:
            record['Contacts_LeadType_MostRecent_Offer_PrimarySol1'] = thisCampaign['Solution_Code_Family__c']
            record['Contacts_LeadType_MostRecent_Offer_ProductSer1'] = thisCampaign['Solution_Code__c']
            record['Contacts_LeadType_S_Data_Status1'] = 'CAMPAIGN DETAILS RETREIVED'
            if (thisCampaign['Solution_Code_Family__c']==None):
                nullCount += 1

    logging.info("Records with no Primary Solution: " + str(nullCount))


    importDefName = 'Contacts.LeadType - Get Campaign Details ' + str(datetime.now())

    cdoInDef = elq.CreateDef(defType='imports', entity='customObjects', cdoID=1269, fields=myFields, defName=importDefName, identifierFieldName='Email_Address1')
    logging.info("import definition created: " + cdoInDef['uri'])

    postInData = elq.PostSyncData(data = dataOut, defObject=cdoInDef, maxPost=20000)
    logging.info("Data successfully imported, job finished: " + str(datetime.now()))
else:
    logging.info("No records, job finished")

### Logging for Prometheus

# Export batch-job health metrics to the pushgateway named in the environment.
metrics_registry = CollectorRegistry()

last_success = Gauge('job_last_success_unixtime', 'Last time a batch job successfully finished', registry=metrics_registry)
last_success.set_to_current_time()

records_processed = Gauge('job_total_records_success', 'Total number of records successfully processed in last batch', registry=metrics_registry)
records_processed.set(len(data))

push_to_gateway(os.environ['PUSHGATEWAY'], job='Contacts.LeadType_getOfferDetails', registry=metrics_registry)
Example #4
0
File: prom.py  Project: deadwind4/peacock
from prometheus_client import Counter, Gauge, CollectorRegistry, push_to_gateway
import time

registry = CollectorRegistry()

# Counter with one label dimension, incremented once per loop iteration.
loop_counter = Counter('luna_counter',
                       'luna memory test', ['label_foo'],
                       registry=registry)

# Gauge recording when this script started.
start_time_gauge = Gauge('luna_foo', 'luna memory test', registry=registry)
start_time_gauge.set_to_current_time()

# Push the registry to a local pushgateway roughly 25 times per second, forever.
while True:
    loop_counter.labels(label_foo='new').inc()
    time.sleep(0.04)
    print(time.ctime())
    push_to_gateway('localhost:9091', job='luna_python_foo', registry=registry)
Example #5
0
async def main():
    """Schedule janitor candidate packages and report completion to Prometheus."""
    import argparse
    from janitor import state
    from prometheus_client import (
        Gauge,
        push_to_gateway,
        REGISTRY,
    )

    arg_parser = argparse.ArgumentParser(prog="janitor.schedule")
    arg_parser.add_argument(
        "--dry-run",
        help="Create branches but don't push or propose anything.",
        action="store_true",
        default=False,
    )
    arg_parser.add_argument(
        "--prometheus", type=str,
        help="Prometheus push gateway to export to.")
    arg_parser.add_argument(
        "--config", type=str, default="janitor.conf",
        help="Path to configuration.")
    arg_parser.add_argument(
        "--suite", type=str,
        help="Restrict to a specific suite.")
    arg_parser.add_argument(
        "--gcp-logging", action='store_true',
        help='Use Google cloud logging.')
    arg_parser.add_argument("packages", help="Package to process.", nargs="*")

    options = arg_parser.parse_args()

    # Google Cloud logging replaces the basic stderr handler when requested.
    if options.gcp_logging:
        import google.cloud.logging
        gcp_client = google.cloud.logging.Client()
        gcp_client.get_default_handler()
        gcp_client.setup_logging()
    else:
        logging.basicConfig(level=logging.INFO, format="%(message)s")

    # Registered on the default global REGISTRY; pushed at the end on success.
    last_success_gauge = Gauge("job_last_success_unixtime",
                               "Last time a batch job successfully finished")

    logging.info('Reading configuration')
    with open(options.config, "r") as config_file:
        config = read_config(config_file)

    database = state.Database(config.database_location)

    async with database.acquire() as connection:
        logging.info('Finding candidates with policy')
        logging.info('Determining schedule for candidates')
        candidates = await iter_candidates_with_policy(
            connection, packages=(options.packages or None), suite=options.suite)
        queue_items = [
            queue_item_from_candidate_and_policy(candidate)
            for candidate in candidates
        ]
        logging.info('Adding to queue')
        await bulk_add_to_queue(connection, queue_items, dry_run=options.dry_run)

    # Record success time and export the global registry if a gateway was given.
    last_success_gauge.set_to_current_time()
    if options.prometheus:
        push_to_gateway(options.prometheus,
                        job="janitor.schedule",
                        registry=REGISTRY)
Example #6
0
async def main():
    """Import package metadata from stdin and export job status to Prometheus."""
    import argparse
    import sys
    from janitor.package_overrides import read_package_overrides
    from prometheus_client import (
        Gauge,
        push_to_gateway,
        REGISTRY,
    )

    arg_parser = argparse.ArgumentParser(prog="package_metadata")
    arg_parser.add_argument(
        "--prometheus", type=str, help="Prometheus push gateway to export to."
    )
    arg_parser.add_argument(
        "--config", type=str, default="janitor.conf", help="Path to configuration."
    )
    arg_parser.add_argument(
        "--distribution",
        type=str,
        default="unstable",
        help="Distribution to import metadata for.",
    )
    arg_parser.add_argument(
        "--package-overrides",
        type=str,
        default=None,
        help="Read package overrides.",
    )
    arg_parser.add_argument(
        "--gcp-logging", action='store_true', help='Use Google cloud logging.'
    )

    options = arg_parser.parse_args()

    # Route logs through Google Cloud when requested, otherwise plain stderr.
    if options.gcp_logging:
        import google.cloud.logging
        gcp_client = google.cloud.logging.Client()
        gcp_client.get_default_handler()
        gcp_client.setup_logging()
    else:
        logging.basicConfig(level=logging.INFO)

    # Registered on the default global REGISTRY; pushed at the end on success.
    last_success_gauge = Gauge(
        "job_last_success_unixtime", "Last time a batch job successfully finished"
    )

    with open(options.config, "r") as config_file:
        config = read_config(config_file)

    # Overrides are optional; fall back to an empty mapping when no file is given.
    package_overrides = {}
    if options.package_overrides:
        with open(options.package_overrides, "r") as overrides_file:
            package_overrides = read_package_overrides(overrides_file)

    database = state.Database(config.database_location)

    logging.info('Reading data')
    packages, removals = iter_packages_from_script(sys.stdin)

    async with database.acquire() as connection:
        logging.info(
            'Updating package data for %d packages',
            len(packages))
        await update_package_metadata(
            connection, options.distribution, packages, package_overrides,
            options.package_overrides
        )
        if removals:
            logging.info('Removing %d packages', len(removals))
            await mark_removed_packages(connection, options.distribution, removals)

    # Record success and push the default registry if a gateway was configured.
    last_success_gauge.set_to_current_time()
    if options.prometheus:
        push_to_gateway(
            options.prometheus, job="janitor.package_metadata", registry=REGISTRY
        )
# Within your code
# NOTE(review): `c` is assumed to be a prometheus_client Counter created
# earlier (outside this excerpt) — confirm before relying on these examples.
with c.count_exceptions():
    pass

# Count only one type of exception
with c.count_exceptions(ValueError):
    pass

# Gauges: Used to track any value, anything that's not counting will be here (e.g. temperature, cpu usage, ...)
# Can inc, dec, and set
g = Gauge('my_inprogress_requests', 'Description of gauge')
g.inc()  # Increment by 1
g.dec(10)  # Decrement by given value
g.set(4.2)  # Set to a given value

g.set_to_current_time()  # Set to current unixtime

# A gauge can also track in-progress work:
# Another use case: Increment when entered, decrement when exited.
@g.track_inprogress()
def f():
    pass


with g.track_inprogress():
    pass

# A gauge can also take its value from a callback
# (evaluated lazily each time the gauge is collected/scraped).
d = Gauge('data_objects', 'Number of objects')
my_dict = {}
d.set_function(lambda: len(my_dict))