Example #1
def test_bloom_performance_bloom(hash_module, algo):
    func = getattr(hash_module, algo)
    t0 = monotime()
    total_bytes = 0
    array = bytearray(2**16)
    for i in range(50):
        data = os.urandom(2**12)
        total_bytes += len(data)
        func(array, data, 4)
    td = monotime() - t0
    mb_per_s = total_bytes / td / 2**20
    print(f"{func} performance: {mb_per_s:.2f} MB/s")
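Across these snippets, monotime behaves like a monotonic-clock function, presumably an alias for time.monotonic; that alias is an assumption, not something the snippets confirm. A minimal, self-contained sketch of the shared timing pattern:

# Assumption: monotime is an alias for the standard-library monotonic clock.
from time import monotonic as monotime

t0 = monotime()
total = sum(range(1_000_000))   # stand-in workload
elapsed = monotime() - t0
print(f'computed {total} in {elapsed:.3f} s')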
Example #2
 def write_progress(self):
     if monotime() < self.next_progress_mt:
         return
     self.next_progress_mt += 1
     running_time_f = monotime() - self.start_mt
     running_time = int(running_time_f)
     msg = '{hours:d}:{minutes:02d}:{seconds:02d}  {comp} / {orig} = {ratio:.3f}  {speed}/s'.format(
         hours=running_time // 3600,
         minutes=(running_time // 60) % 60,
         seconds=running_time % 60,
         orig=human_readable_byte_size(self.orig_size),
         comp=human_readable_byte_size(self.compressed_size),
         ratio=self.compressed_size / self.orig_size,
         speed=human_readable_byte_size(self.orig_size / running_time_f))
     print(msg, file=stderr)
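The throttle above works by advancing a monotonic deadline (next_progress_mt) by one second per printed line. A stand-alone sketch of that once-per-second pattern (illustrative only; the real class also tracks sizes and formats a ratio):

from sys import stderr
from time import monotonic as monotime, sleep

next_mt = monotime()

def maybe_print(msg):
    # Skip the message unless at least one second passed since the last print.
    global next_mt
    if monotime() < next_mt:
        return
    next_mt += 1
    print(msg, file=stderr)

# Prints "step 0" immediately and "step 4" about a second later; the rest are skipped.
for i in range(5):
    maybe_print(f'step {i}')
    sleep(0.3)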
Example #3
 def instance_console_output(self):
     self.instance_ready()
     start_mt = monotime()
     while True:
         logger.info('Getting console output of instance %s...',
                     self.instance_id())
         reply = self.ec2_client.get_console_output(
             DryRun=False, InstanceId=self.instance_id())
         logger.debug('reply: %s', smart_repr(reply))
         if 'Output' in reply:
             return reply['Output'].splitlines()
         if monotime() - start_mt > self.console_output_timeout:
             raise Exception('No console output received')
         logger.debug('No output yet, sleeping...')
         sleep(3)
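The loop above is the classic poll-until-deadline idiom: keep retrying until the monotonic elapsed time exceeds a timeout. A generic, self-contained sketch of the same idea (names are illustrative, not from the project):

from time import monotonic as monotime, sleep

def wait_for(predicate, timeout_s, interval_s=3):
    # Poll predicate() until it returns a truthy value or timeout_s elapses.
    start = monotime()
    while True:
        result = predicate()
        if result:
            return result
        if monotime() - start > timeout_s:
            raise TimeoutError(f'condition not met within {timeout_s} s')
        sleep(interval_s)

# Example: wait (at most 2 s, polling every 0.5 s) for a time-based flag to flip.
deadline = monotime() + 1
print(wait_for(lambda: monotime() > deadline, timeout_s=2, interval_s=0.5))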
Example #4
 def update(self, access_log_record, now=None):
     status = intern(str(access_log_record.status))
     now = monotime() if now is None else now
     self.total_status_count[status] += 1
     self.rolling_5min_status_count[status] += 1
     self.rolling_5min_deque.append((now, status))
     self._roll(now)
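The rolling counters rely on a _roll helper that the snippet does not show. A hypothetical sketch of what it might do, written as a free function and assuming a 300-second window over the (timestamp, status) pairs appended above:

from collections import Counter, deque

def roll(window_deque, window_counts, now, window_s=300):
    # Hypothetical version of the _roll step: drop entries older than the
    # window and decrement the matching counters (not the project's actual code).
    while window_deque and window_deque[0][0] < now - window_s:
        _, status = window_deque.popleft()
        window_counts[status] -= 1
        if window_counts[status] <= 0:
            del window_counts[status]

# Tiny demonstration with two stale entries and one fresh entry.
counts = Counter({'200': 3})
dq = deque([(0.0, '200'), (1.0, '200'), (400.0, '200')])
roll(dq, counts, now=410.0)
print(counts, dq)   # -> Counter({'200': 1}) deque([(400.0, '200')])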
Example #5
 def update(self, access_log_record, now=None):
     status = intern(str(access_log_record.status))
     path = unify_path(access_log_record.path)
     if access_log_record.host:
         path = access_log_record.host + path
     now = monotime() if now is None else now
     self.total_path_status_count[status][path] += 1
     self.rolling_5min_path_status_count[status][path] += 1
     self.rolling_5min_deque.append((now, status, path))
     self._roll(now)
     self._compact(status=status)
Example #6
File: schema.py Project: messa/ow2
 async def resolve_search_current_snapshot_items(root, info, path_query):
     model = get_model(info)
     t = monotime()
     streams = await model.streams.list_all()
     snapshot_ids = [s.last_snapshot_id for s in streams]
     snapshots = await model.stream_snapshots.get_by_ids(snapshot_ids,
                                                         load_state=True)
     re_pq = re.compile(path_query)
     found_items = []
     for snapshot in snapshots:
         for item in snapshot.state_items:
             if re_pq.match(item.path_str):
                 found_items.append(item)
         if len(found_items) >= 10000:
             logger.debug('Too many found_items')
             break
     logger.debug(
         'resolve_search_current_snapshot_items %r found %s items in %.3f s',
         path_query, len(found_items),
         monotime() - t)
     return found_items
Example #7
def generate_message_texts(conf,
                           previous_alerts,
                           current_alerts,
                           notify_aux,
                           now=None):
    '''
    Parameter notify_aux is data that this function uses to keep track of things between calls.
    '''
    notify_aux = notify_aux or {}
    waiting_alert_ids = notify_aux.setdefault('waiting_alert_ids',
                                              {})  # alert id -> monotime
    now = now or monotime()
    old_alerts_by_id = {a['alertId']: a for a in previous_alerts}
    new_alerts_by_id = {a['alertId']: a for a in current_alerts}
    assert len(old_alerts_by_id) == len(previous_alerts)
    assert len(new_alerts_by_id) == len(current_alerts)
    closed_alerts = [
        a for a in previous_alerts if a['alertId'] not in new_alerts_by_id
    ]
    opened_alerts = [
        a for a in current_alerts if a['alertId'] not in old_alerts_by_id
    ]
    mentions_of_closed_alerts = []
    mentions_of_short_lived_alerts = []
    mentions_of_opened_alerts = []
    # message with closed alerts
    for a in closed_alerts:
        if a['alertId'] in waiting_alert_ids:
            waiting_alert_ids.pop(a['alertId'])
            mentions_of_short_lived_alerts.append('\u267B\uFE0F ' +
                                                  alert_text(a))
        else:
            mentions_of_closed_alerts.append('\U0001F334 ' + alert_text(a))
    # message with newly opened alerts
    for a in opened_alerts:
        assert a['alertId'] not in waiting_alert_ids
        waiting_alert_ids[a['alertId']] = now + conf.wait_duration_s
    for a in current_alerts:
        if waiting_alert_ids.get(a['alertId']):
            if waiting_alert_ids[a['alertId']] <= now:
                waiting_alert_ids.pop(a['alertId'])
                mentions_of_opened_alerts.append('\U0001F525 ' + alert_text(a))
    assert waiting_alert_ids.keys() <= {a['alertId'] for a in current_alerts}
    message_texts = [
        '\n'.join(t for t in mentions_of_closed_alerts
                  if not conf.is_message_ignored(t)),
        '\n'.join(t for t in mentions_of_short_lived_alerts
                  if not conf.is_message_ignored(t)),
        '\n'.join(t for t in mentions_of_opened_alerts
                  if not conf.is_message_ignored(t)),
    ]
    message_texts = [t for t in message_texts if t]
    return message_texts, notify_aux
Example #8
async def retrieve_alerts(conf, session):
    post_kwargs = dict(
        headers={'Accept': 'application/json'},
        json={'query': alert_query},
        timeout=30)
    url = conf.graphql_endpoint
    logger.debug('Retrieving alerts from %s', redacted(url))
    t0 = monotime()
    async with session.post(url, **post_kwargs) as resp:
        resp.raise_for_status()
        rj = await resp.json()
        logger.debug('GQL response: %s', smart_repr(rj))
        logger.debug('pageInfo: %r', rj['data']['activeAlerts']['pageInfo'])
        if rj.get('error') or rj.get('errors'):
            raise Exception(
                f'Received error response from {redacted(url)}: {rj}')
        alerts = [edge['node'] for edge in rj['data']['activeAlerts']['edges']]
        logger.debug('Retrieved %d alerts in %.3f s', len(alerts),
                     monotime() - t0)
        return alerts
Example #9
async def test_default_nginx_access_log_benchmark_with_asyncio_queue():
    count = 1000
    q = Queue()

    async def produce():
        for i in range(count):
            line = '84.22.97.60 - - [04/Feb/2020:11:02:10 +0000] "GET /{i} HTTP/1.1" 200 396 "-" "Mozilla/5.0 foo/{i}"'.format(
                i=i)
            await q.put(line)
        await q.put(None)

    async def consume():
        for i in range(count):
            line = await q.get()
            parse_access_log_line(line)
        last = await q.get()
        assert last is None

    t0 = monotime()
    await gather(produce(), consume())
    duration = monotime() - t0
    assert duration < 0.1
Example #10
def restore(src_path, backend, backup_id, identity_files):
    # Restores TO the src_path - maybe there could be better naming? :)
    t0 = monotime()
    backend_files = backend.list_files()
    meta_filename = f'baq.{backup_id}.meta'
    adapter = ChunkAdapter(backend)
    logger.info('Restoring backup id %s from %s to %s', backup_id, backend,
                src_path)
    with ExitStack() as stack:
        stack.callback(backend.close_data_file)
        temp_dir = Path(
            stack.enter_context(
                TemporaryDirectory(prefix=f'baq.{backup_id}.')))
        meta_path = temp_dir / meta_filename
        backend.retrieve_file(meta_filename, meta_path)
        assert meta_path.is_file()
        meta_file = stack.enter_context(gzip.open(meta_path, mode='rb'))
        header = json.loads(meta_file.readline())['baq_backup']
        logger.debug('Metadata header:\n%s', json.dumps(header, indent=2))
        assert header['file_format_version'] == 'v1'
        assert backup_id == header['backup_id']
        key_manager = DecryptKeyManager(header['encryption_keys'],
                                        identity_files)
        while True:
            record = json.loads(meta_file.readline())
            logger.debug('Processing: %s', record)
            if record.get('done'):
                break
            elif record.get('directory'):
                restore_directory(src_path, record['directory'])
            elif record.get('file'):
                restore_file(src_path, record['file'], meta_file, adapter,
                             key_manager)
            else:
                raise Exception(
                    f"Unknown metadata record: {json.dumps(record)}")
    logger.info('Restore backup id %s done in %.3f s', backup_id,
                monotime() - t0)
Example #11
def run_system_agent_iteration(conf, sleep_interval):
    report_date = datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S.%fZ')
    t0 = monotime()
    report_state = gather_state(conf)
    duration = monotime() - t0
    report_state['duration'] = {
        '__value': duration,
        '__unit': 'seconds',
    }

    # add watchdog
    #wd_interval = conf.watchdog_interval or sleep_interval + 30
    wd_interval = sleep_interval + 30
    report_state['watchdog'] = {
        '__watchdog': {
            'deadline': int((time() + wd_interval) * 1000),
        },
    }

    report_data = {
        'label': generate_label(conf),
        'date': report_date,
        'state': report_state,
    }

    try:
        r = rs.post(conf.report_url,
                    json=report_data,
                    headers={'Authorization': 'token ' + conf.report_token},
                    timeout=default_report_timeout)
        logger.debug('Report response: %s', r.text[:100])
        r.raise_for_status()
    except Exception as e:
        logger.error('Failed to post report to %r: %r', conf.report_url, e)
        logger.info('Report token: %s...%s', conf.report_token[:3],
                    conf.report_token[-3:])
        logger.info('Report data: %r', report_data)
Example #12
File: cache.py Project: messa/ow2
 def get(self, key, default=None):
     if key not in self.data:
         if DEBUG:
             logger.debug('LRUCache miss: %r', key)
         return default
     value, mtime = self.data[key]
     if self.ttl_ms and mtime + self.ttl_ms < monotime():
         if DEBUG:
             logger.debug('LRUCache expired: %r', key)
         del self.data[key]
         del self.last_access[key]
         return default
     if DEBUG:
         logger.debug('LRUCache hit: %r', key)
     self._fresh(key)
     return value
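Example #19 below shows the matching set method. A minimal stand-alone sketch of the same timestamp-plus-TTL pattern (not the project's actual LRUCache; the TTL here is expressed in whatever unit monotime() returns, assumed to be seconds):

from time import monotonic as monotime

_data = {}
TTL = 60.0  # assumption: same unit as monotime(), i.e. seconds

def cache_set(key, value):
    _data[key] = (value, monotime())

def cache_get(key, default=None):
    entry = _data.get(key)
    if entry is None:
        return default          # miss
    value, stored_at = entry
    if stored_at + TTL < monotime():
        del _data[key]          # expired
        return default
    return value

cache_set('answer', 42)
print(cache_get('answer'))      # -> 42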
Example #13
 def _open(self, seek_end=False):
     f = self._path.open(mode='rb')
     st = stat(f.fileno())
     dev_inode = (st.st_dev, st.st_ino)
     if dev_inode == self._current_dev_inode:
         # this would be weird, but could maybe happen;
         # we do not want to have the file opened twice
         f.close()
         return
     if seek_end:
         f.seek(0, SEEK_END)
     if self._current_file is not None:
         self._rotated_files.append((self._current_file, monotime() + self.expire_interval_s))
     self._current_file = f
     self._current_dev_inode = dev_inode
     logger.debug(
         'Opened file %s fd %s dev %s inode %s position %s',
         self._path, f.fileno(), dev_inode[0], dev_inode[1], f.tell())
Example #14
 def get_report(self, now=None):
     now = monotime() if now is None else now
     self._roll(now)
     report = OrderedDict()
     report['status_count'] = OrderedDict()
     report['status_count']['total'] = OrderedDict()
     report['status_count']['last_5_min'] = OrderedDict()
     for status, count in sorted(self.total_status_count.items()):
         report['status_count']['total'][status] = count
     for status, count in sorted(self.rolling_5min_status_count.items()):
         if status in server_error_status_codes:
             report['status_count']['last_5_min'][status] = {
                 '__value': count,
                 '__check': {
                     'state': 'green' if count == 0 else 'red',
                 }
             }
         else:
             report['status_count']['last_5_min'][status] = count
     return report
Example #15
def run_log_agent(conf):
    wfs = [WatchedFile(lf) for lf in conf.log_files]
    sleep_interval = conf.sleep_interval or default_sleep_interval
    while True:
        t0 = monotime()
        report = {
            'label': {
                'agent': 'log',
                'host': getfqdn(),
            },
            'date': datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S.%fZ'),
            'state': {
                'configuration_file': str(conf.conf_file_path),
                'log_files': {},
            },
        }
        for wf in wfs:
            wf.run(timestamp=time())
            wf.add_to_report(report['state'])
        finish_and_send_report(report, conf, sleep_interval, t0)
        sleep(sleep_interval)
Example #16
def finish_and_send_report(report_data, conf, sleep_interval, t0):
    # add watchdog
    wd_interval = conf.watchdog_interval or sleep_interval + 30
    report_data['state']['watchdog'] = {
        '__watchdog': {
            'deadline': int((time() + wd_interval) * 1000),
        },
    }
    report_data['state']['iteration_duration_s'] = monotime() - t0
    try:
        r = rs.post(
            conf.report_url,
            json=report_data,
            headers={'Authorization': 'token ' + conf.report_token},
            timeout=default_report_timeout)
        logger.debug('Report response: %s', r.text[:100])
        r.raise_for_status()
    except Exception as e:
        logger.error('Failed to post report to %r: %r', conf.report_url, e)
        logger.info('Report token: %s...%s', conf.report_token[:3], conf.report_token[-3:])
        logger.info('Report data: %r', report_data)
Example #17
    def get_report(self, now=None):
        now = monotime() if now is None else now
        self._roll(now)
        report = OrderedDict()
        report['path_status_count'] = OrderedDict()
        report['path_status_count']['total'] = OrderedDict()
        report['path_status_count']['last_5_min'] = OrderedDict()

        for status, path_count in sorted(self.total_path_status_count.items()):
            assert status not in report['path_status_count']['total']
            report['path_status_count']['total'][status] = OrderedDict()
            for path, count in path_count.most_common(5):
                report['path_status_count']['total'][status][path] = count

        for status, path_count in sorted(
                self.rolling_5min_path_status_count.items()):
            assert status not in report['path_status_count']['last_5_min']
            report['path_status_count']['last_5_min'][status] = OrderedDict()
            for path, count in path_count.most_common(5):
                report['path_status_count']['last_5_min'][status][path] = count

        return report
Example #18
def test_generate_random_id_unique():
    t = monotime()
    ids = [generate_random_id() for i in range(1000)]
    assert len(set(ids)) == len(ids)
    assert monotime() - t < 0.1
Example #19
File: cache.py Project: messa/ow2
 def set(self, key, value):
     self.data[key] = (value, monotime())
     self._fresh(key)
     self._compact()
Example #20
 def __init__(self):
     self.start_mt = monotime()
     self.orig_size = 0
     self.compressed_size = 0
     self.next_progress_mt = self.start_mt
Example #21
def check_target(rs, target, report_state, timeout=None):
    '''
    Parameter timeout is used in tests.
    '''
    report_state['name'] = target.name
    report_state['url'] = target.url

    # check SSL
    if target.url.startswith('https://'):
        try:
            p = urlparse(target.url)
            logger.info('Checking SSL cert of %s:%s', p.hostname, p.port)
            hostname = p.hostname
            port = p.port or 443
            t0 = monotime()
            cx = ssl.create_default_context()
            conn = cx.wrap_socket(socket(AF_INET), server_hostname=hostname)
            conn.settimeout(5)
            conn.connect((hostname, port))
            try:
                cert = conn.getpeercert()
                peer_ip, peer_port = conn.getpeername()
                logger.debug('Connected to %s (%s) port %s, cert: %r',
                             hostname, peer_ip, port, cert)
            finally:
                conn.close()
        except Exception as e:
            report_state['ssl_certificate'] = {
                'error': {
                    '__value': str(e),
                    '__check': {
                        'state': 'red'
                    },
                },
            }
        else:
            duration = monotime() - t0
            logger.info('SSL check took %.3f s', duration)
            expire_date = datetime.strptime(cert['notAfter'],
                                            '%b %d %H:%M:%S %Y %Z')
            remaining_days = (expire_date -
                              datetime.utcnow()).total_seconds() / 86400
            report_state['ssl_certificate'] = {
                'hostname': hostname,
                'port': port,
                'ip': peer_ip,
                'notBefore': cert['notBefore'],
                'notAfter': cert['notAfter'],
                'serialNumber': cert['serialNumber'],
                'remaining_days': {
                    '__value': remaining_days,
                    '__check': {
                        'state': 'red' if remaining_days < 10 else 'green',
                    },
                },
            }

    # make HTTP request
    t1 = monotime()
    try:
        try:
            r = rs.get(target.url,
                       headers={
                           'User-Agent': default_user_agent,
                       },
                       timeout=timeout or default_timeout)
        finally:
            duration = monotime() - t1
            report_state['duration_seconds'] = duration
    except Exception as e:
        logger.info('Exception while processing url %r: %r', target.url, e)
        report_state['error'] = {
            '__value': str(e),
            '__check': {
                'state': 'red'
            },
        }
        return

    report_state['error'] = {
        '__value': None,
        '__check': {
            'state': 'green'
        },
    }
    report_state['final_url'] = r.url
    report_state['response'] = {
        'status_code': {
            '__value': r.status_code,
            '__check': {
                'state': 'green' if r.status_code == 200 else 'red',
            },
        },
        'content_length': len(r.content),
    }

    if target.response_contains:
        present = target.response_contains in r.text
        report_state['response_contains'] = {
            'text': target.response_contains,
            'present': {
                '__value': present,
                '__check': {
                    'state': 'green' if present else 'red',
                },
            },
        }
Example #22
async def check_target_ip(conf, target, report, hostname, ip):
    logger.debug('Checking IP address %s', ip)
    ip_report = {
        'error': {
            '__value': None,
            '__check': {
                'state': 'green',
            },
        },
    }
    try:
        ip_report['reverse_record'] = await get_ip_reverse(ip)
    except CancelledError as e:
        logger.debug('get_ip_reverse cancelled')
        raise e
    except ReverseLookupError as e:
        logger.debug('get_ip_reverse ReverseLookupError: %s', e)
        ip_report['reverse_record'] = None
        ip_report['reverse_record_error'] = str(e)
    start_time = monotime()
    try:
        conn = CustomTCPConnector(resolver=CustomResolver(hostname, ip))
        async with ClientSession(connector=conn) as session:
            start_time = monotime()
            async with session.get(target.url) as response:
                status_ok = response.status == 200
                ip_report['status_code'] = {
                    '__value': response.status,
                    '__check': {
                        'state': 'green' if status_ok else 'red',
                    },
                }
                data = await response.read()
                duration = monotime() - start_time
                duration_threshold = target.duration_threshold or conf.default_duration_threshold
                duration_ok = timedelta(seconds=duration) <= duration_threshold
                ip_report['duration'] = {
                    '__value': duration,
                    '__unit': 'seconds',
                    '__check': {
                        'state': 'green' if duration_ok else 'red',
                    },
                }
                ip_report['response_size'] = {
                    '__value': len(data),
                    '__unit': 'bytes',
                }
                ip_report['redirect_count'] = len(response.history)
                ip_report['final_url'] = str(response.url)
                ip_report['ssl_certificate'] = get_ssl_cert_report(
                    conn.get_last_cert())
                # try:
                #     ip_report['response_preview'] = data.decode()[:100]
                # except Exception:
                #     ip_report['response_preview'] = str(data)[:100]
                logger.debug('GET %r -> %s %s in %.3f s', target.url,
                             response.status, smart_repr(data), duration)
                if status_ok and target.check_response_contains:
                    present = to_bytes(target.check_response_contains) in data
                    ip_report['check_response_contains'] = {
                        'content': target.check_response_contains,
                        'present': {
                            '__value': present,
                            '__check': {
                                'state': 'green' if present else 'red',
                            },
                        },
                    }
    except CancelledError as e:
        logger.info('GET %r via %s cancelled: %r', target.url, ip, e)
        raise e
    except Exception as e:
        logger.info('GET %r via %s failed: %r', target.url, ip, e)
        if isinstance(e, SSLError):
            if 'certificate has expired' in str(e):
                msg = 'SSL certificate has expired'
            else:
                msg = f'SSLError: {e}'
        else:
            msg = str(e) or repr(e)
        ip_report['error'] = {
            '__value': msg,
            '__check': {
                'state': 'red',
            },
        }
    ip_report.setdefault('duration', {'__value': monotime() - start_time})
    report['state']['by_ip'][ip] = ip_report
Example #23
def backup(src_path,
           backend,
           recipients,
           recipients_files,
           reuse_backup_count=30,
           follow_symlinks=False):
    t0 = monotime()
    encryption_key = os.urandom(32)
    encryption_key_sha1 = hashlib.new('sha1', encryption_key).hexdigest()
    if recipients or recipients_files:
        age_encrypted_encryption_key = encrypt_with_age(
            encryption_key,
            recipients=recipients,
            recipients_files=recipients_files)
    else:
        logger.info(
            'No recipients specified - the data file AES key will be stored in metadata file unencrypted'
        )
        age_encrypted_encryption_key = None
    backup_id = datetime.utcnow().strftime('%Y%m%dT%H%M%SZ')
    adapter = ChunkAdapter(backend)
    logger.info('Backing up %s to %s - backup id %s', src_path, backend,
                backup_id)
    with ExitStack() as stack:
        stack.callback(backend.close_data_file)
        temp_dir = Path(
            stack.enter_context(
                TemporaryDirectory(prefix=f'baq.{backup_id}.')))
        reuse_encryption_keys, reuse_blocks = load_previous_backup_for_reuse(
            backend, temp_dir, reuse_backup_count)
        meta_path = temp_dir / f'baq.{backup_id}.meta'
        meta_file = stack.enter_context(gzip.open(meta_path, mode='wb'))
        meta_file.write(
            to_json(
                generate_header(
                    backup_id=backup_id,
                    encryption_key=encryption_key,
                    encryption_key_sha1=encryption_key_sha1,
                    age_encrypted_encryption_key=age_encrypted_encryption_key,
                    reuse_encryption_keys=reuse_encryption_keys)))
        for dir_path, dirs, files, dir_fd in os.fwalk(
                src_path, follow_symlinks=follow_symlinks):
            #logger.debug('fwalk -> %s, %s, %s, %s', dir_path, dirs, files, dir_fd)
            dir_stat = os.fstat(dir_fd)
            meta_file.write(
                to_json({
                    'directory': {
                        'path': str(Path(dir_path).relative_to(src_path)),
                        'mode': dir_stat.st_mode,
                        'uid': dir_stat.st_uid,
                        'gid': dir_stat.st_gid,
                        'atime': dir_stat.st_atime,
                        'ctime': dir_stat.st_ctime,
                        'mtime': dir_stat.st_mtime,
                    }
                }))
            for file_name in files:
                file_path = str(
                    Path(dir_path).relative_to(src_path) / file_name)
                try:
                    file_stat = os.stat(file_name,
                                        dir_fd=dir_fd,
                                        follow_symlinks=follow_symlinks)
                except FileNotFoundError as e:
                    logger.warning('Cannot stat file %s: %s', file_path, e)
                    continue
                if stat.S_ISLNK(file_stat.st_mode):
                    try:
                        symlink_target = os.readlink(file_name, dir_fd=dir_fd)
                    except Exception as e:
                        logger.warning('Cannot read symlink target: %s - %r',
                                       file_path, e)
                    else:
                        meta_file.write(
                            to_json({
                                'symlink': {
                                    'path': file_path,
                                    'target': symlink_target,
                                    'mode': file_stat.st_mode,
                                    'uid': file_stat.st_uid,
                                    'gid': file_stat.st_gid,
                                    'atime': file_stat.st_atime,
                                    'ctime': file_stat.st_ctime,
                                    'mtime': file_stat.st_mtime,
                                }
                            }))
                    continue
                elif not stat.S_ISREG(file_stat.st_mode):
                    logger.warning('Skipping file with unknown type: %s',
                                   file_path)
                    continue
                assert stat.S_ISREG(file_stat.st_mode)
                try:
                    file_stream = open(file_name,
                                       mode='rb',
                                       opener=partial(os.open, dir_fd=dir_fd))
                except PermissionError as e:
                    logger.warning('Cannot open file %s: %s', file_path, e)
                    continue
                with file_stream:
                    logger.debug('Processing file %s', file_path)
                    file_hash = hashlib.new('sha3_512')
                    file_stat = os.fstat(file_stream.fileno())
                    meta_file.write(
                        to_json({
                            'file': {
                                'path': file_path,
                                'mode': file_stat.st_mode,
                                'uid': file_stat.st_uid,
                                'gid': file_stat.st_gid,
                                'atime': file_stat.st_atime,
                                'ctime': file_stat.st_ctime,
                                'mtime': file_stat.st_mtime,
                            }
                        }))
                    while True:
                        pos = file_stream.tell()
                        chunk = file_stream.read(chunk_size)
                        if not chunk:
                            break
                        #logger.debug('Read %d bytes from file %s pos %s: %s', len(chunk), file_name, pos, smart_repr(chunk))
                        file_hash.update(chunk)
                        chunk_hash = hashlib.new('sha3_512', chunk).digest()

                        if chunk_hash in reuse_blocks:
                            meta_file.write(
                                to_json({
                                    'content': {
                                        'offset': pos,
                                        'sha3_512': chunk_hash.hex(),
                                        'df_name': reuse_blocks[chunk_hash]['df_name'],
                                        'df_offset': reuse_blocks[chunk_hash]['df_offset'],
                                        'df_size': reuse_blocks[chunk_hash]['df_size'],
                                        'encryption_key_sha1': reuse_blocks[chunk_hash]['encryption_key_sha1'],
                                    }
                                }))
                        else:
                            chunk_df = adapter.write_data_chunk(
                                backup_id,
                                chunk,
                                encryption_key=encryption_key)
                            meta_file.write(
                                to_json({
                                    'content': {
                                        'offset': pos,
                                        'sha3_512': chunk_hash.hex(),
                                        'df_name': chunk_df.name,
                                        'df_offset': chunk_df.offset,
                                        'df_size': chunk_df.size,
                                        'encryption_key_sha1': encryption_key_sha1,
                                    }
                                }))
                            reuse_blocks[chunk_hash] = {
                                'df_name': chunk_df.name,
                                'df_offset': chunk_df.offset,
                                'df_size': chunk_df.size,
                                'encryption_key_sha1': encryption_key_sha1,
                            }
                        del chunk
                    meta_file.write(
                        to_json({
                            'file_done': {
                                'sha3_512': file_hash.hexdigest(),
                            }
                        }))
        adapter.close_data_file()
        meta_file.write(
            to_json({
                'done': {
                    'backup_id': backup_id,
                    'date': datetime.utcnow().strftime('%Y%m%dT%H%M%SZ'),
                }
            }))
        meta_file.close()
        backend.store_file(meta_path, name=meta_path.name)
    logger.info('Backup id %s done in %.3f s', backup_id, monotime() - t0)
    return BackupResult(backup_id)
Example #24
def main():
    logging.basicConfig(
        format='%(asctime)s %(name)-20s %(levelname)5s: %(message)s',
        level=logging.DEBUG)

    logger.debug('Starting')

    rs = requests.session()

    for url in watch_urls:
        logger.info('Monitoring address %s', url)

        t0 = monotime()
        r = rs.get(url)
        td = monotime() - t0

        search_string = 'source'

        data = {
            'label': {
                'agent': 'sample_web',
                'host': socket.getfqdn(),
                'url': url,
            },
            'date': datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S.%fZ'),
            'values': {
                'url': url,
                'final_url': r.url,
                'response': {
                    'status_code': {
                        '__value': r.status_code,
                        '__check': {
                            'state': 'green' if r.status_code == 200 else 'red',
                        },
                    },
                    'content_length': len(r.content),
                    'duration': td,
                },
                'check_content': {
                    'search_string': search_string,
                    'present': {
                        '__value': search_string in r.text,
                        '__check': {
                            'state': 'green' if search_string in r.text else 'red',
                        },
                    },
                },
                'watchdog': {
                    '__watchdog': {
                        'deadline': int(time() * 1000 + 10000),
                    },
                },
            },
        }

        #print(yaml.dump(data, width=200))

        report_address = default_report_address
        auth_token = 'secret_report_token'
        r = rs.post(report_address,
                    json=data,
                    headers={'Authorization': 'token ' + auth_token})
        r.raise_for_status()
        print(r.content)
Example #25
async def dump_snapshots(request):
    '''
    For debugging purposes only - snapshots may be skipped due to race
    conditions between creating the ObjectId and finishing the DB insert.
    '''
    model = request.app['model']
    stream_id = request.query.get('streamId')
    after_snapshot_id = request.query.get('afterSnapshotId')
    tail = bool(int(request.query.get('tail', 0)))
    streams = await model.streams.list_all()
    streams = {s.id: s for s in streams}
    res = StreamResponse()
    res.headers['Content-Type'] = 'text/plain'
    res.enable_chunked_encoding()
    res.enable_compression()
    await res.prepare(request)
    try:
        while True:
            t = monotime()
            snapshots = await model.stream_snapshots.dump(
                stream_id=stream_id,
                after_snapshot_id=after_snapshot_id)
            if not snapshots:
                if not tail:
                    break
                logger.debug('No snapshots dumped, sleeping')
                await sleep(1)
                continue
            logger.debug(
                'Dumped %s snapshots %s - %s in %.3f s',
                len(snapshots), snapshots[0].id, snapshots[-1].id,
                monotime() - t)
            parts = []
            for snapshot in snapshots:
                stream = streams.get(snapshot.stream_id)
                if not stream:
                    stream = await model.streams.get_by_id(snapshot.stream_id)
                assert stream.id == snapshot.stream_id
                record = {
                    'id': str(snapshot.id),
                    'date': snapshot.date.isoformat(),
                    'stream': {
                        'id': snapshot.stream_id,
                        'label': stream.label,
                    },
                    'state_json': snapshot.state_json,
                }
                line = to_compact_json(record)
                parts.append(line.encode())
                parts.append(b'\n')
                after_snapshot_id = snapshot.id
            del snapshots
            chunk = b''.join(parts)
            logger.debug('Sending %.2f kB of JSONL response chunk', len(chunk) / 1024)
            await res.write(chunk)
        await res.write_eof()
    except CancelledError as e:
        logger.debug('dump_snapshots finished: %r', e)
    except Exception as e:
        logger.exception('dump_snapshots failed: %r', e)
    return res