Example #1
def main():
    """Capture runner main loop: every REFRESH_RATE seconds, reload the Status
    record and start, stop, or restart capture as requested."""
    status = Status.load()
    while True:
        # Exits when killed
        # Only do this check every five seconds.
        time.sleep(REFRESH_RATE)

        try:
            status.refresh_from_db()
        except status.DoesNotExist:
            # We've never created a status object. Just sleep until we do.
            continue

        if status.capture == status.RESTART:
            # It doesn't matter if we're running or not, restart/start capture.
            log.info("Restarting capture.")
            status.start_capture()

        elif status.capture == status.STOPPED:
            if status.pid is not None:
                # Capture is still running somewhere.
                # Shut it down.
                # Shut it down forever.
                log.info("Shutting down capture.")
                status.stop_capture()

        elif status.capture == status.STARTED:
            cap_status = status.capture_status
            if cap_status[0] == status.NOT_OK:
                # Capture should be started, but isn't
                log.info("Capture was supposed to be running, but wasn't: {}".
                         format(cap_status[1]))
                status.start_capture()
        else:
            log.error("Invalid capture mode: {}".format(status.capture))
Example #2
def main():
    """Capture runner main loop: act on the requested capture state, then sleep
    for REFRESH_RATE seconds and reload the Status record."""
    status = Status.load()
    while True:
        # Exits when killed

        if status.capture == status.RESTART:
            # It doesn't matter if we're running or not, restart/start capture.
            log.info("Restarting capture.")
            status.start_capture()

        elif status.capture == status.STOPPED:
            if status.find_capture_pids():
                # Capture is still running somewhere.
                # Shut it down.
                # Shut it down forever.
                log.info("Shutting down capture.")
                status.stop_capture()

        elif status.capture == status.STARTED:
            cap_status = status.capture_status
            if cap_status[0] == status.NOT_OK:
                # Capture should be started, but isn't
                log.info("Capture was supposed to be running, but wasn't.")
                status.start_capture()
        else:
            log.error("Invalid capture mode: {}".format(status.capture))

        # Only do this check every five seconds.
        time.sleep(REFRESH_RATE)
        status.refresh_from_db()
Example #3
def set_capture_state(self, state):
    """Set the capture state ('start', 'restart', or 'stop') and wait up to
    TIMEOUT seconds for the capture runner to acknowledge the change."""
    TIMEOUT = 2

    status = Status.load()
    before_time = status.capture_state_changed

    # Starting capture will restart it if it is already running.
    if state == 'start':
        status.capture = status.STARTED
    elif state == 'restart':
        status.capture = status.RESTART
    elif state == 'stop':
        status.capture = status.STOPPED
    else:
        log.info('Unknown state value: {}'.format(state))
        return {'danger': 'Unknown state: {}'.format(state)}

    status.save()

    timeout = time.time() + TIMEOUT

    while status.capture_state_changed == before_time:
        if time.time() > timeout:
            return {
                'danger':
                'No attempt to start/stop/restart capture detected. Capture runner '
                'may not be active.'
            }
        # Wait a bit
        time.sleep(0.5)
        status.refresh_from_db()

    return {'info': status.running_message}
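A hypothetical caller (not from the original project) could drive this handshake and report the outcome based on which key comes back; 'api' below is just a stand-in for whatever object exposes set_capture_state().

# Hypothetical usage sketch; 'api' and the surrounding logging are assumptions.
result = api.set_capture_state('restart')
if 'danger' in result:
    log.error(result['danger'])   # the capture runner never picked up the request
else:
    log.info(result['info'])      # acknowledged; relay the runner's message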
Example #4
def list(self):
    """Return a list of disks suitable for display in a client side DataTables table.
    :param Request request: A request object
    :param str dev_type: ({})""".format(', '.join(STATUS_DEV_TYPES))

    # Refresh our catalog of devices.
    dm.Device.refresh()

    dev_status = Status.load()

    host_devices = dm.Device.get_devices().values()

    known_uuids = [disk.uuid for disk in Disk.objects.all()]

    empty_slots = [dev.as_dict() for dev in dm.Device.get_empty_slots()]
    # All the disks that aren't initialized.
    devices = []
    init_devices = []
    for dev in host_devices:
        if dev.uuid in known_uuids:
            init_devices.append(dev.as_dict())
        else:
            devices.append(dev.as_dict())

    for device in init_devices:
        if 'uuid' in device:
            try:
                disk_row = Disk.objects.get(uuid=device['uuid'])
                device['disk'] = DiskSerializer(disk_row).data
            except Disk.DoesNotExist:
                pass
            if 'label' in device and \
               device['label'] == settings.INDEX_DEV_LABEL:
                if device['uuid'] == str(dev_status.index_uuid):
                    if 'mountpoint' in device:
                        device['index_dev_status'] = 'ACTIVE INDEX DEVICE'
                    else:
                        device['index_dev_status'] = 'UNMOUNTED INDEX DEVICE'
                else:
                    device['index_dev_status'] = 'UNREGISTERED INDEX DEVICE'

    return {
        'data': {
            'empty_slots': empty_slots,
            'devices': devices,
            'init_devices': init_devices
        }
    }
Example #5
def set_capture_settings(self, settings):
    """Validate the requested 'capture_mode' in the given settings dict and save it."""

    status = Status.load()

    mode = settings.get('capture_mode')

    modes = [m[0] for m in status.CAPTURE_MODES]

    if mode not in modes:
        log.info('Invalid capture mode: {}'.format(mode))
        return {'warning': 'Invalid capture mode: {}'.format(mode)}

    status.capture_mode = mode
    status.settings_changed = True
    status.save()

    return {'info': 'Updated capture mode.'}
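CAPTURE_MODES is iterated as pairs with only the first element kept, which suggests Django-style choices on the Status model. The shape below is an assumption with placeholder mode names, not the project's real list.

# Assumed shape of Status.CAPTURE_MODES (Django 'choices' style); the actual
# mode names in the project are not shown on this page.
CAPTURE_MODES = [
    ('mode_a', 'First capture mode'),
    ('mode_b', 'Second capture mode'),
]

# set_capture_settings() validates against just the stored values:
modes = [m[0] for m in CAPTURE_MODES]   # ['mode_a', 'mode_b']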
Example #6
def make_index_device(self, disks):
    """
    Take two disks to make into the index disk. They are put into a RAID 1
    configuration, formatted and mounted.
    :param self: This is run as a method.
    :param disks: [str]
    :return:
    """

    if len(disks) not in [1, 2]:
        raise RuntimeError("You must provide one or two disks")

    # Make sure we don't already have an index device.
    status = Status.load()
    if status.index_uuid is not None and dm.Device.find_device_by_uuid(
            status.index_uuid):
        raise RuntimeError("Index device already exists.")

    if status.index_uuid is None:
        # If there is a device with the index_device label, always use it.
        idx_dev = dm.find_index_device()
        if idx_dev is not None:
            status.index_uuid = idx_dev.uuid
            status.save()
            raise RuntimeError("Index device already exists.")

    self.update_state(state="WORKING")

    idx_dev = dm.init_index_device(*disks, task=self)
    while idx_dev.uuid is None:
        time.sleep(1)
    status.index_uuid = idx_dev.uuid
    status.save()

    result, msg = status.mount_index_device()
    if result != status.OK:
        raise RuntimeError(msg)

    return {'msg': "Index disk created and initialized successfully."}
Example #7
def update_stats(self):
    """Normalize and upload the most recent capture stats for this indexer to the search head."""
    # See the search_head_api Stats model for a detailed description of what's going on here.

    status = Status.load()

    last = status.last_stats_upload

    # Get all the indexes that have been readied since our last check
    new_indexes = Index.objects.filter(ready=True).order_by('start_ts')
    if last is not None:
        new_indexes = new_indexes.filter(readied__gt=last)

    if len(new_indexes) == 0:
        # Nothing to do...
        return

    first_ts = new_indexes[0].start_ts
    # Truncate the start to get the first minute we'll mess with
    start_minute = pytz.UTC.localize(
        timezone.datetime(first_ts.year, first_ts.month, first_ts.day,
                          first_ts.hour, first_ts.minute))

    capture_node = CaptureNode.objects.get(hostname=settings.NODE_NAME)

    oldest = None

    # We have to do this as a single transaction on the search head.
    with transaction.atomic(using='default'):

        # All the interfaces must exist in the db, so we create any that don't already
        interfaces = {
            iface.name: iface
            for iface in StatsInterface.objects.all()
        }
        touched_ifaces = IdxStats.objects.values('interface').distinct()
        for rec in touched_ifaces:
            iface_name = rec['interface']
            if iface_name not in interfaces:
                # The atomic transaction should ensure uniqueness here.
                new_iface = StatsInterface(name=iface_name)
                new_iface.save()
                interfaces[iface_name] = new_iface

        # Get the stats objects that might overlap with what we're adding for this capture node.
        # prefetch the interface table data too.
        existing_stats = Stats.objects.filter(capture_node=capture_node, minute__gte=start_minute)\
                                      .select_related('interface')
        # A dictionary of interface names to a dictionary of minutes.
        # Note that while interface IDs would be faster, they haven't necessarily been created yet.
        interface_minutes = defaultdict(dict)
        # Populate our interface minutes with the existing objects
        for stat in existing_stats:
            interface_minutes[stat.interface.name][stat.minute] = stat

        # Update/create new minutes for all the stats we're processing.
        for idx in new_indexes:
            if oldest is None or idx.readied > oldest:
                oldest = idx.readied

            # Get all the stat minutes objects referenced by this object
            try:
                iface = idx.stats.interface
            except ObjectDoesNotExist:
                continue
            stat_minutes = Stats.from_index(idx, capture_node,
                                            interfaces[iface])

            iface_minutes = interface_minutes[idx.stats.interface]
            for stat in stat_minutes:
                minute = stat.minute
                old_stat = iface_minutes.get(minute, None)
                if old_stat is None:
                    # No pre-existing minute data, so this is it.
                    iface_minutes[minute] = stat
                else:
                    # Update the existing minute with the new info.
                    old_stat.merge(stat)

        # Separate all the already existing objects from those that need to be created.
        # This allows us to bulk save the new ones.
        new = []
        old = []
        for idx_minutes in interface_minutes.values():
            for stat in idx_minutes.values():
                if not stat.pk:
                    new.append(stat)
                else:
                    old.append(stat)

        for stat in old:
            stat.save()
        Stats.objects.bulk_create(new)

    status.last_stats_upload = oldest
    status.save()

    return {}
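The core of update_stats() is the per-minute upsert: rows that already exist for a minute are merged in place, and brand-new minutes are collected so they can be written with a single bulk_create(). The stripped-down sketch below is not project code; MinuteStat is a hypothetical stand-in for the Stats model.

# Self-contained illustration of the merge-or-insert pattern used above.
from collections import defaultdict


class MinuteStat:
    def __init__(self, minute, packets, pk=None):
        self.minute = minute
        self.packets = packets
        self.pk = pk            # None means "not yet saved to the database"

    def merge(self, other):
        # Fold another record for the same minute into this one.
        self.packets += other.packets


interface_minutes = defaultdict(dict)

# Rows already in the database, keyed by interface name and minute.
interface_minutes['eth0'][0] = MinuteStat(0, 10, pk=1)

# New per-minute records produced from the freshly readied indexes.
for stat in [MinuteStat(0, 5), MinuteStat(1, 7)]:
    existing = interface_minutes['eth0'].get(stat.minute)
    if existing is None:
        interface_minutes['eth0'][stat.minute] = stat   # brand-new minute
    else:
        existing.merge(stat)                            # update the existing minute

new = [s for m in interface_minutes.values() for s in m.values() if not s.pk]
old = [s for m in interface_minutes.values() for s in m.values() if s.pk]
# The 'old' rows would each be save()d; the 'new' rows go into one bulk_create().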
Example #8
def get_status(self):
    """Return the current serialized Status for the client."""
    data = StatusSerializer(Status.load()).data
    log.error('status: {}'.format(data))

    return {'data': {'state': data}}