Example #1
0
class NewAgentTestCase(unittest.TestCase):
    """Integration tests for the Agent add/get/remove life cycle.

    One agent is registered once for the whole test case (setUpClass) and
    removed afterwards (tearDownClass), so the individual tests share
    ``cls.agent`` and ``cls.id``.
    """

    @classmethod
    def setUpClass(cls):
        # Classmethods conventionally take ``cls``, not ``self``.
        # Registering the shared agent also exercises Agent.add().
        cls.agent = Agent()
        cls.id = cls.agent.add("TestAgent", "any")

    @classmethod
    def tearDownClass(cls):
        # Removing the shared agent also exercises Agent.remove().
        cls.agent.remove()

    def test_valid_id(self):
        # add() must return something parseable as an integer ID.
        self.assertIsInstance(int(self.id), int, "Returned ID is not valid")

    def test_get_key(self):
        # A freshly added agent must have a non-empty client key.
        self.assertTrue(self.agent.get_key(), "Invalid key")

    def test_get(self):
        # get() must populate the agent's attributes from the database.
        self.agent.get()
        self.assertEqual(self.agent.name, "TestAgent")
Example #2
0
 def setUpClass(self):
     """Create the shared test agent and remember its registration ID."""
     new_agent = Agent()  # exercises Agent.add()
     self.agent = new_agent
     self.id = new_agent.add('TestAgent', 'any')
Example #3
0
    def process_files_from_worker(self, files_checksums: Dict,
                                  decompressed_files_path: str, logger):
        """Apply on the master the files received from a worker node.

        :param files_checksums: filename -> metadata dict (cluster item key,
               merge information, etc.) as sent by the worker.
        :param decompressed_files_path: directory where the worker's zip was
               decompressed.
        :param logger: task logger used for progress/error reporting.
               NOTE(review): some messages below go to ``self.logger`` and
               others to this ``logger`` parameter -- confirm which is intended.
        """
        def update_file(name, data):
            # Update a single received file, holding an exclusive lock on a
            # per-file lock file so concurrent tasks never write the same path.
            # Full path
            full_path, error_updating_file, n_merged_files = common.ossec_path + name, False, 0

            # Cluster items information: write mode and permissions
            lock_full_path = "{}/queue/cluster/lockdir/{}.lock".format(
                common.ossec_path, os.path.basename(full_path))
            lock_file = open(lock_full_path, 'a+')
            try:
                fcntl.lockf(lock_file, fcntl.LOCK_EX)
                if os.path.basename(name) == 'client.keys':
                    # client.keys must never be overwritten by a worker.
                    self.logger.warning(
                        "Client.keys received in a master node")
                    raise WazuhException(3007)
                if data['merged']:
                    # Merged files bundle many per-agent files in one blob.
                    is_agent_info = data['merge_type'] == 'agent-info'
                    if is_agent_info:
                        self.sync_agent_info_status['total_agent_info'] = len(
                            agent_ids)
                    else:
                        self.sync_extra_valid_status[
                            'total_extra_valid'] = len(agent_ids)
                    for file_path, file_data, file_time in cluster.unmerge_agent_info(
                            data['merge_type'], decompressed_files_path,
                            data['merge_name']):
                        full_unmerged_name = os.path.join(
                            common.ossec_path, file_path)
                        tmp_unmerged_path = os.path.join(
                            common.ossec_path, 'queue/cluster', self.name,
                            os.path.basename(file_path))
                        try:
                            if is_agent_info:
                                # agent-info basenames look like "<name>-<ip>";
                                # fall back to the whole basename on no match.
                                agent_name_re = re.match(
                                    r'(^.+)-(.+)$',
                                    os.path.basename(file_path))
                                agent_name = agent_name_re.group(
                                    1) if agent_name_re else os.path.basename(
                                        file_path)
                                if agent_name not in agent_names:
                                    # Unknown agent: count a warning and skip.
                                    n_errors['warnings'][data['cluster_item_key']] = 1 \
                                        if n_errors['warnings'].get(data['cluster_item_key']) is None \
                                        else n_errors['warnings'][data['cluster_item_key']] + 1

                                    self.logger.debug2(
                                        "Received status of an non-existent agent '{}'"
                                        .format(agent_name))
                                    continue
                            else:
                                # agent-groups files are named by agent ID.
                                agent_id = os.path.basename(file_path)
                                if agent_id not in agent_ids:
                                    n_errors['warnings'][data['cluster_item_key']] = 1 \
                                        if n_errors['warnings'].get(data['cluster_item_key']) is None \
                                        else n_errors['warnings'][data['cluster_item_key']] + 1

                                    self.logger.debug2(
                                        "Received group of an non-existent agent '{}'"
                                        .format(agent_id))
                                    continue

                            # Worker timestamps may or may not carry microseconds.
                            try:
                                mtime = datetime.strptime(
                                    file_time, '%Y-%m-%d %H:%M:%S.%f')
                            except ValueError:
                                mtime = datetime.strptime(
                                    file_time, '%Y-%m-%d %H:%M:%S')

                            if os.path.isfile(full_unmerged_name):

                                local_mtime = datetime.utcfromtimestamp(
                                    int(os.stat(full_unmerged_name).st_mtime))
                                # check if the date is older than the manager's date
                                if local_mtime > mtime:
                                    logger.debug2(
                                        "Receiving an old file ({})".format(
                                            file_path))
                                    continue

                            # Write to a temp path, fix metadata, then rename
                            # so the final replacement is atomic.
                            with open(tmp_unmerged_path, 'wb') as f:
                                f.write(file_data)

                            mtime_epoch = timegm(mtime.timetuple())
                            os.utime(
                                tmp_unmerged_path,
                                (mtime_epoch, mtime_epoch))  # (atime, mtime)
                            os.chown(tmp_unmerged_path, common.ossec_uid,
                                     common.ossec_gid)
                            os.chmod(
                                tmp_unmerged_path, self.cluster_items['files'][
                                    data['cluster_item_key']]['permissions'])
                            os.rename(tmp_unmerged_path, full_unmerged_name)
                        except Exception as e:
                            # Undo the per-file count and record the error.
                            self.logger.error(
                                "Error updating agent group/status ({}): {}".
                                format(tmp_unmerged_path, e))
                            if is_agent_info:
                                self.sync_agent_info_status[
                                    'total_agent_info'] -= 1
                            else:
                                self.sync_extra_valid_status[
                                    'total_extra_valid'] -= 1

                            n_errors['errors'][data['cluster_item_key']] = 1 \
                                if n_errors['errors'].get(data['cluster_item_key']) is None \
                                else n_errors['errors'][data['cluster_item_key']] + 1

                else:
                    # Plain (non-merged) file: move it into place directly.
                    zip_path = "{}{}".format(decompressed_files_path, name)
                    os.chown(zip_path, common.ossec_uid, common.ossec_gid)
                    os.chmod(
                        zip_path, self.cluster_items['files'][
                            data['cluster_item_key']]['permissions'])
                    os.rename(zip_path, full_path)

            except WazuhException as e:
                logger.debug2("Warning updating file '{}': {}".format(name, e))
                error_tag = 'warnings'
                error_updating_file = True
            except Exception as e:
                logger.debug2("Error updating file '{}': {}".format(name, e))
                error_tag = 'errors'
                error_updating_file = True

            if error_updating_file:
                n_errors[error_tag][data['cluster_item_key']] = 1 if not n_errors[error_tag].get(
                    data['cluster_item_key']) \
                    else n_errors[error_tag][data['cluster_item_key']] + 1

            # NOTE(review): the unlock/close is not in a finally block, so an
            # exception raised by lockf itself would leak the file handle --
            # confirm whether that is acceptable here.
            fcntl.lockf(lock_file, fcntl.LOCK_UN)
            lock_file.close()

        # tmp path
        tmp_path = "/queue/cluster/{}/tmp_files".format(self.name)
        n_merged_files = 0
        n_errors = {'errors': {}, 'warnings': {}}

        # create temporary directory for lock files
        lock_directory = "{}/queue/cluster/lockdir".format(common.ossec_path)
        if not os.path.exists(lock_directory):
            utils.mkdir_with_mode(lock_directory)

        # Known agent names/IDs, used by update_file() to drop data that
        # references agents this master does not know about.
        # NOTE(review): select only asks for 'name' but 'id' is read too --
        # presumably 'id' is always returned; verify.
        try:
            agents = Agent.get_agents_overview(select={'fields': ['name']},
                                               limit=None)['items']
            agent_names = set(map(operator.itemgetter('name'), agents))
            agent_ids = set(map(operator.itemgetter('id'), agents))
        except Exception as e:
            # Best effort: with empty sets every per-agent file is skipped
            # as "non-existent agent" rather than crashing the sync.
            logger.debug2("Error getting agent ids and names: {}".format(e))
            agent_names, agent_ids = {}, {}

        try:
            for filename, data in files_checksums.items():
                update_file(data=data, name=filename)

            shutil.rmtree(decompressed_files_path)

        except Exception as e:
            self.logger.error("Error updating worker files: '{}'.".format(e))
            raise e

        # Report accumulated errors/warnings once everything was processed.
        if sum(n_errors['errors'].values()) > 0:
            logger.error("Errors updating worker files: {}".format(' | '.join([
                '{}: {}'.format(key, value)
                for key, value in n_errors['errors'].items()
            ])))
        if sum(n_errors['warnings'].values()) > 0:
            for key, value in n_errors['warnings'].items():
                if key == '/queue/agent-info/':
                    logger.debug2(
                        "Received {} agent statuses for non-existent agents. Skipping."
                        .format(value))
                elif key == '/queue/agent-groups/':
                    logger.debug2(
                        "Received {} group assignments for non-existent agents. Skipping."
                        .format(value))
Example #4
0
def main():
    """Upgrade one agent with a WPK package.

    Reads the global ``args`` parsed by the CLI entry point: either sends a
    custom WPK file (--file together with --execute) or fetches one from the
    configured repository, then waits for the agent to reconnect and prints
    the upgrade result.
    """
    # Check arguments
    if args.list_outdated:
        list_outdated()
        exit(0)

    if not args.agent:
        arg_parser.print_help()
        exit(0)

    if args.silent:
        # Silent mode implies no debug output.
        args.debug = False

    # Capture Ctrl + C
    signal(SIGINT, signal_handler)

    # Initialize framework
    myWazuh = Wazuh(get_init=True)

    agent = Agent(id=args.agent)
    agent._load_info_from_DB()

    # The agent-info file's mtime is polled below to detect reconnection.
    agent_info = "{0}/queue/agent-info/{1}-{2}".format(common.ossec_path,
                                                       agent.name, agent.ip)
    if not os.path.isfile(agent_info):
        raise WazuhException(1720)

    # Custom WPK file
    if args.file:
        if args.execute:
            upgrade_command_result = agent.upgrade_custom(
                file_path=args.file,
                installer=args.execute,
                debug=args.debug,
                show_progress=print_progress if not args.silent else None,
                chunk_size=args.chunk_size,
                rl_timeout=args.timeout)
            if not args.silent:
                if not args.debug:
                    print(
                        "\n{0}... Please wait.".format(upgrade_command_result))
                else:
                    print(upgrade_command_result)

            # Poll the agent-info mtime until it changes (agent reconnected)
            # or the retry budget is exhausted.
            counter = 0
            agent_info_stat = os.stat(agent_info).st_mtime

            sleep(10)
            while agent_info_stat == os.stat(
                    agent_info
            ).st_mtime and counter < common.agent_info_retries:
                sleep(common.agent_info_sleep)
                counter = counter + 1

            if agent_info_stat == os.stat(agent_info).st_mtime:
                raise WazuhException(
                    1716, "Timeout waiting for agent reconnection.")

            upgrade_result = agent.upgrade_result(debug=args.debug)
            if not args.silent:
                print(upgrade_result)
        else:
            print("Error: Need executable filename.")

    # WPK upgrade file
    else:
        prev_ver = agent.version
        upgrade_command_result = agent.upgrade(
            wpk_repo=args.repository,
            debug=args.debug,
            version=args.version,
            force=args.force,
            show_progress=print_progress if not args.silent else None,
            chunk_size=args.chunk_size,
            rl_timeout=args.timeout)
        if not args.silent:
            if not args.debug:
                print("\n{0}... Please wait.".format(upgrade_command_result))
            else:
                print(upgrade_command_result)

        # Same reconnection polling as the custom-WPK branch.
        # NOTE(review): here the 10s sleep happens *after* the timeout check,
        # while the custom branch sleeps *before* the polling loop -- confirm
        # the asymmetry is intentional.
        counter = 0
        agent_info_stat = os.stat(agent_info).st_mtime

        while agent_info_stat == os.stat(
                agent_info).st_mtime and counter < common.agent_info_retries:
            sleep(common.agent_info_sleep)
            counter = counter + 1

        if agent_info_stat == os.stat(agent_info).st_mtime:
            raise WazuhException(1716,
                                 "Timeout waiting for agent reconnection.")

        sleep(10)
        upgrade_result = agent.upgrade_result(debug=args.debug)
        if not args.silent:
            if not args.debug:
                # Re-read from the DB so the new version can be reported.
                agent._load_info_from_DB()
                print("Agent upgraded: {0} -> {1}".format(
                    prev_ver, agent.version))
            else:
                print(upgrade_result)
Example #5
0
#    - export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/var/ossec/lib

from sys import path, exit
import json
# cwd = /var/ossec/api/framework/examples
#framework_path = '{0}'.format(path[0][:-9])
# cwd = /var/ossec/api
#framework_path = '{0}/framework'.format(path[0])
# Default path
# Make the Wazuh framework importable regardless of the current directory.
framework_path = '/var/ossec/api/framework'
path.append(framework_path)

try:
    from wazuh import Wazuh
    from wazuh.agent import Agent
except ImportError:
    # Only a missing module should produce this message; any other error
    # (e.g. a syntax error inside the package) must propagate so it is
    # not silently reported as a missing module.
    print("No module 'wazuh' found.")
    exit()

if __name__ == "__main__":

    # Build the Wazuh object from /etc/ossec-init.conf (get_init argument)
    # and dump a pretty-printed overview of every registered agent.
    print("\nWazuh:")
    my_wazuh = Wazuh(get_init=True)
    print(my_wazuh)

    print("\nAgents:")
    overview = Agent.get_agents_overview()
    print(json.dumps(overview, indent=4, sort_keys=True))
Example #6
0
 def setUpClass(self):
     """Register one shared agent used by every test in the case."""
     shared_agent = Agent()  # exercises Agent.add()
     self.agent = shared_agent
     self.id = shared_agent.add("TestAgent", "any")
Example #7
0
def files(agent_id=None,
          event=None,
          filename=None,
          filetype='file',
          md5=None,
          sha1=None,
          hash=None,
          summary=False,
          offset=0,
          limit=common.database_limit,
          sort=None,
          search=None):
    """
    Return a list of files from the database that match the filters

    :param agent_id: Agent ID.
    :param event: Filters by event: added, readded, modified, deleted.
    :param filename: Filters by filename.
    :param filetype: Filters by filetype: file or registry.
    :param md5: Filters by md5 hash.
    :param sha1: Filters by sha1 hash.
    :param hash: Filters by md5 or sha1 hash.
    :param summary: Returns a summary grouping by filename.
    :param offset: First item to return.
    :param limit: Maximum number of items to return.
    :param sort: Sorts the items. Format: {"fields":["field1","field2"],"order":"asc|desc"}.
    :param search: Looks for items with the specified string.
    :return: Dictionary: {'items': array of items, 'totalItems': Number of items (without applying the limit)}
    """

    # Connection: locate the agent's FIM database file.
    db_agent = glob('{0}/{1}-*.db'.format(common.database_path_agents,
                                          agent_id))
    if not db_agent:
        raise WazuhException(1600)
    db_agent = db_agent[0]

    conn = Connection(db_agent)

    # uid/gid/user/group/permissions are meaningless on Windows, so they are
    # omitted from the results for Windows agents (and when the OS is unknown).
    agent_info = Agent(agent_id).get_basic_information()
    if 'os' in agent_info and 'platform' in agent_info['os']:
        windows_agent = agent_info['os']['platform'].lower() == 'windows'
    else:
        # We do not know if it is a windows or linux agent.
        # It is set to windows agent in order to avoid wrong data (uid, gid, ...)
        windows_agent = True

    # API sort-field name -> database column.
    # Fixed: 'user' mapped to a corrupted placeholder ('******'); the column
    # is 'uname' (the value returned as 'user' below, select index 10).
    fields = {
        'scanDate': 'date',
        'modificationDate': 'mtime',
        'file': 'path',
        'size': 'size',
        'user': 'uname',
        'group': 'gname'
    }

    # Query: the {0} placeholder is later filled with either COUNT(*) or the
    # actual select column list.
    query = "SELECT {0} FROM fim_event, fim_file WHERE fim_event.id_file = fim_file.id AND fim_file.type = :filetype"
    request = {'filetype': filetype}

    if event:
        query += ' AND fim_event.type = :event'
        request['event'] = event

    if filename:
        query += ' AND path = :filename'
        request['filename'] = filename

    if md5:
        query += ' AND md5 = :md5'
        request['md5'] = md5

    if sha1:
        query += ' AND sha1 = :sha1'
        request['sha1'] = sha1

    if hash:
        query += ' AND (md5 = :hash OR sha1 = :hash)'
        request['hash'] = hash

    if search:
        query += " AND NOT" if bool(search['negation']) else ' AND'
        query += " (" + " OR ".join(
            x + ' LIKE :search'
            for x in ('path', "date", 'size', 'md5', 'sha1', 'uname', 'gname',
                      'inode', 'perm')) + " )"
        request['search'] = '%{0}%'.format(search['value'])

    # Total items
    if summary:
        query += ' group by path'
        conn.execute(
            "SELECT COUNT(*) FROM ({0}) AS TEMP".format(
                query.format("max(date)")), request)
    else:
        conn.execute(query.format('COUNT(*)'), request)

    data = {'totalItems': conn.fetch()[0]}

    # Sorting
    if sort:
        if sort['fields']:
            allowed_sort_fields = fields.keys()
            # Check if every element in sort['fields'] is in allowed_sort_fields
            if not set(sort['fields']).issubset(allowed_sort_fields):
                uncorrect_fields = [
                    str(x)
                    for x in set(sort['fields']) - set(allowed_sort_fields)
                ]
                raise WazuhException(
                    1403, 'Allowed sort fields: {0}. Fields: {1}'.format(
                        allowed_sort_fields, uncorrect_fields))

            query += ' ORDER BY ' + ','.join([
                '{0} {1}'.format(fields[i], sort['order'])
                for i in sort['fields']
            ])
        else:
            query += ' ORDER BY date {0}'.format(sort['order'])
    else:
        query += ' ORDER BY date DESC'

    if limit:
        if limit > common.maximum_database_limit:
            raise WazuhException(1405, str(limit))
        query += ' LIMIT :offset,:limit'
        request['offset'] = offset
        request['limit'] = limit
    elif limit == 0:
        # limit=0 is explicitly rejected; limit=None means "no limit".
        raise WazuhException(1406)

    if summary:
        select = ["max(date)", "mtime", "fim_event.type", "path"]
    else:
        select = [
            "date", "mtime", "fim_event.type", "path", "size", "perm", "uid",
            "gid", "md5", "sha1", "uname", "gname", "inode"
        ]

    conn.execute(query.format(','.join(select)), request)

    data['items'] = []

    # Build the result dicts; NULL columns are skipped (except mtime, which
    # falls back to the scan date). 'row' avoids shadowing the builtin tuple.
    for row in conn:
        data_tuple = {}

        if row[0] is not None:
            data_tuple['scanDate'] = row[0]
        if row[1] is not None:
            data_tuple['modificationDate'] = row[1]  # modificationDate
        else:
            data_tuple['modificationDate'] = row[0]  # scanDate
        if row[2] is not None:
            data_tuple['event'] = row[2]
        if row[3] is not None:
            data_tuple['file'] = row[3]

        if not summary:
            # Translate the octal mode into an "-rwxr-xr-x" style string;
            # a NULL perm column raises TypeError from int().
            try:
                permissions = filemode(int(row[5], 8))
            except TypeError:
                permissions = None

            if row[4] is not None:
                data_tuple['size'] = row[4]
            if row[8] is not None:
                data_tuple['md5'] = row[8]
            if row[9] is not None:
                data_tuple['sha1'] = row[9]
            if row[12] is not None:
                data_tuple['inode'] = row[12]

            # Ownership/permission fields only make sense for non-Windows.
            if not windows_agent:
                if row[6] is not None:
                    data_tuple['uid'] = row[6]
                if row[7] is not None:
                    data_tuple['gid'] = row[7]

                if row[10] is not None:
                    data_tuple['user'] = row[10]
                if row[11] is not None:
                    data_tuple['group'] = row[11]

                if row[5] is not None:
                    data_tuple['octalMode'] = row[5]
                if permissions:
                    data_tuple['permissions'] = permissions

        data['items'].append(data_tuple)

    return data
Example #8
0
def get_solver_node(input_json, master_name):
    """
    Gets the node(s) that can solve a request, the node(s) that has all the necessary information to answer it.
    Only called when the request type is 'master_distributed' and the node_type is master.

    :param input_json: API request parameters and description
    :param master_name: name of the master node
    :return: node name and whether the result is list or not
    """
    arguments = input_json['arguments']
    select_node = {'fields': ['node_name']}

    if 'agent_id' in arguments:
        requested = arguments['agent_id']

        # Single agent: resolve the node it is reporting to.
        if not isinstance(requested, list):
            solver = Agent.get_agent(requested,
                                     select=select_node)['node_name']
            return solver, False

        # Multiple agents: group the known IDs by the node they report to.
        agents = Agent.get_agents_overview(
            select=select_node,
            limit=None,
            filters={'id': requested},
            sort={'fields': ['node_name'], 'order': 'desc'})['items']
        by_node = {}
        for node, grouped in groupby(agents, key=itemgetter('node_name')):
            by_node[node] = [item['id'] for item in grouped]

        # IDs no node knows about are attributed to the master's entry.
        unknown_ids = set(requested) - set(map(itemgetter('id'), agents))
        if unknown_ids:
            by_node.setdefault(master_name, []).extend(unknown_ids)

        return by_node, True

    if 'node_id' in arguments:
        # The caller named the node explicitly; consume the argument.
        return arguments.pop('node_id'), False

    # agents, syscheck, rootcheck and syscollector
    # API calls that affect all agents. For example, PUT/agents/restart, DELETE/rootcheck, etc...
    agents = Agent.get_agents_overview(
        select=select_node,
        limit=None,
        sort={'fields': ['node_name'], 'order': 'desc'})['items']
    every_node = {
        node: []
        for node, _ in groupby(agents, key=itemgetter('node_name'))
    }
    return every_node, True
Example #9
0
    def _update_worker_files_in_master(self, json_file, zip_dir_path,
                                       worker_name, cluster_control_key,
                                       cluster_control_subkey, tag):
        """Update the master's copies of the files received from a worker.

        :param json_file: filename -> metadata dict describing each file.
        :param zip_dir_path: directory where the worker's zip was decompressed.
        :param worker_name: name of the worker the files come from.
        :param cluster_control_key: healthcheck key updated when finished.
        :param cluster_control_subkey: healthcheck subkey updated when finished.
        :param tag: string prepended to every log message.
        """
        def update_file(n_errors,
                        name,
                        data,
                        file_time=None,
                        content=None,
                        agents=None):
            # Update one file under an exclusive lock. Returns the updated
            # error counters and whether this particular update failed.
            # Full path
            full_path = common.ossec_path + name
            error_updating_file = False

            # Cluster items information: write mode and umask
            w_mode = cluster_items[data['cluster_item_key']]['write_mode']
            umask = cluster_items[data['cluster_item_key']]['umask']

            if content is None:
                # Non-merged file: read its content from the decompressed zip.
                zip_path = "{}/{}".format(zip_dir_path, name)
                with open(zip_path, 'rb') as f:
                    content = f.read()

            # Per-file lock so concurrent tasks never write the same path.
            lock_full_path = "{}/queue/cluster/lockdir/{}.lock".format(
                common.ossec_path, os.path.basename(full_path))
            lock_file = open(lock_full_path, 'a+')
            try:
                fcntl.lockf(lock_file, fcntl.LOCK_EX)
                _update_file(file_path=name,
                             new_content=content,
                             umask_int=umask,
                             mtime=file_time,
                             w_mode=w_mode,
                             tmp_dir=tmp_path,
                             whoami='master',
                             agents=agents)

            except WazuhException as e:
                logger.debug2("{}: Warning updating file '{}': {}".format(
                    tag, name, e))
                error_tag = 'warnings'
                error_updating_file = True
            except Exception as e:
                logger.debug2("{}: Error updating file '{}': {}".format(
                    tag, name, e))
                error_tag = 'errors'
                error_updating_file = True

            if error_updating_file:
                n_errors[error_tag][data['cluster_item_key']] = 1 if not n_errors[error_tag].get(data['cluster_item_key']) \
                                                                  else n_errors[error_tag][data['cluster_item_key']] + 1

            fcntl.lockf(lock_file, fcntl.LOCK_UN)
            lock_file.close()

            return n_errors, error_updating_file

        # tmp path
        tmp_path = "/queue/cluster/{}/tmp_files".format(worker_name)
        cluster_items = get_cluster_items()['files']
        n_merged_files = 0
        n_errors = {'errors': {}, 'warnings': {}}

        # create temporary directory for lock files
        lock_directory = "{}/queue/cluster/lockdir".format(common.ossec_path)
        if not os.path.exists(lock_directory):
            mkdir_with_mode(lock_directory)

        # Known agent names/IDs let _update_file discard per-agent data for
        # agents the master does not know about.
        try:
            agents = Agent.get_agents_overview(select={'fields': ['name']},
                                               limit=None)['items']
            agent_names = set(map(itemgetter('name'), agents))
            agent_ids = set(map(itemgetter('id'), agents))
        except Exception as e:
            # Best effort: empty sets instead of failing the whole sync.
            logger.debug2("{}: Error getting agent ids and names: {}".format(
                tag, e))
            agent_names, agent_ids = {}, {}

        before = time.time()
        try:
            for filename, data in json_file.items():
                if data['merged']:
                    # Merged files bundle many per-agent files in one blob;
                    # unmerge and apply each one individually.
                    for file_path, file_data, file_time in unmerge_agent_info(
                            data['merge_type'], zip_dir_path,
                            data['merge_name']):
                        n_errors, error_updating_file = update_file(
                            n_errors, file_path, data, file_time, file_data,
                            (agent_names, agent_ids))
                        if not error_updating_file:
                            n_merged_files += 1

                        if self.stopper.is_set():
                            # Manager is shutting down: stop early.
                            break
                else:
                    n_errors, _ = update_file(n_errors, filename, data)

        except Exception as e:
            logger.error("{}: Error updating worker files: '{}'.".format(
                tag, e))
            raise e

        after = time.time()
        logger.debug(
            "{0}: Time updating worker files: {1:.2f}s. Total of updated worker files: {2}."
            .format(tag, after - before, n_merged_files))

        if sum(n_errors['errors'].values()) > 0:
            # Fixed: was logging.error, which writes to the root logger and
            # bypasses the cluster's logger configuration used everywhere else.
            logger.error("{}: Errors updating worker files: {}".format(
                tag, ' | '.join([
                    '{}: {}'.format(key, value)
                    for key, value in n_errors['errors'].items()
                ])))
        if sum(n_errors['warnings'].values()) > 0:
            for key, value in n_errors['warnings'].items():
                if key == '/queue/agent-info/':
                    logger.debug2(
                        "Received {} agent statuses for non-existent agents. Skipping."
                        .format(value))
                elif key == '/queue/agent-groups/':
                    logger.debug2(
                        "Received {} group assignments for non-existent agents. Skipping."
                        .format(value))

        # Save info for healthcheck
        self.manager.set_worker_status(worker_id=self.name,
                                       key=cluster_control_key,
                                       subkey=cluster_control_subkey,
                                       status=n_merged_files)
Example #10
0
def last_scan(agent_id):
    """
    Gets the last scan of the agent.

    :param agent_id: Agent ID.
    :return: Dictionary: end, start.
    """
    my_agent = Agent(agent_id)
    # if agent status is never connected, a KeyError happens
    try:
        agent_version = my_agent.get_basic_information(
            select={'fields': ['version']})['version']
    except KeyError:
        # if the agent is never connected, it won't have either version (key error) or last scan information.
        return {'start': 'ND', 'end': 'ND'}

    if WazuhVersion(agent_version) < WazuhVersion('Wazuh v3.7.0'):
        # Pre-3.7.0 agents: read scan times from the legacy per-agent
        # SQLite database.
        db_agent = glob('{0}/{1}-*.db'.format(common.database_path_agents,
                                              agent_id))
        if not db_agent:
            raise WazuhException(1600)
        else:
            db_agent = db_agent[0]
        conn = Connection(db_agent)

        data = {}
        # end time
        query = "SELECT max(date_last) FROM pm_event WHERE log = 'Ending rootcheck scan.'"
        conn.execute(query)
        # 'row' instead of 'tuple': the original shadowed the builtin.
        for row in conn:
            data['end'] = row['max(date_last)'] if row[
                'max(date_last)'] is not None else "ND"

        # start time
        query = "SELECT max(date_last) FROM pm_event WHERE log = 'Starting rootcheck scan.'"
        conn.execute(query)
        for row in conn:
            data['start'] = row['max(date_last)'] if row[
                'max(date_last)'] is not None else "ND"

        return data
    else:
        # 3.7.0+ agents: scan information lives in the 'scan_info' table
        # queried through wazuh-db.
        fim_scan_info = WazuhDBQuerySyscheck(
            agent_id=agent_id,
            query='module=fim',
            offset=0,
            sort=None,
            search=None,
            limit=common.database_limit,
            select={
                'fields': ['end', 'start']
            },
            fields={
                'end': 'end_scan',
                'start': 'start_scan',
                'module': 'module'
            },
            table='scan_info',
            default_sort_field='start_scan').run()['items'][0]

        return fim_scan_info
Example #11
0
def get_os_agent(agent_id,
                 offset=0,
                 limit=common.database_limit,
                 select=None,
                 search=None,
                 sort=None,
                 filters=None,
                 q='',
                 nested=True):
    """
    Get info about an agent's OS

    :param agent_id: Agent ID
    :param offset: First item to return
    :param limit: Maximum number of items to return
    :param select: Select fields to return. Format: {"fields": ["field1", "field2"]}
    :param search: Looks for items with the specified string. Format: {"fields": ["field1", "field2"]}
    :param sort: Sorts the items. Format: {"fields": ["field1", "field2"], "order": "asc|desc"}
    :param filters: Defines field filters required by the user. Format: {"field1": "value1", "field2": ["value2","value3"]}
    :param q: Defines query to filter
    :param nested: Fields to nest

    :return: Dictionary: {'items': array of items, 'totalItems': Number of items (without applying the limit)}
    """
    # Use None defaults and normalize here: mutable default arguments ({}) are
    # shared across calls in Python. Downstream still receives empty dicts, so
    # behavior is unchanged for existing callers.
    select = {} if select is None else select
    search = {} if search is None else search
    sort = {} if sort is None else sort
    filters = {} if filters is None else filters

    # Ensure the agent exists before querying its OS info.
    agent_obj = Agent(agent_id)
    agent_obj.get_basic_information()

    # The osinfo fields in database are different in Windows and Linux
    os_name = agent_obj.get_agent_attr('os_name')
    windows_fields = {
        'hostname': 'hostname',
        'os_version': 'os_version',
        'os_name': 'os_name',
        'architecture': 'architecture',
        'os_major': 'os_major',
        'os_minor': 'os_minor',
        'os_build': 'os_build',
        'version': 'version',
        'scan_time': 'scan_time',
        'scan_id': 'scan_id'
    }
    # Linux exposes everything Windows does plus a few extra columns.
    linux_fields = {
        **windows_fields,
        **{
            'os_codename': 'os_codename',
            'os_platform': 'os_platform',
            'sysname': 'sysname',
            'release': 'release'
        }
    }

    valid_select_fields = windows_fields if 'Windows' in os_name else linux_fields

    return get_item_agent(agent_id=agent_id,
                          offset=offset,
                          limit=limit,
                          select=select,
                          nested=nested,
                          search=search,
                          sort=sort,
                          filters=filters,
                          valid_select_fields=valid_select_fields,
                          table='sys_osinfo',
                          query=q)
Beispiel #12
0
def files(agent_id=None, event=None, filename=None, filetype='file', md5=None, sha1=None, hash=None, summary=False, offset=0, limit=common.database_limit, sort=None, search=None):
    """
    Return a list of files from the database that match the filters

    :param agent_id: Agent ID.
    :param event: Filters by event: added, readded, modified, deleted.
    :param filename: Filters by filename.
    :param filetype: Filters by filetype: file or registry.
    :param md5: Filters by md5 hash.
    :param sha1: Filters by sha1 hash.
    :param hash: Filters by md5 or sha1 hash.
    :param summary: Returns a summary grouping by filename.
    :param offset: First item to return.
    :param limit: Maximum number of items to return.
    :param sort: Sorts the items. Format: {"fields":["field1","field2"],"order":"asc|desc"}.
    :param search: Looks for items with the specified string.
    :return: Dictionary: {'items': array of items, 'totalItems': Number of items (without applying the limit)}
    :raises WazuhException: 1600 if the agent DB is missing, 1403 on a bad sort field.
    """

    # Connection: locate the agent's sqlite database file.
    db_agent = glob('{0}/{1}-*.db'.format(common.database_path_agents, agent_id))
    if not db_agent:
        raise WazuhException(1600)
    db_agent = db_agent[0]

    conn = Connection(db_agent)

    # uid/gid/owner/group/permissions are not meaningful for Windows agents,
    # so they are omitted from the per-row output below.
    agent_os = Agent(agent_id).get_basic_information()['os']
    windows_agent = "windows" in agent_os.lower()

    # API field name -> database column, used to build the ORDER BY clause.
    # NOTE(review): the 'user' mapping was a redacted placeholder ('******') in
    # the source; restored to the 'uname' column to match the select list below.
    fields = {'scanDate': 'date', 'modificationDate': 'mtime', 'file': 'path', 'size': 'size', 'user': 'uname', 'group': 'gname'}

    # Query
    query = "SELECT {0} FROM fim_event, fim_file WHERE fim_event.id_file = fim_file.id AND fim_file.type = :filetype"
    request = {'filetype': filetype}

    if event:
        query += ' AND fim_event.type = :event'
        request['event'] = event

    if filename:
        query += ' AND path = :filename'
        request['filename'] = filename

    if md5:
        query += ' AND md5 = :md5'
        request['md5'] = md5

    if sha1:
        query += ' AND sha1 = :sha1'
        request['sha1'] = sha1

    if hash:
        query += ' AND (md5 = :hash OR sha1 = :hash)'
        request['hash'] = hash

    if search:
        query += " AND NOT" if bool(search['negation']) else ' AND'
        query += " (" + " OR ".join(x + ' LIKE :search' for x in ('path', "date", 'size', 'md5', 'sha1', 'uname', 'gname', 'inode', 'perm')) + " )"
        request['search'] = '%{0}%'.format(search['value'])

    # Total items (count before applying ORDER BY / LIMIT).
    if summary:
        query += ' group by path'
        conn.execute("SELECT COUNT(*) FROM ({0}) AS TEMP".format(query.format("max(date)")), request)
    else:
        conn.execute(query.format('COUNT(*)'), request)

    data = {'totalItems': conn.fetch()[0]}

    # Sorting
    if sort:
        allowed_sort_fields = fields.keys()
        for sf in sort['fields']:
            if sf not in allowed_sort_fields:
                raise WazuhException(1403, 'Allowed sort fields: {0}. Field: {1}'.format(allowed_sort_fields, sf))

        query += ' ORDER BY ' + ','.join(['{0} {1}'.format(fields[i], sort['order']) for i in sort['fields']])
    else:
        query += ' ORDER BY date DESC'

    query += ' LIMIT :offset,:limit'
    request['offset'] = offset
    request['limit'] = limit

    # Column order here fixes the positional indexes used in the row loop below.
    if summary:
        select = ["max(date)", "mtime", "fim_event.type", "path"]
    else:
        select = ["date", "mtime", "fim_event.type", "path", "size", "perm", "uid", "gid", "md5", "sha1", "uname", "gname", "inode"]

    conn.execute(query.format(','.join(select)), request)

    data['items'] = []

    for row in conn:
        data_tuple = {}

        if row[0] is not None:
            data_tuple['scanDate'] = row[0]
        # Fall back to the scan date when no modification time was recorded.
        if row[1] is not None:
            data_tuple['modificationDate'] = row[1]  # modificationDate
        else:
            data_tuple['modificationDate'] = row[0]  # scanDate
        if row[2] is not None:
            data_tuple['event'] = row[2]
        if row[3] is not None:
            data_tuple['file'] = row[3]

        if not summary:
            # perm is stored as an octal string; filemode renders 'rwxr-xr-x' style.
            try:
                permissions = filemode(int(row[5], 8))
            except TypeError:
                permissions = None

            if row[4] is not None:
                data_tuple['size'] = row[4]
            if row[8] is not None:
                data_tuple['md5'] = row[8]
            if row[9] is not None:
                data_tuple['sha1'] = row[9]
            if row[12] is not None:
                data_tuple['inode'] = row[12]

            # Ownership/permission columns only make sense on non-Windows agents.
            if not windows_agent:
                if row[6] is not None:
                    data_tuple['uid'] = row[6]
                if row[7] is not None:
                    data_tuple['gid'] = row[7]

                if row[10] is not None:
                    data_tuple['user'] = row[10]
                if row[11] is not None:
                    data_tuple['group'] = row[11]

                if row[5] is not None:
                    data_tuple['octalMode'] = row[5]
                if permissions:
                    data_tuple['permissions'] = permissions

        data['items'].append(data_tuple)

    return data
Beispiel #13
0
# Example script: list all registered agents through the Wazuh Python framework.
# Run from a Wazuh installation; the framework library path may need:
#    - export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/var/ossec/api/framework/lib

from sys import path, exit
import json
# The framework location depends on the working directory; the alternatives
# below are kept for reference.
# cwd = /var/ossec/api/framework/examples
#framework_path = '{0}'.format(path[0][:-9])
# cwd = /var/ossec/api
#framework_path = '{0}/framework'.format(path[0])
# Default path
framework_path = '/var/ossec/api/framework'
path.append(framework_path)

# Abort early if the wazuh framework package cannot be imported.
try:
    from wazuh import Wazuh
    from wazuh.agent import Agent
except Exception as e:
    print("No module 'wazuh' found.")
    exit()

if __name__ == "__main__":

    # Creating wazuh object
    # It is possible to specify the ossec path (path argument) or get /etc/ossec-init.conf (get_init argument)
    print("\nWazuh:")
    myWazuh = Wazuh(get_init=True)
    print(myWazuh)

    # Dump the full agent overview (all statuses) as pretty-printed JSON.
    print("\nAgents:")
    agents = Agent.get_agents_overview(status="all")
    print(json.dumps(agents, indent=4, sort_keys=True))
Beispiel #14
0
 def test_agent_overview(self):
     """The overview must report more than one agent and a non-empty item list."""
     overview = Agent.get_agents_overview()
     self.assertGreater(overview['totalItems'], 1)
     self.assertTrue(overview['items'], 'No agents: items')
Beispiel #15
0
def show_group(agent_id):
    """
    Print the group that the given agent belongs to.

    :param agent_id: Agent ID whose group is reported.
    :return: None (output goes to stdout).
    """
    agent_info = Agent(id=agent_id).get_basic_information()

    # An agent may have no group assigned; report "Null" in that case.
    str_group = agent_info.get('group', "Null")
    print("The agent '{0}' with ID '{1}' has the group: '{2}'.".format(
        agent_info['name'], agent_info['id'], str_group))
Beispiel #16
0
    def remove_bulk_agents(agent_ids_list, logger):
        """
        Removes files created by agents in worker nodes. This function doesn't remove agents from client.keys since the
        client.keys file is overwritten by the master node.

        :param agent_ids_list: List of agents ids to remove.
        :param logger: Logger used to report progress (must provide debug2).
        :return: None.
        """
        def remove_agent_file_type(glob_args, agent_args, agent_files):
            # For each file-path template, delete the intersection between the
            # files matching the wildcard pattern and the paths built from the
            # attributes (name/ip/id) of the agents being removed.
            for filetype in agent_files:
                for agent_file in set(glob.iglob(filetype.format(common.ossec_path, *glob_args))) & \
                                  {filetype.format(common.ossec_path, *(a[arg] for arg in agent_args)) for a in
                                   agent_info}:
                    logger.debug2("Removing {}".format(agent_file))
                    if os.path.isdir(agent_file):
                        shutil.rmtree(agent_file)
                    else:
                        os.remove(agent_file)

        if not agent_ids_list:
            return  # the function doesn't make sense if there is no agents to remove

        logger.info("Removing files from {} agents".format(
            len(agent_ids_list)))
        logger.debug("Agents to remove: {}".format(', '.join(agent_ids_list)))
        # the agents must be removed in groups of 997: 999 is the limit of SQL variables per query. Limit and offset are
        # always included in the SQL query, so that leaves 997 variables as limit.
        # zip_longest over 997 copies of the same iterator yields fixed-size
        # batches, padding the last batch with '0'.
        for agents_ids_sublist in itertools.zip_longest(*itertools.repeat(
                iter(agent_ids_list), 997),
                                                        fillvalue='0'):
            # Drop the '0' padding added by zip_longest.
            agents_ids_sublist = list(
                filter(lambda x: x != '0', agents_ids_sublist))
            # Get info from DB
            agent_info = Agent.get_agents_overview(
                q=",".join(["id={}".format(i) for i in agents_ids_sublist]),
                select={'fields': ['ip', 'id', 'name']},
                limit=None)['items']
            logger.debug2("Removing files from agents {}".format(
                ', '.join(agents_ids_sublist)))

            # Remove agent files that need agent name and ip
            agent_files = [
                '{}/queue/agent-info/{}-{}',
                '{}/queue/rootcheck/({}) {}->rootcheck'
            ]
            remove_agent_file_type(('*', '*'), ('name', 'ip'), agent_files)

            # remove agent files that need agent name
            agent_files = ['{}/queue/diff/{}']
            remove_agent_file_type(('*', ), ('name', ), agent_files)

            # Remove agent files that only need agent id
            agent_files = [
                '{}/queue/agent-groups/{}', '{}/queue/rids/{}',
                '{}/queue/db/{}.db', '{}/queue/db/{}.db-wal',
                '{}/queue/db/{}.db-shm'
            ]
            remove_agent_file_type(('*', ), ('id', ), agent_files)

            # remove agent files that need agent name and id
            agent_files = ['{}/var/db/agents/{}-{}.db']
            remove_agent_file_type(('*', '*'), ('id', 'name'), agent_files)

            # remove agent from groups
            db_global = glob.glob(common.database_path_global)
            if not db_global:
                raise WazuhException(1600)

            # Delete the group membership rows for this batch in one statement,
            # binding one named parameter per agent id.
            conn = Connection(db_global[0])
            agent_ids_db = {
                'id_agent{}'.format(i): int(i)
                for i in agents_ids_sublist
            }
            conn.execute(
                'delete from belongs where {}'.format(' or '.join([
                    'id_agent = :{}'.format(i) for i in agent_ids_db.keys()
                ])), agent_ids_db)
            conn.commit()
        logger.info("Agent files removed")
Beispiel #17
0
# Example script: list all agents and register a new test agent through the
# Wazuh Python framework.
import sys
import json

# Abort early if the wazuh framework package cannot be imported.
try:
    from wazuh import Wazuh
    from wazuh.agent import Agent
except Exception as e:
    print("No module 'wazuh' found.")
    sys.exit()

if __name__ == "__main__":

    # Creating wazuh object
    # It is possible to specify the ossec path (path argument) or get /etc/ossec-init.conf (get_init argument)
    print("\nWazuh:")
    myWazuh = Wazuh(get_init=True)
    print(myWazuh)

    # Dump the full agent overview (all statuses) as pretty-printed JSON.
    print("\nAgents:")
    agents = Agent.get_agents_overview(status="all")
    print(json.dumps(agents, indent=4, sort_keys=True))

    # Register a new agent, then show its id, key and full record.
    print("\nAdding 'WazuhFrameworkTest':")
    agent = Agent()
    agent_id = agent.add("WazuhFrameworkTest", "Any")
    print("\nAgent added with ID: {0}".format(agent_id))
    print("\nAgent key: {0}".format(agent.get_key()))
    agent.get()
    print("\nAgent info:")
    print(json.dumps(agent.to_dict(), indent=4, sort_keys=True))
Beispiel #18
0
def _get_agents_status():
    """Print a table with the status of every agent in the cluster."""
    # The original used a Python 2 print statement, which is a SyntaxError on
    # Python 3; the parenthesized call works identically on both versions.
    print(pprint_table(data=Agent.get_agents_status_cluster(),
                       headers=["ID", "IP", "Name", "Status", "Node name"],
                       show_header=True))
Beispiel #19
0
    def remove_bulk_agents(agent_ids_list, logger):
        """
        Removes files created by agents in worker nodes. This function doesn't remove agents from client.keys since the
        client.keys file is overwritten by the master node.

        :param agent_ids_list: List of agents ids to remove.
        :param logger: Logger used to report progress (must provide debug2).
        :return: None.
        """
        def remove_agent_file_type(agent_files):
            # For each path template, delete the intersection between the files
            # matching the wildcard expansion and the concrete paths built from
            # the id/name/ip of the agents being removed.
            for filetype in agent_files:

                filetype_glob = filetype.format(ossec_path=common.ossec_path,
                                                id='*',
                                                name='*',
                                                ip='*')
                filetype_agent = {
                    filetype.format(ossec_path=common.ossec_path,
                                    id=a['id'],
                                    name=a['name'],
                                    ip=a['ip'])
                    for a in agent_info
                }

                for agent_file in set(
                        glob.iglob(filetype_glob)) & filetype_agent:
                    logger.debug2("Removing {}".format(agent_file))
                    if os.path.isdir(agent_file):
                        shutil.rmtree(agent_file)
                    else:
                        os.remove(agent_file)

        if not agent_ids_list:
            return  # the function doesn't make sense if there is no agents to remove

        logger.info("Removing files from {} agents".format(
            len(agent_ids_list)))
        logger.debug("Agents to remove: {}".format(', '.join(agent_ids_list)))
        # the agents must be removed in groups of 997: 999 is the limit of SQL variables per query. Limit and offset are
        # always included in the SQL query, so that leaves 997 variables as limit.
        # zip_longest over 997 copies of the same iterator yields fixed-size
        # batches, padding the last batch with '0'.
        for agents_ids_sublist in itertools.zip_longest(*itertools.repeat(
                iter(agent_ids_list), 997),
                                                        fillvalue='0'):
            # Drop the '0' padding added by zip_longest.
            agents_ids_sublist = list(
                filter(lambda x: x != '0', agents_ids_sublist))
            # Get info from DB
            agent_info = Agent.get_agents_overview(
                q=",".join(["id={}".format(i) for i in agents_ids_sublist]),
                select={'fields': ['ip', 'id', 'name']},
                limit=None)['items']
            logger.debug2("Removing files from agents {}".format(
                ', '.join(agents_ids_sublist)))

            files_to_remove = [
                '{ossec_path}/queue/agent-info/{name}-{ip}',
                '{ossec_path}/queue/rootcheck/({name}) {ip}->rootcheck',
                '{ossec_path}/queue/diff/{name}',
                '{ossec_path}/queue/agent-groups/{id}',
                '{ossec_path}/queue/rids/{id}',
                '{ossec_path}/var/db/agents/{name}-{id}.db'
            ]
            remove_agent_file_type(files_to_remove)

            logger.debug2("Removing agent group assigments from database")
            # remove agent from groups
            db_global = glob.glob(common.database_path_global)
            if not db_global:
                raise WazuhException(1600)

            # Delete the group membership rows for this batch in one statement,
            # binding one named parameter per agent id.
            conn = Connection(db_global[0])
            agent_ids_db = {
                'id_agent{}'.format(i): int(i)
                for i in agents_ids_sublist
            }
            conn.execute(
                'delete from belongs where {}'.format(' or '.join([
                    'id_agent = :{}'.format(i) for i in agent_ids_db.keys()
                ])), agent_ids_db)
            conn.commit()

            # Tell wazuhbd to delete agent database
            wdb_conn = WazuhDBConnection()
            wdb_conn.delete_agents_db(agents_ids_sublist)

        logger.info("Agent files removed")
Beispiel #20
0
def get_agents_without_group(offset=0,
                             limit=common.database_limit,
                             sort=None,
                             search=None,
                             select=None):
    """
    Gets the agents that do not belong to any group.

    :param offset: First item to return.
    :param limit: Maximum number of items to return.
    :param sort: Sorts the items. Format: {"fields":["field1","field2"],"order":"asc|desc"}.
    :param search: Looks for items with the specified string.
    :param select: Fields to return. Format: {"fields":["field1","field2"]}.
    :return: Dictionary: {'items': array of items, 'totalItems': Number of items (without applying the limit)}
    :raises WazuhException: 1600 if the global DB is missing, 1724 on a bad
                            select field, 1403 on a bad sort field.
    """

    # Connect DB
    db_global = glob(common.database_path_global)
    if not db_global:
        raise WazuhException(1600)

    conn = Connection(db_global[0])
    valid_select_fields = {
        "id", "name", "ip", "last_keepalive", "os_name", "os_version",
        "os_platform", "os_uname", "version", "config_sum", "merged_sum",
        "manager_host", "status"
    }
    # fields like status need to retrieve others to be properly computed.
    dependent_select_fields = {'status': {'last_keepalive', 'version'}}
    search_fields = {
        "id", "name", "os_name", "ip", "status", "version", "os_platform",
        "manager_host"
    }

    # Init query: agents with no group, excluding the manager (id 0).
    query = "SELECT {0} FROM agent WHERE `group` IS NULL AND id != 0"
    fields = {'id': 'id', 'name': 'name'}  # field: db_column
    request = {}

    # Select
    if select:
        select_fields_param = set(select['fields'])

        if not select_fields_param.issubset(valid_select_fields):
            uncorrect_fields = select_fields_param - valid_select_fields
            raise WazuhException(1724, "Allowed select fields: {0}. Fields {1}".\
                    format(', '.join(list(valid_select_fields)), ', '.join(uncorrect_fields)))

        select_fields = select_fields_param
    else:
        select_fields = valid_select_fields

    # add dependent select fields to the database select query
    db_select_fields = set()
    for dependent, dependent_fields in dependent_select_fields.items():
        if dependent in select_fields:
            db_select_fields |= dependent_fields
    db_select_fields |= (select_fields - set(dependent_select_fields.keys()))

    # Search
    if search:
        query += " AND NOT" if bool(search['negation']) else ' AND'
        query += " (" + " OR ".join(x + ' LIKE :search'
                                    for x in search_fields) + " )"
        request['search'] = '%{0}%'.format(
            int(search['value']) if search['value'].isdigit(
            ) else search['value'])

    # Count
    conn.execute(query.format('COUNT(*)'), request)
    data = {'totalItems': conn.fetch()[0]}

    # Sorting
    if sort:
        if sort['fields']:
            allowed_sort_fields = db_select_fields
            # Check if every element in sort['fields'] is in allowed_sort_fields.
            if not set(sort['fields']).issubset(allowed_sort_fields):
                raise WazuhException(1403, 'Allowed sort fields: {0}. Fields: {1}'.\
                    format(allowed_sort_fields, sort['fields']))

            order_str_fields = [
                '{0} {1}'.format(fields[i], sort['order'])
                for i in sort['fields']
            ]
            query += ' ORDER BY ' + ','.join(order_str_fields)
        else:
            query += ' ORDER BY id {0}'.format(sort['order'])
    else:
        query += ' ORDER BY id ASC'

    # OFFSET - LIMIT
    if limit:
        query += ' LIMIT :offset,:limit'
        request['offset'] = offset
        request['limit'] = limit

    # Data query
    conn.execute(query.format(','.join(db_select_fields)), request)

    # Build plain dicts, dropping NULL/empty columns.
    non_nested = [{field: value for field, value in zip(db_select_fields, row)
                   if value} for row in conn]

    # BUG FIX: these post-processing steps used lazy map() calls before, whose
    # side effects never executed under Python 3. Use plain loops instead.
    if 'id' in select_fields:
        for agent in non_nested:
            agent['id'] = str(agent['id']).zfill(3)

    if 'status' in select_fields:
        try:
            for agent in non_nested:
                agent['status'] = Agent.calculate_status(
                    agent['last_keepalive'], agent.get('version') is None)
        except KeyError:
            # last_keepalive may have been dropped as NULL above.
            pass

    # return only the fields requested by the user (saved in select_fields) and not the dependent ones
    non_nested = [{k: v
                   for k, v in d.items() if k in select_fields}
                  for d in non_nested]

    data['items'] = [plain_dict_to_nested_dict(d, ['os']) for d in non_nested]

    return data
Beispiel #21
0
def files(agent_id=None,
          summary=False,
          offset=0,
          limit=common.database_limit,
          sort=None,
          search=None,
          select=None,
          filters=None):
    """
    Return a list of files from the database that match the filters

    :param agent_id: Agent ID.
    :param summary: Returns a summary grouping by filename.
    :param offset: First item to return.
    :param limit: Maximum number of items to return.
    :param sort: Sorts the items. Format: {"fields":["field1","field2"],"order":"asc|desc"}.
    :param search: Looks for items with the specified string.
    :param select: Fields to return. Format: {"fields":["field1","field2"]}.
    :param filters: Fields to filter by. The special key 'hash' matches md5, sha1 or sha256.
    :return: Dictionary: {'items': array of items, 'totalItems': Number of items (without applying the limit)}
    :raises WazuhException: 1724 if a selected field is not allowed.
    """
    parameters = {
        "date", "mtime", "file", "size", "perm", "uname", "gname", "md5",
        "sha1", "sha256", "inode", "gid", "uid", "type", "attributes",
        "symbolic_path"
    }
    summary_parameters = {"date", "mtime", "file"}

    if select is None:
        select = summary_parameters if summary else parameters
    else:
        select = set(select['fields'])
        if not select.issubset(parameters):
            raise WazuhException(
                1724, "Allowed select fields: {0}. Fields: {1}.".format(
                    ', '.join(parameters), ','.join(select - parameters)))

    # BUG FIX: the original used a shared mutable default (filters={}) and then
    # deleted 'hash' from it in place, mutating the caller's dict. Copy instead.
    filters = {} if filters is None else dict(filters)

    if 'hash' in filters:
        # 'hash' matches any of the three hash columns (OR semantics).
        or_filters = {
            'md5': filters['hash'],
            'sha1': filters['hash'],
            'sha256': filters['hash']
        }
        del filters['hash']
    else:
        or_filters = {}

    items, totalItems = Agent(agent_id)._load_info_from_agent_db(
        table='fim_entry',
        select=select,
        offset=offset,
        limit=limit,
        sort=sort,
        search=search,
        filters=filters,
        count=True,
        or_filters=or_filters)
    # Render epoch timestamps as local-time strings.
    for date_field in select & {'mtime', 'date'}:
        for item in items:
            # date fields with value 0 are returned as ND
            item[date_field] = "ND" if item[date_field] == 0 \
                                    else datetime.fromtimestamp(float(item[date_field])).strftime('%Y-%m-%d %H:%M:%S')

    return {'totalItems': totalItems, 'items': items}
Beispiel #22
0
 def test_agent_overview(self):
     """The overview must report more than one agent and a non-empty item list."""
     overview = Agent.get_agents_overview()
     self.assertGreater(overview["totalItems"], 1)
     self.assertTrue(overview["items"], "No agents: items")
Beispiel #23
0
def main():
    """
    Upgrade a single agent via WPK (from a repository or a custom file) and
    wait for it to reconnect, reporting progress unless --silent was given.
    """
    # Capture Ctrl + C
    signal(SIGINT, signal_handler)

    # Check arguments
    if args.list_outdated:
        list_outdated()
        exit(0)

    if not args.agent:
        arg_parser.print_help()
        exit(0)

    if args.silent:
        args.debug = False

    use_http = bool(args.http)

    agent = Agent(id=args.agent)
    agent._load_info_from_DB()

    # The agent-info file's mtime is polled later to detect the reconnection.
    agent_info = "{0}/queue/agent-info/{1}-{2}".format(common.ossec_path, agent.name, agent.registerIP)
    if not os.path.isfile(agent_info):
        raise WazuhException(1720)

    # Evaluate if the version is correct (raw string avoids '\.' escape warnings).
    if args.version is not None:
        pattern = re.compile(r"v[0-9]+\.[0-9]+\.[0-9]+")
        if not pattern.match(args.version):
            raise WazuhException(1733, "Version received: {0}".format(args.version))

    if args.chunk_size is not None:
        if args.chunk_size < 1 or args.chunk_size > 64000:
            raise WazuhException(1744, "Chunk defined: {0}".format(args.chunk_size))

    # Custom WPK file
    if args.file:
        upgrade_command_result = agent.upgrade_custom(file_path=args.file,
                                                      installer=args.execute if args.execute else "upgrade.sh",
                                                      debug=args.debug,
                                                      show_progress=print_progress if not args.silent else None,
                                                      chunk_size=args.chunk_size,
                                                      rl_timeout=-1 if args.timeout is None else args.timeout)
        if not args.silent:
            if not args.debug:
                print("\n{0}... Please wait.".format(upgrade_command_result))
            else:
                print(upgrade_command_result)

        # Wait for the agent to reconnect: the agent-info file gets a new mtime.
        counter = 0
        agent_info_stat = os.stat(agent_info).st_mtime

        sleep(10)
        while agent_info_stat == os.stat(agent_info).st_mtime and counter < common.agent_info_retries:
            sleep(common.agent_info_sleep)
            counter += 1

        if agent_info_stat == os.stat(agent_info).st_mtime:
            raise WazuhException(1716, "Timeout waiting for agent reconnection.")

        upgrade_result = agent.upgrade_result(debug=args.debug)
        if not args.silent:
            print(upgrade_result)

    # WPK upgrade file
    else:
        prev_ver = agent.version
        upgrade_command_result = agent.upgrade(wpk_repo=args.repository, debug=args.debug, version=args.version,
                                               force=args.force,
                                               show_progress=print_progress if not args.silent else None,
                                               chunk_size=args.chunk_size,
                                               rl_timeout=-1 if args.timeout is None else args.timeout, use_http=use_http)
        if not args.silent:
            if not args.debug:
                print("\n{0}... Please wait.".format(upgrade_command_result))
            else:
                print(upgrade_command_result)

        # Wait for reconnection. NOTE(review): here the extra sleep(10) happens
        # AFTER the polling loop, unlike the custom-file branch above — kept
        # as-is since the ordering may be intentional.
        counter = 0
        agent_info_stat = os.stat(agent_info).st_mtime

        while agent_info_stat == os.stat(agent_info).st_mtime and counter < common.agent_info_retries:
            sleep(common.agent_info_sleep)
            counter += 1

        if agent_info_stat == os.stat(agent_info).st_mtime:
            raise WazuhException(1716, "Timeout waiting for agent reconnection.")

        sleep(10)
        upgrade_result = agent.upgrade_result(debug=args.debug)
        if not args.silent:
            if not args.debug:
                agent._load_info_from_DB()
                print("Agent upgraded: {0} -> {1}".format(prev_ver, agent.version))
            else:
                print(upgrade_result)
Beispiel #24
0
    def found_terminator(self):
        response = b''.join(self.received_data)
        error = 0
        cmd = self.f.decrypt(response[:common.cluster_sync_msg_size]).decode()
        self.command = cmd.split(" ")

        logging.debug("Command received: {0}".format(self.command))

        if not check_cluster_cmd(self.command, self.node_type):
            logging.error(
                "Received invalid cluster command {0} from {1}".format(
                    self.command[0], self.addr))
            error = 1
            res = "Received invalid cluster command {0}".format(
                self.command[0])

        if error == 0:
            if self.command[0] == list_requests_cluster['node']:
                res = get_node()
            elif self.command[0] == list_requests_cluster['zip']:
                zip_bytes = self.f.decrypt(
                    response[common.cluster_sync_msg_size:])
                res = extract_zip(zip_bytes)
                self.restart = res['restart']
            elif self.command[0] == list_requests_agents['RESTART_AGENTS']:
                args = self.f.decrypt(response[common.cluster_sync_msg_size:])
                args = args.split(" ")
                if (len(args) == 2):
                    agents = args[0].split("-")
                    restart_all = ast.literal_eval(args[1])
                else:
                    agents = None
                    restart_all = ast.literal_eval(args[0])
                cluster_depth = ast.literal_eval(self.command[1]) - 1
                res = Agent.restart_agents(agents, restart_all, cluster_depth)
            elif self.command[0] == list_requests_agents[
                    'AGENTS_UPGRADE_RESULT']:
                args = self.f.decrypt(response[common.cluster_sync_msg_size:])
                args = args.split(" ")
                try:
                    agent = args[0]
                    timeout = args[1]
                    res = Agent.get_upgrade_result(agent, timeout)
                except Exception as e:
                    res = str(e)
            elif self.command[0] == list_requests_agents['AGENTS_UPGRADE']:
                args = self.f.decrypt(response[common.cluster_sync_msg_size:])
                args = args.split(" ")
                agent_id = args[0]
                wpk_repo = ast.literal_eval(args[1])
                version = ast.literal_eval(args[2])
                force = ast.literal_eval(args[3])
                chunk_size = ast.literal_eval(args[4])
                try:
                    res = Agent.upgrade_agent(agent_id, wpk_repo, version,
                                              force, chunk_size)
                except Exception as e:
                    res = str(e)
            elif self.command[0] == list_requests_agents[
                    'AGENTS_UPGRADE_CUSTOM']:
                args = self.f.decrypt(response[common.cluster_sync_msg_size:])
                args = args.split(" ")
                agent_id = args[0]
                file_path = ast.literal_eval(args[1])
                installer = ast.literal_eval(args[2])
                try:
                    res = Agent.upgrade_agent_custom(agent_id, file_path,
                                                     installer)
                except Exception as e:
                    res = str(e)
            elif self.command[0] == list_requests_syscheck[
                    'SYSCHECK_LAST_SCAN']:
                args = self.f.decrypt(response[common.cluster_sync_msg_size:])
                agent = args.split(" ")
                res = syscheck.last_scan(agent[0])
            elif self.command[0] == list_requests_syscheck['SYSCHECK_RUN']:
                args = self.f.decrypt(response[common.cluster_sync_msg_size:])
                args = args.split(" ")
                if (len(args) == 2):
                    agents = args[0]
                    all_agents = ast.literal_eval(args[1])
                else:
                    agents = None
                    all_agents = ast.literal_eval(args[0])
                cluster_depth = ast.literal_eval(self.command[1]) - 1
                res = syscheck.run(agents, all_agents, cluster_depth)
            elif self.command[0] == list_requests_syscheck['SYSCHECK_CLEAR']:
                args = self.f.decrypt(response[common.cluster_sync_msg_size:])
                args = args.split(" ")
                if (len(args) == 2):
                    agents = args[0]
                    all_agents = ast.literal_eval(args[1])
                else:
                    agents = None
                    all_agents = ast.literal_eval(args[0])
                cluster_depth = ast.literal_eval(self.command[1]) - 1
                res = syscheck.clear(agents, all_agents, cluster_depth)
            elif self.command[0] == list_requests_rootcheck['ROOTCHECK_PCI']:
                args = self.f.decrypt(response[common.cluster_sync_msg_size:])
                args = args.split(" ")
                index = 0
                agents = None
                if (len(args) == 5):
                    agents = args[0]
                    index = index + 1
                offset = ast.literal_eval(args[index])
                index = index + 1
                limit = ast.literal_eval(args[index])
                index = index + 1
                sort = ast.literal_eval(args[index])
                index = index + 1
                search = ast.literal_eval(args[index])
                res = args
                res = rootcheck.get_pci(agents, offset, limit, sort, search)
            elif self.command[0] == list_requests_rootcheck['ROOTCHECK_CIS']:
                args = self.f.decrypt(response[common.cluster_sync_msg_size:])
                args = args.split(" ")
                index = 0
                agents = None
                if (len(args) == 5):
                    agents = args[0]
                    index = index + 1
                offset = ast.literal_eval(args[index])
                index = index + 1
                limit = ast.literal_eval(args[index])
                index = index + 1
                sort = ast.literal_eval(args[index])
                index = index + 1
                search = ast.literal_eval(args[index])
                res = args
                res = rootcheck.get_cis(agents, offset, limit, sort, search)
            elif self.command[0] == list_requests_rootcheck[
                    'ROOTCHECK_LAST_SCAN']:
                args = self.f.decrypt(response[common.cluster_sync_msg_size:])
                agent = args.split(" ")
                res = rootcheck.last_scan(agent[0])
            elif self.command[0] == list_requests_rootcheck['ROOTCHECK_RUN']:
                args = self.f.decrypt(response[common.cluster_sync_msg_size:])
                args = args.split(" ")
                if (len(args) == 2):
                    agents = args[0]
                    all_agents = ast.literal_eval(args[1])
                else:
                    agents = None
                    all_agents = ast.literal_eval(args[0])
                cluster_depth = ast.literal_eval(self.command[1]) - 1
                res = rootcheck.run(agents, all_agents, cluster_depth)
            elif self.command[0] == list_requests_rootcheck['ROOTCHECK_CLEAR']:
                args = self.f.decrypt(response[common.cluster_sync_msg_size:])
                args = args.split(" ")
                if (len(args) == 2):
                    agents = args[0]
                    all_agents = ast.literal_eval(args[1])
                else:
                    agents = None
                    all_agents = ast.literal_eval(args[0])
                cluster_depth = ast.literal_eval(self.command[1]) - 1
                res = rootcheck.clear(agents, all_agents, cluster_depth)
            elif self.command[0] == list_requests_managers['MANAGERS_STATUS']:
                args = self.f.decrypt(response[common.cluster_sync_msg_size:])
                args = args.split(" ")
                cluster_depth = ast.literal_eval(self.command[1]) - 1
                res = manager.managers_status(cluster_depth=cluster_depth)
            elif self.command[0] == list_requests_managers['MANAGERS_LOGS']:
                args = self.f.decrypt(response[common.cluster_sync_msg_size:])
                args = args.split(" ")
                cluster_depth = ast.literal_eval(self.command[1]) - 1
                type_log = args[0]
                category = args[1]
                months = ast.literal_eval(args[2])
                offset = ast.literal_eval(args[3])
                limit = ast.literal_eval(args[4])
                sort = ast.literal_eval(args[5])
                search = ast.literal_eval(args[6])
                res = manager.managers_ossec_log(type_log=type_log,
                                                 category=category,
                                                 months=months,
                                                 offset=offset,
                                                 limit=limit,
                                                 sort=sort,
                                                 search=search,
                                                 cluster_depth=cluster_depth)
            elif self.command[0] == list_requests_managers[
                    'MANAGERS_LOGS_SUMMARY']:
                args = self.f.decrypt(response[common.cluster_sync_msg_size:])
                args = args.split(" ")
                cluster_depth = ast.literal_eval(self.command[1]) - 1
                months = ast.literal_eval(args[0])
                res = manager.managers_ossec_log_summary(
                    months=months, cluster_depth=cluster_depth)
            elif self.command[0] == list_requests_managers[
                    'MANAGERS_STATS_TOTALS']:
                args = self.f.decrypt(response[common.cluster_sync_msg_size:])
                args = args.split(" ")
                cluster_depth = ast.literal_eval(self.command[1]) - 1
                year = ast.literal_eval(args[0])
                month = ast.literal_eval(args[1])
                day = ast.literal_eval(args[2])
                res = stats.totals(year=year,
                                   month=month,
                                   day=day,
                                   cluster_depth=cluster_depth)
            elif self.command[0] == list_requests_managers[
                    'MANAGERS_STATS_HOURLY']:
                args = self.f.decrypt(response[common.cluster_sync_msg_size:])
                args = args.split(" ")
                cluster_depth = ast.literal_eval(self.command[1]) - 1
                res = stats.hourly(cluster_depth=cluster_depth)
            elif self.command[0] == list_requests_managers[
                    'MANAGERS_STATS_WEEKLY']:
                args = self.f.decrypt(response[common.cluster_sync_msg_size:])
                args = args.split(" ")
                cluster_depth = ast.literal_eval(self.command[1]) - 1
                res = stats.weekly(cluster_depth=cluster_depth)
            elif self.command[0] == list_requests_managers[
                    'MANAGERS_OSSEC_CONF']:
                args = self.f.decrypt(response[common.cluster_sync_msg_size:])
                args = args.split(" ")
                cluster_depth = ast.literal_eval(self.command[1]) - 1
                section = args[0]
                field = ast.literal_eval(args[1])
                res = manager.managers_get_ossec_conf(
                    section=section, field=field, cluster_depth=cluster_depth)
            elif self.command[0] == list_requests_managers['MANAGERS_INFO']:
                args = self.f.decrypt(response[common.cluster_sync_msg_size:])
                args = args.split(" ")
                cluster_depth = ast.literal_eval(self.command[1]) - 1
                res = myWazuh.managers_get_ossec_init(
                    cluster_depth=cluster_depth)
            elif self.command[0] == list_requests_cluster['CLUSTER_CONFIG']:
                args = self.f.decrypt(response[common.cluster_sync_msg_size:])
                args = args.split(" ")
                cluster_depth = ast.literal_eval(self.command[1]) - 1
                res = get_config_distributed(cluster_depth=cluster_depth)

            elif self.command[0] == list_requests_cluster['MASTER_FORW']:
                args = self.f.decrypt(response[common.cluster_sync_msg_size:])
                args = args.split(" ")
                args_list = []
                if args[0] in all_list_requests.values():
                    agent_id = None
                    request_type = args[0]
                    if (len(args) > 1):
                        args_list = args[1:]
                elif len(args) > 1 and args[1] in all_list_requests.values():
                    agent_id = args[0].split("-")
                    request_type = args[1]
                    if (len(args) > 2):
                        args_list = args[2:]
                res = distributed_api_request(request_type=request_type,
                                              agent_id=agent_id,
                                              args=args_list,
                                              cluster_depth=1,
                                              affected_nodes=None,
                                              from_cluster=True)

            elif self.command[0] == list_requests_cluster['ready']:
                res = "Starting to sync client's files"
                # execute an independent process to "crontab" the sync interval
                kill(child_pid, SIGUSR1)
            elif self.command[0] == list_requests_cluster['data']:
                res = "Saving data from actual master"
                actual_master_data = json.loads(
                    self.f.decrypt(
                        response[common.cluster_sync_msg_size:]).decode())
                if save_actual_master_data_on_db(actual_master_data):
                    restart_manager()

            logging.debug("Command {0} executed for {1}".format(
                self.command[0], self.addr))

        self.data = json.dumps({'error': error, 'data': res})

        self.handle_write()
Beispiel #25
0
def test_get_distinct_agents(test_data, fields, expected_items):
    """Check that get_distinct_agents yields the expected distinct values.

    sqlite3.connect is patched so the query runs against the fixture
    database instead of a real Wazuh installation.
    """
    with patch('sqlite3.connect') as db_mock:
        db_mock.return_value = test_data.global_db
        result = Agent.get_distinct_agents(fields=fields)
        assert result['items'] == expected_items
Beispiel #26
0
def upload_group_configuration(group_id, xml_file):
    """
    Updates group configuration.

    :param group_id: Group to update.
    :param xml_file: File contents of the new configuration in string.
    :return: Confirmation message.
    :raises WazuhException: 1710 if the group does not exist, 1113 if the
        XML cannot be parsed, 1114/1743 if verify-agent-conf rejects the
        configuration, 1017 if the validated file cannot be moved into place.
    """
    # check if the group exists
    if not Agent.group_exists(group_id):
        raise WazuhException(1710)

    # path of temporary file for parsing and validating the xml input
    tmp_file_path = '{}/tmp/api_tmp_file_{}_{}.xml'.format(
        common.ossec_path, time.time(), random.randint(0, 1000))

    def _cleanup_tmp_file():
        # best-effort removal: never let cleanup mask the real error
        try:
            remove(tmp_file_path)
        except OSError:
            pass

    # create temporary file for parsing xml input and validate XML format
    try:
        with open(tmp_file_path, 'w') as tmp_file:
            # beauty xml file
            xml = parseString('<root>' + xml_file + '</root>')
            # remove first line (XML specification: <? xmlversion="1.0" ?>), <root> and </root> tags, and empty lines
            pretty_xml = '\n'.join(
                filter(lambda x: x.strip(),
                       xml.toprettyxml(indent='  ').split('\n')[2:-2])) + '\n'
            # revert xml.dom replacings
            # (https://github.com/python/cpython/blob/8e0418688906206fe59bd26344320c0fc026849e/Lib/xml/dom/minidom.py#L305)
            pretty_xml = pretty_xml.replace("&amp;", "&").replace("&lt;", "<").replace("&quot;", "\"",)\
                                   .replace("&gt;", ">")
            tmp_file.write(pretty_xml)
    except Exception as e:
        # the temp file may already exist when parsing fails: remove it so
        # failed uploads do not accumulate in tmp/
        _cleanup_tmp_file()
        raise WazuhException(1113, str(e))

    try:

        # check Wazuh xml format
        try:
            subprocess.check_output([
                '{}/bin/verify-agent-conf'.format(common.ossec_path), '-f',
                tmp_file_path
            ],
                                    stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as e:
            # extract error message from output.
            # Example of raw output
            # 2019/01/08 14:51:09 verify-agent-conf: ERROR: (1230): Invalid element in the configuration: 'agent_conf'.\n2019/01/08 14:51:09 verify-agent-conf: ERROR: (1207): Syscheck remote configuration in '/var/ossec/tmp/api_tmp_file_2019-01-08-01-1546959069.xml' is corrupted.\n\n
            # Example of desired output:
            # Invalid element in the configuration: 'agent_conf'. Syscheck remote configuration in '/var/ossec/tmp/api_tmp_file_2019-01-08-01-1546959069.xml' is corrupted.
            output_regex = re.findall(
                pattern=
                r"\d{4}\/\d{2}\/\d{2} \d{2}:\d{2}:\d{2} verify-agent-conf: ERROR: "
                r"\(\d+\): ([\w \/ \_ \- \. ' :]+)",
                string=e.output.decode())
            raise WazuhException(1114, ' '.join(output_regex))
        except Exception as e:
            raise WazuhException(1743, str(e))

        # move temporary file to group folder
        try:
            new_conf_path = "{}/{}/agent.conf".format(common.shared_path,
                                                      group_id)
            move(tmp_file_path, new_conf_path)
        except Exception as e:
            raise WazuhException(1017, str(e))

        return 'Agent configuration was updated successfully'
    except Exception:
        # remove created temporary file and re-raise preserving the
        # original traceback (bare raise instead of `raise e`)
        _cleanup_tmp_file()
        raise
Beispiel #27
0
def test_get_os_summary(test_data):
    """Verify that get_os_summary reports the OS names in the fixture DB.

    sqlite3.connect is patched so the query hits the test database.
    """
    with patch('sqlite3.connect') as db_mock:
        db_mock.return_value = test_data.global_db
        os_summary = Agent.get_os_summary()
        assert os_summary['items'] == ['ubuntu']
Beispiel #28
0
def show_synced_agent(agent_id):
    """Print whether the given agent's group configuration is synchronized."""
    sync_info = Agent(agent_id).get_sync_group(agent_id)
    # empty suffix when synced, ' not' otherwise
    suffix = '' if sync_info['synced'] else ' not'
    print("Agent '{}' is{} synchronized. ".format(agent_id, suffix))
Beispiel #29
0
#!/usr/bin/env python
# Demo script: register a test agent via the Wazuh API and dump its info.
import json
from sys import exit
from wazuh.agent import Agent
from wazuh.exception import WazuhException

test_agent = Agent()

# Register the agent; abort with exit code 1 on any Wazuh error.
try:
    new_id = test_agent.add('WazuhTestAgent', 'Any')
except WazuhException as e:
    # NOTE(review): assumes WazuhException exposes .code/.message — matches usage here
    print("Error {0}: {1}.".format(e.code, e.message))
    exit(1)

new_key = test_agent.get_key()
print("\nAgent added with:\n\tID: {0}\n\tKey: {1}".format(new_id, new_key))

test_agent.get()  # Load information generated by OSSEC
print("\nAgent info:")
print(json.dumps(test_agent.to_dict(), indent=4, sort_keys=True))