def install(alsi=None):
    from assemblyline.al.common import forge
    ds = forge.get_datastore()

    ip = net.get_hostip()
    mac = net.get_mac_for_ip(ip)

    existing_reg = ds.get_node(mac)
    if existing_reg:
        alsi.info("Registration already exists. Skipping...")
        return

    reg = DEFAULT_CORE_REGISTRATION.copy()
    reg['hostname'] = net.get_hostname()
    reg['ip'] = ip
    reg['mac_address'] = mac
    reg['machine_info'] = sysinfo.get_machine_info()
    reg['platform'] = sysinfo.get_platform()
    if 'roles' not in reg:
        reg['roles'] = []
    if "dispatcher" not in reg["roles"]:
        reg['roles'].append("dispatcher")
    if "middleman" not in reg["roles"]:
        reg['roles'].append("middleman")
    ds.save_node(mac, reg)

    alsi.info("Core server registered!")
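
# For reference, a sketch of the kind of registration document install() ends
# up saving under the host's MAC address. DEFAULT_CORE_REGISTRATION is defined
# elsewhere in the codebase, so the values and abridged fields below are
# assumptions, not the actual template:
EXAMPLE_CORE_REGISTRATION = {
    'hostname': 'al-core1',                    # hypothetical host
    'ip': '10.0.0.5',                          # hypothetical address
    'mac_address': '00:16:3e:2a:1b:9c',        # hypothetical MAC
    'machine_info': {'cores': 8, 'memory': '15.9', 'os': 'Ubuntu 14.04', 'ip': '10.0.0.5'},
    'platform': {'system': 'Linux', 'release': '3.13.0-24-generic'},
    'roles': ['dispatcher', 'middleman'],
}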
def start():
    bad_disks = []

    # Test if smartmontools is installed
    try:
        subprocess.call(['smartctl'], stderr=subprocess.PIPE, stdout=subprocess.PIPE)
    except OSError:
        # Install smartmontools
        ret = subprocess.call(["sudo", "apt-get", "-y", "install", "smartmontools"],
                              stderr=subprocess.PIPE, stdout=subprocess.PIPE)
        if ret != 0:
            exit("Can't install smartmontools, stopping...")

    # Find drives
    proc = subprocess.Popen(['smartctl', '--scan'],
                            stderr=subprocess.PIPE, stdout=subprocess.PIPE)
    out, _ = proc.communicate()
    if out:
        device = out.split(" ")[0]
        for x in xrange(16):
            status_proc = subprocess.Popen(
                ['smartctl', '-H', '-d', 'megaraid,%s' % x, device],
                stderr=subprocess.PIPE, stdout=subprocess.PIPE)
            status_out, status_err = status_proc.communicate()
            if "INQUIRY failed" in status_out:
                break
            else:
                # Report status
                if is_drive_ok(status_out):
                    log.info("All is good with drive: %s [disk: %s]" % (device, x))
                else:
                    bad_disks.append((device, x))
                    log.error("Device %s [disk: %s] has a failure state. "
                              "Report to your administrator." % (device, x))

        if len(bad_disks) > 0:
            bad_disk_body = {
                'hostname': get_hostname(),
                'ip': get_hostip(),
                'bad_disks': [x[1] for x in bad_disks],
                'device': device
            }
            msg = message.Message(to="*",
                                  sender='harddrive_monitor',
                                  mtype=message.MT_HARDDRIVE_FAILURES,
                                  body=bad_disk_body).as_dict()
            statusq = queue.CommsQueue('status')
            statusq.publish(msg)
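
# is_drive_ok() is referenced above but not defined in this section. A minimal
# sketch, assuming it only needs the health line that `smartctl -H` prints
# (ATA drives report "SMART overall-health self-assessment test result: PASSED",
# SCSI/SAS drives report "SMART Health Status: OK"):
def is_drive_ok(status_out):
    for line in status_out.splitlines():
        if "overall-health" in line or "SMART Health Status" in line:
            # Treat anything other than PASSED/OK as a failure state.
            return line.rstrip().endswith("PASSED") or line.rstrip().endswith("OK")
    # No health line found: assume the worst so the failure gets reported.
    return False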
def get_machine_info(is_agent=False):
    import psutil

    out = {'cores': len(psutil.cpu_percent(interval=0.01, percpu=True)),
           'memory': "%.1f" % (float(psutil.phymem_usage().total) / 1024 / 1024 / 1024),
           'os': get_osinfo(),
           'ip': net.get_hostip()}

    if is_agent:
        out['uid'] = "Agent-" + net.get_mac_address()
    else:
        out['uid'] = "Core-" + net.get_mac_address()

    out['name'] = net.get_hostname()

    return out
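
# get_osinfo() is referenced above but not defined in this section. A minimal
# sketch using the standard library's platform module; the exact string format
# the real helper returns is an assumption:
import platform

def get_osinfo():
    # e.g. "Linux 3.13.0-24-generic (x86_64)"
    return "%s %s (%s)" % (platform.system(), platform.release(), platform.machine())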
def main():
    global previous_disk_io, previous_net_io

    elastic_ip = config.get('logging', {}).get('logserver', {}).get('node', None)
    elastic_port = config.get('logging', {}).get('logserver', {}).get('elastic', {}).get('port', 9200)

    if not elastic_ip or not elastic_port:
        log.error("Elasticsearch cluster not configured in the seed. "
                  "There is no need to gather stats on this box.")
        sys.exit(1)

    scheduler = Scheduler()
    cur_host = get_hostname()
    cur_ip = get_hostip()

    es = elasticsearch.Elasticsearch([{'host': elastic_ip, 'port': elastic_port}])

    scheduler.add_interval_job(calculate_system_metrics, seconds=60,
                               kwargs={"es": es, "cur_ip": cur_ip, "cur_host": cur_host})

    if is_riak(cur_ip, cur_host):
        scheduler.add_interval_job(calculate_solr_metrics, seconds=60,
                                   kwargs={"es": es, "cur_ip": cur_ip, "cur_host": cur_host})
        scheduler.add_interval_job(calculate_riak_metrics, seconds=60,
                                   kwargs={"es": es, "cur_ip": cur_ip, "cur_host": cur_host})

    scheduler.daemonic = False
    scheduler.start()
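
# is_riak() is referenced above but not defined in this section. A minimal
# sketch, assuming the seed lists its Riak nodes under datastore.riak.nodes;
# the real check (and how it uses cur_ip vs. cur_host) may differ:
def is_riak(cur_ip, cur_host):
    # Only gather Solr/Riak metrics on boxes that are part of the Riak ring.
    riak_nodes = config.get('datastore', {}).get('riak', {}).get('nodes', [])
    return cur_ip in riak_nodes or cur_host in riak_nodes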
def __init__(self,
             service_manager,  # pylint: disable=R0913
             control_queue=None,
             debug=False,
             high=config.core.dispatcher.max.inflight / config.core.dispatcher.shards,
             pop=forge.get_dispatch_queue().pop,
             shard='0'):
    if debug:
        self.debug = log.info
    else:
        self.debug = lambda *msg: None

    self.hostinfo = {
        'ip': get_hostip(),
        'mac_address': get_mac_address(),
        'host': get_hostname(),
    }

    self.ack_timeout = {}
    self.child_timeout = {}
    self.completed = {}
    self.control_queue = control_queue or \
        forge.get_control_queue('control-queue-' + shard)
    self.drain = False
    self.entries = {}
    self.errors = {}
    self.high = high
    self.ingest_queue = 'ingest-queue-' + shard
    self.last_check = 0
    self.lock = threading.Lock()
    self.pop = pop
    self.queue_size = {}

    # Response queues are named: <hostname>-<pid>-<seconds>-<shard>.
    self.response_queue = '-'.join((socket.gethostname(),
                                    str(os.getpid()),
                                    str(int(time.time())),
                                    shard))

    self.results = {}
    self.running = False
    self.score = {}
    self.service_manager = service_manager
    self.service_timeout = {}
    self.shard = shard
    self.storage_queue = LocalQueue()
    self.watchers = {}

    log.info('Dispatcher started. Dispatching to services:{0}'.format(
        [s for s in service_manager.services]))
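
# A worked example of the response queue naming above, with hypothetical
# values: on host "al-core1", pid 12841, at epoch 1365102345, for shard '0':
#
#   '-'.join(('al-core1', '12841', '1365102345', '0'))
#   # -> 'al-core1-12841-1365102345-0'
#
# The timestamp makes the queue name unique per dispatcher process restart.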
def register_host(self):
    if self.is_a_vm():
        return "This is a VM, no need to register."

    existing_reg = self.store.get_node(self.mac)
    if existing_reg:
        return "already registered: %s" % pprint.pformat(existing_reg)

    reg = DEFAULT_REGISTRATION.copy()
    reg['hostname'] = net.get_hostname()
    reg['ip'] = self.ip
    reg['mac_address'] = self.mac
    reg['machine_info'] = sysinfo.get_machine_info()
    reg['last_checkin'] = isotime.now_as_iso()
    reg['platform'] = sysinfo.get_platform()
    reg['profile'] = config.workers.default_profile
    reg['created'] = time.asctime()
    if 'roles' not in reg:
        reg['roles'] = []
    if "controller" not in reg["roles"]:
        reg['roles'].append("controller")
    if "hostagent" not in reg["roles"]:
        reg['roles'].append("hostagent")
    self.store.save_node(self.mac, reg)

    return 'Registered %s with %s' % (self.mac, pprint.pformat(reg))
def create_transport(url):
    """
    Transports are initiated using a URL. They follow the normal URL format:
        ftp://user:[email protected]/path/to/file

    In this example, it will extract the following parameters:
        scheme: ftp
        host: host.com
        user: user
        password: pass

    Certain transports can have extra parameters; those parameters need to be
    specified in the query part of the URL. e.g.:
        sftp://host.com/path/to/file?private_key=/etc/ssl/pkey&private_key_pass=pass&validate_host=true

        scheme: sftp
        host: host.com
        user:
        password:
        private_key: /etc/ssl/pkey
        private_key_pass: pass
        validate_host: True

    NOTE: For transports with extra parameters, only specific extra parameters
          are allowed. This is the list of allowed extra parameters:
        ftp: None
        http: pki (string)
        sftp: private_key (string), private_key_pass (string), validate_host (bool)
        file: None
    """
    parsed = urlparse(url)

    base = parsed.path or '/'
    host = parsed.hostname
    if parsed.password:
        password = unquote(parsed.password)
    else:
        password = ''
    user = parsed.username or ''

    scheme = parsed.scheme.lower()
    if scheme in ('ftp', 'sftp', 'http') and host in (get_hostname(), get_hostip()):
        scheme = 'file'

    if scheme == 'ftp':
        from assemblyline.al.common.transport.ftp import TransportFTP
        base = base.replace(config.filestore.ftp_root, "")
        t = TransportFTP(base=base, host=host, password=password, user=user)

    elif scheme == "sftp":
        from assemblyline.al.common.transport.sftp import TransportSFTP

        def get_extras(parsed_dict):
            valid_str_keys = ['private_key', 'private_key_pass']
            valid_bool_keys = ['validate_host']

            out = {}
            for k, v in parsed_dict.iteritems():
                if k in valid_bool_keys:
                    if v[0].lower() == 'true':
                        out[k] = True
                    elif v[0].lower() == 'false':
                        out[k] = False
                if k in valid_str_keys:
                    out[k] = v[0]

            return out

        extras = get_extras(parse_qs(parsed.query))
        t = TransportSFTP(base=base, host=host, password=password, user=user, **extras)

    elif scheme == 'http':
        from assemblyline.al.common.transport.http import TransportHTTP

        def get_extras(parsed_dict):
            valid_str_keys = ['pki']

            out = {}
            for k, v in parsed_dict.iteritems():
                if k in valid_str_keys:
                    out[k] = v[0]

            return out

        extras = get_extras(parse_qs(parsed.query))
        t = TransportHTTP(base=base, host=host, password=password, user=user, **extras)

    elif scheme == 'file':
        t = TransportLocal(base=base)

    else:
        raise FileStoreException("Unknown transport: %s" % scheme)

    return t
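
# A quick usage sketch of create_transport(); the URL, host, and credentials
# below are hypothetical and only illustrate how query-string extras map onto
# the SFTP transport:
transport = create_transport(
    'sftp://backup:[email protected]/srv/filestore'
    '?private_key=/etc/ssl/pkey&validate_host=true')
# -> TransportSFTP(base='/srv/filestore', host='files.example.com',
#                  user='backup', password='secret',
#                  private_key='/etc/ssl/pkey', validate_host=True)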
# Globals
alertq = queue.NamedQueue('m-alert', **persistent)  # df line queue
cache = {}
cache_lock = RLock()
chunk_size = 1000
completeq_name = 'm-complete-' + shard
date_fmt = '%Y-%m-%dT%H:%M:%SZ'
default_prefix = config.core.middleman.default_prefix
dup_prefix = 'w-' + shard + '-'
dupq = queue.MultiQueue(**persistent)  # df line queue
expire_after_seconds = config.core.middleman.expire_after
get_whitelist_verdict = forge.get_get_whitelist_verdict()
hostinfo = {
    'ip': get_hostip(),
    'mac_address': get_mac_address(),
    'host': get_hostname(),
}
ingestq_name = 'm-ingest-' + shard
is_low_priority = forge.get_is_low_priority()
max_priority = config.submissions.max.priority
max_retries = 10
max_time = 2 * 24 * 60 * 60  # Wait 2 days for responses.
max_waiting = int(config.core.dispatcher.max.inflight) / (2 * shards)
min_priority = 1
priority_value = constants.PRIORITIES
retry_delay = 180
retryq = queue.NamedQueue('m-retry-' + shard, **persistent)  # df line queue
running = True
sampling = False
selected_initial = [
    'Antivirus',
    'Extraction',
    'Filtering',
    'Networking',
    'Static Analysis',
]
hostname = 'unknownhost'
# noinspection PyBroadException
try:
    from assemblyline.common.net import get_hostname
    hostname = get_hostname()
except Exception:  # pylint:disable=W0702
    pass

ip = 'x.x.x.x'
# noinspection PyBroadException
try:
    from assemblyline.common.net import get_hostip
    ip = get_hostip()
except Exception:  # pylint:disable=W0702
    pass

AL_SYSLOG_FORMAT = f'{ip} AL %(levelname)8s %(process)5d %(name)20s | %(message)s'
AL_LOG_FORMAT = f'%(asctime)-16s %(levelname)8s {hostname} %(process)d %(name)30s | %(message)s'
AL_JSON_FORMAT = f'{{' \
                 f'"@timestamp": "%(asctime)s", ' \
                 f'"event": {{ "module": "assemblyline", "dataset": "%(name)s" }}, ' \
                 f'"host": {{ "ip": "{ip}", "hostname": "{hostname}" }}, ' \
                 f'"log": {{ "level": "%(levelname)s", "logger": "%(name)s" }}, ' \
                 f'"process": {{ "pid": "%(process)d" }}, ' \
                 f'"message": %(message)s}}'
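
# A minimal usage sketch showing how these format strings plug into the
# standard logging module; the handler, logger name, and level are assumptions
# for illustration only:
import logging

handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter(AL_LOG_FORMAT))
logger = logging.getLogger('assemblyline.demo')
logger.addHandler(handler)
logger.setLevel(logging.INFO)
logger.info('log line rendered with AL_LOG_FORMAT')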
def _init_registration(self):
    if self.is_a_vm():
        nq = NamedQueue('vm-%s' % self.mac, db=DATABASE_NUM)
        reg = nq.pop()
        nq.push(reg)  # Pop/push acts as a peek: the registration stays queued.
        self.log.info('Updating our registration.')
        reg['hostname'] = net.get_hostname()
        reg['ip'] = self.ip
        reg['machine_info'] = sysinfo.get_machine_info()
        reg['last_checkin'] = isotime.now_as_iso()
        reg['platform'] = sysinfo.get_platform()
        reg['updated'] = time.asctime()
        reg['system_name'] = config.system.name
        if 'roles' not in reg:
            reg['roles'] = []
        if "hostagent" not in reg["roles"]:
            reg['roles'].append("hostagent")
    else:
        reg = self.store.get_node(self.mac)
        if not reg:
            self.log.info('This appears to be our first run on this host. '
                          'Registering ourselves.')
            reg = DEFAULT_REGISTRATION.copy()
            reg['hostname'] = net.get_hostname()
            reg['ip'] = self.ip
            reg['mac_address'] = self.mac
            reg['machine_info'] = sysinfo.get_machine_info()
            reg['last_checkin'] = isotime.now_as_iso()
            reg['platform'] = sysinfo.get_platform()
            reg['profile'] = 'idle'
            reg['created'] = time.asctime()
            if 'roles' not in reg:
                reg['roles'] = []
            if "controller" not in reg["roles"]:
                reg['roles'].append("controller")
            if "hostagent" not in reg["roles"]:
                reg['roles'].append("hostagent")
            self.store.save_node(self.mac, reg)
        else:
            # Just do an update of the extra info in the registration.
            self.log.info('Updating our registration.')
            reg['hostname'] = net.get_hostname()
            reg['ip'] = self.ip
            if not reg.get('profile', None):
                reg['profile'] = config.workers.default_profile
            reg['machine_info'] = sysinfo.get_machine_info()
            reg['last_checkin'] = isotime.now_as_iso()
            reg['platform'] = sysinfo.get_platform()
            reg['updated'] = time.asctime()
            reg['system_name'] = config.system.name
            if 'roles' not in reg:
                reg['roles'] = []
            if "controller" not in reg["roles"] and not reg.get('is_vm', False):
                reg['roles'].append("controller")
            if "hostagent" not in reg["roles"]:
                reg['roles'].append("hostagent")
            self.store.save_node(self.mac, reg)

    self.registration = reg

    msgs = forge.apply_overrides(reg.get('config_overrides', None))
    if msgs:
        self.log.info("Using %s.", " and ".join(msgs))

    self.log.info('Our registration: %s', pprint.pformat(self.registration))