def ping(self):
    s = self._health_check()
    if s is None or s['status'] == 'red':
        raise CIFException('ES Cluster Issue')

    return True
def delete(self, token, data, id=None, flush=True):
    q_filters = {}
    for x in DELETE_FILTERS:
        if data.get(x):
            q_filters[x] = data[x]

    logger.debug(q_filters)

    if len(q_filters) == 0:
        return '0, must specify valid filter. valid filters: {}'.format(
            DELETE_FILTERS)

    try:
        rv = self.search(token, q_filters, sort='reporttime', raw=True)
        rv = rv['hits']['hits']
    except Exception as e:
        raise CIFException(e)

    logger.debug('delete match: {}'.format(rv))

    # docs matched
    if len(rv) > 0:
        actions = []
        for i in rv:
            actions.append({
                '_op_type': 'delete',
                '_index': i['_index'],
                '_type': 'indicator',
                '_id': i['_id']
            })

        try:
            helpers.bulk(self.handle, actions)
        except Exception as e:
            raise CIFException(e)

        if flush:
            self.flush()

        logger.info('{} deleted {} indicators'.format(
            token['username'], len(rv)))

        return len(rv)

    # no matches, return 0
    return 0
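# Illustrative usage (hypothetical values) -- delete() only proceeds when at
# least one field from DELETE_FILTERS is present in the request data:
#
#   n = store.delete(token, {'indicator': 'example.com'})
#   # searches for matches, bulk-deletes each hit by _index/_id and
#   # returns how many documents were removed (0 if nothing matched)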
def ping(self, token):
    s = self._health_check()
    if s is None or s['status'] == 'red':
        raise CIFException('ES Cluster Issue')

    if self.tokens.read(token) or self.tokens.write(token):
        return True
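# _health_check() above is assumed to wrap the ES _cluster/health API, which
# reports a cluster status of 'green', 'yellow' or 'red'; 'red' means at
# least one primary shard is unassigned, so both ping() variants fail hard
# rather than serve queries against incomplete data.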
def upsert(self, token, indicators, flush=False):
    if not UPSERT_MODE:
        return self.create_bulk(token, indicators, flush=flush)

    # create the current index if needed
    index = self._create_index()

    count = 0

    # http://stackoverflow.com/questions/30111258/elasticsearch-in-equivalent-operator-in-elasticsearch

    # aggregate indicators based on the dedup criteria
    agg = {}
    for d in sorted(indicators, key=lambda k: k['lasttime'], reverse=True):
        key = []
        for v in UPSERT_MATCH:
            if d.get(v):
                # 'basestring' relies on a py2/py3 compat alias assumed to be
                # defined at module scope
                if isinstance(d[v], basestring):
                    key.append(d[v])
                elif isinstance(d[v], (float, int)):
                    key.append(str(d[v]))
                elif isinstance(d[v], list):
                    for k in d[v]:
                        key.append(k)

        key = "_".join(key)

        # already seen in batch
        if key in agg:
            # keep the older firsttime (guarded so a missing firsttime
            # doesn't raise a TypeError)
            if d.get('firsttime') and agg[key].get('firsttime') \
                    and d['firsttime'] < agg[key]['firsttime']:
                agg[key]['firsttime'] = d['firsttime']

            if d.get('count'):
                agg[key]['count'] = agg[key].get('count', 0) + d['count']

        # haven't yet seen in batch
        else:
            agg[key] = d

    actions = []

    # self.lockm.lock_aquire()

    for d in agg.values():
        filters = {'limit': 1}
        for x in UPSERT_MATCH:
            if d.get(x):
                if x == 'confidence':
                    filters[x] = '{},{}'.format(d[x], d[x])
                else:
                    filters[x] = d[x]

        if d.get('tags'):
            filters['tags'] = d['tags']

        if d.get('rdata'):
            filters['rdata'] = d['rdata']

        # search for an existing record, returning the latest
        try:
            # search the current index only
            rv = self.search(token, filters, sort='reporttime', raw=True,
                             sindex=index)
        except Exception as e:
            logger.error(e)
            raise e

        try:
            rv = rv['hits']['hits']
        except Exception as e:
            raise CIFException(e)

        # indicator does not exist in the results
        if len(rv) == 0:
            if not d.get('count'):
                d['count'] = 1

            if d.get('group') and not isinstance(d['group'], list):
                d['group'] = [d['group']]

            expand_ip_idx(d)

            # append a create to the action set
            if UPSERT_TRACE:
                logger.debug('upsert: creating new {}'.format(
                    d['indicator']))

            actions.append({
                '_index': index,
                '_type': 'indicator',
                '_source': d,
            })

            count += 1
            continue

        # indicator exists in the results
        else:
            if UPSERT_TRACE:
                logger.debug('upsert: match indicator {}'.format(
                    rv[0]['_id']))

            # map the result
            i = rv[0]

            # skip new indicators that don't have a more recent lasttime
            if not self._is_newer(d, i['_source']):
                logger.debug('skipping...')
                continue

            # map the existing indicator
            i = i['_source']

            # we're working within the same index
            if rv[0]['_index'] == self._current_index():
                # update fields
                i['count'] += 1
                i['lasttime'] = d['lasttime']
                i['reporttime'] = d['reporttime']

                # if the existing indicator has no message field but the new
                # one does, carry the new message into the upsert
                if d.get('message'):
                    if not i.get('message'):
                        i['message'] = []
                    i['message'].append(d['message'])

                # always update the description if one is given
                if d.get('description'):
                    i['description'] = d['description']

                # append an update to the action set
                if UPSERT_TRACE:
                    logger.debug(
                        'upsert: updating same index {}, {}'.format(
                            d.get('indicator'), rv[0]['_id']))

                actions.append({
                    '_op_type': 'update',
                    '_index': rv[0]['_index'],
                    '_type': 'indicator',
                    '_id': rv[0]['_id'],
                    # the bulk helpers expect the update body ('doc') inline;
                    # a '_body' wrapper is not a recognized metadata field
                    'doc': i
                })

                count += 1
                continue

            # we aren't in the same index
            else:
                # update fields
                i['count'] = i['count'] + 1
                i['lasttime'] = d['lasttime']
                i['reporttime'] = d['reporttime']

                # if the existing indicator has no message field but the new
                # one does, carry the new message into the upsert
                if d.get('message'):
                    if not i.get('message'):
                        i['message'] = []
                    i['message'].append(d['message'])

                # always update the description if one is given
                if d.get('description'):
                    i['description'] = d['description']

                # re-create the merged document in the current index
                if UPSERT_TRACE:
                    logger.debug('upsert: updating across index {}'.format(
                        d['indicator']))

                actions.append({
                    '_index': index,
                    '_type': 'indicator',
                    '_source': i,
                })

                # delete the old document
                if UPSERT_TRACE:
                    logger.debug(
                        'upsert: deleting old index {}, {}'.format(
                            d['indicator'], rv[0]['_id']))

                actions.append({
                    '_op_type': 'delete',
                    '_index': rv[0]['_index'],
                    '_type': 'indicator',
                    '_id': rv[0]['_id']
                })

                count += 1
                continue

    if len(actions) > 0:
        try:
            helpers.bulk(self.handle, actions)
        except Exception as e:
            # self.lockm.lock_release()
            raise e

    if flush:
        self.flush()

    # self.lockm.lock_release()

    return count
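# Illustrative sketch (not part of the store): how the UPSERT_MATCH dedup key
# above collapses a batch. The field names and values here are hypothetical;
# the real UPSERT_MATCH tuple is defined elsewhere in the module.
def _example_upsert_key(indicator, match_fields=('indicator', 'itype', 'provider')):
    key = []
    for v in match_fields:
        val = indicator.get(v)
        if not val:
            continue
        if isinstance(val, list):
            key.extend(str(k) for k in val)
        else:
            key.append(str(val))
    return '_'.join(key)

# _example_upsert_key({'indicator': 'example.com', 'itype': 'fqdn',
#                      'provider': 'csirtg.io'})
# -> 'example.com_fqdn_csirtg.io'
# Two records in the same batch that produce the same key are merged before
# any ES round-trip, which is why upsert() sorts by lasttime first.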