def handle(self, *args, **options):
    """Rebuild the Bucket statistics collection from scratch.

    Walks every supported top-level object type, tallies how many
    objects of each type carry each bucket tag, then replaces all
    existing Bucket documents with the freshly computed counts.
    """
    tally = {}
    for otype in ('Dataset', 'EmailAddress', 'Event',
                  'Hash', 'Target', 'Username'):
        klass = class_from_type(otype)
        if not klass:
            continue
        # Only the bucket_list field is needed, so restrict the query.
        for obj in klass.objects().only('bucket_list'):
            for tag in obj.bucket_list:
                if not tag:
                    continue  # Avoid empty strings
                entry = tally.get(tag)
                if entry is None:
                    entry = Bucket()
                    entry.name = tag
                    setattr(entry, otype, 1)
                    tally[tag] = entry
                else:
                    entry[otype] += 1
    # Drop all existing buckets, then persist the new tallies.
    Bucket.objects().delete()
    for entry in tally.values():
        entry.save()
def handle(self, *args, **options):
    """Recompute the Sector statistics collection.

    For each supported top-level object type, counts how many objects
    of that type are tagged with each sector, then replaces every
    existing Sector document with the recomputed counts.
    """
    counts = {}
    type_names = ['Dataset', 'EmailAddress', 'Event',
                  'Hash', 'Target', 'Username']
    for type_name in type_names:
        cls = class_from_type(type_name)
        if not cls:
            continue
        # Fetch only the sectors field; nothing else is needed here.
        for document in cls.objects().only('sectors'):
            for sector_name in document.sectors:
                if not sector_name:
                    continue  # Avoid empty strings
                if sector_name in counts:
                    counts[sector_name][type_name] += 1
                else:
                    record = Sector()
                    record.name = sector_name
                    setattr(record, type_name, 1)
                    counts[sector_name] = record
    # Drop all existing sectors before saving the new set.
    Sector.objects().delete()
    for record in counts.values():
        record.save()
def delete_object_file(value):
    """Remove the GridFS file backing an object value, if unreferenced.

    In the event this is a file (but not PCAP), clean up after
    ourselves when deleting an object: the GridFS document keyed by
    the file's MD5 is removed, but only if no other top-level object
    still references the same value.

    :param value: The value of the object we are deleting.
    :type value: str
    """
    # Only MD5-looking values correspond to GridFS-backed files.
    if re.match(r"^[a-f\d]{32}$", value, re.I) is None:
        return
    #XXX: MongoEngine provides no direct GridFS access so we
    # need to use pymongo directly.
    tlo_types = ('Dataset', 'EmailAddress', 'Event',
                 'Hash', 'Target', 'UserName')
    # In order to make sure this object isn't tied to more than one
    # top-level object, we need to check the rest of the database. We
    # will at least find one instance (the one being removed); any
    # additional hit means the file is still in use, so keep it.
    query = {'objects.value': value}
    references = 0
    for tlo in tlo_types:
        references += len(class_from_type(tlo).objects(__raw__=query))
        if references > 1:
            return
    grid = mongo_connector("%s.files" % settings.COL_OBJECTS)
    grid.remove({'md5': value})
    return
def add_results(object_type, object_id, analysis_id, result, type_, subtype, analyst):
    """
    Add multiple results to an analysis task.

    :param object_type: The top-level object type.
    :type object_type: str
    :param object_id: The ObjectId to search for.
    :type object_id: str
    :param analysis_id: The ID of the task to update.
    :type analysis_id: str
    :param result: The list of result to append.
    :type result: list of str
    :param type_: The list of result types.
    :type type_: list of str
    :param subtype: The list of result subtypes.
    :type subtype: list of str
    :param analyst: The user updating the results.
    :type analyst: str
    :returns: dict with keys "success" (boolean) and "message" (str) if failed.
    """
    res = {'success': False}
    if not object_type or not object_id or not analysis_id:
        res['message'] = "Must supply object id/type and analysis id."
        return res
    # Validate user can add service results to this TLO.
    klass = class_from_type(object_type)
    sources = user_sources(analyst)
    obj = klass.objects(id=object_id, source__name__in=sources).first()
    if not obj:
        res['message'] = "Could not find object to add results to."
        return res
    if not(result and type_ and subtype):
        res['message'] = "Need a result, type, and subtype to add a result."
        return res
    if not(len(result) == len(type_) == len(subtype)):
        res['message'] = "result, type, and subtype need to be the same length."
        return res
    # Update analysis results: build one embedded dict per result entry.
    final_list = []
    for key, r in enumerate(result):
        final = {}
        final['subtype'] = subtype[key]
        final['result'] = r
        # type_ entries are string-encoded dicts of extra fields;
        # literal_eval parses literals only (no code execution).
        tmp = ast.literal_eval(type_[key])
        for k in tmp:
            final[k] = tmp[k]
        final_list.append(final)
    ar = AnalysisResult.objects(analysis_id=analysis_id).first()
    if ar:
        AnalysisResult.objects(id=ar.id).update_one(push_all__results=final_list)
        res['success'] = True
    else:
        # Fix: previously returned success=False with no message when
        # the task was missing; mirror add_log()'s failure reporting.
        res['message'] = "Could not find task to add results to."
    return res
def handle(self, *args, **options):
    """Regenerate bucket usage statistics across all object types.

    Scans each supported top-level object type and accumulates, per
    bucket tag, the number of tagged objects of that type.  The old
    Bucket documents are then dropped and the new tallies saved.
    """
    stats = {}
    supported = ["Dataset", "EmailAddress", "Event",
                 "Hash", "Target", "Username"]
    for kind in supported:
        model = class_from_type(kind)
        if not model:
            continue
        # Restrict the projection to the single field we read.
        documents = model.objects().only("bucket_list")
        for document in documents:
            for label in document.bucket_list:
                if not label:
                    continue  # Avoid empty strings
                if label in stats:
                    stats[label][kind] += 1
                else:
                    fresh = Bucket()
                    fresh.name = label
                    setattr(fresh, kind, 1)
                    stats[label] = fresh
    # Drop all existing buckets and write back the recomputed set.
    Bucket.objects().delete()
    for fresh in stats.values():
        fresh.save()
def get_parent(self):
    """
    Look up and return the parent CRIPTs object.

    :returns: class which inherits from
              :class:`cripts.core.cripts_mongoengine.CriptsBaseAttributes`.
    """
    parent_class = class_from_type(self.obj_type)
    return parent_class.objects(id=self.obj_id).first()
def get_parent(self):
    """
    Fetch the parent CRIPTs object this item is attached to.

    :returns: class which inherits from
              :class:`cripts.core.cripts_mongoengine.CriptsBaseAttributes`.
    """
    return class_from_type(self.obj_type).objects(id=self.obj_id).first()
def get_user_allowed_comments(comments, sources):
    """
    Limit the comments to those a user should have access to see.

    :param comments: The list of comments.
    :type comments: list
    :param sources: The sources the user has access to.
    :type sources: list
    :returns: list of :class:`cripts.comments.comment.Comment`
    """
    # Group comments by top-level object type, then by object id.
    grouped = {
        'Dataset': {},
        'EmailAddress': {},
        'Event': {},
        'Hash': {},
        'Target': {},
        'UserName': {},
    }
    for comment in comments:
        comment.comment_to_html()
        grouped[comment.obj_type].setdefault(comment.obj_id, []).append(comment)

    allowed = []
    for obj_type, by_id in grouped.items():
        cls = class_from_type(obj_type)
        ids = [oid for oid in by_id]  # get keys
        # A parent object is visible when it belongs to one of the
        # user's sources, or carries no source restriction at all.
        raw = {
            '_id': {'$in': ids},
            '$or': [
                {'source.name': {'$in': sources}},
                {'source': {'$exists': 0}},
            ],
        }
        for visible in cls.objects(__raw__=raw).only('id'):
            allowed += by_id[visible.id]

    allowed.sort(key=lambda c: c.created, reverse=True)
    return allowed
def refresh_services(request, cripts_type, identifier):
    """
    Refresh the Analysis tab with the latest information.
    """
    response = {}
    # Verify user can see results.
    sources = user_sources(request.user.username)
    klass = class_from_type(cripts_type)

    # Resolve the object, honoring source restrictions when the type
    # carries a 'source' field.
    obj = None
    if klass:
        if hasattr(klass, 'source'):
            obj = klass.objects(id=identifier,
                                source__name__in=sources).first()
        else:
            obj = klass.objects(id=identifier).first()

    if not obj:
        response['success'] = False
        response['html'] = 'Could not find object to refresh!'
        return HttpResponse(json.dumps(response),
                            content_type="application/json")

    # Get analysis results.
    results = AnalysisResult.objects(object_type=cripts_type,
                                     object_id=identifier)
    context = {
        'relationship': {'type': cripts_type, 'value': identifier},
        'subscription': {'type': cripts_type, 'id': identifier},
        'service_results': results,
        'cripts_type': cripts_type,
        'identifier': identifier,
        'service_list': get_supported_services(cripts_type),
    }
    response['success'] = True
    response['html'] = render_to_string("services_analysis_listing.html",
                                        context,
                                        RequestContext(request))
    return HttpResponse(json.dumps(response),
                        content_type="application/json")
def refresh_services(request, cripts_type, identifier):
    """
    Refresh the Analysis tab with the latest information.
    """
    payload = {}
    # Verify user can see results.
    visible_sources = user_sources(request.user.username)
    model = class_from_type(cripts_type)
    if not model:
        payload['success'] = False
        payload['html'] = 'Could not find object to refresh!'
        return HttpResponse(json.dumps(payload),
                            content_type="application/json")

    # Source-restricted types must be filtered by the user's sources.
    if hasattr(model, 'source'):
        document = model.objects(id=identifier,
                                 source__name__in=visible_sources).first()
    else:
        document = model.objects(id=identifier).first()
    if not document:
        payload['success'] = False
        payload['html'] = 'Could not find object to refresh!'
        return HttpResponse(json.dumps(payload),
                            content_type="application/json")

    # Get analysis results and render the listing fragment.
    service_results = AnalysisResult.objects(object_type=cripts_type,
                                             object_id=identifier)
    relationship = {'type': cripts_type, 'value': identifier}
    subscription = {'type': cripts_type, 'id': identifier}
    service_list = get_supported_services(cripts_type)
    payload['success'] = True
    payload['html'] = render_to_string(
        "services_analysis_listing.html",
        {'relationship': relationship,
         'subscription': subscription,
         'service_results': service_results,
         'cripts_type': cripts_type,
         'identifier': identifier,
         'service_list': service_list},
        RequestContext(request))
    return HttpResponse(json.dumps(payload),
                        content_type="application/json")
def add_log(object_type, object_id, analysis_id, log_message, level, analyst):
    """
    Add a log entry to an analysis task.

    :param object_type: The top-level object type.
    :type object_type: str
    :param object_id: The ObjectId to search for.
    :type object_id: str
    :param analysis_id: The ID of the task to update.
    :type analysis_id: str
    :param log_message: The log entry to append.
    :type log_message: dict
    :param level: The log level.
    :type level: str
    :param analyst: The user updating the log.
    :type analyst: str
    :returns: dict with keys "success" (boolean) and "message" (str) if failed.
    """
    results = {'success': False}
    if not (object_type and object_id and analysis_id):
        results['message'] = "Must supply object id/type and analysis id."
        return results

    # Validate user can add service results to this TLO.
    tlo_class = class_from_type(object_type)
    allowed_sources = user_sources(analyst)
    tlo = tlo_class.objects(id=object_id,
                            source__name__in=allowed_sources).first()
    if not tlo:
        results['message'] = "Could not find object to add results to."
        return results

    # Build the embedded log entry and push it onto the task's log.
    entry = EmbeddedAnalysisResultLog()
    entry.message = log_message
    entry.level = level
    entry.datetime = str(datetime.datetime.now())
    task = AnalysisResult.objects(analysis_id=analysis_id).first()
    if task:
        AnalysisResult.objects(id=task.id).update_one(push__log=entry)
        results['success'] = True
    else:
        results['message'] = "Could not find task to add log to."
    return results
def finish_task(object_type, object_id, analysis_id, status, analyst):
    """
    Finish a task by setting its status to "completed" and setting the finish
    date.

    :param object_type: The top-level object type.
    :type object_type: str
    :param object_id: The ObjectId to search for.
    :type object_id: str
    :param analysis_id: The ID of the task to update.
    :type analysis_id: str
    :param status: The status of the task.
    :type status: str ("error", "completed")
    :param analyst: The user updating the log.
    :type analyst: str
    :returns: dict with keys "success" (boolean) and "message" (str) if failed.
    """
    results = {'success': False}
    # Any missing or unrecognized status collapses to "completed".
    if status not in ('error', 'completed'):
        status = "completed"
    if not (object_type and object_id and analysis_id):
        results['message'] = "Must supply object id/type and analysis id."
        return results

    # Validate user can add service results to this TLO.
    tlo_class = class_from_type(object_type)
    allowed_sources = user_sources(analyst)
    tlo = tlo_class.objects(id=object_id,
                            source__name__in=allowed_sources).first()
    if not tlo:
        results['message'] = "Could not find object to add results to."
        return results

    # Stamp the task with its final status and finish time.
    finish_date = str(datetime.datetime.now())
    task = AnalysisResult.objects(analysis_id=analysis_id).first()
    if task:
        AnalysisResult.objects(id=task.id).update_one(
            set__status=status,
            set__finish_date=finish_date)
        results['success'] = True
    return results
def get_user_allowed_comments(comments, sources):
    """
    Limit the comments to those a user should have access to see.

    :param comments: The list of comments.
    :type comments: list
    :param sources: The sources the user has access to.
    :type sources: list
    :returns: list of :class:`cripts.comments.comment.Comment`
    """
    # Bucket each comment under its object type and object id.
    by_type = {name: {} for name in ('Dataset', 'EmailAddress', 'Event',
                                     'Hash', 'Target', 'UserName')}
    for comment in comments:
        comment.comment_to_html()
        per_object = by_type[comment.obj_type]
        try:
            per_object[comment.obj_id].append(comment)
        except KeyError:
            per_object[comment.obj_id] = [comment]

    visible = []
    for type_name, per_object in by_type.items():
        model = class_from_type(type_name)
        object_ids = list(per_object)  # get keys
        # Keep comments whose parent is in the user's sources or has
        # no source field at all.
        criteria = {'_id': {'$in': object_ids},
                    '$or': [{'source.name': {'$in': sources}},
                            {'source': {'$exists': 0}}]}
        for match in model.objects(__raw__=criteria).only('id'):
            visible += per_object[match.id]

    visible.sort(key=lambda c: c.created, reverse=True)
    return visible
def delete_object_file(value):
    """
    In the event this is a file (but not PCAP), clean up after ourselves when
    deleting an object: drop the GridFS document keyed by the file's MD5,
    unless another top-level object still references the same value.

    :param value: The value of the object we are deleting.
    :type value: str
    """
    # Non-MD5 values are not file-backed; nothing to clean up.
    if not re.match(r"^[a-f\d]{32}$", value, re.I):
        return
    #XXX: MongoEngine provides no direct GridFS access so we
    # need to use pymongo directly.
    query = {'objects.value': value}
    hits = 0
    # In order to make sure this object isn't tied to more than one
    # top-level object, check the rest of the database. At least one
    # hit (the object being removed) is expected; more than one means
    # the file is still referenced and must be kept.
    for type_name in ('Dataset', 'EmailAddress', 'Event',
                      'Hash', 'Target', 'UserName'):
        hits += len(class_from_type(type_name).objects(__raw__=query))
        if hits > 1:
            break
    else:
        collection = settings.COL_OBJECTS
        fs = mongo_connector("%s.files" % collection)
        fs.remove({'md5': value})
    return