def create_item(obj_id, obj_metadata, io_content):
    '''
    Create a new Item (Import or Test only).

    :param obj_id: item id
    :param obj_metadata: item metadata ('first_seen', 'tags')
    :type obj_metadata: dict
    :param io_content: raw item content
    :return: True if the item was created
    :rtype: boolean
    '''
    # check if datetime match ??

    # # TODO: validate obj_id

    res = save_raw_content(obj_id, io_content)
    # item saved
    if res:
        # create tags
        if 'tags' in obj_metadata:
            # # TODO: handle mixed tags: taxonomies and Galaxies
            Tag.api_add_obj_tags(tags=obj_metadata['tags'], object_id=obj_id, object_type="item")
        return True

    # Item not created
    return False
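# Illustrative call of create_item() above (a sketch, not part of the original
# module; it assumes save_raw_content() and the Tag helper are importable, and
# the id, metadata and content values below are placeholders):
#
#   obj_id = 'submitted/2020/01/01/example.gz'
#   obj_metadata = {'first_seen': '20200101',
#                   'tags': ['infoleak:submission="manual"']}
#   created = create_item(obj_id, obj_metadata, b'raw item content')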
def showItem(): # # TODO: support post
    item_id = request.args.get('id')
    if not item_id or not Item.exist_item(item_id):
        abort(404)

    dict_item = {}
    dict_item['id'] = item_id
    dict_item['name'] = dict_item['id'].replace('/', ' / ')
    dict_item['father'] = Item.get_item_parent(item_id)
    dict_item['content'] = Item.get_item_content(item_id)
    dict_item['metadata'] = Item.get_item_metadata(item_id, item_content=dict_item['content'])
    dict_item['tags'] = Tag.get_obj_tag(item_id)
    #dict_item['duplicates'] = Item.get_item_nb_duplicates(item_id)
    dict_item['duplicates'] = Item.get_item_duplicates_dict(item_id)
    dict_item['crawler'] = Item.get_crawler_matadata(item_id, ltags=dict_item['tags'])

    ## EXPORT SECTION
    # # TODO: ADD in Export SECTION
    dict_item['hive_case'] = Export.get_item_hive_cases(item_id)

    return render_template("show_item.html", bootstrap_label=bootstrap_label,
                           modal_add_tags=Tag.get_modal_add_tags(dict_item['id'], object_type='item'),
                           is_hive_connected=Export.get_item_hive_cases(item_id),
                           dict_item=dict_item)
def update_domain_by_item(domain_obj, item_id):
    domain_name = domain_obj.get_domain_name()

    # update domain tags
    for tag in Tag.get_item_tags(item_id):
        if tag != 'infoleak:submission="crawler"' and tag != 'infoleak:submission="manual"':
            Tag.add_domain_tag(tag, domain_name, Item.get_item_date(item_id))

    # update domain correlation
    item_correlation = Item.get_item_all_correlation(item_id)
    for correlation_name in item_correlation:
        for correlation_type in item_correlation[correlation_name]:
            if correlation_name in ('pgp', 'cryptocurrency'):
                for correl_value in item_correlation[correlation_name][correlation_type]:
                    if correlation_name == 'pgp':
                        pgp.save_domain_correlation(domain_name, correlation_type, correl_value)
                    if correlation_name == 'cryptocurrency':
                        cryptocurrency.save_domain_correlation(domain_name, correlation_type, correl_value)
        if correlation_name == 'decoded':
            for decoded_item in item_correlation['decoded']:
                Decoded.save_domain_decoded(domain_name, decoded_item)
def delete_item(obj_id):
    # check if item exists
    if not exist_item(obj_id):
        return False
    else:
        Tag.delete_obj_tags(obj_id, 'item', Tag.get_obj_tag(obj_id))
        delete_item_duplicate(obj_id)
        # delete MISP event
        r_serv_metadata.delete('misp_events:{}'.format(obj_id))
        r_serv_metadata.delete('hive_cases:{}'.format(obj_id))

        os.remove(get_item_filename(obj_id))

        # get all correlation
        obj_correlations = get_item_all_correlation(obj_id)
        for correlation in obj_correlations:
            if correlation == 'cryptocurrency' or correlation == 'pgp':
                for obj2_subtype in obj_correlations[correlation]:
                    for obj2_id in obj_correlations[correlation][obj2_subtype]:
                        Correlate_object.delete_obj_relationship(correlation, obj2_id, 'item', obj_id,
                                                                 obj1_subtype=obj2_subtype)
            else:
                for obj2_id in obj_correlations[correlation]:
                    Correlate_object.delete_obj_relationship(correlation, obj2_id, 'item', obj_id)

        return True

    ### REQUIRE MORE WORK
    # delete child/son !!!
    ### TODO in import V2
    # delete from tracked items
    # delete from queue
    ###
    return False
def delete_screenshot_file(obj_id):
    filepath = get_screenshot_filepath(obj_id)
    if not os.path.isfile(filepath):
        return False
    Tag.delete_obj_tags(obj_id, 'image', Tag.get_obj_tag(obj_id))
    os.remove(filepath)
    return True
def get_tag_metadata():
    data = request.get_json()
    tag = data.get('tag', None)
    if not Tag.is_tag_in_all_tag(tag):
        return Response(json.dumps({'status': 'error', 'reason': 'Tag not found'}, indent=2, sort_keys=True),
                        mimetype='application/json'), 404
    metadata = Tag.get_tag_metadata(tag)
    return Response(json.dumps(metadata, indent=2, sort_keys=True), mimetype='application/json'), 200
def showDomain():
    if request.method == 'POST':
        domain_name = request.form.get('in_show_domain')
        epoch = None
        port = None
    else:
        domain_name = request.args.get('domain')
        epoch = request.args.get('epoch')
        port = request.args.get('port')

    res = api_validator(Domain.api_verify_if_domain_exist(domain_name))
    if res:
        return res

    domain = Domain.Domain(domain_name, port=port)

    dict_domain = domain.get_domain_metadata()
    dict_domain['domain'] = domain_name
    if domain.domain_was_up():
        dict_domain = {**dict_domain, **domain.get_domain_correlation()}
        dict_domain['correlation_nb'] = Domain.get_domain_total_nb_correlation(dict_domain)
        dict_domain['origin_item'] = domain.get_domain_last_origin()
        dict_domain['tags'] = domain.get_domain_tags()
        dict_domain['tags_safe'] = Tag.is_tags_safe(dict_domain['tags'])
        dict_domain['history'] = domain.get_domain_history_with_status()
        dict_domain['crawler_history'] = domain.get_domain_items_crawled(items_link=True, epoch=epoch,
                                                                         item_screenshot=True, item_tag=True) # # TODO: handle multiple port
        if dict_domain['crawler_history']['items']:
            dict_domain['crawler_history']['random_item'] = random.choice(dict_domain['crawler_history']['items'])

    return render_template("showDomain.html", dict_domain=dict_domain, bootstrap_label=bootstrap_label,
                           modal_add_tags=Tag.get_modal_add_tags(dict_domain['domain'], object_type="domain"))
def PasteSubmit_page():
    # Get all active tags/galaxy
    active_taxonomies = Tag.get_active_taxonomies()
    active_galaxies = Tag.get_active_galaxies()

    return render_template("submit_items.html",
                           active_taxonomies=active_taxonomies,
                           active_galaxies=active_galaxies)
def delete_decoded_file(obj_id):
    filepath = get_decoded_filepath(obj_id)
    if not os.path.isfile(filepath):
        return False
    Tag.delete_obj_tags(obj_id, 'decoded', Tag.get_obj_tag(obj_id))
    os.remove(filepath)
    return True
def get_domain_metadata(domain, domain_type, first_seen=True, last_ckeck=True, status=True,
                        ports=True, tags=False, tags_safe=False, languages=False, screenshot=False):
    '''
    Get Domain basic metadata

    :param domain: domain name
    :param domain_type: domain type
    :param first_seen: get domain first_seen
    :type first_seen: boolean
    :param last_ckeck: get domain last_check
    :type last_ckeck: boolean
    :param status: get domain up/down status
    :type status: boolean
    :param ports: get all domain ports
    :type ports: boolean
    :param tags: get all domain tags
    :type tags: boolean

    :return: a dict of all metadata for a given domain
    :rtype: dict
    '''
    dict_metadata = {}
    dict_metadata['id'] = domain
    dict_metadata['type'] = domain_type

    if first_seen:
        res = get_domain_first_seen(domain, domain_type=domain_type)
        if res is not None:
            dict_metadata['first_seen'] = res
    if last_ckeck:
        res = get_domain_last_check(domain, domain_type=domain_type)
        if res is not None:
            dict_metadata['last_check'] = res
    if status:
        dict_metadata['status'] = is_domain_up(domain, domain_type)
    if ports:
        dict_metadata['ports'] = get_domain_all_ports(domain, domain_type)
    if tags:
        dict_metadata['tags'] = get_domain_tags(domain)
    if tags_safe:
        if tags:
            dict_metadata['is_tags_safe'] = Tag.is_tags_safe(dict_metadata['tags'])
        else:
            dict_metadata['is_tags_safe'] = Tag.is_tags_safe(get_domain_tags(domain))
    if languages:
        dict_metadata['languages'] = Language.get_languages_from_iso(get_domain_languages(domain, r_list=True), sort=True)
    if screenshot:
        dict_metadata['screenshot'] = get_domain_random_screenshot(domain)
    return dict_metadata
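# Illustrative call of get_domain_metadata() above (a sketch; the domain name
# and type are placeholders and the domain is assumed to exist in the database):
#
#   meta = get_domain_metadata('example.onion', 'onion', tags=True, tags_safe=True)
#   # expected shape, based on the keys set in the function body:
#   # {'id': 'example.onion', 'type': 'onion', 'first_seen': ..., 'last_check': ...,
#   #  'status': ..., 'ports': [...], 'tags': [...], 'is_tags_safe': ...}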
def create_screenshot(obj_id, obj_meta, io_content):
    # # TODO: check if sha256
    res = save_screenshot_file(obj_id, io_content)
    if res:
        # create tags
        if 'tags' in obj_meta:
            # # TODO: handle mixed tags: taxonomies and Galaxies
            Tag.api_add_obj_tags(tags=obj_meta['tags'], object_id=obj_id, object_type="image")
        return True

    return False
def tag(bot, update):
    prep_query = re.findall(r'#\w+', update.message.text)
    if not prep_query:
        info = """ Usage: /tag <tagName> """
        bot.send_message(chat_id=update.message.chat_id, text=info)
    else:
        t = Tag(prep_query[0])
        urls = t.show()
        bot.send_message(chat_id=update.message.chat_id, text=urls)
def PasteSubmit_page():
    # Get all active tags/galaxy
    active_taxonomies = Tag.get_active_taxonomies()
    active_galaxies = Tag.get_active_galaxies()

    return render_template("submit_items.html",
                           active_taxonomies=active_taxonomies,
                           active_galaxies=active_galaxies,
                           text_max_size=text_max_size,
                           file_max_size=file_max_size,
                           allowed_extensions=allowed_extensions)
def create_paste(uuid, paste_content, ltags, ltagsgalaxies, name):

    now = datetime.datetime.now()
    save_path = 'submitted/' + now.strftime("%Y") + '/' + now.strftime("%m") + '/' + now.strftime("%d") + '/' + name + '.gz'

    full_path = filename = os.path.join(os.environ['AIL_HOME'],
                                        p.config.get("Directories", "pastes"), save_path)

    if os.path.isfile(full_path):
        addError(uuid, 'File: ' + save_path + ' already exists in submitted pastes')
        return 1

    try:
        gzipencoded = gzip.compress(paste_content)
        gzip64encoded = base64.standard_b64encode(gzipencoded).decode()
    except:
        abord_file_submission(uuid, "file error")
        return 1

    # use relative path
    rel_item_path = save_path.replace(PASTES_FOLDER, '', 1)

    # send paste to Global module
    relay_message = "{0} {1}".format(rel_item_path, gzip64encoded)
    p.populate_set_out(relay_message, 'Mixer')

    # increase nb of paste by feeder name
    r_serv_log_submit.hincrby("mixer_cache:list_feeder", "submitted", 1)

    # add tags
    for tag in ltags:
        Tag.add_tag('item', tag, rel_item_path)
    for tag in ltagsgalaxies:
        Tag.add_tag('item', tag, rel_item_path)

    r_serv_log_submit.incr(uuid + ':nb_end')
    r_serv_log_submit.incr(uuid + ':nb_sucess')
    if r_serv_log_submit.get(uuid + ':nb_end') == r_serv_log_submit.get(uuid + ':nb_total'):
        r_serv_log_submit.set(uuid + ':end', 1)

    print('    {} send to Global'.format(rel_item_path))
    r_serv_log_submit.sadd(uuid + ':paste_submit_link', rel_item_path)

    curr_date = datetime.date.today()
    serv_statistics.hincrby(curr_date.strftime("%Y%m%d"), 'submit_paste', 1)
    return 0
def get_crawler_matadata(item_id, ltags=None):
    dict_crawler = {}
    if is_crawled(item_id):
        dict_crawler['domain'] = get_item_domain(item_id)
        if not ltags:
            ltags = Tag.get_obj_tag(item_id)
        dict_crawler['is_tags_safe'] = Tag.is_tags_safe(ltags)
        dict_crawler['url'] = get_item_link(item_id)
        dict_crawler['screenshot'] = get_item_screenshot(item_id)
        dict_crawler['har'] = get_item_har_name(item_id)
    return dict_crawler
def getDocs(self):
    # reads files in the library file directory and converts them to Document objects.
    if not os.path.exists('docfiles'):
        os.makedirs('docfiles')
    for filename in os.listdir('docfiles'):
        if filename.endswith('.txt'):
            filepath = 'docfiles' + os.sep + filename
            with open(filepath, "rt") as f:
                contents = f.read()
            doctags = []
            docContents = []
            pagetags = []
            convertedTagList = []
            pages = []
            section = contents.split("(/)")[1:]
            try:
                assert (section[0].startswith("Title:"))
                assert (section[1].startswith("Doctags:"))
                assert (section[2].startswith("Pages:"))
                assert (section[3].startswith("Pagetags:"))
            except AssertionError:
                print("Attempted to read invalid document.")
                continue
            title = section[0][len("Title:"):].strip()
            if not section[1][len("Doctags:"):].isspace():
                doctagstr = section[1][len("Doctags:"):].strip()
                for tag in doctagstr.split(","):
                    doctags.append(Tag(tag.strip()))
            if not section[2][len("Pages:"):].isspace():
                pagestr = section[2][len("Pages:"):].strip()
                for elem in pagestr.split("<pwords>"):
                    docContents.append(str(elem.strip()))
                pagetagstr = section[3][len("Pagetags:"):].strip()
                for elem in pagetagstr.split("<tname>"):
                    if not elem.isspace():
                        pagetags.append(elem.strip())  # pagetags is a list with a string of tags per page
                for taglist in pagetags:
                    convertedPageTags = []
                    for tag in taglist.split(","):
                        if not tag.isspace() and not tag == "":
                            convertedPageTags.append(Tag(tag.strip()))
                    convertedTagList.append(convertedPageTags)
            # call the functions on the file path (the original only referenced them)
            makeTime = os.path.getctime(filepath)
            editTime = os.path.getmtime(filepath)
            for i in range(len(docContents)):
                pages.append(Page(self, docContents[i], convertedTagList[i]))
            self.documents.append(Document(self, filename, title, f"{makeTime}", f"{editTime}", doctags, pages))
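# Illustrative docfile layout that getDocs() above expects, inferred from the
# split("(/)") / startswith checks in the parser (the titles, tags and page
# text are placeholders, not taken from the project):
#
#   (/)Title: Example document
#   (/)Doctags: research, notes
#   (/)Pages: first page text<pwords>second page text
#   (/)Pagetags: intro, draft<tname>details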
def update_domain_tags(domain):
    domain_sanitized = sanitize_domain(domain)
    if domain != domain_sanitized:
        r_serv_onion.sadd('incorrect_domain', domain)
        domain = domain_sanitized

    domain_tags = Tag.get_obj_tag(domain)
    for tag in domain_tags:
        # delete incorrect tags
        if tag == 'infoleak:submission="crawler"' or tag == 'infoleak:submission="manual"':
            r_serv_metadata.srem('tag:{}'.format(domain), tag)
        else:
            Tag.add_global_tag(tag, object_type='domain')
            r_serv_tags.sadd('{}:{}'.format('domain', tag), domain)
def sourceLIDARThread(pipeFromC, pipeToC, network_send_queue, start_time, interval):
    # Start the connection with the LIDAR through pipes
    lidarDevice = communication.connectLIDAR(pipeFromC, pipeToC)
    target = interval

    # Init the LIDAR processing class
    lidarRecognition = lidar_recognition.LIDAR(time.time())

    # Wait for 2 seconds before we start everything
    time.sleep(2)

    start, end = lidarDevice.getFromC()
    lidarcoordinates, lidartimestamp = lidarRecognition.processLidarFrame(lidarDevice.parseFromC(), start)

    index = 0

    # Now wait for input
    while 1:
        if (round(time.time(), 3) % target) == 0.000:
            # Take the LIDAR frame and process
            #print("LIDAR start " + str(time.time()))
            start, end = lidarDevice.getFromC()
            #print("LIDAR got " + str(time.time()))
            lidarcoordinates, lidartimestamp = lidarRecognition.processLidarFrame(lidarDevice.parseFromC(), start)
            localization = [lidarDevice.localizationX, lidarDevice.localizationY, lidarDevice.localizationYaw, index, start]
            #print("LIDAR detect " + str(time.time()))

            # Prep value to be sent to the part of the program
            output_tag = Tag.Tag('local_fusion', 'localization', start, end)
            output_token = Token.Token(output_tag, localization)
            recipient_name = 'CAV1'
            network_send_queue.put((output_token, recipient_name))  # send to named device, e.g. CAV1, RSU

            # Prep value to be sent to the part of the program
            output_tag = Tag.Tag('local_fusion', 'lidar_obs', start, end)
            output_token = Token.Token(output_tag, lidarcoordinates)
            recipient_name = 'CAV1'
            network_send_queue.put((output_token, recipient_name))  # send to named device, e.g. CAV1, RSU

            # Prep value to be sent to the part of the program
            output_tag = Tag.Tag('actuate', 'response', start, end)
            output_token = Token.Token(output_tag, [localization, None])
            recipient_name = 'CAV1'
            network_send_queue.put((output_token, recipient_name))  # send to named device, e.g. CAV1, RSU

            # Log this to a file
            index += 1
def confirm_tag():
    # TODO: verify input
    path = request.args.get('paste')
    tag = request.args.get('tag')

    # tags like 'infoleak:automatic-detection="..."' carry 'automatic-detection' at positions 9:28
    if tag[9:28] == 'automatic-detection':
        Tag.remove_item_tag(tag, path)

        tag = tag.replace('automatic-detection', 'analyst-detection', 1)
        # add analyst tag
        Tag.add_item_tag(tag, path)

        return redirect(url_for('showsavedpastes.showsavedpaste', paste=path))

    return 'incompatible tag'
def get_item(request_dict):
    if not request_dict:
        return Response({'status': 'error', 'reason': 'Malformed JSON'}, 400)

    item_id = request_dict.get('id', None)
    if not item_id:
        return ({'status': 'error', 'reason': 'Mandatory parameter(s) not provided'}, 400)
    if not exist_item(item_id):
        return ({'status': 'error', 'reason': 'Item not found'}, 404)

    dict_item = {}
    dict_item['id'] = item_id

    date = request_dict.get('date', True)
    if date:
        dict_item['date'] = get_item_date(item_id)
    tags = request_dict.get('tags', True)
    if tags:
        dict_item['tags'] = Tag.get_item_tags(item_id)

    size = request_dict.get('size', False)
    if size:
        dict_item['size'] = get_item_size(item_id)

    content = request_dict.get('content', False)
    if content:
        # UTF-8 output, # TODO: use base64
        dict_item['content'] = get_item_content(item_id)

    lines_info = request_dict.get('lines', False)
    if lines_info:
        dict_item['lines'] = get_lines_info(item_id, dict_item.get('content', 'None'))

    return (dict_item, 200)
def get_problem(conn, id):
    # Get problem by ID
    # construct the full problem object

    # 1. First get all the fields from the Problem table
    problem, error = db_get_problem(conn, id)
    if problem is None:
        return None, error
    full_problem = row_to_dictionary(problem, COLUMNS)

    # 2. Now get all the tags from the ProblemTag table
    problem_tags = Tag.get_problem_tags(conn, id)
    if problem_tags is None:
        return None, error
    tags = []
    for tag in problem_tags:  # iterate over the returned rows (the original looped over the empty list)
        tags.append(row_to_dictionary(tag, ["id", "title"]))
    full_problem["tags"] = tags

    # 3. Finally get all the tests from the Test table
    problem_tests = Test.get_tests(conn, id)
    if problem_tests is None:
        return None, error
    tests = []
    for test in problem_tests:
        tests.append(row_to_dictionary(test, ["test_id", "test_code", "test_output"]))
    full_problem["tests"] = tests

    return full_problem, error
def process_Token(words):
    assert(isinstance(words, str))
    token_list = []
    word_len = len(words)
    if word_len == 1:
        token_list.append(T.Token(words[0], T.Tag_BMES.S.name))
    else:
        for i in range(word_len):
            word = words[i]
            if i == 0:
                token_list.append(T.Token(word, T.Tag_BMES.B.name))
            elif i == (word_len - 1):
                token_list.append(T.Token(word, T.Tag_BMES.E.name))
            else:
                token_list.append(T.Token(word, T.Tag_BMES.M.name))
    return token_list
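# Minimal, self-contained sketch of the BMES labelling scheme used by
# process_Token() above (B = begin, M = middle, E = end, S = single-character
# word). The helper below is hypothetical and only mirrors the branch logic;
# the real code builds T.Token objects from the module imported as `T`.
def bmes_labels(word):
    # one character -> S; otherwise B, then M for inner characters, then E
    if len(word) == 1:
        return ['S']
    return ['B'] + ['M'] * (len(word) - 2) + ['E']

assert bmes_labels('a') == ['S']
assert bmes_labels('word') == ['B', 'M', 'M', 'E']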
def import_item():
    data = request.get_json()
    if not data:
        return Response(json.dumps({'status': 'error', 'reason': 'Malformed JSON'}, indent=2, sort_keys=True),
                        mimetype='application/json'), 400

    # unpack json
    text_to_import = data.get('text', None)
    if not text_to_import:
        return Response(json.dumps({'status': 'error', 'reason': 'No text supplied'}, indent=2, sort_keys=True),
                        mimetype='application/json'), 400

    tags = data.get('tags', [])
    if not isinstance(tags, list):
        tags = []
    galaxy = data.get('galaxy', [])
    if not isinstance(galaxy, list):
        galaxy = []

    if not Tag.is_valid_tags_taxonomies_galaxy(tags, galaxy):
        return Response(json.dumps({'status': 'error', 'reason': 'Tags or Galaxy not enabled'}, indent=2, sort_keys=True),
                        mimetype='application/json'), 400

    default_tags = data.get('default_tags', True)
    if default_tags:
        tags.append('infoleak:submission="manual"')

    if sys.getsizeof(text_to_import) > 900000:
        return Response(json.dumps({'status': 'error', 'reason': 'Size exceeds default'}, indent=2, sort_keys=True),
                        mimetype='application/json'), 413

    UUID = str(uuid.uuid4())
    Import_helper.create_import_queue(tags, galaxy, text_to_import, UUID)

    return Response(json.dumps({'uuid': UUID}, indent=2, sort_keys=True), mimetype='application/json')
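# Illustrative client-side call for the import endpoint above (a sketch; the
# URL, route, API key and tag values are placeholders and depend on how the
# Flask blueprint is mounted and on the locally enabled taxonomies):
#
#   import requests
#   payload = {'text': 'text to import',
#              'tags': ['infoleak:analyst-detection="credential"'],
#              'galaxy': [],
#              'default_tags': True}
#   r = requests.post('https://localhost:7000/api/v1/import/item',
#                     json=payload, headers={'Authorization': '<api_key>'}, verify=False)
#   # on success the endpoint returns {'uuid': '<import uuid>'}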
def openFile(self):
    if self.hasLogin:
        dialog = QFileDialog()
        # dayu_theme.apply(dialog)
        dialog.setStyleSheet(Data.getQSS())
        # set the default file filters for the open dialog
        dialog.setNameFilter(u"资源文件(*.png);;图片文件(*.jpg *.png *.jpeg);;")  # "resource files (*.png);;image files (...)"
        # load the selected files
        dialog.setFileMode(QFileDialog.ExistingFiles)
        dialog.setViewMode(QFileDialog.Detail)
        self.paths = None
        if dialog.exec_():
            self.paths = dialog.selectedFiles()
        if self.paths == None:
            return
        # create the tag selection window
        self.tagWidget = Tag.TagWidget(self.paths, self.userName)
        self.tagWidget.show()
        Data.setWindowCenter(self.tagWidget)
        # connect the add-tag signal to the central window's add-tag slot
        self.tagWidget.send_tag_signal.connect(self.cenWinAddTags)
        self.tagWidget.load_file_signal.connect(self.slot_show_loading)
    else:
        # not logged in: ask the user to log in first
        self.slot_show_message(MMessage.info, (u'请先登录账号!'))  # "Please log in to your account first!"
def get_domain_tags(domain):
    '''
    Return all tags of a given domain.

    :param domain: crawled domain
    '''
    return Tag.get_obj_tag(domain)
def test_0007_api_add_item_tag(self):
    tags_to_add = ["infoleak:analyst-detection=\"api-key\""]
    current_item_tag = Tag.get_item_tags(self.__class__.item_id)
    current_item_tag.append(tags_to_add[0])

    #galaxy_to_add = ["misp-galaxy:stealer=\"Vidar\""]
    input_json = {"id": self.__class__.item_id, "tags": tags_to_add}
    req = self.client.post('/api/v1/add/item/tag', json=input_json, headers={'Authorization': self.apikey})
    req_json = parse_response(self, req)
    item_tags = req_json['tags']
    self.assertEqual(item_tags, tags_to_add)

    new_item_tag = Tag.get_item_tags(self.__class__.item_id)
    self.assertCountEqual(new_item_tag, current_item_tag)
def get_domain_items_crawled(domain, domain_type, port, epoch=None, items_link=False, item_screenshot=False, item_tag=False):
    '''
    Get the items crawled for a given domain/port/epoch (root item epoch, date and child items).
    '''
    item_crawled = {}
    item_root = get_domain_crawled_item_root(domain, domain_type, port, epoch=epoch)
    if item_root:
        item_crawled['port'] = port
        item_crawled['epoch'] = item_root['epoch']
        item_crawled['date'] = time.strftime('%Y/%m/%d - %H:%M.%S', time.gmtime(item_root['epoch']))
        item_crawled['items'] = []
        if item_root['root_item'] != str(item_root['epoch']):
            for item in get_domain_items(domain, item_root['root_item']):
                dict_item = {"id": item}
                if items_link:
                    dict_item['link'] = Item.get_item_link(item)
                if item_screenshot:
                    dict_item['screenshot'] = Item.get_item_screenshot(item)
                if item_tag:
                    dict_item['tags'] = Tag.get_obj_tags_minimal(item)
                item_crawled['items'].append(dict_item)
    return item_crawled
def confirm_tag():
    # TODO: verify input
    path = request.args.get('paste')
    tag = request.args.get('tag')

    # tags like 'infoleak:automatic-detection="..."' carry 'automatic-detection' at positions 9:28
    if tag[9:28] == 'automatic-detection':
        Tag.api_delete_obj_tags(tags=[tag], object_id=path, object_type="item")

        tag = tag.replace('automatic-detection', 'analyst-detection', 1)
        # add analyst tag
        Tag.add_tag('item', tag, path)

        return redirect(url_for('objects_item.showItem', id=path))

    return 'incompatible tag'
def get_item(request_dict):
    if not request_dict:
        return Response({'status': 'error', 'reason': 'Malformed JSON'}, 400)

    item_id = request_dict.get('id', None)
    if not item_id:
        return ({'status': 'error', 'reason': 'Mandatory parameter(s) not provided'}, 400)
    if not exist_item(item_id):
        return ({'status': 'error', 'reason': 'Item not found'}, 404)

    dict_item = {}
    dict_item['id'] = item_id

    date = request_dict.get('date', True)
    if date:
        add_separator = False
        if request_dict.get('date_separator', False):
            add_separator = True
        dict_item['date'] = get_item_date(item_id, add_separator=add_separator)

    tags = request_dict.get('tags', True)
    if tags:
        dict_item['tags'] = Tag.get_obj_tag(item_id)

    size = request_dict.get('size', False)
    if size:
        dict_item['size'] = get_item_size(item_id)

    content = request_dict.get('content', False)
    if content:
        # UTF-8 output, # TODO: use base64
        dict_item['content'] = get_item_content(item_id)

    raw_content = request_dict.get('raw_content', False)
    if raw_content:
        dict_item['raw_content'] = get_raw_content(item_id)

    lines_info = request_dict.get('lines', False)
    if lines_info:
        dict_item['lines'] = get_lines_info(item_id, dict_item.get('content', 'None'))

    if request_dict.get('pgp'):
        dict_item['pgp'] = {}
        if request_dict['pgp'].get('key'):
            dict_item['pgp']['key'] = get_item_pgp_key(item_id)
        if request_dict['pgp'].get('mail'):
            dict_item['pgp']['mail'] = get_item_pgp_mail(item_id)
        if request_dict['pgp'].get('name'):
            dict_item['pgp']['name'] = get_item_pgp_name(item_id)

    if request_dict.get('cryptocurrency'):
        dict_item['cryptocurrency'] = {}
        if request_dict['cryptocurrency'].get('bitcoin'):
            dict_item['cryptocurrency']['bitcoin'] = get_item_bitcoin(item_id)

    return (dict_item, 200)
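# Illustrative request_dict for get_item() above (a sketch; the field names
# follow the checks in the function body and the item id is a placeholder):
#
#   request_dict = {'id': 'submitted/2020/01/01/example.gz',
#                   'date': True, 'date_separator': True,
#                   'tags': True, 'content': True, 'lines': True,
#                   'pgp': {'key': True, 'mail': True, 'name': True},
#                   'cryptocurrency': {'bitcoin': True}}
#   dict_item, status_code = get_item(request_dict)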
def get_card_metadata(object_type, correlation_id, type_id=None, expand_card=False):
    card_dict = {}
    if object_type == 'cryptocurrency':
        card_dict["sparkline"] = Cryptocurrency.cryptocurrency.get_list_nb_previous_correlation_object(type_id, correlation_id, 6)
        card_dict["icon"] = Correlate_object.get_correlation_node_icon(object_type, type_id)
        if type_id == 'bitcoin' and expand_card:
            card_dict["related_btc"] = btc_ail.get_bitcoin_info(correlation_id)
    elif object_type == 'pgp':
        card_dict["sparkline"] = Pgp.pgp.get_list_nb_previous_correlation_object(type_id, correlation_id, 6)
        card_dict["icon"] = Correlate_object.get_correlation_node_icon(object_type, type_id)
    elif object_type == 'username':
        card_dict["sparkline"] = telegram.correlation.get_list_nb_previous_correlation_object(type_id, correlation_id, 6)
        card_dict["icon"] = Correlate_object.get_correlation_node_icon(object_type, type_id)
    elif object_type == 'decoded':
        card_dict["sparkline"] = Decoded.get_list_nb_previous_hash(correlation_id, 6)
        card_dict["icon"] = Correlate_object.get_correlation_node_icon(object_type, value=correlation_id)
        card_dict["vt"] = Decoded.get_decoded_vt_report(correlation_id)
        card_dict["vt"]["status"] = vt_enabled
        card_dict["add_tags_modal"] = Tag.get_modal_add_tags(correlation_id, object_type='decoded')
    elif object_type == 'domain':
        card_dict["icon"] = Correlate_object.get_correlation_node_icon(object_type, value=correlation_id)
        card_dict["tags"] = Domain.get_domain_tags(correlation_id)
    elif object_type == 'screenshot':
        card_dict["add_tags_modal"] = Tag.get_modal_add_tags(correlation_id, object_type='image')
    elif object_type == 'paste':
        card_dict["icon"] = Correlate_object.get_correlation_node_icon(object_type, value=correlation_id)
    return card_dict
def get_value(hoge):
    t2.delete(0, tk.END)
    if t0.tag_ranges(tk.SEL):
        t2.insert(
            tk.END,
            tag_list[tagID.get()][1]
            + str(tag_dict[tag_list[tagID.get()][0]])
            + ",text:"
            + t0.get(t0.index(tk.SEL_FIRST), t0.index(tk.SEL_LAST)),
        )
    else:
        t2.insert(tk.END, tag_list[tagID.get()][1] + str(tag_dict[tag_list[tagID.get()][0]]))
    ID_dict = Tag.get_id()
    entry_dict = Tag.get_entry()
    if len(ID_dict.keys()) > 0:
        for k, v in ID_dict.items():
            if v.get() != "null":
                t2.insert(tk.END, "," + k + ":" + v.get())
    if len(entry_dict.keys()) > 0:
        for k, v in entry_dict.items():
            if v.get() != "":
                t2.insert(tk.END, "," + k + ":" + v.get())
def addJSONLinksByUser(data, userId):
    #FIXME: change the insertion algorithm to SQL queries that insert data as zip, json or ...
    #curTime = time.time()
    query = {"linksQuery": StringHolder("INSERT INTO link (id, user_id, url, url_hash, title, description, type_name, modified_at) VALUES\n"),
             "updateLinkSubQuery": {"id": StringHolder(""),
                                    "title": StringHolder(""),
                                    "description": StringHolder(""),
                                    "type_name": StringHolder(""),
                                    "modified_at": StringHolder("")},
             "tagsQuery": StringHolder("INSERT INTO tag (id, user_id, name) VALUES\n"),
             "mapsQuery": StringHolder("INSERT INTO link_tag_map (tag_id, link_id) VALUES\n"),
             "mdQuery": StringHolder("INSERT INTO meta_data (link_id, l_key, value) VALUES\n")}
    tags = []
    savedTags = tagQM.getTagNames(userId)
    savedLinks = linkQM.getLinkUrlsAndIds(userId)
    parseData(userId, data, tags, query, savedLinks, savedTags)
    #print "calculating time: " + str(utils.timeDifference(curTime)) + " ms"
    #curTime = time.time()

    conn = db.connect()
    cursor = conn.cursor()
    if not query["linksQuery"].value.endswith("VALUES\n"):
        cursor.execute(query["linksQuery"].value[:-2])
    if not query["mdQuery"].value.endswith("VALUES\n"):
        cursor.execute(query["mdQuery"].value[:-2])
    if not query["tagsQuery"].value.endswith("VALUES\n"):
        cursor.execute(query["tagsQuery"].value[:-2])
    #FIXME: check if the link refers to another tag
    if not query["mapsQuery"].value.endswith("VALUES\n"):
        cursor.execute(query["mapsQuery"].value[:-2])
    if query["updateLinkSubQuery"]["id"].value != "":
        cursor.execute(buildUpdateQuery(query["updateLinkSubQuery"]))
    conn.commit()
    cursor.close()
    #print "database request time: " + str(utils.timeDifference(curTime)) + " ms"
    return
t2.place(x=10, y=610)
confirm.place(x=530, y=640)
submit.place(x=533, y=665)
finish.place(x=537, y=690)
confirm.bind("<ButtonRelease>", get_value)
submit.bind("<ButtonRelease>", annotate)
finish.bind("<ButtonRelease>", stringize)

# insert the file contents into t0
for element in text:
    t0.insert(tk.END, " " + element)

# select the tag used for annotation
read_tag_and_attribute()
tag_rb_list = []
i = 0
while i < len(tag_list):
    tag_rb_list.append(Tag.TagRB(tag_list[i], tagID, i))
    Tag.place(tag_rb_list[i])
    i += 1

tag_confirm = tk.Button(text="choose tag")
tag_confirm.place(x=900, y=10 + i * 25)


def selected(hoge):
    Tag.tag_selected(tag_rb_list[tagID.get()])


tag_confirm.bind("<ButtonRelease>", selected)

# end of the setup code
root.mainloop()
def selected(hoge):
    Tag.tag_selected(tag_rb_list[tagID.get()])
def _newTag(self, tagNm, nl):
    tmp = Tag(tagNm)
    self.tagList.insert(0, tmp.tagName)
    self._write(tmp._openTag(), nl)
    return tmp
def _closeTags(self, tlist):
    close = Tag()
    for item in tlist:
        #print item
        self._write(close._closeTag(item), 1)