Example No. 1
def search():
    query = request.form['query']
    q = []
    q.append(query)
    r = []  #complete path
    c = []  #preview of the paste content
    paste_date = []
    paste_size = []
    num_elem_to_get = 50

    # Search filename
    for path in r_serv_pasteName.smembers(q[0]):
        r.append(path)
        paste = Paste.Paste(path)
        content = paste.get_p_content().decode('utf8', 'ignore')
        content_range = max_preview_char if len(
            content) > max_preview_char else len(content) - 1
        c.append(content[0:content_range])
        curr_date = str(paste._get_p_date())
        curr_date = curr_date[0:4] + '/' + curr_date[4:6] + '/' + curr_date[6:]
        paste_date.append(curr_date)
        paste_size.append(paste._get_p_size())

    # Search full line
    from whoosh import index
    from whoosh.fields import Schema, TEXT, ID
    schema = Schema(title=TEXT(stored=True),
                    path=ID(stored=True),
                    content=TEXT)

    indexpath = os.path.join(os.environ['AIL_HOME'],
                             cfg.get("Indexer", "path"))
    ix = index.open_dir(indexpath)
    from whoosh.qparser import QueryParser
    with ix.searcher() as searcher:
        query = QueryParser("content", ix.schema).parse(" ".join(q))
        results = searcher.search_page(query, 1, pagelen=num_elem_to_get)
        for x in results:
            r.append(x.items()[0][1])
            paste = Paste.Paste(x.items()[0][1])
            content = paste.get_p_content().decode('utf8', 'ignore')
            content_range = max_preview_char if len(
                content) > max_preview_char else len(content) - 1
            c.append(content[0:content_range])
            curr_date = str(paste._get_p_date())
            curr_date = curr_date[0:4] + '/' + curr_date[
                4:6] + '/' + curr_date[6:]
            paste_date.append(curr_date)
            paste_size.append(paste._get_p_size())
        results = searcher.search(query)
        num_res = len(results)

    return render_template("search.html",
                           r=r,
                           c=c,
                           query=request.form['query'],
                           paste_date=paste_date,
                           paste_size=paste_size,
                           char_to_display=max_preview_modal,
                           num_res=num_res)
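For context, a minimal self-contained sketch of the Whoosh flow these search handlers rely on; the index directory and documents below are invented for illustration. Reading stored fields by name (hit["path"]) also avoids the fragile x.items()[0][1] indexing used above.

# Hedged sketch of the Whoosh index/search pattern (hypothetical data).
import tempfile

from whoosh import index
from whoosh.fields import Schema, TEXT, ID
from whoosh.qparser import QueryParser

schema = Schema(title=TEXT(stored=True), path=ID(stored=True), content=TEXT)

indexdir = tempfile.mkdtemp()            # stand-in for cfg.get("Indexer", "path")
ix = index.create_in(indexdir, schema)

writer = ix.writer()
writer.add_document(title=u"demo", path=u"/demo/paste.gz",
                    content=u"leaked credentials demo")
writer.commit()

with ix.searcher() as searcher:
    query = QueryParser("content", ix.schema).parse("credentials")
    results = searcher.search_page(query, 1, pagelen=50)  # first page, max 50 hits
    for hit in results:
        print(hit["path"])               # stored fields are addressable by name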
Example No. 2
def search():
    query = request.form['query']
    q = []
    q.append(query)
    r = [] #complete path
    c = [] #preview of the paste content
    paste_date = []
    paste_size = []
    index_name = request.form['index_name']
    num_elem_to_get = 50

    # select correct index
    if index_name is None or index_name == "0":
        selected_index = get_current_index()
    else:
        selected_index = os.path.join(baseindexpath, index_name)

    # Search filename
    for path in r_serv_pasteName.smembers(q[0]):
        r.append(path)
        paste = Paste.Paste(path)
        content = paste.get_p_content().decode('utf8', 'ignore')
        content_range = max_preview_char if len(content)>max_preview_char else len(content)-1
        c.append(content[0:content_range])
        curr_date = str(paste._get_p_date())
        curr_date = curr_date[0:4]+'/'+curr_date[4:6]+'/'+curr_date[6:]
        paste_date.append(curr_date)
        paste_size.append(paste._get_p_size())

    # Search full line
    schema = Schema(title=TEXT(stored=True), path=ID(stored=True), content=TEXT)

    ix = index.open_dir(selected_index)
    with ix.searcher() as searcher:
        query = QueryParser("content", ix.schema).parse(" ".join(q))
        results = searcher.search_page(query, 1, pagelen=num_elem_to_get)
        for x in results:
            r.append(x.items()[0][1])
            paste = Paste.Paste(x.items()[0][1])
            content = paste.get_p_content().decode('utf8', 'ignore')
            content_range = max_preview_char if len(content)>max_preview_char else len(content)-1
            c.append(content[0:content_range])
            curr_date = str(paste._get_p_date())
            curr_date = curr_date[0:4]+'/'+curr_date[4:6]+'/'+curr_date[6:]
            paste_date.append(curr_date)
            paste_size.append(paste._get_p_size())
        results = searcher.search(query)
        num_res = len(results)

    index_min = 1
    index_max = len(get_index_list())
    return render_template("search.html", r=r, c=c, 
            query=request.form['query'], paste_date=paste_date, 
            paste_size=paste_size, char_to_display=max_preview_modal, 
            num_res=num_res, index_min=index_min, index_max=index_max,
            index_list=get_index_list(selected_index)
           )
Example No. 3
def showDiff():
    s1 = request.args.get('s1', '')
    s2 = request.args.get('s2', '')
    p1 = Paste.Paste(s1)
    p2 = Paste.Paste(s2)
    maxLengthLine1 = p1.get_lines_info()[1]
    maxLengthLine2 = p2.get_lines_info()[1]
    if maxLengthLine1 > DiffMaxLineLength or maxLengthLine2 > DiffMaxLineLength:
        return "Can't make the difference as the lines are too long."
    htmlD = difflib.HtmlDiff()
    lines1 = p1.get_p_content().splitlines()
    lines2 = p2.get_p_content().splitlines()
    the_html = htmlD.make_file(lines1, lines2)
    return the_html
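Stripped of the Paste plumbing, the diff step is plain difflib; a minimal sketch:

# difflib.HtmlDiff().make_file() returns a complete HTML page containing
# a side-by-side diff table of the two line lists.
import difflib

lines1 = "one\ntwo\nthree".splitlines()
lines2 = "one\n2\nthree".splitlines()
html_diff = difflib.HtmlDiff().make_file(lines1, lines2)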
Example No. 4
def post(self):
    key = self.request.get("key")
    if key != APP_KEY:
        self.error(404)
        return

    content = self.request.get("content")
    if content in (None, ""):
        self.error(400)
        return

    title = self.request.get("title")
    mime = self.request.get("mime") or "application/octet-stream"

    keyName = str(uuid4())

    # check if the paste is code
    if self.request.get("type") == "code":
        data = {"code": content}
        path = os.path.join(os.path.dirname(__file__), 'html/pasteShow.html')
        content = render(path, {'DATA': data})
        mime = "text/html"

    Paste.Paste(title=title, content=db.Blob(str(content)), key_name=keyName, mime=mime).put()

    url = self.request.url[:self.request.url.index("/paste")] + "/" + keyName + "/paste"

    # add a possible extension to the link
    ext = mimetypes.guess_extension(mime)
    if ext:
        url += ext

    self.response.out.write(json.dumps({"result": "success", "url": ShortLink.getShortUrl(url)}))
    self.response.out.write("\r\n")
Example No. 5
def importantPasteByModule():
    module_name = request.args.get('moduleName')

    all_content = []
    paste_date = []
    paste_linenum = []
    all_path = []

    for path in getPastebyType(r_serv_db, module_name):
        all_path.append(path)
        paste = Paste.Paste(path)
        content = paste.get_p_content().decode('utf8', 'ignore')
        content_range = max_preview_char if len(
            content) > max_preview_char else len(content) - 1
        all_content.append(content[0:content_range])
        curr_date = str(paste._get_p_date())
        curr_date = curr_date[0:4] + '/' + curr_date[4:6] + '/' + curr_date[6:]
        paste_date.append(curr_date)
        paste_linenum.append(paste.get_lines_info()[0])

    return render_template("important_paste_by_module.html",
                           all_path=all_path,
                           content=all_content,
                           paste_date=paste_date,
                           paste_linenum=paste_linenum,
                           char_to_display=max_preview_modal)
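The YYYYMMDD to YYYY/MM/DD reshuffling above reappears in nearly every handler in this collection; a small helper like this (hypothetical, not part of the Paste API) would remove the duplication:

def format_paste_date(yyyymmdd):
    # turn the 'YYYYMMDD' string returned by paste._get_p_date()
    # into the 'YYYY/MM/DD' form the templates expect
    s = str(yyyymmdd)
    return '{}/{}/{}'.format(s[0:4], s[4:6], s[6:8])

# usage: paste_date.append(format_paste_date(paste._get_p_date()))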
Example No. 6
def terms_management_query_paste():
    term = request.args.get('term')
    paste_info = []

    # check if regex or not
    if term.startswith('/') and term.endswith('/'):
        set_paste_name = "regex_" + term
        track_list_path = r_serv_term.smembers(set_paste_name)
    elif term.startswith('\\') and term.endswith('\\'):
        set_paste_name = "set_" + term
        track_list_path = r_serv_term.smembers(set_paste_name)
    else:
        set_paste_name = "tracked_" + term
        track_list_path = r_serv_term.smembers(set_paste_name)

    for path in track_list_path:
        paste = Paste.Paste(path)
        p_date = str(paste._get_p_date())
        p_date = p_date[0:4]+'/'+p_date[4:6]+'/'+p_date[6:8]
        p_source = paste.p_source
        p_size = paste.p_size
        p_mime = paste.p_mime
        p_lineinfo = paste.get_lines_info()
        p_content = paste.get_p_content()
        if p_content:
            # limit the preview to the first 400 characters
            p_content = p_content[0:400]
        paste_info.append({"path": path, "date": p_date, "source": p_source, "size": p_size, "mime": p_mime, "lineinfo": p_lineinfo, "content": p_content})

    return jsonify(paste_info)
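The handler assumes Redis sets keyed by prefixed term names; a minimal sketch of that layout, with hypothetical connection parameters and keys:

import redis

r_serv_term = redis.StrictRedis(host='localhost', port=6379, db=0,
                                decode_responses=True)

# each tracked term maps to the set of paste paths that matched it
r_serv_term.sadd('tracked_password', '/archive/2018/01/01/demo.gz')
print(r_serv_term.smembers('tracked_password'))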
Example No. 7
def terms_management_query_paste():
    term = request.args.get('term')
    TrackedTermsSet_Name = "TrackedSetTermSet"
    paste_info = []

    set_paste_name = "tracked_" + term
    track_list_path = r_serv_term.smembers(set_paste_name)

    for path in track_list_path:
        paste = Paste.Paste(path)
        p_date = str(paste._get_p_date())
        p_date = p_date[6:] + '/' + p_date[4:6] + '/' + p_date[0:4]
        p_source = paste.p_source
        p_encoding = paste._get_p_encoding()
        p_size = paste.p_size
        p_mime = paste.p_mime
        p_lineinfo = paste.get_lines_info()
        p_content = paste.get_p_content().decode('utf-8', 'ignore')
        if p_content:
            # limit the preview to the first 400 characters
            p_content = p_content[0:400]
        paste_info.append({
            "path": path,
            "date": p_date,
            "source": p_source,
            "encoding": p_encoding,
            "size": p_size,
            "mime": p_mime,
            "lineinfo": p_lineinfo,
            "content": p_content
        })

    return jsonify(paste_info)
Example No. 8
def SuperMainCode():
    while True:
        OperationChoice = input("Select an operation. If you have not read the instructions, enter '--help'. To exit, enter 'exit':\t\t")

        if OperationChoice == "--help":
            Functions.Info()
        elif OperationChoice == "cls":
            Functions.ClearConsole()
        elif OperationChoice == "exit":
            Functions.Exit()
            break
            
        elif OperationChoice == "1" or OperationChoice == "copy":
            Paste.CopyPaste()

        elif OperationChoice == "2" or OperationChoice == "create":
            CreateNew.CreateNewFile()

        elif OperationChoice == "3" or OperationChoice == "edit":
            EditFile.EditFile()

        elif OperationChoice == "4" or OperationChoice == "rename":
            RenameFile.RenameFile()

        elif OperationChoice == "5" or OperationChoice == "remove":
            RemoveFile.RemoveFile()
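The same menu can also be written as a dispatch table instead of an if/elif chain; a sketch, assuming the same Functions, Paste, CreateNew, EditFile, RenameFile and RemoveFile modules as above:

# Hypothetical dispatch-table variant of the menu loop above;
# assumes the same module imports as the example.
ACTIONS = {
    "--help": Functions.Info,
    "cls": Functions.ClearConsole,
    "1": Paste.CopyPaste,         "copy": Paste.CopyPaste,
    "2": CreateNew.CreateNewFile, "create": CreateNew.CreateNewFile,
    "3": EditFile.EditFile,       "edit": EditFile.EditFile,
    "4": RenameFile.RenameFile,   "rename": RenameFile.RenameFile,
    "5": RemoveFile.RemoveFile,   "remove": RemoveFile.RemoveFile,
}

def SuperMainCode():
    while True:
        choice = input("Select an operation ('--help' for instructions, 'exit' to quit): ")
        if choice == "exit":
            Functions.Exit()
            break
        action = ACTIONS.get(choice)
        if action is not None:
            action()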
Example No. 9
def credentials_management_query_paste():
    cred = request.args.get('cred')
    allPath = request.json['allPath']

    paste_info = []
    for pathNum in allPath:
        path = r_serv_cred.hget(REDIS_KEY_ALL_PATH_SET_REV, pathNum)
        paste = Paste.Paste(path)
        p_date = str(paste._get_p_date())
        p_date = p_date[6:] + '/' + p_date[4:6] + '/' + p_date[0:4]
        p_source = paste.p_source
        p_encoding = paste._get_p_encoding()
        p_size = paste.p_size
        p_mime = paste.p_mime
        p_lineinfo = paste.get_lines_info()
        p_content = paste.get_p_content().decode('utf-8', 'ignore')
        if p_content:
            # limit the preview to the first 400 characters
            p_content = p_content[0:400]
        paste_info.append({
            "path": path,
            "date": p_date,
            "source": p_source,
            "encoding": p_encoding,
            "size": p_size,
            "mime": p_mime,
            "lineinfo": p_lineinfo,
            "content": p_content
        })

    return jsonify(paste_info)
Example No. 10
def importantPasteByModule():
    module_name = request.args.get('moduleName')

    all_content = []
    paste_date = []
    paste_linenum = []
    all_path = []
    allPastes = getPastebyType(r_serv_db, module_name)

    for path in allPastes[0:10]:
        all_path.append(path)
        paste = Paste.Paste(path)
        content = paste.get_p_content().decode('utf8', 'ignore')
        content_range = max_preview_char if len(
            content) > max_preview_char else len(content) - 1
        all_content.append(content[0:content_range].replace(
            "\"", "\'").replace("\r", " ").replace("\n", " "))
        curr_date = str(paste._get_p_date())
        curr_date = curr_date[0:4] + '/' + curr_date[4:6] + '/' + curr_date[6:]
        paste_date.append(curr_date)
        paste_linenum.append(paste.get_lines_info()[0])

    if len(allPastes) > 10:
        finished = False
    else:
        finished = True

    return render_template("important_paste_by_module.html",
                           moduleName=module_name,
                           all_path=all_path,
                           content=all_content,
                           paste_date=paste_date,
                           paste_linenum=paste_linenum,
                           char_to_display=max_preview_modal,
                           finished=finished)
Example No. 11
def get_more_search_result():
    query = request.form['query']
    q = []
    q.append(query)
    page_offset = int(request.form['page_offset'])
    index_name = request.form['index_name']
    num_elem_to_get = 50

    # select correct index
    if index_name is None or index_name == "0":
        selected_index = get_current_index()
    else:
        selected_index = os.path.join(baseindexpath, index_name)

    path_array = []
    preview_array = []
    date_array = []
    size_array = []

    schema = Schema(title=TEXT(stored=True),
                    path=ID(stored=True),
                    content=TEXT)

    ix = index.open_dir(selected_index)
    with ix.searcher() as searcher:
        query = QueryParser("content", ix.schema).parse(" ".join(q))
        results = searcher.search_page(query, page_offset, num_elem_to_get)
        for x in results:
            path_array.append(x.items()[0][1])
            paste = Paste.Paste(x.items()[0][1])
            content = paste.get_p_content().decode('utf8', 'ignore')
            content_range = max_preview_char if len(
                content) > max_preview_char else len(content) - 1
            preview_array.append(content[0:content_range])
            curr_date = str(paste._get_p_date())
            curr_date = curr_date[0:4] + '/' + curr_date[
                4:6] + '/' + curr_date[6:]
            date_array.append(curr_date)
            size_array.append(paste._get_p_size())
        to_return = {}
        to_return["path_array"] = path_array
        to_return["preview_array"] = preview_array
        to_return["date_array"] = date_array
        to_return["size_array"] = size_array
        if len(path_array) < num_elem_to_get:  #pagelength
            to_return["moreData"] = False
        else:
            to_return["moreData"] = True

    return jsonify(to_return)
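The moreData flag above infers that another page exists from receiving a full page of results. Whoosh can answer that question directly; a sketch, where search_page_info is a hypothetical helper:

from whoosh.qparser import QueryParser

def search_page_info(searcher, schema, terms, pagenum, pagelen=50):
    # Return (hits, more_pages); ResultsPage.is_last_page() makes the
    # "did we receive a full page?" heuristic unnecessary.
    query = QueryParser("content", schema).parse(" ".join(terms))
    results = searcher.search_page(query, pagenum, pagelen=pagelen)
    return list(results), not results.is_last_page()

# usage inside the handler above:
#   hits, more = search_page_info(searcher, ix.schema, q, page_offset, num_elem_to_get)
#   to_return["moreData"] = more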
Example No. 12
def get_more_search_result():
    query = request.form['query']
    q = []
    q.append(query)
    page_offset = int(request.form['page_offset'])
    num_elem_to_get = 50

    path_array = []
    preview_array = []
    date_array = []
    size_array = []

    from whoosh import index
    from whoosh.fields import Schema, TEXT, ID
    schema = Schema(title=TEXT(stored=True),
                    path=ID(stored=True),
                    content=TEXT)

    indexpath = os.path.join(os.environ['AIL_HOME'],
                             cfg.get("Indexer", "path"))
    ix = index.open_dir(indexpath)
    from whoosh.qparser import QueryParser
    with ix.searcher() as searcher:
        query = QueryParser("content", ix.schema).parse(" ".join(q))
        results = searcher.search_page(query, page_offset, num_elem_to_get)
        for x in results:
            path_array.append(x.items()[0][1])
            paste = Paste.Paste(x.items()[0][1])
            content = paste.get_p_content().decode('utf8', 'ignore')
            content_range = max_preview_char if len(
                content) > max_preview_char else len(content) - 1
            preview_array.append(content[0:content_range])
            curr_date = str(paste._get_p_date())
            curr_date = curr_date[0:4] + '/' + curr_date[
                4:6] + '/' + curr_date[6:]
            date_array.append(curr_date)
            size_array.append(paste._get_p_size())
        to_return = {}
        to_return["path_array"] = path_array
        to_return["preview_array"] = preview_array
        to_return["date_array"] = date_array
        to_return["size_array"] = size_array
        print "len(path_array)=" + str(len(path_array))
        if len(path_array) < num_elem_to_get:  #pagelength
            to_return["moreData"] = False
        else:
            to_return["moreData"] = True

    return jsonify(to_return)
Example No. 13
def event_stream_getImportantPasteByModule(module_name, year):
    index = 0
    all_pastes_list = getPastebyType(r_serv_db[year], module_name)
    paste_tags = []

    for path in all_pastes_list:
        index += 1
        paste = Paste.Paste(path)
        content = paste.get_p_content()
        content_range = max_preview_char if len(
            content) > max_preview_char else len(content) - 1
        curr_date = str(paste._get_p_date())
        curr_date = curr_date[0:4] + '/' + curr_date[4:6] + '/' + curr_date[6:]
        p_tags = r_serv_metadata.smembers('tag:' + path)
        l_tags = []
        for tag in p_tags:
            complete_tag = tag.replace('"', '&quot;')
            tag = tag.split('=')
            if len(tag) > 1:
                if tag[1] != '':
                    tag = tag[1][1:-1]
                # no value
                else:
                    tag = tag[0][1:-1]
            # use for custom tags
            else:
                tag = tag[0]

            l_tags.append((tag, complete_tag))

        data = {}
        data["module"] = module_name
        data["index"] = index
        data["path"] = path
        data["content"] = content[0:content_range]
        data["linenum"] = paste.get_lines_info()[0]
        data["date"] = curr_date
        data["l_tags"] = l_tags
        data["bootstrap_label"] = bootstrap_label
        data["char_to_display"] = max_preview_modal
        data["finished"] = True if index == len(all_pastes_list) else False
        yield 'retry: 100000\ndata: %s\n\n' % json.dumps(
            data)  #retry to avoid reconnection of the browser
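The tag-splitting loop above (take the quoted value of a key="value" tag, fall back to the bare key for custom tags) is copied almost verbatim into several handlers below; as a sketch it factors into a helper (split_tag is a hypothetical name, not an AIL function; the later handlers keep the raw tag as complete_tag, while this one HTML-escapes it first):

def split_tag(tag):
    # 'infoleak:automatic-detection="credential"' -> ('credential', tag)
    # 'my-custom-tag'                             -> ('my-custom-tag', tag)
    complete_tag = tag
    parts = tag.split('=')
    if len(parts) > 1:
        short = parts[1][1:-1] if parts[1] != '' else parts[0][1:-1]
    else:
        short = parts[0]
    return short, complete_tag

# usage: l_tags = [split_tag(t) for t in r_serv_metadata.smembers('tag:' + path)]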
Example No. 14
def importantPasteByModule():
    module_name = request.args.get('moduleName')

    # # TODO: VERIFY YEAR VALIDITY
    try:
        currentSelectYear = int(request.args.get('year'))
    except:
        print('Invalid year input')
        currentSelectYear = int(datetime.now().year)

    all_content = []
    paste_date = []
    paste_linenum = []
    all_path = []
    allPastes = getPastebyType(r_serv_db[currentSelectYear], module_name)

    for path in allPastes[0:10]:
        all_path.append(path)
        paste = Paste.Paste(path)
        content = paste.get_p_content()
        content_range = max_preview_char if len(
            content) > max_preview_char else len(content) - 1
        all_content.append(content[0:content_range].replace(
            "\"", "\'").replace("\r", " ").replace("\n", " "))
        curr_date = str(paste._get_p_date())
        curr_date = curr_date[0:4] + '/' + curr_date[4:6] + '/' + curr_date[6:]
        paste_date.append(curr_date)
        paste_linenum.append(paste.get_lines_info()[0])

    if len(allPastes) > 10:
        finished = False
    else:
        finished = True

    return render_template("important_paste_by_module.html",
                           moduleName=module_name,
                           year=currentSelectYear,
                           all_path=all_path,
                           content=all_content,
                           paste_date=paste_date,
                           paste_linenum=paste_linenum,
                           char_to_display=max_preview_modal,
                           finished=finished)
Example No. 15
def event_stream_getImportantPasteByModule(module_name, year):
    index = 0
    all_pastes_list = getPastebyType(r_serv_db[year], module_name)
    for path in all_pastes_list:
        index += 1
        paste = Paste.Paste(path)
        content = paste.get_p_content().decode('utf8', 'ignore')
        content_range = max_preview_char if len(
            content) > max_preview_char else len(content) - 1
        curr_date = str(paste._get_p_date())
        curr_date = curr_date[0:4] + '/' + curr_date[4:6] + '/' + curr_date[6:]
        data = {}
        data["module"] = module_name
        data["index"] = index
        data["path"] = path
        data["content"] = content[0:content_range]
        data["linenum"] = paste.get_lines_info()[0]
        data["date"] = curr_date
        data["char_to_display"] = max_preview_modal
        data["finished"] = True if index == len(all_pastes_list) else False
        yield 'retry: 100000\ndata: %s\n\n' % json.dumps(
            data)  #retry to avoid reconnection of the browser
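Generators like this one are meant to be wrapped in a text/event-stream response; a minimal Flask wiring sketch (the route name is hypothetical):

from flask import Flask, Response, request

app = Flask(__name__)

@app.route('/importantPasteByModule/stream')  # hypothetical route
def important_paste_stream():
    module_name = request.args.get('moduleName')
    year = int(request.args.get('year'))
    # 'text/event-stream' makes the browser treat the yielded
    # 'retry: ...\ndata: ...\n\n' chunks as Server-Sent Events
    return Response(event_stream_getImportantPasteByModule(module_name, year),
                    mimetype='text/event-stream')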
Example No. 16
def Tags_page():
    date_from = request.args.get('date_from')
    date_to = request.args.get('date_to')
    tags = request.args.get('ltags')

    if tags is None:
        dates = get_all_dates_range(date_from, date_to)
        return render_template("Tags.html",
                               date_from=dates['date_from'],
                               date_to=dates['date_to'])

    # unpack tags
    list_tags = tags.split(',')
    list_tag = []
    for tag in list_tags:
        list_tag.append(tag.replace('"', '\"'))

    # no search by date: use last_seen for date_from/date_to
    if date_from is None and date_to is None and tags is not None:
        date_from = get_last_seen_from_tags_list(list_tags)
        date_to = date_from

    # TODO verify input

    dates = get_all_dates_range(date_from, date_to)

    if (type(list_tags) is list):
        # no tag
        if list_tags is False:
            print('empty')
        # 1 tag
        elif len(list_tags) < 2:
            tagged_pastes = []
            for date in dates['date_range']:
                tagged_pastes.extend(
                    r_serv_tags.smembers('{}:{}'.format(list_tags[0], date)))

        # 2 tags or more
        else:
            tagged_pastes = []
            for date in dates['date_range']:
                tag_keys = []
                for tag in list_tags:
                    tag_keys.append('{}:{}'.format(tag, date))

                if len(tag_keys) > 1:
                    daily_items = r_serv_tags.sinter(tag_keys[0],
                                                     *tag_keys[1:])
                else:
                    daily_items = r_serv_tags.sinter(tag_keys[0])
                tagged_pastes.extend(daily_items)

    else:
        return 'INCORRECT INPUT'

    all_content = []
    paste_date = []
    paste_linenum = []
    all_path = []
    allPastes = list(tagged_pastes)
    paste_tags = []

    try:
        page = int(request.args.get('page'))
    except:
        page = 1
    if page <= 0:
        page = 1
    nb_page_max = len(tagged_pastes) / (max_tags_result)
    if not nb_page_max.is_integer():
        nb_page_max = int(nb_page_max) + 1
    else:
        nb_page_max = int(nb_page_max)
    if page > nb_page_max:
        page = nb_page_max
    start = max_tags_result * (page - 1)
    stop = max_tags_result * page

    for path in allPastes[start:stop]:
        all_path.append(path)
        paste = Paste.Paste(path)
        content = paste.get_p_content()
        content_range = max_preview_char if len(
            content) > max_preview_char else len(content) - 1
        all_content.append(content[0:content_range].replace(
            "\"", "\'").replace("\r", " ").replace("\n", " "))
        curr_date = str(paste._get_p_date())
        curr_date = curr_date[0:4] + '/' + curr_date[4:6] + '/' + curr_date[6:]
        paste_date.append(curr_date)
        paste_linenum.append(paste.get_lines_info()[0])
        p_tags = r_serv_metadata.smembers('tag:' + path)
        complete_tags = []
        l_tags = []
        for tag in p_tags:
            complete_tag = tag

            tag = tag.split('=')
            if len(tag) > 1:
                if tag[1] != '':
                    tag = tag[1][1:-1]
                # no value
                else:
                    tag = tag[0][1:-1]
            # use for custom tags
            else:
                tag = tag[0]

            l_tags.append((tag, complete_tag))

        paste_tags.append(l_tags)

    if len(allPastes) > 10:
        finished = False
    else:
        finished = True

    if len(list_tag) == 1:
        tag_nav = tags.replace('"', '').replace('=', '').replace(':', '')
    else:
        tag_nav = 'empty'

    return render_template("Tags.html",
                           all_path=all_path,
                           tags=tags,
                           tag_nav=tag_nav,
                           list_tag=list_tag,
                           date_from=dates['date_from'],
                           date_to=dates['date_to'],
                           page=page,
                           nb_page_max=nb_page_max,
                           paste_tags=paste_tags,
                           bootstrap_label=bootstrap_label,
                           content=all_content,
                           paste_date=paste_date,
                           paste_linenum=paste_linenum,
                           char_to_display=max_preview_modal,
                           finished=finished)
Example No. 17
def getmoredata():
    requested_path = request.args.get('paste', '')
    paste = Paste.Paste(requested_path)
    p_content = paste.get_p_content()
    to_return = p_content[max_preview_modal - 1:]
    return to_return
Example No. 18
def showsavedrawpaste():
    requested_path = request.args.get('paste', '')
    paste = Paste.Paste(requested_path)
    content = paste.get_p_content()
    return Response(content, mimetype='text/plain')
Example No. 19
def show_item_min(requested_path, content_range=0):
    relative_path = None
    if PASTES_FOLDER not in requested_path:
        relative_path = requested_path
        requested_path = os.path.join(PASTES_FOLDER, requested_path)
    else:
        relative_path = requested_path.replace(PASTES_FOLDER, '', 1)
    # remove old full path
    #requested_path = requested_path.replace(PASTES_FOLDER, '')
    # guard against directory traversal
    if os.path.commonprefix(
        (os.path.realpath(requested_path), PASTES_FOLDER)) != PASTES_FOLDER:
        return 'path traversal detected'

    item_info = {}

    paste = Paste.Paste(requested_path)
    item_basic_info = get_item_basic_info(paste)
    item_info['nb_duplictates'] = paste.get_nb_duplicate()

    ## TODO: use this for fix ?
    item_content = paste.get_p_content()
    char_to_display = len(item_content)
    if content_range != 0:
        item_content = item_content[0:content_range]

    vt_enabled = Flask_config.vt_enabled

    p_hashtype_list = []

    print(requested_path)
    l_tags = r_serv_metadata.smembers('tag:' + relative_path)
    if relative_path is not None:
        # union() returns a new set; the result must be assigned back
        l_tags = l_tags.union(r_serv_metadata.smembers('tag:' + relative_path))
    item_info['tags'] = l_tags
    item_info['name'] = relative_path.replace('/', ' / ')

    l_64 = []
    # load hash files
    if r_serv_metadata.scard('hash_paste:' + relative_path) > 0:
        set_b64 = r_serv_metadata.smembers('hash_paste:' + relative_path)
        for hash in set_b64:
            nb_in_file = r_serv_metadata.zscore('nb_seen_hash:' + hash,
                                                relative_path)
            # item list not updated
            if nb_in_file is None:
                l_pastes = r_serv_metadata.zrange('nb_seen_hash:' + hash, 0,
                                                  -1)
                for paste_name in l_pastes:
                    # dynamic update
                    if PASTES_FOLDER in paste_name:
                        score = r_serv_metadata.zscore(
                            'nb_seen_hash:{}'.format(hash), paste_name)
                        r_serv_metadata.zrem('nb_seen_hash:{}'.format(hash),
                                             paste_name)
                        paste_name = paste_name.replace(PASTES_FOLDER, '', 1)
                        r_serv_metadata.zadd('nb_seen_hash:{}'.format(hash),
                                             score, paste_name)
                nb_in_file = r_serv_metadata.zscore(
                    'nb_seen_hash:{}'.format(hash), relative_path)
            nb_in_file = int(nb_in_file)
            estimated_type = r_serv_metadata.hget('metadata_hash:' + hash,
                                                  'estimated_type')
            file_type = estimated_type.split('/')[0]
            # set file icon
            if file_type == 'application':
                file_icon = 'fa-file '
            elif file_type == 'audio':
                file_icon = 'fa-file-video '
            elif file_type == 'image':
                file_icon = 'fa-file-image'
            elif file_type == 'text':
                file_icon = 'fa-file-alt'
            else:
                file_icon = 'fa-file'
            saved_path = r_serv_metadata.hget('metadata_hash:' + hash,
                                              'saved_path')
            if r_serv_metadata.hexists('metadata_hash:' + hash, 'vt_link'):
                b64_vt = True
                b64_vt_link = r_serv_metadata.hget('metadata_hash:' + hash,
                                                   'vt_link')
                b64_vt_report = r_serv_metadata.hget('metadata_hash:' + hash,
                                                     'vt_report')
            else:
                b64_vt = False
                b64_vt_link = ''
                b64_vt_report = r_serv_metadata.hget('metadata_hash:' + hash,
                                                     'vt_report')
                # hash never refreshed
                if b64_vt_report is None:
                    b64_vt_report = ''

            l_64.append((file_icon, estimated_type, hash, saved_path,
                         nb_in_file, b64_vt, b64_vt_link, b64_vt_report))

    crawler_metadata = {}
    if 'infoleak:submission="crawler"' in l_tags:
        crawler_metadata['get_metadata'] = True
        crawler_metadata['domain'] = r_serv_metadata.hget(
            'paste_metadata:' + relative_path, 'domain')
        crawler_metadata['paste_father'] = r_serv_metadata.hget(
            'paste_metadata:' + relative_path, 'father')
        crawler_metadata['real_link'] = r_serv_metadata.hget(
            'paste_metadata:' + relative_path, 'real_link')
        crawler_metadata['screenshot'] = get_item_screenshot_path(
            relative_path)
    else:
        crawler_metadata['get_metadata'] = False

    misp_event = r_serv_metadata.get('misp_events:' + requested_path)
    if misp_event is None:
        misp_eventid = False
        misp_url = ''
    else:
        misp_eventid = True
        misp_url = misp_event_url + misp_event

    hive_case = r_serv_metadata.get('hive_cases:' + requested_path)
    if hive_case is None:
        hive_caseid = False
        hive_url = ''
    else:
        hive_caseid = True
        hive_url = hive_case_url.replace('id_here', hive_case)

    return render_template("show_saved_item_min.html",
                           bootstrap_label=bootstrap_label,
                           content=item_content,
                           item_basic_info=item_basic_info,
                           item_info=item_info,
                           initsize=len(item_content),
                           hashtype_list=p_hashtype_list,
                           crawler_metadata=crawler_metadata,
                           l_64=l_64,
                           vt_enabled=vt_enabled,
                           misp_eventid=misp_eventid,
                           misp_url=misp_url,
                           hive_caseid=hive_caseid,
                           hive_url=hive_url)
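One caveat on the guard at the top of this handler: os.path.commonprefix() compares strings character by character, so a sibling directory such as /data/PASTES_old would pass a check against /data/PASTES. A safer sketch on Python 3 compares whole path components with os.path.commonpath() (is_safe_item_path is a hypothetical helper):

import os

def is_safe_item_path(requested_path, base_folder):
    # Resolve symlinks and '..' first, then compare whole path components;
    # assumes base_folder is an absolute directory path.
    real = os.path.realpath(requested_path)
    base = os.path.realpath(base_folder)
    return os.path.commonpath((real, base)) == base

# usage: if not is_safe_item_path(requested_path, PASTES_FOLDER): abort(404)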
Example No. 20
def get_more_search_result():
    query = request.form['query']
    q = []
    q.append(query)
    page_offset = int(request.form['page_offset'])
    index_name = request.form['index_name']
    num_elem_to_get = 50

    # select correct index
    if index_name is None or index_name == "0":
        selected_index = get_current_index()
    else:
        selected_index = os.path.join(baseindexpath, index_name)

    path_array = []
    preview_array = []
    date_array = []
    size_array = []
    list_tags = []

    schema = Schema(title=TEXT(stored=True), path=ID(stored=True), content=TEXT)

    ix = index.open_dir(selected_index)
    with ix.searcher() as searcher:
        query = QueryParser("content", ix.schema).parse(" ".join(q))
        results = searcher.search_page(query, page_offset, num_elem_to_get)
        for x in results:
            path = x.items()[0][1]
            path = path.replace(PASTES_FOLDER, '', 1)
            path_array.append(path)
            paste = Paste.Paste(path)
            content = paste.get_p_content()
            content_range = max_preview_char if len(content)>max_preview_char else len(content)-1
            preview_array.append(content[0:content_range])
            curr_date = str(paste._get_p_date())
            curr_date = curr_date[0:4]+'/'+curr_date[4:6]+'/'+curr_date[6:]
            date_array.append(curr_date)
            size_array.append(paste._get_p_size())
            p_tags = r_serv_metadata.smembers('tag:'+path)
            l_tags = []
            for tag in p_tags:
                complete_tag = tag
                tag = tag.split('=')
                if len(tag) > 1:
                    if tag[1] != '':
                        tag = tag[1][1:-1]
                    # no value
                    else:
                        tag = tag[0][1:-1]
                # use for custom tags
                else:
                    tag = tag[0]

                l_tags.append( (tag, complete_tag) )
            list_tags.append(l_tags)

        to_return = {}
        to_return["path_array"] = path_array
        to_return["preview_array"] = preview_array
        to_return["date_array"] = date_array
        to_return["size_array"] = size_array
        to_return["list_tags"] = list_tags
        to_return["bootstrap_label"] = bootstrap_label
        if len(path_array) < num_elem_to_get: #pagelength
            to_return["moreData"] = False
        else:
            to_return["moreData"] = True

    return jsonify(to_return)
Example No. 21
def search():
    query = request.form['query']
    q = []
    q.append(query)
    r = [] #complete path
    c = [] #preview of the paste content
    paste_date = []
    paste_size = []
    paste_tags = []
    index_name = request.form['index_name']
    num_elem_to_get = 50

    # select correct index
    if index_name is None or index_name == "0":
        selected_index = get_current_index()
    else:
        selected_index = os.path.join(baseindexpath, index_name)

    ''' temporary disabled
    # Search filename
    for path in r_serv_pasteName.smembers(q[0]):
        r.append(path)
        paste = Paste.Paste(path)
        content = paste.get_p_content()
        content_range = max_preview_char if len(content)>max_preview_char else len(content)-1
        c.append(content[0:content_range])
        curr_date = str(paste._get_p_date())
        curr_date = curr_date[0:4]+'/'+curr_date[4:6]+'/'+curr_date[6:]
        paste_date.append(curr_date)
        paste_size.append(paste._get_p_size())
    '''

    # Search full line
    schema = Schema(title=TEXT(stored=True), path=ID(stored=True), content=TEXT)

    ix = index.open_dir(selected_index)
    with ix.searcher() as searcher:
        query = QueryParser("content", ix.schema).parse("".join(q))
        results = searcher.search_page(query, 1, pagelen=num_elem_to_get)
        for x in results:
            r.append(x.items()[0][1].replace(PASTES_FOLDER, '', 1))
            path = x.items()[0][1].replace(PASTES_FOLDER, '', 1)
            paste = Paste.Paste(path)
            content = paste.get_p_content()
            content_range = max_preview_char if len(content)>max_preview_char else len(content)-1
            c.append(content[0:content_range])
            curr_date = str(paste._get_p_date())
            curr_date = curr_date[0:4]+'/'+curr_date[4:6]+'/'+curr_date[6:]
            paste_date.append(curr_date)
            paste_size.append(paste._get_p_size())
            p_tags = r_serv_metadata.smembers('tag:'+path)
            l_tags = []
            for tag in p_tags:
                complete_tag = tag
                tag = tag.split('=')
                if len(tag) > 1:
                    if tag[1] != '':
                        tag = tag[1][1:-1]
                    # no value
                    else:
                        tag = tag[0][1:-1]
                # use for custom tags
                else:
                    tag = tag[0]

                l_tags.append( (tag, complete_tag) )

            paste_tags.append(l_tags)
        results = searcher.search(query)
        num_res = len(results)

    index_list = get_index_list()

    index_min = 1
    index_max = len(index_list)

    return render_template("search.html", r=r, c=c,
            query=request.form['query'], paste_date=paste_date,
            paste_size=paste_size, char_to_display=max_preview_modal,
            num_res=num_res, index_min=index_min, index_max=index_max,
            bootstrap_label=bootstrap_label,
            paste_tags=paste_tags,
            index_list=index_list
           )
Example No. 22
def misp_create_event(distribution, threat_level_id, analysis, info, l_tags,
                      publish, path):

    paste = Paste.Paste(path)
    source = path.split('/')[-6:]
    source = '/'.join(source)[:-3]
    ail_uuid = r_serv_db.get('ail:uuid')
    pseudofile = BytesIO(paste.get_p_content().encode())

    temp = paste._get_p_duplicate()

    #beautifier
    if not temp:
        temp = ''

    p_duplicate_number = len(temp) if len(temp) >= 0 else 0

    to_ret = ""
    for dup in temp[:10]:
        dup = dup.replace('\'', '\"').replace('(', '[').replace(')', ']')
        dup = json.loads(dup)
        algo = dup[0]
        path = dup[1].split('/')[-6:]
        path = '/'.join(path)[:-3]  # -3 removes .gz
        if algo == 'tlsh':
            perc = 100 - int(dup[2])
        else:
            perc = dup[2]
        to_ret += "{}: {} [{}%]\n".format(path, algo, perc)
    p_duplicate = to_ret

    today = datetime.date.today()
    # [0-3]
    if publish == 'True':
        published = True
    else:
        published = False
    org_id = None
    orgc_id = None
    sharing_group_id = None
    date = today
    event = pymisp.new_event(distribution, threat_level_id, analysis, info,
                             date, published, orgc_id, org_id,
                             sharing_group_id)
    eventUuid = event['Event']['uuid']
    eventid = event['Event']['id']

    r_serv_metadata.set('misp_events:' + path, eventid)

    # add tags
    for tag in l_tags:
        pymisp.tag(eventUuid, tag)

    # create attributes
    obj_name = 'ail-leak'
    leak_obj = MISPObject(obj_name)
    leak_obj.add_attribute('sensor', value=ail_uuid, type="text")
    leak_obj.add_attribute('origin', value=source, type='text')
    leak_obj.add_attribute('last-seen',
                           value=date_to_str(paste.p_date),
                           type='datetime')
    leak_obj.add_attribute('raw-data',
                           value=source,
                           data=pseudofile,
                           type="attachment")

    if p_duplicate_number > 0:
        leak_obj.add_attribute('duplicate', value=p_duplicate, type='text')
        leak_obj.add_attribute('duplicate_number',
                               value=p_duplicate_number,
                               type='counter')

    try:
        templateID = [
            x['ObjectTemplate']['id']
            for x in pymisp.get_object_templates_list()['response']
            if x['ObjectTemplate']['name'] == obj_name
        ][0]
    except IndexError:
        valid_types = ", ".join([
            x['ObjectTemplate']['name']
            for x in pymisp.get_object_templates_list()
        ])
        print("Template for type {} not found! Valid types are: {}".format(
            obj_name, valid_types))
        return False
    r = pymisp.add_object(eventid, templateID, leak_obj)
    if 'errors' in r:
        print(r)
        return False
    else:
        event_url = misp_event_url + eventid
        return event_url
Example No. 23
def importantPasteByModule():
    module_name = request.args.get('moduleName')

    # # TODO: VERIFY YEAR VALIDITY
    try:
        currentSelectYear = int(request.args.get('year'))
    except:
        print('Invalid year input')
        currentSelectYear = int(datetime.now().year)

    all_content = []
    paste_date = []
    paste_linenum = []
    all_path = []
    paste_tags = []
    allPastes = getPastebyType(r_serv_db[currentSelectYear], module_name)

    for path in allPastes[0:10]:
        all_path.append(path)
        paste = Paste.Paste(path)
        content = paste.get_p_content()
        content_range = max_preview_char if len(content)>max_preview_char else len(content)-1
        all_content.append(content[0:content_range].replace("\"", "\'").replace("\r", " ").replace("\n", " "))
        curr_date = str(paste._get_p_date())
        curr_date = curr_date[0:4]+'/'+curr_date[4:6]+'/'+curr_date[6:]
        paste_date.append(curr_date)
        paste_linenum.append(paste.get_lines_info()[0])
        p_tags = r_serv_metadata.smembers('tag:'+path)
        l_tags = []
        for tag in p_tags:
            complete_tag = tag
            tag = tag.split('=')
            if len(tag) > 1:
                if tag[1] != '':
                    tag = tag[1][1:-1]
                # no value
                else:
                    tag = tag[0][1:-1]
            # use for custom tags
            else:
                tag = tag[0]

            l_tags.append( (tag, complete_tag) )

        paste_tags.append(l_tags)

    if len(allPastes) > 10:
        finished = False
    else:
        finished = True

    return render_template("important_paste_by_module.html",
            moduleName=module_name,
            year=currentSelectYear,
            all_path=all_path,
            content=all_content,
            paste_date=paste_date,
            paste_linenum=paste_linenum,
            char_to_display=max_preview_modal,
            paste_tags=paste_tags,
            bootstrap_label=bootstrap_label,
            finished=finished)
Example No. 24
def showpaste(content_range, requested_path):
    if PASTES_FOLDER not in requested_path:
        # build the full path, mirroring show_item_min() above
        # (joining in the other order would discard requested_path)
        requested_path_full = os.path.join(PASTES_FOLDER, requested_path)
    else:
        requested_path_full = requested_path
        requested_path = requested_path.replace(PASTES_FOLDER, '', 1)

    # guard against directory traversal
    if os.path.commonprefix(
        (requested_path_full, PASTES_FOLDER)) != PASTES_FOLDER:
        return 'path traversal detected'

    vt_enabled = Flask_config.vt_enabled

    paste = Paste.Paste(requested_path)
    p_date = str(paste._get_p_date())
    p_date = p_date[6:] + '/' + p_date[4:6] + '/' + p_date[0:4]
    p_source = paste.p_source
    p_encoding = paste._get_p_encoding()
    p_language = 'None'
    p_size = paste.p_size
    p_mime = paste.p_mime
    p_lineinfo = paste.get_lines_info()
    p_content = paste.get_p_content()
    p_duplicate_str_full_list = paste._get_p_duplicate()

    p_duplicate_full_list = []
    p_duplicate_list = []
    p_simil_list = []
    p_date_list = []
    p_hashtype_list = []

    for dup_list in p_duplicate_str_full_list:
        dup_list = dup_list[1:-1].replace('\'', '').replace(' ', '').split(',')
        if dup_list[0] == "tlsh":
            dup_list[2] = 100 - int(dup_list[2])
        else:
            dup_list[2] = int(dup_list[2])
        p_duplicate_full_list.append(dup_list)

    #p_duplicate_full_list.sort(lambda x,y: cmp(x[2], y[2]), reverse=True)

    # Combine multiple duplicate paste name and format for display
    new_dup_list = []
    dup_list_removed = []
    for dup_list_index in range(0, len(p_duplicate_full_list)):
        if dup_list_index in dup_list_removed:
            continue
        indices = [
            i for i, x in enumerate(p_duplicate_full_list)
            if x[1] == p_duplicate_full_list[dup_list_index][1]
        ]
        hash_types = []
        comp_vals = []
        for i in indices:
            hash_types.append(p_duplicate_full_list[i][0])
            comp_vals.append(p_duplicate_full_list[i][2])
            dup_list_removed.append(i)

        #hash_types = str(hash_types).replace("[","").replace("]","") if len(hash_types)==1 else str(hash_types)
        #comp_vals = str(comp_vals).replace("[","").replace("]","") if len(comp_vals)==1 else str(comp_vals)

        if len(p_duplicate_full_list[dup_list_index]) > 3:
            try:
                date_paste = str(int(p_duplicate_full_list[dup_list_index][3]))
                date_paste = date_paste[0:4] + "-" + date_paste[
                    4:6] + "-" + date_paste[6:8]
            except ValueError:
                date_paste = str(p_duplicate_full_list[dup_list_index][3])
        else:
            date_paste = "No date available"
        new_dup_list.append([
            hash_types, p_duplicate_full_list[dup_list_index][1], comp_vals,
            date_paste
        ])

    # Create the list to pass to the webpage
    for dup_list in new_dup_list:
        hash_type, path, simil_percent, date_paste = dup_list
        p_duplicate_list.append(path)
        p_simil_list.append(simil_percent)
        p_hashtype_list.append(hash_type)
        p_date_list.append(date_paste)

    if content_range != 0:
        p_content = p_content[0:content_range]

    #active taxonomies
    active_taxonomies = r_serv_tags.smembers('active_taxonomies')

    l_tags = r_serv_metadata.smembers('tag:' + requested_path)

    #active galaxies
    active_galaxies = r_serv_tags.smembers('active_galaxies')

    list_tags = []

    for tag in l_tags:
        if (tag[9:28] == 'automatic-detection'):
            automatic = True
        else:
            automatic = False

        if r_serv_statistics.sismember('tp:' + tag, requested_path):
            tag_status_tp = True
        else:
            tag_status_tp = False
        if r_serv_statistics.sismember('fp:' + tag, requested_path):
            tag_status_fp = True
        else:
            tag_status_fp = False

        list_tags.append((tag, automatic, tag_status_tp, tag_status_fp))

    l_64 = []
    # load hash files
    if r_serv_metadata.scard('hash_paste:' + requested_path) > 0:
        set_b64 = r_serv_metadata.smembers('hash_paste:' + requested_path)
        for hash in set_b64:
            nb_in_file = r_serv_metadata.zscore('nb_seen_hash:' + hash,
                                                requested_path)
            # item list not updated
            if nb_in_file is None:
                l_pastes = r_serv_metadata.zrange('nb_seen_hash:' + hash, 0,
                                                  -1)
                for paste_name in l_pastes:
                    # dynamic update
                    if PASTES_FOLDER in paste_name:
                        score = r_serv_metadata.zscore(
                            'nb_seen_hash:{}'.format(hash), paste_name)
                        r_serv_metadata.zrem('nb_seen_hash:{}'.format(hash),
                                             paste_name)
                        paste_name = paste_name.replace(PASTES_FOLDER, '', 1)
                        r_serv_metadata.zadd('nb_seen_hash:{}'.format(hash),
                                             score, paste_name)
                nb_in_file = r_serv_metadata.zscore('nb_seen_hash:' + hash,
                                                    requested_path)
            nb_in_file = int(nb_in_file)
            estimated_type = r_serv_metadata.hget('metadata_hash:' + hash,
                                                  'estimated_type')
            file_type = estimated_type.split('/')[0]
            # set file icon
            if file_type == 'application':
                file_icon = 'fa-file-o '
            elif file_type == 'audio':
                file_icon = 'fa-file-video-o '
            elif file_type == 'image':
                file_icon = 'fa-file-image-o'
            elif file_type == 'text':
                file_icon = 'fa-file-text-o'
            else:
                file_icon = 'fa-file'
            saved_path = r_serv_metadata.hget('metadata_hash:' + hash,
                                              'saved_path')
            if r_serv_metadata.hexists('metadata_hash:' + hash, 'vt_link'):
                b64_vt = True
                b64_vt_link = r_serv_metadata.hget('metadata_hash:' + hash,
                                                   'vt_link')
                b64_vt_report = r_serv_metadata.hget('metadata_hash:' + hash,
                                                     'vt_report')
            else:
                b64_vt = False
                b64_vt_link = ''
                b64_vt_report = r_serv_metadata.hget('metadata_hash:' + hash,
                                                     'vt_report')
                # hash never refreshed
                if b64_vt_report is None:
                    b64_vt_report = ''

            l_64.append((file_icon, estimated_type, hash, saved_path,
                         nb_in_file, b64_vt, b64_vt_link, b64_vt_report))

    crawler_metadata = {}
    if 'infoleak:submission="crawler"' in l_tags:
        crawler_metadata['get_metadata'] = True
        crawler_metadata['domain'] = r_serv_metadata.hget(
            'paste_metadata:' + requested_path, 'domain')
        crawler_metadata['paste_father'] = r_serv_metadata.hget(
            'paste_metadata:' + requested_path, 'father')
        crawler_metadata['real_link'] = r_serv_metadata.hget(
            'paste_metadata:' + requested_path, 'real_link')
        crawler_metadata['screenshot'] = get_item_screenshot_path(
            requested_path)
    else:
        crawler_metadata['get_metadata'] = False

    if Flask_config.pymisp is False:
        misp = False
    else:
        misp = True

    if Flask_config.HiveApi is False:
        hive = False
    else:
        hive = True

    misp_event = r_serv_metadata.get('misp_events:' + requested_path)
    if misp_event is None:
        misp_eventid = False
        misp_url = ''
    else:
        misp_eventid = True
        misp_url = misp_event_url + misp_event

    hive_case = r_serv_metadata.get('hive_cases:' + requested_path)
    if hive_case is None:
        hive_caseid = False
        hive_url = ''
    else:
        hive_caseid = True
        hive_url = hive_case_url.replace('id_here', hive_case)

    return render_template("show_saved_paste.html",
                           date=p_date,
                           bootstrap_label=bootstrap_label,
                           active_taxonomies=active_taxonomies,
                           active_galaxies=active_galaxies,
                           list_tags=list_tags,
                           source=p_source,
                           encoding=p_encoding,
                           language=p_language,
                           size=p_size,
                           mime=p_mime,
                           lineinfo=p_lineinfo,
                           content=p_content,
                           initsize=len(p_content),
                           duplicate_list=p_duplicate_list,
                           simil_list=p_simil_list,
                           hashtype_list=p_hashtype_list,
                           date_list=p_date_list,
                           crawler_metadata=crawler_metadata,
                           l_64=l_64,
                           vt_enabled=vt_enabled,
                           misp=misp,
                           hive=hive,
                           misp_eventid=misp_eventid,
                           misp_url=misp_url,
                           hive_caseid=hive_caseid,
                           hive_url=hive_url)
Example No. 25
def showpaste(content_range):
    requested_path = request.args.get('paste', '')
    paste = Paste.Paste(requested_path)
    p_date = str(paste._get_p_date())
    p_date = p_date[6:]+'/'+p_date[4:6]+'/'+p_date[0:4]
    p_source = paste.p_source
    p_encoding = paste._get_p_encoding()
    p_language = paste._get_p_language()
    p_size = paste.p_size
    p_mime = paste.p_mime
    p_lineinfo = paste.get_lines_info()
    p_content = paste.get_p_content()
    p_duplicate_str_full_list = paste._get_p_duplicate()

    p_duplicate_full_list = []
    p_duplicate_list = []
    p_simil_list = []
    p_date_list = []
    p_hashtype_list = []


    for dup_list in p_duplicate_str_full_list:
        dup_list = dup_list[1:-1].replace('\'', '').replace(' ', '').split(',')
        if dup_list[0] == "tlsh":
            dup_list[2] = 100 - int(dup_list[2])
        else:
            dup_list[2] = int(dup_list[2])
        p_duplicate_full_list.append(dup_list)

    #p_duplicate_full_list.sort(lambda x,y: cmp(x[2], y[2]), reverse=True)

    # Combine multiple duplicate paste name and format for display
    new_dup_list = []
    dup_list_removed = []
    for dup_list_index in range(0, len(p_duplicate_full_list)):
        if dup_list_index in dup_list_removed:
            continue
        indices = [i for i, x in enumerate(p_duplicate_full_list) if x[1] == p_duplicate_full_list[dup_list_index][1]]
        hash_types = []
        comp_vals = []
        for i in indices:
            hash_types.append(p_duplicate_full_list[i][0])
            comp_vals.append(p_duplicate_full_list[i][2])
            dup_list_removed.append(i)

        #hash_types = str(hash_types).replace("[","").replace("]","") if len(hash_types)==1 else str(hash_types)
        #comp_vals = str(comp_vals).replace("[","").replace("]","") if len(comp_vals)==1 else str(comp_vals)

        if len(p_duplicate_full_list[dup_list_index]) > 3:
            try:
                date_paste = str(int(p_duplicate_full_list[dup_list_index][3]))
                date_paste = date_paste[0:4]+"-"+date_paste[4:6]+"-"+date_paste[6:8]
            except ValueError:
                date_paste = str(p_duplicate_full_list[dup_list_index][3])
        else:
            date_paste = "No date available"
        new_dup_list.append([hash_types, p_duplicate_full_list[dup_list_index][1], comp_vals, date_paste])

    # Create the list to pass to the webpage
    for dup_list in new_dup_list:
        hash_type, path, simil_percent, date_paste = dup_list
        p_duplicate_list.append(path)
        p_simil_list.append(simil_percent)
        p_hashtype_list.append(hash_type)
        p_date_list.append(date_paste)

    if content_range != 0:
       p_content = p_content[0:content_range]

    #active taxonomies
    active_taxonomies = r_serv_tags.smembers('active_taxonomies')

    l_tags = r_serv_metadata.smembers('tag:'+requested_path)

    #active galaxies
    active_galaxies = r_serv_tags.smembers('active_galaxies')

    list_tags = []

    for tag in l_tags:
        if(tag[9:28] == 'automatic-detection'):
            automatic = True
        else:
            automatic = False

        tag_hash = ssdeep.hash(tag)
        if r_serv_statistics.sismember('tp:'+tag, requested_path):
            tag_status_tp = True
        else:
            tag_status_tp = False
        if r_serv_statistics.sismember('fp:'+tag, requested_path):
            tag_status_fp = True
        else:
            tag_status_fp = False

        list_tags.append( (tag, automatic, tag_status_tp, tag_status_fp) )

    if Flask_config.pymisp is False:
        misp = False
    else:
        misp = True

    if Flask_config.HiveApi is False:
        hive = False
    else:
        hive = True

    misp_event = r_serv_metadata.get('misp_events:' + requested_path)
    if misp_event is None:
        misp_eventid = False
        misp_url = ''
    else:
        misp_eventid = True
        misp_url = misp_event_url + misp_event

    hive_case = r_serv_metadata.get('hive_cases:' + requested_path)
    if hive_case is None:
        hive_caseid = False
        hive_url = ''
    else:
        hive_caseid = True
        hive_url = hive_case_url.replace('id_here', hive_case)

    return render_template("show_saved_paste.html", date=p_date, bootstrap_label=bootstrap_label, active_taxonomies=active_taxonomies, active_galaxies=active_galaxies, list_tags=list_tags, source=p_source, encoding=p_encoding, language=p_language, size=p_size, mime=p_mime, lineinfo=p_lineinfo, content=p_content, initsize=len(p_content), duplicate_list = p_duplicate_list, simil_list = p_simil_list, hashtype_list = p_hashtype_list, date_list=p_date_list,
                            misp=misp, hive=hive, misp_eventid=misp_eventid, misp_url=misp_url, hive_caseid=hive_caseid, hive_url=hive_url)
Example No. 26
def showsavedrawpaste():
    requested_path = request.args.get('paste', '')
    paste = Paste.Paste(requested_path)
    content = paste.get_p_content()
    return content, 200, {'Content-Type': 'text/plain'}
Example No. 27
def showpaste(content_range):
    requested_path = request.args.get('paste', '')
    paste = Paste.Paste(requested_path)
    p_date = str(paste._get_p_date())
    p_date = p_date[6:]+'/'+p_date[4:6]+'/'+p_date[0:4]
    p_source = paste.p_source
    p_encoding = paste._get_p_encoding()
    p_language = paste._get_p_language()
    p_size = paste.p_size
    p_mime = paste.p_mime
    p_lineinfo = paste.get_lines_info()
    p_content = paste.get_p_content().decode('utf-8', 'ignore')
    p_duplicate_full_list = json.loads(paste._get_p_duplicate())
    p_duplicate_list = []
    p_simil_list = []
    p_date_list = []
    p_hashtype_list = []


    for dup_list in p_duplicate_full_list:
        if dup_list[0] == "tlsh":
            dup_list[2] = int(((tlsh_to_percent - float(dup_list[2])) / tlsh_to_percent)*100)
        else:
            dup_list[2] = int(dup_list[2])

    # sort by similarity value, highest first (key-based sort; the
    # Python 2 cmp-style call is not valid on Python 3)
    p_duplicate_full_list.sort(key=lambda x: x[2], reverse=True)

    # Combine multiple duplicate paste name and format for display
    new_dup_list = []
    dup_list_removed = []
    for dup_list_index in range(0, len(p_duplicate_full_list)):
        if dup_list_index in dup_list_removed:
            continue
        indices = [i for i, x in enumerate(p_duplicate_full_list) if x[1] == p_duplicate_full_list[dup_list_index][1]]
        hash_types = []
        comp_vals = []
        for i in indices:
            hash_types.append(p_duplicate_full_list[i][0].encode('utf8'))
            comp_vals.append(p_duplicate_full_list[i][2])
            dup_list_removed.append(i)

        hash_types = str(hash_types).replace("[","").replace("]","") if len(hash_types)==1 else str(hash_types)
        comp_vals = str(comp_vals).replace("[","").replace("]","") if len(comp_vals)==1 else str(comp_vals)
        if len(p_duplicate_full_list[dup_list_index]) > 3:
            try:
                date_paste = str(int(p_duplicate_full_list[dup_list_index][3]))
                date_paste = date_paste[0:4]+"-"+date_paste[4:6]+"-"+date_paste[6:8]
            except ValueError:
                date_paste = str(p_duplicate_full_list[dup_list_index][3])
        else:
            date_paste = "No date available"
        new_dup_list.append([hash_types.replace("'", ""), p_duplicate_full_list[dup_list_index][1], comp_vals, date_paste])

    # Create the list to pass to the webpage
    for dup_list in new_dup_list:
        hash_type, path, simil_percent, date_paste = dup_list
        p_duplicate_list.append(path)
        p_simil_list.append(simil_percent)
        p_hashtype_list.append(hash_type)
        p_date_list.append(date_paste)

    if content_range != 0:
       p_content = p_content[0:content_range]


    return render_template("show_saved_paste.html", date=p_date, source=p_source, encoding=p_encoding, language=p_language, size=p_size, mime=p_mime, lineinfo=p_lineinfo, content=p_content, initsize=len(p_content), duplicate_list = p_duplicate_list, simil_list = p_simil_list, hashtype_list = p_hashtype_list, date_list=p_date_list)
Example No. 28
def get_tagged_paste():

    tags = request.args.get('ltags')

    list_tags = tags.split(',')
    list_tag = []
    for tag in list_tags:
        list_tag.append(tag.replace('"', '\"'))

    # TODO verify input

    if (type(list_tags) is list):
        # no tag
        if list_tags is False:
            print('empty')
        # 1 tag
        elif len(list_tags) < 2:
            tagged_pastes = r_serv_tags.smembers(list_tags[0])

        # 2 tags or more
        else:
            tagged_pastes = r_serv_tags.sinter(list_tags[0], *list_tags[1:])

    else:
        return 'INCORRECT INPUT'

    #TODO FIXME
    currentSelectYear = int(datetime.now().year)

    all_content = []
    paste_date = []
    paste_linenum = []
    all_path = []
    allPastes = list(tagged_pastes)
    paste_tags = []

    for path in allPastes[0:50]:
        all_path.append(path)
        paste = Paste.Paste(path)
        content = paste.get_p_content()
        content_range = max_preview_char if len(
            content) > max_preview_char else len(content) - 1
        all_content.append(content[0:content_range].replace(
            "\"", "\'").replace("\r", " ").replace("\n", " "))
        curr_date = str(paste._get_p_date())
        curr_date = curr_date[0:4] + '/' + curr_date[4:6] + '/' + curr_date[6:]
        paste_date.append(curr_date)
        paste_linenum.append(paste.get_lines_info()[0])
        p_tags = r_serv_metadata.smembers('tag:' + path)
        complete_tags = []
        l_tags = []
        for tag in p_tags:
            complete_tag = tag

            tag = tag.split('=')
            if len(tag) > 1:
                if tag[1] != '':
                    tag = tag[1][1:-1]
                # no value
                else:
                    tag = tag[0][1:-1]
            # use for custom tags
            else:
                tag = tag[0]

            l_tags.append((tag, complete_tag))

        paste_tags.append(l_tags)

    if len(allPastes) > 10:
        finished = False
    else:
        finished = True

    return render_template("tagged.html",
                           year=currentSelectYear,
                           all_path=all_path,
                           tags=tags,
                           list_tag=list_tag,
                           paste_tags=paste_tags,
                           bootstrap_label=bootstrap_label,
                           content=all_content,
                           paste_date=paste_date,
                           paste_linenum=paste_linenum,
                           char_to_display=max_preview_modal,
                           finished=finished)
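For reference, the two-tags-or-more branch relies on Redis set intersection; a minimal sketch with hypothetical keys and connection parameters:

import redis

r_serv_tags = redis.StrictRedis(host='localhost', port=6379, db=0,
                                decode_responses=True)

r_serv_tags.sadd('infoleak:automatic-detection="credential"', 'paste_a', 'paste_b')
r_serv_tags.sadd('infoleak:submission="manual"', 'paste_b', 'paste_c')

# pastes carrying *all* requested tags = the intersection of the tag sets
both = r_serv_tags.sinter('infoleak:automatic-detection="credential"',
                          'infoleak:submission="manual"')
print(both)  # {'paste_b'}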