Example #1
0
def parserDump_b(data, limiter):
    """Extract the first MySQL ``Duplicate entry '<value>'`` payload from *data*.

    Searches *data* for the literal marker ``Duplicate entry '`` and returns
    the text between the opening quote and the first occurrence of *limiter*.
    The extracted value is also echoed to stdout in yellow.

    Parameters:
        data    -- raw response text to scan.
        limiter -- single character that terminates the value.

    Returns the extracted string, or None when the marker is not present.
    (The original implementation never advanced ``i`` on the failure path and
    re-found the same marker index on every pass, so it hung forever when the
    marker was absent or not followed by ``' `` -- fixed by returning None.
    The lowercase/``>``-prefixed variants it compared against were dead code:
    ``data.find`` is case-sensitive and anchored the slice at the capital D.)
    """
    marker = "Duplicate entry '"
    start = data.find(marker)
    if start == -1:
        return None

    # Value begins immediately after the opening quote.
    value_start = start + len(marker)

    # The original scan stopped at len(data) - 1 when the limiter was absent,
    # silently dropping the final character; keep that behaviour.
    value_end = data.find(limiter, value_start)
    if value_end == -1:
        value_end = len(data) - 1

    value = data[value_start:value_end]
    # Parenthesised print works under both Python 2 and Python 3.
    print(colored(" " + value, 'yellow'))
    return value
Example #2
0
def parserDump(data, mod):
    """Collect the tokens that follow each ``(^#`` marker in *data*.

    For every occurrence of the 3-character marker ``(^#``, five characters
    (the marker plus two more) are skipped and the token that follows is read
    up to the next marker-like ``(..#`` pair, the ``' for key`` MySQL suffix,
    or an HTML tag opener ``<``.  Each token is appended to the returned list
    and echoed to stdout in yellow.

    Parameters:
        data -- raw response text to scan.
        mod  -- when 1, stop and return after the first token is found.

    Returns the list of extracted tokens (possibly empty).
    """
    i = 0
    data_list = []
    # Hoisted out of the loops: position of the "' for key" suffix, which
    # also terminates a token.  The original recomputed this every iteration.
    key_pos = data.find("' for key")

    while i < len(data) - 1:
        # The explicit i + 2 bound fixes an IndexError the original raised
        # when the scan reached the last two characters of *data* (its guard
        # only ensured i + 1 was in range before reading data[i + 2]).
        if (i + 2 < len(data) and data[i] == '('
                and data[i + 1] == '^' and data[i + 2] == '#'):
            i += 5
            ca = i
            while (i < len(data) - 2
                   and (data[i] != '(' and data[i + 2] != '#')):
                if i == key_pos:
                    break
                if data[i] == '<':
                    break
                i += 1
            i -= 1
            cb = i
            tmp = block_cutter(data, ca, cb)
            data_list.append(tmp)

            # Parenthesised print works under both Python 2 and Python 3.
            print(colored(" " + tmp, 'yellow'))
            if mod == 1:
                return data_list
        i += 1

    return data_list
Example #3
0
def     s5o (hashstring):
    """Look up the plaintext for an MD5 *hashstring* via online services.

    First queries the nitrxgen md5db service, which (per this code's usage)
    answers with the plaintext directly in the response body.  If that body
    is empty, falls back to scraping two Google search result pages for a
    "Hash: <b>...</b> ... <b>Plain</b>: ..." fragment matching *hashstring*.

    Returns the recovered plaintext, or '' when nothing was found.

    NOTE(review): this is Python 2 code -- `r.text.encode('utf-8')` yields a
    `str` there; under Python 3 it would yield `bytes` and the per-character
    comparisons below (`data[i] == 'H'`) would never match.  `block_cutter`
    is assumed to return the inclusive substring data[ca:cb+1] -- confirm
    against its definition.
    """

    ca = 0
    cb = 0
    i = 0

    # Direct lookup: nitrxgen returns the plaintext as the response body.
    nurl = "http://www.nitrxgen.net/md5db/"+str(hashstring)
    r = requests.get(nurl)
    hash_value = r.text.encode('utf-8')
    if (hash_value == ''):
        # Fallback: scrape two Google result pages and concatenate them.
        nurl = "https://www.google.com/search?q="+str(hashstring)+" plain:"
        r = requests.get(nurl)
        data = r.text.encode('utf-8')
        nurl = "https://www.google.com/search?q=list intext:Hash:"+str(hashstring)+" & intext:Plain:"
        r = requests.get(nurl)
        data += r.text.encode('utf-8')
        # Scan the combined HTML for "Hash:" labels.
        while (i < len(data)-1):
                if (data[i] == 'H'):
                    ca = i
                    # Read up to the next ':' to see whether this is "Hash:".
                    while (i < len(data)-1 and data[i] != ':'):
                        i += 1
                    cb = i
                    tmp = block_cutter(data, ca, cb)
                    if (tmp == 'Hash:'):
                        # Capture the fragment up to the next 'A' -- presumably
                        # the start of a trailing ". A..." sentence; verify.
                        while (i < len(data)-1 and data[i] != 'A'):
                            i += 1
                        cb = i
                        found = block_cutter(data, ca, cb)
                        # Hash shown on the page sits inside the first <b>...</b>.
                        hash_view = block_cutter(found, found.find('<b>')+3, found.find('</b>')-1)
                        if (hash_view == hashstring):
                            # Plaintext follows "<b>Plain</b>:" (13 chars + 1
                            # for a leading space) up to the ". A" sentence end.
                            hash_value = block_cutter(found, found.find('<b>Plain</b>:')+14, found.find('. A')-1)
                            if (hash_value != ''):
                                return hash_value

                i += 1
    else:
        # Direct lookup succeeded.
        return hash_value

    # Nothing found: hash_value is still '' here.
    return hash_value
Example #4
0
def focpa(url, param):
    """Return *url* truncated just past the query parameter named *param*.

    Walks the URL; after every '?' or '&' separator, reads forward to the
    next occurrence of *param*'s last character and compares the span read
    against *param*.  On a match, the URL prefix ending with the parameter
    name is returned.

    Parameters:
        url   -- URL containing a query string.
        param -- parameter name to locate (must be non-empty).

    Returns the truncated URL string, or -1 when the parameter is absent
    (the -1 sentinel is kept for compatibility with existing callers).
    """
    last_ch = param[-1]
    i = 0
    while i < len(url):
        if url[i] == '?' or url[i] == '&':
            ca = i + 1
            # Scan forward to the next occurrence of param's final character.
            j = ca
            while j < len(url) and url[j] != last_ch:
                j += 1
            if url[ca:j + 1] == param:
                return url[:j + 1]
            # Resume the separator scan from ca (outer i += 1 moves to ca+1).
            # The original restarted at ca + 2, which skipped a '&' sitting
            # at ca + 1 and made such parameters unfindable, e.g.
            # focpa("a?x&id=1", "id") wrongly returned -1.
            i = ca
        i += 1
    return -1