Example #1
def button(update, context):
    query = update.callback_query
    query.answer()
    choice = query.data
    if choice == "unshort":
        unshortener = UnshortenIt()
        # `link` is module-level state set by an earlier message handler
        uri = unshortener.unshorten(link)
        query.edit_message_text(text="Unshortened URL 👇🏼:\n" + str(uri))
    elif choice == "short":
        # `s` is the project's shortener helper, defined elsewhere
        response = s.bitly.short(link)
        query.edit_message_text("Shortened URL 👇🏼:\n" + str(response))
Example #2
def inlinequery(update, context):
    query = update.inline_query.query

    # shortened version of the queried link (s is the project's
    # shortener helper, defined elsewhere)
    shortlink = s.bitly.short(query)

    # unshortened version of the queried link
    unshortener = UnshortenIt()
    unshortlink = unshortener.unshorten(query)

    results = [
        InlineQueryResultArticle(id=str(uuid4()), title="short",
                                 input_message_content=InputTextMessageContent(shortlink),
                                 description="Click to shorten the link"),
        InlineQueryResultArticle(id=str(uuid4()), title="unshort",
                                 input_message_content=InputTextMessageContent(unshortlink),
                                 description="Click to unshorten the link"),
    ]
    update.inline_query.answer(results)
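For context, a minimal wiring sketch for the handler above, assuming python-telegram-bot v13 and a placeholder token:

from telegram.ext import Updater, InlineQueryHandler

updater = Updater("BOT_TOKEN")  # placeholder token, not from the original
updater.dispatcher.add_handler(InlineQueryHandler(inlinequery))
updater.start_polling()
updater.idle()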
Example #3
def url_unshortener(short_link):
    """
    Resolve shortened links.

    :param short_link: the short link to resolve.
    :return: the original URL.

    Handles 300+ URL-shortening services.
    """
    unshortener = UnshortenIt()
    original_url = unshortener.unshorten(short_link)
    return original_url
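A quick usage sketch for the helper above (the wp.me link is the same one used in Example #11):

original = url_unshortener('https://wp.me/Pa5TU8-2yD')
print(original)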
Example #4
class Adfly:

    def __init__(self):
        self.unshorten = UnshortenIt()
        self.session = requests.session()

    def unshorten_adfly_url(self, adfly_url: str):
        spinner = Halo(text='Grabbing url...', spinner='dots')
        spinner.start(text='Getting url...')

        current_link = adfly_url

        while True:
            try:
                url = self.unshorten.unshorten(uri=current_link)
                current_link = url

                # keep resolving until the link no longer points at adfly
                if 'adfly' in url:
                    continue

                request = self.session.get(url=url)
                soup = BeautifulSoup(request.text, 'html.parser')

                # some links land on an interstitial "redirecting" page;
                # the real destination is embedded in the page text
                if 'redirecting' in request.url:
                    current_link = soup.find_all('p')[2].text.replace(
                        'You will be visiting: ', '')

                spinner.succeed(text='Successfully retrieved url!')
                return current_link

            except (InvalidURL, MissingSchema, ConnectionError, NotFound,
                    UnshortenFailed) as exception:
                spinner.fail(text=f'Error: {exception}')
                return
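A minimal usage sketch for the class above; the adf.ly link is a placeholder:

resolver = Adfly()
final_url = resolver.unshorten_adfly_url('http://adf.ly/1XyZ')  # placeholder link
if final_url:
    print(final_url)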
Example #5
def deal_with_error():
    new_ids = []
    unshortener = UnshortenIt(default_timeout=20)
    for line in tqdm(open("data/ira-urls-plus-1.json")):
        d = json.loads(line.strip())
        if "error" in d and d["error"] and d["hostname"] not in ["blackmattersus.com", "blacktolive.org"]:
            try:
                url = unshortener.unshorten(d["url"])
                d["final_url"] = url
                d['hostname'] = get_hostname_from_url(url)
                del d["error"]
            except Exception:
                # leave the record marked as an error if it fails again
                print(d["url"])

        new_ids.append(d)
    write2json(new_ids)
Example #6
def home(request):
    # plain local variables; module-level globals are not safe when Django
    # serves concurrent requests
    data = ''
    title = ''
    uri = ''
    meta = ''
    get_domain = ''

    if request.method == 'POST':
        form = UrlForm(request.POST)

        if form.is_valid():
            data = form.cleaned_data.get('url')
            form.save()
            form = UrlForm()

            unshortener = UnshortenIt()
            uri = unshortener.unshorten(data)

            article = Goose().extract(uri)
            title = article.title
            meta = article.meta_description

            domain = tldextract.extract(uri)

            get_domain = domain.domain + '.' + domain.suffix

    else:
        form = UrlForm()

    return render(
        request, 'unmask/home.html', {
            'form': form,
            'ori_link': uri,
            'title': title,
            'meta': meta,
            'domain': get_domain,
            'data': data
        })
Example #7
def get_tweets(username):

    # consumer_key/secret and access_key/secret are assumed to be defined
    # at module level
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)

    #set count to however many tweets you want
    number_of_tweets = 500

    #get tweets
    for tweet in tweepy.Cursor(api.user_timeline,
                               screen_name=username).items(number_of_tweets):
        # if the tweet contains a t.co short link, slice it out and expand it
        if "https://t.co/" in tweet.text:
            index = tweet.text.index('https://t.co')
            t_url = tweet.text[index:index + 24]
            unshortener = UnshortenIt()
            url = unshortener.unshorten(t_url)
            if "open.spotify" in url:
                print(url)
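Worth noting: tweepy already exposes the expanded form of each t.co wrapper in the tweet's entities, which avoids slicing the text by hand. A sketch under the same setup as above:

for tweet in tweepy.Cursor(api.user_timeline,
                           screen_name=username).items(number_of_tweets):
    for entity in tweet.entities.get('urls', []):
        # expanded_url is the pre-resolved form of the t.co wrapper
        url = UnshortenIt().unshorten(entity['expanded_url'])
        if "open.spotify" in url:
            print(url)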
Example #8
def task(_ids):
    print("{} task starts ... ".format(os.getpid()), len(_ids))
    unshortener = UnshortenIt(default_timeout=20)
    new_ids = []
    for d in tqdm(_ids):
        try:
            d["error"] = False
            if d["short"]:
                url = unshortener.unshorten(d["url"])
                d["final_url"] = url
                d['hostname'] = get_hostname_from_url(url)
        except Exception:
            # mark the record and move on if the link cannot be resolved
            d['error'] = True

        new_ids.append(d)
    write2json(new_ids)

    return new_ids
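The os.getpid() print suggests task() is meant to run in worker processes. A minimal driver sketch, assuming the records live in a list of dicts called all_ids (hypothetical name):

from multiprocessing import Pool

def run_parallel(all_ids, workers=4):
    # all_ids is hypothetical; one chunk of records per worker
    chunks = [all_ids[i::workers] for i in range(workers)]
    with Pool(workers) as pool:
        results = pool.map(task, chunks)
    # flatten the per-worker lists back into one
    return [d for chunk in results for d in chunk]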
Example #9
import csv
import re
from unshortenit import UnshortenIt

unshortener = UnshortenIt()

# coordinates appear either after '@' (map view) or after 'q=' (query string)
COORD_PATTERNS = [
    r'@(-?[0-9]+\.[0-9]+),(-?[0-9]+\.[0-9]+)',
    r'q=(-?[0-9]+\.[0-9]+),(-?[0-9]+\.[0-9]+)',
]

with open('locations_coordinates.csv', mode='w') as final, \
        open('locations_link.csv') as csv_file:
    csv_writer = csv.writer(final, dialect='excel')
    csv_reader = csv.reader(csv_file, delimiter=',')
    for row in csv_reader:
        url = row[1]
        # shortened Google Maps links are 31 or 32 characters long;
        # expand those before looking for coordinates
        if len(url) in (31, 32):
            url = unshortener.unshorten(url)
        for pattern in COORD_PATTERNS:
            location = re.search(pattern, url, re.DOTALL)
            if location:
                latitude, longitude = location.groups()
                csv_writer.writerow([row[0], latitude, longitude])
                print(f'Location {row[0]}: latitude={latitude} and '
                      f'longitude={longitude}')
                break
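For reference, the coordinate pattern can be sanity-checked against a typical Google Maps URL (the sample URL is made up):

import re

sample = 'https://www.google.com/maps/@41.40338,2.17403,17z'
match = re.search(r'@(-?[0-9]+\.[0-9]+),(-?[0-9]+\.[0-9]+)', sample)
print(match.groups())  # ('41.40338', '2.17403')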
Example #10
def url_decompress(url):
    print("[+] Now we are in URL Decompression Engine")
    r = UnshortenIt().unshorten(url)
    print("[+] Found the Original URL : ", r)
    # pass the resolved URL down the chain
    check_url(r)


def check_url(url):
    print('Checking for the Cyrillic IDN Homograph Attack')
    bad_chars = [
        '\u0430', '\u03F2', '\u0435', '\u043E', '\u0440', '\u0455', '\u0501',
        '\u051B', '\u051D'
    ]
    result = [char for char in bad_chars if char in url]
    if result:
        msg = '\n[*] Evil URL detected: {}'.format(url)
        msg += '\n[*] Evil characters used: {}'.format(result)
    else:
        msg = '\n[*] Evil URL NOT detected: {}'.format(url)

    print(msg)
    url_detect(url)


def url_detect(url):
    # requests, json, and time are assumed to be imported at module level
    RED = '\033[31m'
    BLUE = '\033[34m'
    GREEN = '\033[32m'
    YELLOW = '\033[93m'
    ENDC = '\033[m'
    print(BLUE + "[+] Now we are in the URL Detection Engine" + ENDC)
    scanurl = "https://www.virustotal.com/vtapi/v2/url/scan"
    api_key = "10348d6a56143962c1646c73d805fa09be69469757b1b8e71c446b79356c43cc"
    r_scan = requests.post(scanurl, data={"apikey": api_key, "url": url})
    json_scan_data = json.loads(r_scan.text)
    resource = json_scan_data['scan_id']
    reporturl = "https://www.virustotal.com/vtapi/v2/url/report"
    r_report = requests.post(reporturl,
                             data={
                                 "apikey": api_key,
                                 "resource": resource,
                                 "scan": 1
                             })
    json_report_data = json.loads(r_report.text)

    for scanner in json_report_data['scans']:
        if json_report_data['scans'][scanner]['detected']:
            print(RED + "{} detected the INPUT URL as MALICIOUS".format(scanner)
                  + ENDC)
        else:
            print(GREEN + "{} detected the INPUT URL as SAFE".format(scanner) +
                  ENDC)

    # start_time is assumed to be recorded when the script starts
    now_time = time.time()
    print('Time of Completing the EXECUTION {}'.format(now_time))
    tt_exec = now_time - start_time

    print(YELLOW + 'Time Taken in the EXECUTION {:.2f}'.format(tt_exec) + ENDC)
    return json_report_data
Example #11
"""Code snippets vol-59
   292-Unshorten a URL

   Download all snippets so far:
   https://wp.me/Pa5TU8-1yg
   Blog: stevepython.wordpress.com

Requirements:
pip3 install unshortenit

https://stackoverflow.com/questions/4201062/how-can-i-unshorten-a-url
"""
from unshortenit import UnshortenIt

unsh = UnshortenIt()
uri = unsh.unshorten('https://wp.me/Pa5TU8-2yD')
print(uri)
Example #12
def cli(module, url, follow_nested):
    unshortener = UnshortenIt()
    print(unshortener.unshorten(url, module, unshorten_nested=follow_nested))
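The click decorators are not shown in this snippet. A plausible wiring, with option names inferred from the function signature rather than taken from the unshortenit source:

import click
from unshortenit import UnshortenIt

@click.command()
@click.argument('url')
@click.option('--module', default=None, help='Force a specific service module')
@click.option('--follow-nested', is_flag=True, help='Also resolve nested short links')
def cli(module, url, follow_nested):
    unshortener = UnshortenIt()
    click.echo(unshortener.unshorten(url, module, unshorten_nested=follow_nested))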
Example #13
def run(chosenhandle, url):

    #frontloading basic tasks to improve efficiency
    urlexpander = UnshortenIt()

    #print("Number of Arguments: " + str(len(sys.argv)) + " arguments")
    #print("Argument List: " + str(sys.argv))

    #import keys from file
    with open("devkeys.txt",
              "rt") as getkeys:  # open file devkeys.txt for reading text data
        keys = getkeys.read()  # read the entire file into a string variable

    #use to see the contents of the key file
    #print("Your Twitter API Developer Keys are:")
    #print(keys)

    keylist = keys.splitlines()

    #use to test that the line split is working correctly
    #print(keylist[0])

    #this is redundant but verbose, consider removing
    apikey = keylist[0]
    apisecret = keylist[1]
    accesskey = keylist[2]
    accesssecret = keylist[3]

    auth = tweepy.OAuthHandler(apikey, apisecret)
    auth.set_access_token(accesskey, accesssecret)

    print(str(auth))

    api = tweepy.API(auth)

    class MyStreamListener(tweepy.StreamListener):
        def on_status(self, status):
            print("New tweet from monitored handle:")
            print(status.text)
            ifvoid = 0
            #finds "RT" in first 2 characters of a tweet (retweet)
            if ((str(status.text[:2]).find("RT")) != -1):
                print("This is a retweet, we'll ignore this!")
                ifvoid = 1

            #finds "https://" anywhere in the tweet"
            elif ((str(status.text).find("https://")) != -1 and ifvoid == 0):
                print("This tweet is an image or link.")

                #sends tweet contents to discord
                hook = Webhook(url)
                embed = Embed(description="New link/image from " + handle +
                              ":")

                embed.add_field(name="Tweet Contents", value=str(status.text))
                embed.set_footer(
                    text="Twitter Monitor -- github.com/izedout/twitter-monitor"
                )

                hook.send(embed=embed)

                print("Tweet Sent to Discord!")

                #finds and sends expanded url to discord
                foundurls = re.findall(r'(https?://\S+)', str(status.text))
                urlnumber = len(foundurls)
                print("Number of URLs in tweet: " + str(urlnumber))
                currenturl = 1
                while currenturl <= urlnumber:
                    uri = urlexpander.unshorten(foundurls[currenturl - 1])

                    hook = Webhook(url)

                    embed.add_field(name="Expanded URL:", value=uri)
                    embed.set_footer(
                        text=
                        "Twitter Monitor -- github.com/izedout/twitter-monitor"
                    )

                    hook.send(embed=embed)

                    currenturl = currenturl + 1
                    print("Expanded URL " + uri + " Sent To Discord!")
                try:
                    media = status.extended_entities.get('media', [])
                    #print(len(media))
                    i = 0
                    while (i < len(media)):

                        hook = Webhook(url)

                        media_files = (media[i]['media_url'])
                        embed.set_image(media_files)

                        hook.send(embed=embed)
                        i = i + 1
                except AttributeError:
                    # no extended_entities on this tweet, so nothing to send
                    print("Wasn't an image")

                finally:
                    print("Finished sending any images in tweet")
            #finds "@" in the first character of a tweet (reply)
            elif ((str(status.text[:1]).find("@")) == 0 and ifvoid == 0):
                print("This is likely a reply, will not send")

            else:
                print("This is a regualr tweet, will send!")

                hook = Webhook(url)
                embed = Embed(description="New tweet from " + handle + ":")

                embed.add_field(name="Tweet Contents", value=str(status.text))
                embed.set_footer(
                    text="Twitter Monitor -- github.com/izedout/twitter-monitor"
                )

                hook.send(embed=embed)

    myStreamListener = MyStreamListener()
    myStream = tweepy.Stream(auth=api.auth, listener=myStreamListener)

    #TEST CODE FOR TWITTER API
    #public_tweets = api.home_timeline()
    #for tweet in public_tweets:
    #    print(tweet.text)

    handle = chosenhandle
    print("This threads handle is @" + chosenhandle)
    type(handle)
    user = api.get_user(screen_name=handle)
    client_id = user.id

    print("Now monitoring @" + handle + " for new tweets!")

    try:
        myStream.filter(follow=[str(client_id)])
    except KeyboardInterrupt:
        #stops monitor
        print("Monitor Stopped Successfully!")
        print("To monitor again, please re-run the script :)")
        #closes script, without "raise" the script restarts from the start of the while loop
        #raise
        raise SystemExit
Example #14
 def __init__(self):
     self.unshorten = UnshortenIt()
     self.session = requests.session()
Example #15
def carga(page):

    global configuracion
    global terminamos
    browser = webdriver.Chrome()
    browser2 = webdriver.Chrome()

    web = 'https://3d-load.net/page/' + str(page) + '/'
    print('\nAccessing: ' + web + '\n')
    browser.get(web)
    assert '3DLOAD' in browser.title
    # sleeper(5)

    for a in browser.find_elements_by_class_name('link'):
        links = a.get_attribute('href')
        print(links)
        browser2.get(links)
        # browser2.minimize_window()
        # browser.minimize_window()
        if configuracion['Sitio.De.Descarga']['mega'] == 'si':
            for b in browser2.find_elements_by_class_name(
                    'shortcode.button.red.large'):
                alinks = b.get_attribute('href')
                unshortener = UnshortenIt()
                uri = unshortener.unshorten(alinks)
                print(uri + '\n')

                # the mega file id is everything after the '=' in the
                # unshortened URL
                igual = uri.find("=")
                file_id = uri[igual + 1:]

                # derive the category and file name from the post URL path:
                # ocat is the first path segment, file_name the rest
                o = urlparse(links)
                ocat = o.path[1:]
                a = ocat.find("/")
                file_name = ocat[a + 1:len(ocat) - 1]
                ocat = ocat[0:a]

                # skip files already downloaded on a previous run
                with open('listado.ini', 'r') as listado:
                    lista = listado.readlines()

                if file_name + '\n' not in lista:
                    print('\nDownloading file: ' + file_name + '\n')
                    os.system('mega-get --ignore-quota-warn ' + file_id +
                              ' ./download/' + file_name)
                    print("\nExtracting into the 'extract' directory")
                    if os.path.isfile(file_name):
                        listado = open("listado.ini",
                                       mode="a",
                                       encoding="utf-8")
                        listado.write(file_name + '\n')
                        listado.close()
                        os.system('unrar x -u "./download/"' + file_name +
                                  ' ./extract/')
                        print('\nBorrando el archivo \'.rar\'')
                        #                        borra = './download/' + title2 + '.rar'
                        #                        os.remove (borra)
                        try:
                            os.remove('./download/' + file_name)
                        except OSError as e:
                            print(e)
                        else:
                            print("File is deleted successfully")
                    with open('3d-load.cfg', 'w') as archivoconfig:
                        configuracion.write(archivoconfig)

                else:
                    print('already in the list')
                    ultconf = configuracion['Ult.Descargado']['ult.descargado']
                    if ultconf == file_name:
                        terminamos = 1
                        break

    browser.close()
    browser2.close()
Example #17
 def __init__(self):
     self.unshortener = UnshortenIt()
Example #18
def run(chosenhandle, url):

    #frontloading basic tasks to improve efficiency
    urlexpander = UnshortenIt()

    print("Number of Arguments: " + str(len(sys.argv)) + " arguments")
    print("Argument List: " + str(sys.argv))

    #import keys from file
    with open("devkeys.txt", "rt") as getkeys:  # open devkeys.txt for reading
        keys = getkeys.read()  # read the entire file into a string variable

    #use to see the contents of the key file
    #print("Your Twitter API Developer Keys are:")
    #print(keys)

    keylist = keys.splitlines()

    #use to test that the line split is working correctly
    #print(keylist[0])

    #this is redundant but verbose, consider removing
    apikey = keylist[0]
    apisecret = keylist[1]
    accesskey = keylist[2]
    accesssecret = keylist[3]

    auth = tweepy.OAuthHandler(apikey, apisecret)
    auth.set_access_token(accesskey, accesssecret)

    print(str(auth))

    api = tweepy.API(auth)

    class MyStreamListener(tweepy.StreamListener):

        def on_status(self, status):
            print("New tweet from monitored handle:")
            print(status.text)
            ifvoid = 0
            #finds "RT" in first 2 characters of a tweet (retweet)
            if((str(status.text[:2]).find("RT")) != -1):
                print("This is a retweet or someone abusing the negative filter, we'll ignore this!")
                ifvoid = 1

            #finds "https://" anywhere in the tweet"
            elif((str(status.text).find("https://")) != -1 and ifvoid == 0):
                print("This tweet is an image or link!")

                #sends tweet contents to discord
                embed = Webhook(url)
                embed.set_desc("@everyone - NEW LINK/IMAGE FROM " + handle + ":")
                embed.add_field(name="Tweet Contents", value=str(status.text))
                embed.set_footer(text="Twitter Monitor by @__ized on twitter",ts=True)

                embed.post()

                print("Tweet Sent to Discord!")

                #finds and sends expanded url to discord
                foundurls = re.findall(r'(https?://\S+)', str(status.text))
                urlnumber = len(foundurls)
                print("Number of URLs in tweet: " + urlnumber)
                currenturl = 1
                while currenturl <= urlnumber:
                    uri = urlexpander.unshorten(foundurls[currenturl - 1])
                    
                    embed = Webhook(url)
                    embed.set_desc("Expanded URL:")
                    embed.add_field(name="-->", value=uri)
                    embed.set_footer(text="Twitter Monitor by @__ized on twitter",ts=True)

                    embed.post()

                    currenturl = currenturl + 1

                    print("Expanded URL " + uri + " Sent To Discord!")

            #finds "@" in the first character of a tweet (reply)
            elif((str(status.text[:1]).find("@")) == 0 and ifvoid == 0):
                print("This is likely a reply or other tweet, will not send")

            else:
                print("This is a regualr tweet, will send!")

                embed = Webhook(url)
                embed.set_desc("New tweet from " + handle + ":")
                embed.add_field(name="Tweet Contents", value=str(status.text))
                embed.set_footer(text="Twitter Monitor by @__ized on twitter",ts=True)

                embed.post()

    myStreamListener = MyStreamListener()
    myStream = tweepy.Stream(auth = api.auth, listener=myStreamListener)

    #TEST CODE FOR TWITTER API
    #public_tweets = api.home_timeline()
    #for tweet in public_tweets:
    #    print(tweet.text)

    handle = chosenhandle
    print("This threads handle is @" + chosenhandle)
    type(handle)
    user = api.get_user(screen_name = handle)
    client_id = user.id
    #defines cybers twitter id, could become a user prompt for an @ and then converted to desired twitter id

    print("Now monitoring @" + handle + " for new tweets! RELAXXX")

    try:
        myStream.filter(follow=[str(client_id)])
    except KeyboardInterrupt:
        #stops monitor
        print("Monitor Stopped Successfully!")
        print("To monitor again, please re-run the script :)")
        #closes script, without "raise" the script restarts from the start of the while loop
        #raise
        raise SystemExit
Example #19
def retrieveTweets(keyword, startDate, endDate, maxTweets, minimum_likes=0):
    ###
    # RETURN:
    #         A list of dictionaries with fields of tweet info
    #         - fields: string text , datetime datetime , int likeCount, int retweetCount
    #                   int replyCount, int quoteCount, (list links - only when outlinks exist)
    #     OR
    #         An Exception
    #
    # keyword: string, the hashtag keyword, without # - 'blacklivesmatter'
    # startDate: string, the first day of the query, including year - '2020-05-25'
    # endDate: string, last day of the query, including year - '2020-06-25'
    # maxTweets: int, maximum number of tweets to be retrieved - 1000
    # minimum_likes: int, default is 0, query filter for searching tweets with a minimum number of likes - 100000
    ###

    import snscrape.modules.twitter as sntwitter

    # initialize the unshortener
    from unshortenit import UnshortenIt
    unshortener = UnshortenIt(default_timeout=30)

    # Initialize the list of tweets to be returned
    tweets = []

    ## Limit the tweets retrieved here using min_likes_filter
    min_likes_filter = ''
    if minimum_likes > 0:
        min_likes_filter = "min_faves:{}".format(minimum_likes)

    ## Build scraper
    result = sntwitter.TwitterHashtagScraper(
        keyword + ' {} since:{} until:{} lang:en -filter:replies'.format(
            min_likes_filter, startDate, endDate)).get_items()
    # example queryString - 'blacklivesmatter min_faves:1 since:2020-06-01 until:2020-06-02
    #                        lang:en  -filter:replies'
    # queryString syntax: filter:links - search only tweets with links
    # queryString syntax: min_faves:1 - search tweets with a minimum of 1 like

    try:
        for i, tweet in enumerate(result):
            if (i >= maxTweets):
                break

            # Tweets without outlinks won't have the 'links' field

            tweet_data = {
                'text': tweet.content,
                'datetime': tweet.date,
                'likeCount': tweet.likeCount,
                'retweetCount': tweet.retweetCount,
                'replyCount': tweet.replyCount,
                'quoteCount': tweet.quoteCount
            }

            # Only create the 'links' field for tweets that have outlinks;
            # unshortenLinks is a helper defined elsewhere (sketch below)
            if tweet.outlinks != []:
                links = unshortenLinks(tweet.outlinks, unshortener)
                tweet_data['links'] = links

            tweets.append(tweet_data)

    except Exception as e:
        print('Exception:')
        print(e)
        return e

    return tweets
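unshortenLinks() is called above but not defined in the snippet. A minimal sketch consistent with the call site (name and behavior assumed, not taken from the original project):

def unshortenLinks(outlinks, unshortener):
    # resolve each outlink, keeping the original if resolution fails
    links = []
    for link in outlinks:
        try:
            links.append(unshortener.unshorten(link))
        except Exception:
            links.append(link)
    return links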