Example #1
import json
import re

import praw
import requests

import functions


def download_reddit_submission(full_link):

    # Set up the reddit API client (reddit_id and reddit_secret are defined elsewhere)
    reddit = praw.Reddit(client_id=reddit_id,
                         client_secret=reddit_secret,
                         user_agent='reddit_to_twitter')

    # Grab the reddit ID from the given link; the API uses it to fetch the submission
    grab_id = re.search(r'(?<=comments/)\w+', full_link)
    topic_id = grab_id.group(0)

    # Use the API to retrieve the link submitted in the topic (image, gif or video)
    submission = reddit.submission(id=topic_id)
    url = submission.url

    # Discovers the file type based on the service it's uploaded on
    saved_file_type = ''
    if 'gfycat.com' in url:
        # gfycat hosts only gifs; its API exposes a version that is 5 MB or smaller
        gfycat_json = 'https://api.gfycat.com/v1/gfycats/' + url.rsplit(
            '/', 1)[1]
        request = requests.get(gfycat_json).text
        json_request = json.loads(request)
        gif_5mb = json_request['gfyItem']['max5mbGif']

        # Downloads gif and sets file type to GIF
        functions.download(gif_5mb, 'gif')
        saved_file_type = 'gif'

    elif 'redd.it' in url:
        # If it's hosted on reddit it can be either a jpg or an mp4 (the only formats used there)
        extension = url.rsplit('.', 1)[1]

        # Knowing the right extension to name the file, downloads it
        if extension == 'jpg':
            functions.download(url, 'jpg')
            saved_file_type = 'jpg'
        else:
            functions.download_video(url)
            saved_file_type = 'mp4'

    elif 'imgur.com' in url:
        # If it's hosted on imgur the possible extensions are JPG and GIFV/MP4 (both can be saved as mp4 file)
        url_breakdown = url.rsplit('.', 1)
        extension = url_breakdown[1]
        if extension == 'jpg':
            functions.download(url, 'jpg')
            saved_file_type = 'jpg'
        elif extension == 'gifv' or extension == 'mp4':
            url = url_breakdown[0] + '.mp4'
            functions.download(url, 'mp4')
            saved_file_type = 'mp4'

    return saved_file_type
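The snippet above leans on two project-local helpers, functions.download and functions.download_video, that aren't shown. A minimal sketch of what download might look like, assuming it takes a URL and a file extension and saves to a fixed-name file (the filename scheme and helper body are guesses, not the project's actual code):

import requests

def download(url, ext, name='downloaded'):
    # 'name' is a hypothetical default; the real helper may derive the filename differently
    response = requests.get(url, stream=True, timeout=30)
    response.raise_for_status()
    with open(name + '.' + ext, 'wb') as fh:
        for chunk in response.iter_content(chunk_size=8192):
            fh.write(chunk)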
Example #2
import tarfile

import functions as fns  # the project-local helpers module


def obtain_data(source1, source2):

    data_sources = [source1, source2]
    try:
        for source in data_sources:
            url = 'https://data.vision.ee.ethz.ch/cvl/rrothe/imdb-wiki/static/{}_crop.tar'.format(
                source)
            filename = '{}_crop.tar'.format(source)
            print('Downloading {}...'.format(source))
            fns.download(url, filename)
            print('Complete')
    except Exception:
        print('Data Download Failed')

    try:
        for source in data_sources:
            print('Extracting {}...'.format(source))
            with tarfile.open('{}_crop.tar'.format(source), 'r') as f:
                f.extractall()
            print('Complete')
    except (tarfile.TarError, OSError):
        print('Data Extraction Failed')
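Given the URL template above, this function targets the IMDB-WIKI face dataset; a call such as the following would download imdb_crop.tar and wiki_crop.tar and extract both into the working directory:

obtain_data('imdb', 'wiki')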
Example #3
import os

import requests

# f (the helpers module), weightsFolder and preTrainedURL are defined elsewhere in the project


def downloadCheck():

    bestandsPad = os.path.join(weightsFolder, "yolov3.weights")

    if os.path.exists(bestandsPad):
        lokaleGrootte = int(os.stat(bestandsPad).st_size)
        externeGrootte = int(
            requests.get(preTrainedURL, stream=True).headers['content-length'])

        if lokaleGrootte == externeGrootte:
            print("Het lokale weights bestand klopt! Ga verder...")
            f.br()

        else:
            print(
                "The local file does not fully match; downloading it again..."
            )
            f.download(preTrainedURL, weightsFolder, (1024 * 16))

    else:
        print(
            'Program started for the first time; starting download...'
        )
        f.download(preTrainedURL, weightsFolder, (1024 * 16))
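Here f.download is called with a destination folder and a chunk size rather than a filename. A sketch under that reading (deriving the filename from the URL is an assumption):

import os
import requests

def download(url, folder, chunk_size):
    filename = os.path.join(folder, url.rsplit('/', 1)[1])  # assumed naming scheme
    response = requests.get(url, stream=True, timeout=30)
    response.raise_for_status()
    with open(filename, 'wb') as fh:
        for chunk in response.iter_content(chunk_size=chunk_size):
            fh.write(chunk)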
Example #4
import sys

import functions


def start_menu():
    menu = {}
    menu_items = [
        "Exit", "Create Build", "Restore Build", "List Builds",
        "Download Build", "Upload Build", "Delete Builds", "Settings"
    ]

    for i, item in enumerate(menu_items):
        menu[str(i)] = item

    print("Build Manager\n")
    for entry in menu:
        print(f"[{entry}]:", menu[entry])

    selection = input("\nPlease Select: ")

    functions.clear()
    if selection.isdigit():
        if 1 <= int(selection) <= 7:
            switcher = {
                1: lambda: functions.create_build(),
                2: lambda: functions.restore_build(),
                3: lambda: functions.list_builds(service_menu("local")),
                4: lambda: functions.download(service_menu("online")),
                5: lambda: functions.upload(service_menu("online")),
                6: lambda: functions.delete(service_menu("local")),
                7: lambda: settings_menu()
            }
            func = switcher.get(int(selection),
                                lambda: "Unknown option selected\n")
            return func()
        elif selection == "0":
            functions.clear()
            sys.exit()

    print("Unknown option selected\n")
Example #5
    # Generate the url
    url = base_path + str_volume

    for page in range(1, 9999):
        # HTML request + souping
        html = urllib.request.urlopen(
            url).read()  # We get the HTML of the page
        soup = BeautifulSoup(html, 'html.parser')  # We cook it into a soup
        image_path = soup.find("img", class_="picture")['src']  # Image URL
        abs_image_path = site + image_path

        # Naming file
        str_page = pad_zeros(str(page), 3)
        filename = folder_volume + '/Page ' + str_page + '.jpg'

        # Downloading page
        download(abs_image_path.replace(" ", "%20"), filename)
        print('Page ' + str(page) + ' of volume ' + str(n_volume) +
              ' has been downloaded')

        # Search for next page
        next_url = None
        tds = soup.find_all(["td"])  # Get all the td
        for td in tds:
            list_a = td.find_all("a")
            for a in list_a:
                if 'Next Page' in str(a):
                    next_url = a["href"]
                    break

        if next_url:  # There's a next page
            url = site + next_url
        else:  # No next page link, so the volume is finished
            break
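pad_zeros is not defined in this excerpt; judging by the call site it left-pads the page number to a fixed width. A one-line sketch under that assumption:

def pad_zeros(s, width):
    return s.zfill(width)  # '7' -> '007' for width 3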
Example #6
import numpy as np
import functions as func

# Load images
X = func.load_images("train-images-idx3-ubyte.gz")

# Download labels
Y = func.download("train-labels-idx1-ubyte.gz")

# Transform to one hot vector
Y_one_hot = func.get_one_hot(Y)

# Get mini batches and run the forward pass
X = func.get_mini_batch(X)
X = func.forward(X)
Y_one_hot = func.get_mini_batch(Y_one_hot)

# Calculate cross entropy loss
e = func.cross_entropy_loss(X, Y_one_hot)
print(f"Cross entropy loss: {e}")
Example #7
        functions.buildFromSource(github_version_tag)

    else:
        # Set download url
        ## main file
        gtdownload = ('https://github.com/go-gitea/gitea/releases/download/'
                      + github_version_tag + '/gitea-' + github_version + '-'
                      + settings.gtsystem + '.xz')
        print(gtdownload)
        ## sha256 file
        shadownload = gtdownload + '.sha256'
        print(shadownload)

        # Download file

        ## downloading sha
        print("downloading sha256 hashsum")
        functions.download(shadownload, settings.tmpdir + 'gitea.xz.sha256')
        ## downloading xz
        print("downloading", github_version_tag, 'gitea.xz')
        tmpxz = settings.tmpdir + 'gitea-' + github_version + '-' + settings.gtsystem + '.xz'
        functions.download(gtdownload, tmpxz)

        # Verify the sha256 checksum
        os.chdir(settings.tmpdir)

        if os.system("sha256sum -c gitea.xz.sha256 > /dev/null") == 0:
            print("sha ok, extracting file to location")
            # extracting the downloaded file
            cmd = "xz -d " + tmpxz
            print(cmd)
            os.system(cmd)
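Shelling out to sha256sum works only where that binary exists; a portable sketch of the same check using hashlib (the helper name is ours, not the project's):

import hashlib

def sha256_ok(file_path, sha_file_path):
    # The .sha256 file holds '<hexdigest>  <filename>' as written by sha256sum
    with open(sha_file_path) as fh:
        expected = fh.read().split()[0]
    digest = hashlib.sha256()
    with open(file_path, 'rb') as fh:
        for chunk in iter(lambda: fh.read(1 << 16), b''):
            digest.update(chunk)
    return digest.hexdigest() == expected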
Example #8
server.bind((HOST, PORT))
print("[+] Socket bound to port!")
server.listen(5)
print("[+] Listening...")
connection, address = server.accept()
print("[+] Connected with IP | {}".format(address[0]))
while True:
    command = input("> ")
    if command == "terminate":
        connection.send(str.encode(command))
        connection.close()
        sys.exit()
    if command == "":
        continue
    if command[:9] == "download ":
        connection.send(str.encode(command))
        functions.download(command[9:], connection)
        continue
    if command == "screenshot" or command == "campic":
        connection.send(str.encode(command))
        functions.photo_capture(connection)
        continue
    if command == "webcam":
        connection.send(str.encode(command))
        functions.webcam()
        continue
    if command != "":
        connection.send(str.encode(command))
        response = connection.recv(409600)
        print(str(response, 'utf-8'))
Example #9
    one = False
    toc = False
    q = input(
        'One link/chapter or multiple chapters?\n1. One link\n2. Multiple\n')
    if q == '1':
        one = True
    elif q == '2':
        toc = True
    else:
        print('Input not accepted. Closing program...')
        sys.exit()

    toc_link = input('Link? ')

    parser, url = f.parser_choice(toc_link)
    toc_html = 'toc.html'

    f.download(toc_link, toc_html)

    info = f.get_info(parser, toc_html)

    imgs = []

    chapter_start = ''
    chapter_end = ''

    if toc:
        #if the request is just one link, skip all link list creation
        '''
        finished_flag = False
        if flag.lower() == 'n':
            a = input('Is the novel finished? Chapters range won\'t be shown in'
                ' filename if it\'s a finished series. y/n\n')
Example #10
import yaml
from functions import download, concat, delete

if __name__ == "__main__":
    with open("config.yaml") as file:
        config = yaml.load(file.read())

    if config["download"]:
        download(config)

    if config["concatenate"]:
        concat(config)

    if config["delete"]:
        delete(config)
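config.yaml itself is not shown; given the three keys the script reads, a minimal file might look like this (these flags are the only fields the snippet requires; everything else is guesswork):

download: true
concatenate: true
delete: false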
Example #11
File: qt.py Project: zudzuka/VKFaceMash
def dbutton(self):
    functions.download()
    self.person = functions.unpickle()
    self.update(2)
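functions.unpickle is not shown; presumably it loads a previously pickled record from disk. A minimal sketch under that assumption (the filename is hypothetical):

import pickle

def unpickle(path='person.pickle'):  # hypothetical filename
    with open(path, 'rb') as fh:
        return pickle.load(fh)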
Example #12
        reqEpi = functions.calulations(name)
        print("done 1\n\n\n")
        if(reqEpi):
            sleep(10)

            res = requests.get(seriesLink, headers=headers)

            with open(name + "Torrent Page.html", 'wb') as torrentPage:
                for i in res.iter_content(1000):
                    torrentPage.write(i)
            reqEpi = functions.findReducedEpi(seasonNo, reqEpi)
            shortlisted = functions.findShortList(reqEpi, name)
            if len(shortlisted) > 0:
                finalUrl = functions.produceFinalUrl(functions.bestMatch(shortlisted)[0])
                functions.download(finalUrl)
            else:
                print("Sorry, the latest episode couldn't be found. Please try option 2.")
        else:
            print("Sorry No Match Found")
    else:
        fin = open(name + "Torrent Page.html", 'wb')
        if fin:
            res = requests.get("https://kat.cr" + path + '/torrents/', headers=headers)
            for i in res.iter_content(1000):
                fin.write(i)
            fin.close()
        elif properties(fin):
            res = requests.get("https://kat.cr" + path + '/torrents/', headers=headers)
            for i in res.iter_content(1000):
                fin.write(i)