Example no. 1
def robot():
    try:
        print('\n>>> [Image Robot] Starting...')

        content = state.load()
        content = fetch_images_of_all_sentences(content)
        content = download_all_images(content)
        state.save(content)

        print('>>> [Image Robot] Stopping. Work done!')
    except Exception as e:
        print(f'\n> [Image Robot] Unexpected Error: {e}\n')
        exit_video_pymaker()
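
Most of these robots pass a shared Content object between stages through a `state` helper (`state.load()` / `state.save(content)`) and abort through `exit_video_pymaker()`, none of which appears in the snippets. A minimal sketch of what such helpers could look like, assuming JSON persistence and a `content.json` file name (both assumptions, not taken from the source):

import json
import sys
from types import SimpleNamespace

STATE_FILE = 'content.json'  # assumed file name for the shared pipeline state


def save(content):
    # Serialize the Content object's attributes to JSON (nested objects via __dict__).
    with open(STATE_FILE, 'w') as state_file:
        json.dump(content, state_file, indent=2, default=lambda obj: obj.__dict__)


def load():
    # Rebuild an attribute-style object from the saved JSON.
    with open(STATE_FILE) as state_file:
        return json.load(state_file, object_hook=lambda data: SimpleNamespace(**data))


def exit_video_pymaker():
    # Abort the whole pipeline after an unrecoverable error.
    sys.exit(1)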
Example no. 2
def robot():
    try:
        print('\n>>> [YouTube Robot] Starting...')
        content = state.load()

        service = get_authenticated_service()
        video_id = upload_video(content, service)
        upload_thumbnail(video_id, service)

        state.save(content)
        print('>>> [YouTube Robot] Stopping. Work done!')
    except Exception as e:
        print(f'\n> [YouTube Robot] Unexpected Error: {e}\n')
        exit_video_pymaker()
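
`get_authenticated_service`, `upload_video` and `upload_thumbnail` are not shown in this snippet. For reference, the thumbnail step maps onto a single YouTube Data API call; a hedged sketch, with an assumed file path (`static/youtube-thumbnail.jpg` is not from the source):

from googleapiclient.http import MediaFileUpload


def upload_thumbnail(video_id, service):
    # Attach the rendered thumbnail image to the freshly uploaded video.
    service.thumbnails().set(
        videoId=video_id,
        media_body=MediaFileUpload('static/youtube-thumbnail.jpg')  # path assumed
    ).execute()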
Example no. 3
def robot():
    try:
        content = Content()

        content.search_term = ask_search_term()
        content.search_prefix = ask_search_prefix()

        print('> [Input Robot] Saving content state...')
        state.save(content)
    except ValueError as value_error:
        print(f'\n> [Input Robot] {value_error}')
        exit_video_pymaker()
    except Exception as e:
        print(f'\n> [Input Robot] Unexpected Error: {e}')
        exit_video_pymaker()
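
This is the only example that constructs `Content()` directly; the class itself is not shown. Judging from the attributes the other snippets read and write, a plain data holder along these lines would be enough (field defaults are assumptions):

class Content:
    """Shared state handed from robot to robot (fields inferred from the examples)."""

    def __init__(self):
        self.search_term = ''        # Wikipedia search term (Input Robot)
        self.search_prefix = ''      # e.g. 'Who is', 'What is' (Input Robot)
        self.max_sentences = 7       # sentence cap applied by the Text Robot
        self.sentences = []          # per-sentence objects with .keywords, .images, .google_search_query
        self.downloaded_images = []  # image URLs saved by the Image Robot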
Example no. 4
def robot():
    """ Text Robot
        This function gives life to the text robot
    """
    print("Pre loading data from JSON...")

    content = load()

    fetch_content_from_wikipedia(content)
    clear_content(content)
    break_content_in_sentences(content)
    limit_max_sentences(content)
    fetch_sentences_keywords(content)

    save(content.__dict__)
Example no. 5
def robot():
    try:
        print('\n>>> [Video Robot] Starting...')
        content = state.load()

        convert_all_images(content)
        create_all_sentences_images(content)
        create_youtube_thumbnail()
        create_video_initial_images(content)
        create_video_with_moviepy()

        state.save(content)
        print('>>> [Video Robot] Stopping. Work done!')
    except Exception as e:
        print(f'\n> [Video Robot] Unexpected Error: {e}\n')
        exit_video_pymaker()
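
`create_video_with_moviepy` is the step that actually renders the video, but its body is not shown here. A minimal sketch of what it could do, assuming the sentence images were written as numbered PNGs and that a fixed duration per image is acceptable (paths, duration and output file name are assumptions):

from moviepy.editor import ImageClip, concatenate_videoclips


def create_video_with_moviepy(image_paths, seconds_per_image=5, output_path='video_pymaker.mp4'):
    # One fixed-duration clip per rendered sentence image.
    clips = [ImageClip(path).set_duration(seconds_per_image) for path in image_paths]

    # Stitch the clips together and render the final video file.
    video = concatenate_videoclips(clips, method='compose')
    video.write_videofile(output_path, fps=24)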
Example no. 6
def robot():
    try:
        print('\n>>> [Text Robot] Starting...')
        content = state.load()

        content = fetch_content_from_wikipedia(content)
        content = sanitize_content(content)
        content = break_content_into_sentences(content)
        content = fetch_keywords_of_all_sentences(content)

        state.save(content)
        print('>>> [Text Robot] Stopping. Work done!')
    except Algorithmia.errors.AlgorithmException as algorithm_exception:
        print(
            '\n> [Text Robot] Error: video-pymaker was unable to fetch Wikipedia '
            f'with this search term: {content.search_term}.')
        print('> [Text Robot] See the Algorithmia error below:')
        print(f'> [Text Robot] Algorithmia Error: {algorithm_exception}')
        exit_video_pymaker()
    except Exception as e:
        print(f'\n> [Text Robot] Unexpected Error: {e}\n')
        exit_video_pymaker()
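
The `Algorithmia.errors.AlgorithmException` handler suggests `fetch_content_from_wikipedia` goes through the Algorithmia client. A hedged sketch of that call; the algorithm name `web/WikipediaParser/0.1.2`, the environment variable, and the response/attribute names are assumptions, not confirmed by the snippet:

import os

import Algorithmia


def fetch_content_from_wikipedia(content):
    # Ask Algorithmia's Wikipedia parser for the article matching the search term.
    client = Algorithmia.client(os.environ['ALGORITHMIA_API_KEY'])  # credential source assumed
    algorithm = client.algo('web/WikipediaParser/0.1.2')            # algorithm name assumed
    wikipedia_response = algorithm.pipe(content.search_term).result

    content.source_content_original = wikipedia_response['content']  # field and attribute names assumed
    return content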
Example no. 7
def robot(content):
    # Asks the user for the Wikipedia search term and prefix and stores them in the content object
    content.max_sentences = 7

    def ask_and_return_search():
        # Asks for the Wikipedia search term and returns it
        return input("Type a Wikipedia search term:")

    def ask_and_return_prefix():
        # Lists the pre-defined search prefix options and asks the user to choose one of them
        prefixes = ['Who is', 'What is', 'The history of']
        for index in range(len(prefixes)):
            print(f"[{ index + 1 }] - { prefixes[index] }")
        print("[0] - exit")
        selected_prefix_index = int(input("Choose one option:")) - 1
        if selected_prefix_index >= 0:
            return prefixes[selected_prefix_index]
        sys.exit(1)

    # Stores the data provided from the user
    content.search_term = ask_and_return_search()
    content.prefix = ask_and_return_prefix()

    save(content.__dict__)
Example no. 8
def robot():
    current_directory = os.path.dirname(__file__)
    content = load()

    def google_search(search_term, api_key, cse_id, **kwargs):
        service = build("customsearch", "v1", developerKey=api_key)
        response = service.cse().list(q=search_term, cx=cse_id,
                                      **kwargs).execute()
        return response['items']

    def fetch_google_and_return_images_links(query):
        images_url = []
        results = google_search(query,
                                settings.GOOGLE_SEARCH_API_KEY,
                                settings.SEARCH_ENGINE_ID,
                                searchType="image",
                                imgSize="huge",
                                num=2)

        for result in results:
            images_url.append(result['link'])

        return images_url

    def fetch_images_of_all_sentences(content):
        print("> Getting image URLs from Google Images...")

        for sentence in content.sentences:
            query = f"{ content.search_term } { sentence.keywords[0] }"
            sentence.images = fetch_google_and_return_images_links(query)

            sentence.google_search_query = query

    def download_and_save_image(image_url, file_name):
        response = requests.get(image_url, stream=True)
        response.raise_for_status()
        response.raw.decode_content = True  # Required to decompress gzip/deflate compressed responses

        with Image.open(response.raw) as img:
            img.save(f"{ current_directory }/../static/{ file_name }")

    def download_all_images(content):
        downloaded_images = []
        sentence_index = 0

        print("> Downloading images...")

        for sentence in content.sentences:
            images = sentence.images

            for image in images:
                image_url = image

                try:
                    if image_url in downloaded_images:
                        raise FileExistsError("Image already downloaded!")
                    download_and_save_image(image_url,
                                            f"{ sentence_index }_raw.png")
                    downloaded_images.append(image_url)
                    print(f"Image download successfuly! URL: { image_url }")
                    break
                except Exception as error:
                    print(
                        f"Error: Couldn't download image. URL: { image_url } - { error }"
                    )

            sentence_index += 1
        content.downloaded_images = downloaded_images

    fetch_images_of_all_sentences(content)
    download_all_images(content)
    save(content.__dict__)
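
The `settings` module referenced above, which exposes the Google Custom Search credentials, is not part of the snippet. A minimal sketch, assuming the credentials are read from environment variables (variable names assumed):

import os

# Credentials for the Google Custom Search JSON API.
GOOGLE_SEARCH_API_KEY = os.environ['GOOGLE_SEARCH_API_KEY']
SEARCH_ENGINE_ID = os.environ['SEARCH_ENGINE_ID']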