Example #1
def robotVoice():
    text_to_speech = TextToSpeechV1(
        iam_apikey=watsonApiKey['iam_apikey'],
        url=watsonApiKey['url']
    )
    def sentencesToVoice(sentence,filename):
        output = './content/{}-audio.wav'.format(filename)
        try:
            with open(output, 'wb') as audio_file:
                audio_file.write(
                    text_to_speech.synthesize(
                        sentence,
                        voice='en-US_AllisonVoice',
                        accept='audio/wav'
                    ).get_result().content)
            return True
        except Exception:
            return False

    def fetchVoicesOfAllSentences(content):
        print('> Fetching voices of all sentences...')
        for i,item in enumerate(content['sentences']):
            content['sentences'][i]['audio'] = sentencesToVoice(content['sentences'][i]['text'],i)
        print('> Fetch voices of all sentences concluded')
        return content
    
    content = loadContent()
    fetchVoicesOfAllSentences(content)
    saveContent(content)
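
Every robot in these examples reads and writes shared state through loadContent and saveContent, and this one also expects a watsonApiKey credentials dict; none of these helpers are shown. A minimal sketch of what they might look like, assuming the state lives in a single JSON file (file names and paths are assumptions, not part of the original project):

import json

CONTENT_FILE = './content/content.json'  # assumed location of the shared state

def loadContent():
    with open(CONTENT_FILE, 'r', encoding='utf-8') as f:
        return json.load(f)

def saveContent(content):
    with open(CONTENT_FILE, 'w', encoding='utf-8') as f:
        json.dump(content, f, ensure_ascii=False, indent=2)

# watsonApiKey would then be a dict loaded from a local credentials file, e.g.
# watsonApiKey = json.load(open('./credential/watson-tts.json'))
# The iam_apikey= constructor argument suggests an older watson_developer_cloud /
# ibm-watson SDK; the exact version used by the project is not shown.
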
Example #2
def user():
    def askAndReturnSearchTerm():
        return str(input('Type a Wikipedia search term: ')).title()

    def redLine(lista, text):
        for i in range(len(lista)):
            print('[', i, ']', lista[i])
        return int(input(text))

    def askAndReturnPrefix(language):
        if (language == 'en'):
            prefixes = ['Who is', 'What is', 'The history of']
        else:
            prefixes = ['Quem é', 'O que é', 'A história de']
        prefix = redLine(prefixes, 'Choose an option: ')
        return prefixes[prefix]

    def askAndReturnTemplate():
        prefixes = [
            'Know the world ', 'Senta que la vem historia - newsroom',
            'Senta que la vem historia - music epic',
            'Senta que la vem historia - music evolution',
            'Senta que la vem historia - music Missing My Girl'
        ]
        prefix = redLine(prefixes, 'Choose a Template option: ')
        return prefix + 1

    def askAndReturnLanguage():
        prefixes = ['English', 'Portuguese']
        language = ['en', 'pt']
        prefix = redLine(prefixes, 'Choose a Language option: ')
        return language[prefix]

    language = askAndReturnLanguage()

    content = {
        # 'language': 'en',
        'language': language,
        'searchTerm': askAndReturnSearchTerm(),
        # 'prefix': 'What is ',
        'prefix': askAndReturnPrefix(language),
        'maximumSentences': 7,
        'template': 1
        # 'template': askAndReturnTemplate()
    }
    saveContent(content)
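
For reference, answering the prompts of user() with the English language, the prefix 'What is' and the search term 'Earth' would leave a content dictionary roughly like the one below before saveContent writes it out (values are illustrative):

# Illustrative shape of the dictionary produced by user(); the values depend
# on the answers typed at the prompts.
content = {
    'language': 'en',
    'searchTerm': 'Earth',
    'prefix': 'What is',
    'maximumSentences': 7,
    'template': 1
}
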
Example #3
def userEnv():
    def language():
        languages = ['en', 'pt', 'pt-br']
        choose = getEnv('language')
        if choose not in languages:
            raise Exception("Language not accepted")
        return choose

    def getEnv(name=''):
        choose = os.environ.get(name, '')
        if len(choose) == 0:
            raise Exception(f"{name} is empty")
        return choose

    content = {
        'language': language(),
        'searchTerm': getEnv('searchTerm'),
        'prefix': getEnv('prefix'),
        'maximumSentences': 7,
        'template': getEnv('template')
    }
    saveContent(content)
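
userEnv() is the non-interactive counterpart of user(): it reads the same fields from environment variables. A quick way to drive it from Python (the variable names match the getEnv calls above; the values are only examples):

import os

# Example environment for userEnv(); the values are illustrative.
os.environ['language'] = 'en'
os.environ['searchTerm'] = 'Earth'
os.environ['prefix'] = 'What is'
os.environ['template'] = '1'  # note: stays a string, unlike the int set by user()

userEnv()
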
Example #4
def robotText():
    service = NaturalLanguageUnderstandingV1(version="2018-04-05",
                                             url=watsonApiKey['url'],
                                             iam_apikey=watsonApiKey['apikey'])

    def fetchContentFromWikipedia(content):
        print('> Wikipedia content downloading')
        wikipediaContent, wikipediaUrl = apiWikipedia(content['searchTerm'],
                                                      content['language'])
        print('> Wikipedia content downloaded')
        return wikipediaContent, wikipediaUrl

    def sanitizeContent(sourceContentOriginal):
        def removeBlankLinesAndMarkdown(text):
            text = str(text)
            allLinesFirstState = list(
                filter(lambda x: x != '', text.split('\n')))
            allLines = list(
                filter(lambda x: not x.startswith('=='), allLinesFirstState))
            allLines = ' '.join(allLines)
            allLines = allLines.replace(' ()', '')
            allLines = allLines.replace(' ( )', '')
            allLines = allLines.replace('[...]', '')

            return allLines

        withoutBlankLines = removeBlankLinesAndMarkdown(sourceContentOriginal)
        textNormalized = normalize('NFKD', withoutBlankLines)
        return textNormalized

    def breakContentSentences(text):
        conteudo = []
        sent_tokenizer = nltk.tokenize.PunktSentenceTokenizer()
        sentences = sent_tokenizer.tokenize(text)
        for i, _ in enumerate(sentences):
            a = {'text': sentences[i], 'keywords': [], 'images': []}
            conteudo.append(a)
        print('> Break content sentences concluded')
        return conteudo

    def limitMaximumSentences(content):
        content['sentences'] = content['sentences'][
            0:content['maximumSentences']]
        print('> Limit maximum sentences {}'.format(
            content['maximumSentences']))
        return content

    def fetchWatsonAndReturnKeywords(sentence):
        response = service.analyze(
            text=sentence,
            features=Features(entities=EntitiesOptions(),
                              keywords=KeywordsOptions())).get_result()
        # def filtro(value=[]):
        #     return value['text']
        # return list(map(filtro,response['keywords']))
        return [
            response['keywords'][i]['text']
            for i in range(len(response['keywords']))
        ]

    def fetchKeywordsOfAllSentences(content):
        print('> Fetching keywords of all sentences...')
        for i, _ in enumerate(content['sentences']):
            content['sentences'][i]['keywords'] = fetchWatsonAndReturnKeywords(
                content['sentences'][i]['text'])
        print('> Fetch keywords of all sentences concluded')
        return content

    content = loadContent()
    content['sourceContentOriginal'], content[
        'wikipediaUrl'] = fetchContentFromWikipedia(content)
    content['sourceContentSanitize'] = sanitizeContent(
        content['sourceContentOriginal'])
    content['sentences'] = breakContentSentences(
        content['sourceContentSanitize'])
    content = limitMaximumSentences(content)
    content = fetchKeywordsOfAllSentences(content)
    saveContent(content)
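
robotText() relies on an apiWikipedia(searchTerm, language) helper that is not shown. A hypothetical sketch built on the `wikipedia` package is given below; the real helper may use a different client or error handling:

# Hypothetical apiWikipedia helper (not from the original project), built on
# the `wikipedia` package; it returns the same (content, url) pair used above.
import wikipedia

def apiWikipedia(searchTerm, language):
    wikipedia.set_lang(language)       # 'en' or 'pt'
    page = wikipedia.page(searchTerm)  # may raise for ambiguous/missing terms
    return page.content, page.url
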
Example #5
def robotVideo():
    def convertImage(sentenceIndex):
        def adjustImage(imageOriginal, widthDefault, heightDefault,
                        proportionDefault):
            width = imageOriginal.size[0]
            height = imageOriginal.size[1]
            proportion = width / height
            adjustWidth = widthDefault / proportionDefault
            adjustHeight = heightDefault / proportionDefault
            differenceWidth = width - widthDefault
            differenceHeight = height - heightDefault
            if round(proportion, 2) == round(proportionDefault, 2):
                imageOriginal = imageOriginal.resize(
                    (int(widthDefault / proportionDefault),
                     int(heightDefault / proportionDefault)))
            elif differenceWidth > differenceHeight:
                imageOriginal = imageOriginal.resize(
                    (int(adjustWidth), int(height / proportion)))
            elif differenceWidth < differenceHeight:
                imageOriginal = imageOriginal.resize(
                    (int(width / proportion), int(adjustHeight)))
            return imageOriginal

        inputFile = './content/{}-original.png'.format(sentenceIndex)
        outputFile = './content/{}-converted.png'.format(sentenceIndex)
        width = 1920
        height = 1080
        proportionDefault = width / height
        try:
            with Image.open(inputFile) as original:
                original = adjustImage(original, width, height,
                                       proportionDefault)
                size = [original.size[0] // 2, original.size[1] // 2]
                with original.copy() as copy:
                    copy = copy.resize((width, height))
                    copy = copy.filter(ImageFilter.GaussianBlur(5))
                    copy.paste(original, ((width // 2) - size[0],
                                          (height // 2) - size[1]))
                    copy.save(outputFile)

            print('> Image converted: {}'.format(outputFile))
        except Exception:
            print('error {}'.format(sentenceIndex))

    def convertAllImages(content):
        print('> Converting all images...')
        for sentenceIndex in range(len((content['sentences']))):
            convertImage(sentenceIndex)
        print('> Converting all images completed')

    def adjustFontSentence(text):
        sizeSentence = len(text)
        if sizeSentence >= 150 and sizeSentence <= 250:
            return 1.8
        if sizeSentence >= 250:
            return 1
        else:
            return 2

    def adjustTextWrapSentence(sentenceText, w, h):
        if w >= 1080 and h == 1080:
            return 40
        if w <= 900 and h == 1080:
            return 20
        if w == 1920 and h <= 540:
            return 60
        else:
            return 20

    def writeText(filename, text, w, h):
        img = np.zeros((h, w, 4), dtype=np.uint8)
        height, width, channel = img.shape
        text_img = np.zeros((height, width, 4))
        font = cv2.FONT_HERSHEY_TRIPLEX
        wrapped_text = textwrap.wrap(text,
                                     width=adjustTextWrapSentence(
                                         text, width, height))
        x, y = 10, 40
        font_size = adjustFontSentence(text)
        font_thickness = 2

        i = 0
        img_pil = Image.fromarray(img)
        draw = ImageDraw.Draw(img_pil)
        for line in wrapped_text:
            textsize = cv2.getTextSize(line, font, font_size,
                                       font_thickness)[0]

            gap = textsize[1] + 40
            y = int((img.shape[0] + textsize[1]) / 10) + i * gap
            x = int((img.shape[1] - textsize[0]) / 5) - 10
            # cv2.putText(img, line, (x, y), font,
            #             font_size,
            #             (255, 255, 255, 250),
            #             font_thickness,
            #             lineType=cv2.LINE_AA)
            # font1 = ImageFont.truetype(
            #     # './robots/fonts/simsun.ttc', 90)
            #     'robots/fonts/simsun.ttc', 90)
            # font1 = ImageFont.load_default()
            with open("robots/fonts/verdana/verdanab.ttf", "rb") as file:
                bytes_font = BytesIO(file.read())
            font1 = ImageFont.truetype(bytes_font, 80)
            draw.text((x, y), line, font=font1, fill=(255, 255, 255, 250))
            img = np.array(img_pil)
            i += 1
        cv2.imwrite(filename, img)

    def createSentenceImage(sentenceIndex, sentenceText, templateIndex):
        outputFile = './content/{}-sentence.png'.format(sentenceIndex)
        templateSettings = {
            0: {
                'size': '1920x400',
                'width': 1920,
                'height': 400,
                'gravity': 'center',
                'g': 5
            },
            1: {
                'size': '1920x1080',
                'width': 1920,
                'height': 1080,
                'gravity': 'center',
                'g': 5
            },
            2: {
                'size': '800x1080',
                'width': 800,
                'height': 1080,
                'gravity': 'west',
                'g': 4
            },
            3: {
                'size': '1920x400',
                'width': 1920,
                'height': 400,
                'gravity': 'center',
                'g': 5
            },
            4: {
                'size': '1920x1080',
                'width': 1920,
                'height': 1080,
                'gravity': 'center',
                'g': 5
            },
            5: {
                'size': '800x1080',
                'width': 800,
                'height': 1080,
                'gravity': 'west',
                'g': 4
            },
            6: {
                'size': '1920x400',
                'width': 1920,
                'height': 400,
                'gravity': 'center',
                'g': 5
            }
        }
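        # Note: templateIndex is currently unused below; every sentence image is
        # rendered with the 1920x1080 settings of template 4.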
        w = templateSettings[4]['width']
        h = templateSettings[4]['height']
        writeText(outputFile, sentenceText, w, h)

    def createAllSentenceImages(content):
        print('> Creating all sentences images...')
        templateIndex = 0
        for sentenceIndex in range(len((content['sentences']))):
            if templateIndex > 6:
                templateIndex = 0
            createSentenceImage(sentenceIndex,
                                content['sentences'][sentenceIndex]['text'],
                                templateIndex)
            templateIndex += 1
        print('> Creating all sentences images completed')

    def createYouTubeThumbnail():
        print('> Creating YouTube thumbnail')
        with Image.open('./content/0-converted.png') as img:
            img.save('content/youtube-thumbnail.png', "PNG")
        print('> Created YouTube thumbnail')

    def createImageVideo(imageIndex, imageIndexOutput):
        inputImage = f'./content/{imageIndex}-converted.png'
        inputImageSentence = f'./content/{imageIndex}-sentence.png'
        outputFile = f'./content/final/image{imageIndexOutput}.png'
        originalImage = Image.open(inputImage, 'r')
        originalImageSentence = Image.open(inputImageSentence, 'r')
        text_img = Image.new('RGBA', (1920, 1080), (0, 0, 0, 0))
        text_img.paste(originalImage, (0, 0))
        img = Image.new('RGBA', (1920, 1080), (0, 0, 0, 10))
        img.paste(originalImageSentence, (0, 0), mask=originalImageSentence)
        text_img = Image.blend(img, text_img, .2)
        text_img.save(outputFile, 'PNG')

    def createAllImagesVideo():
        print('> Creating all images of video...')
        for imageIndex in range(len(glob.glob('./content/*-converted.png'))):
            for i in range(10):
                a = f'0{imageIndex}{i}'
                createImageVideo(imageIndex, a)
        print('> Create all images of video completed')

    def createVideo():
        print('> Creating video...')
        imgArray = []
        for filename in sorted(glob.glob('./content/final/*.png')):
            img = cv2.imread(filename)
            height, width, layers = img.shape
            size = (width, height)
            imgArray.append(img)
        out = cv2.VideoWriter('./content/final/project.mp4',
                              cv2.VideoWriter_fourcc(*'MP4V'), 1, size)
        for i in range(len(imgArray)):
            out.write(imgArray[i])
        out.release()
        print('> Created video')

    def addAudioInVideo():
        print('> adding audio in video...')
        audioFile = "./templates/3/bensound-epic.mp3"
        video = mp.VideoFileClip("./content/final/project.mp4")
        audio = mp.AudioFileClip(audioFile)
        video = video.set_audio(audio.set_duration(video.duration))
        video.write_videofile("./content/final/project_audio.mp4")
        print('> added audio in video')

    content = loadContent()
    convertAllImages(content)
    createAllSentenceImages(content)
    createYouTubeThumbnail()
    saveContent(content)
    createAllImagesVideo()
    createVideo()
    addAudioInVideo()
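
robotVideo() pulls together OpenCV, Pillow, NumPy, moviepy, textwrap and glob. The imports it assumes look roughly like this (the alias mp for moviepy.editor is inferred from the mp.VideoFileClip / mp.AudioFileClip calls). Note also that createAllImagesVideo writes 10 copies of each frame and createVideo encodes at 1 fps, so each image stays on screen for about 10 seconds.

# Imports assumed by robotVideo(), inferred from the calls above; the alias
# `mp` for moviepy.editor is a guess based on how it is used.
import glob
import textwrap
from io import BytesIO

import cv2
import numpy as np
import moviepy.editor as mp
from PIL import Image, ImageDraw, ImageFilter, ImageFont
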
Example #6
def robotYoutube():
    youtube = None

    def createOAuthClient():
        credentials = './credential/youtube.json'

        SCOPES = [
            'https://www.googleapis.com/auth/youtube',
            'https://www.googleapis.com/auth/youtube.upload',
            'https://www.googleapis.com/auth/yt-analytics.readonly'
        ]

        OAuthClient = InstalledAppFlow.from_client_secrets_file(
            credentials, SCOPES)
        return OAuthClient

    def requestUserConsent(OAuthClient):
        credentials = OAuthClient.run_console(access_type='offline',
                                              include_granted_scopes='true',
                                              open_browser=False)
        # credentials = OAuthClient.run_local_server(
        #     port=5000,
        #     host='localhost',
        #     success_message='''
        #     Thank you!
        #     Now close this tab.
        #     ''',
        #     access_type='offline',
        #     include_granted_scopes='true',
        #     open_browser=False
        # )
        return credentials

    def setGlobalGoogleAuthentication(authorizationToken):
        youtube = build('youtube', 'v3', credentials=authorizationToken)
        youtubeAnalytics = build('youtubeAnalytics',
                                 'v2',
                                 credentials=authorizationToken)
        return youtube, youtubeAnalytics

    def uploadVideo(content):
        # def filtro(value=[]):
        #     return value['text']
        videoFilePath = './content/final/project_audio.mp4'
        # videoFilePath = './content/output.mp4'
        videoFileSize = size(getsize(videoFilePath))
        videoTitle = '{} {}'.format(content['prefix'], content['searchTerm'])
        videoTags = [content['searchTerm']]
        videoTags.extend(content['sentences'][0]['keywords'])
        # videoDescription = '\n\n'.join(list(map(filtro,content['sentences'])))
        videoDescription = '\n\n'.join([
            content['sentences'][i]['text']
            for i in range(len(content['sentences']))
        ])
        idealizer = 'https://www.youtube.com/channel/UCU5JicSrEM5A63jkJ2QvGYw'
        credits = '''\n\nCredits:
-Wikipedia: {}
-Images:
-{}
-{}
-{}
-{}
-{}
-{}
-{}
-Idealizer of the project: Filipe Deschamps
-{}
'''.format(content['wikipediaUrl'], content['downloadedImages'][0],
           content['downloadedImages'][1], content['downloadedImages'][2],
           content['downloadedImages'][3], content['downloadedImages'][4],
           content['downloadedImages'][5], content['downloadedImages'][6],
           idealizer)
        videoDescription += credits
        if content['template'] > 1:
            videoDescription += '\n-Music: https://www.bensound.com/royalty-free-music'

        requestParameters = {
            'part': 'snippet,status',
            'requestBody': {
                'snippet': {
                    'title': videoTitle,
                    'description': videoDescription,
                    'tags': videoTags
                },
                'status': {
                    'privacyStatus': 'public'
                }
            },
            'media': {
                'body':
                MediaFileUpload(videoFilePath, chunksize=-1, resumable=True)
            }
        }
        youtubeResponse = youtube.videos().insert(
            part=requestParameters['part'],
            body=requestParameters['requestBody'],
            media_body=requestParameters['media']['body'])
        print("> Uploading video file...", videoFileSize + "b")
        _, response = youtubeResponse.next_chunk()
        if 'id' in response:
            print('> Video available at: https://youtu.be/{}'.format(
                response['id']))
            return response['id']
        else:
            exit("The upload failed with an unexpected response: %s" %
                 response)

    def uploadThumbnail(videoInformation):
        videoId = videoInformation
        videoThumbnailFile = 'youtube-thumbnail.png'

        dirname = os.path.dirname(__file__).replace('robots', '')
        filePath = os.path.join(dirname, 'content', videoThumbnailFile)
        if (os.path.exists(filePath)):
            requestParameters = {
                'videoId': videoId,
                'media': {
                    'mimeType': 'image/jpeg',
                    'body': MediaFileUpload(filePath,
                                            chunksize=-1,
                                            resumable=True)
                }
            }
            print("> Uploading thumbnails file...")
            youtube.thumbnails().set(
                videoId=requestParameters['videoId'],
                media_body=requestParameters['media']['body']).execute()
            print("> Uploaded thumbnails")
        else:
            raise Exception("Arquivo não encontrado, thumbnail")

    def analitics(youtube):
        def execute_api_request(client_library_function, **kwargs):
            response = client_library_function(**kwargs).execute()
            return response

        return execute_api_request(
            youtubeAnalytics.reports().query,
            ids='channel==MINE',
            startDate='2019-04-28',
            endDate='2019-05-12',
            metrics='estimatedMinutesWatched,views,likes,subscribersGained',
            dimensions='day',
            sort='day')

    def insertPlaylist(videoInformation):
        print("> Inserting into the playlist")
        videoID = videoInformation
        playlistID = 'PL771Qy0TVPUh9Vdnk7ezLpED0F3aAiz7Z'
        youtube.playlistItems().insert(part="snippet",
                                       body={
                                           'snippet': {
                                               'playlistId': playlistID,
                                               'resourceId': {
                                                   'kind': 'youtube#video',
                                                   'videoId': videoID
                                               }
                                           }
                                       }).execute()
        print("> Inserted into the playlist")

    OAuthClient = createOAuthClient()
    authorizationToken = requestUserConsent(OAuthClient)
    youtube, youtubeAnalytics = setGlobalGoogleAuthentication(
        authorizationToken)
    content = loadContent()
    #     content['analitics'] = analitics(youtube)
    #     saveContent(content)
    content['videoId'] = uploadVideo(content)
    uploadThumbnail(content['videoId'])
    # insertPlaylist(videoInformation)
    saveContent(content)
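
robotYoutube() builds on the Google API client libraries; the imports it assumes would look roughly like this (size appears to come from hurry.filesize, but that is an inference from the size(getsize(...)) call):

# Imports assumed by robotYoutube(), inferred from the calls above.
import os
from os.path import getsize

from google_auth_oauthlib.flow import InstalledAppFlow
from googleapiclient.discovery import build
from googleapiclient.http import MediaFileUpload
from hurry.filesize import size  # assumption: size() formats the byte count
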
Example #7
def robotImages():
    def ajustFetchGoogle(service, query, sentenceIndex):
        if sentenceIndex > 0:
            response = service.cse().list(
                cx=googleSearchCredentials['searchEngineId'],
                q=query,
                searchType='image',
                num=10,
                filter='1').execute()
        else:
            response = service.cse().list(
                cx=googleSearchCredentials['searchEngineId'],
                q=query,
                searchType='image',
                num=10,
                filter='1',
                # imgSize='LARGE'
            ).execute()
        return response

    def fetchGoogleAndReturnImagesLinks(query, sentenceIndex):
        service = build("customsearch", "v1",
                        developerKey=googleSearchCredentials['apiKey'])
        response = ajustFetchGoogle(service, query, sentenceIndex)
        # def filtro(value=[]):
        #         return value['link']
        if 'items' not in response:
            return None
        else:
            return [item['link'] for item in response['items']]
            # return list(map(filtro,response['items']))

    def ajustFetchImages(content, sentence):
        if content['searchTerm'].lower() in sentence.lower():
            return content['searchTerm']
        else:
            query = '{} {}'.format(content['searchTerm'], sentence)
            return query

    def interatorInAllKeyword(content, sentence, sentenceIndex):
        for keywordIndex, keyword in enumerate(sentence['keywords']):
            query = ajustFetchImages(content, keyword)
            sentence['images'] = fetchGoogleAndReturnImagesLinks(
                query, sentenceIndex)
            if sentence['images'] is not None:
                sentence['googleSearchQuery'] = query
                break

    def fetchImagesOfAllSentences(content):
        print('> Fetching images of all sentences...')
        for sentenceIndex, sentence in enumerate(content['sentences']):
            interatorInAllKeyword(content, sentence, sentenceIndex)
        print('> Fetch images of all sentences concluded')

    def downloadAndSave(url, fileName):
        fileName = 'content/' + fileName
        with open(fileName, 'wb') as f:
            f.write(requests.get(url).content)
        return url

    def checkSupportImage(imageUrl):
        try:
            response = requests.get(imageUrl, stream=True)
            if response.status_code >= 400:
                return False
            Image.open(response.raw)
            return True
        except Exception:
            return False

    def viewImage(imageUrl):
        try:
            im = Image.open(requests.get(imageUrl, stream=True).raw)
            im.show()
            decision = str(input('Use this image? (y/n) '))
            if decision != 'y':
                saveBlackList(imageUrl)
                return True
        except Exception:
            return True
        return False

    def checkTypeImage(imageUrl):
        # True if the URL points to a PNG file
        return imageUrl.lower().endswith('png')

    def checkDuplicatedImage(sentenceIndex, imageUrl):

        def url_to_image(url):
            resp = requests.get(url)
            image = np.asarray(bytearray(resp.content), dtype="uint8")
            image = cv2.imdecode(image, cv2.IMREAD_COLOR)
            return image

        def ajustSize(filename, output):
            with Image.open(filename) as im:
                a, b = im.size
                scale = 0.25
                a = int(a * scale)
                b = int(b * scale)
                im = im.resize((a, b))
                im.save(output)

        def ajustSizeToAllImages():
            for i in range(sentenceIndex):
                filename = 'content/{}-original.png'.format(i)
                output = 'content/ajust/{}-original.png'.format(i)
                ajustSize(filename, output)

        def downloadImageAndAjust(url):
            filename = 'content/ajust/internetOriginal.png'
            output = 'content/ajust/internet.png'
            with open(filename, 'wb') as f:
                f.write(requests.get(url).content)
            ajustSize(filename, output)

        # image_to_compare = url_to_image(imageUrl)
        downloadImageAndAjust(imageUrl)
        image_to_compare = cv2.imread('content/ajust/internet.png')

        # for i in range(sentenceIndex):
        # Resize all previously downloaded originals once, then compare the
        # candidate image against each of them.
        ajustSizeToAllImages()
        for fileName in glob.glob(os.path.join('content', 'ajust', '*.png')):
            if 'internet' in fileName:
                continue
            # filename = 'content/ajust/{}-original.png'.format(i)
            original = cv2.imread(fileName)
            if original.shape == image_to_compare.shape:
                difference = cv2.subtract(original, image_to_compare)
                b, g, r = cv2.split(difference)
                if cv2.countNonZero(b) == 0 and cv2.countNonZero(g) == 0 and cv2.countNonZero(r) == 0:
                    return True
            detector = cv2.BRISK_create()
            kp_1, desc_1 = detector.detectAndCompute(original, None)
            kp_2, desc_2 = detector.detectAndCompute(image_to_compare, None)
            desc_1 = desc_1.astype('float32')
            desc_2 = desc_2.astype('float32')
            index_params = dict(algorithm=0, trees=5)
            search_params = dict()
            flann = cv2.FlannBasedMatcher(index_params, search_params)
            matches = flann.knnMatch(desc_1, desc_2, k=2)
            good_points = []
            ratio = 0.6
            for m, n in matches:
                if m.distance < ratio * n.distance:
                    good_points.append(m)
            if len(good_points) > 40:
                return True
        return False

    def checkList(imageUrl, content, sentenceIndex, imageIndex):
        blackList = loadBlackList()['blackList']
        if imageUrl in content['downloadedImages'] or imageUrl in blackList:
            print("> {} {} Error: image already exists: {}".format(
                sentenceIndex, imageIndex, imageUrl))
            return True
        elif not checkSupportImage(imageUrl):
            print("> {} {} Error: could not open the image: {}".format(
                sentenceIndex, imageIndex, imageUrl))
            # saveBlackList(imageUrl)
            return True
        elif sentenceIndex > 0:
            if checkDuplicatedImage(sentenceIndex - 1, imageUrl):
                print("> {} {} Error: duplicated image: {}".format(
                    sentenceIndex, imageIndex, imageUrl))
                return True
        elif checkTypeImage(imageUrl):
            print(
                f'> {sentenceIndex} {imageIndex} Error: incompatible image type: {imageUrl}')
            return True
        return False

    def downloadAllImages(content):
        print('> Downloading all images...')
        content['downloadedImages'] = []
        for sentenceIndex in range(len((content['sentences']))):
            images = content['sentences'][sentenceIndex]['images']
            for imageIndex in range(len(images)):
                imageUrl = images[imageIndex]
                if checkList(imageUrl, content, sentenceIndex, imageIndex):
                    continue
                try:
                    content['downloadedImages'].append(downloadAndSave(
                        imageUrl, '{}-original.png'.format(sentenceIndex)))
                    print("> {} {} Image downloaded successfully: {}".format(
                        sentenceIndex, imageIndex, imageUrl))
                    break
                except Exception:
                    print("> {} {} Error downloading: {}".format(
                        sentenceIndex, imageIndex, imageUrl))
        print('> Downloaded all images...')

    content = loadContent()
    fetchImagesOfAllSentences(content)
    saveContent(content)
    content = loadContent()
    downloadAllImages(content)
    saveContent(content)
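
robotImages() also depends on loadBlackList and saveBlackList, which are not shown. A hypothetical sketch, assuming the blacklist is a plain JSON file next to the rest of the content:

# Hypothetical blacklist helpers (not from the original project); the file
# location and JSON layout are assumptions.
import json
import os

BLACKLIST_FILE = './content/blacklist.json'

def loadBlackList():
    if not os.path.exists(BLACKLIST_FILE):
        return {'blackList': []}
    with open(BLACKLIST_FILE, 'r', encoding='utf-8') as f:
        return json.load(f)

def saveBlackList(imageUrl):
    data = loadBlackList()
    data['blackList'].append(imageUrl)
    with open(BLACKLIST_FILE, 'w', encoding='utf-8') as f:
        json.dump(data, f, indent=2)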