Example #1
def fetch_author(a_id, api_key):
    utils.p('fetch author')
    try:
        a_url = paths.get_api_url() + 'accounts/' + a_id + '/'
        headers = utils.get_headers(api_key)
        r = rerequests.get(a_url, headers=headers)
        if r.status_code == 200:
            adata = r.json()
            if adata.get('id') is None:
                utils.p(adata)
            tasks_queue.add_task((write_author, (a_id, adata)))
            if adata.get('gravatarHash') is not None:
                gravatar_path = paths.get_temp_dir(subdir='g/') + adata['gravatarHash'] + '.jpg'
                url = "https://www.gravatar.com/avatar/" + adata['gravatarHash'] + '?d=404'
                r = rerequests.get(url, stream=False)
                if r.status_code == 200:
                    with open(gravatar_path, 'wb') as f:
                        f.write(r.content)
                    adata['gravatarImg'] = gravatar_path
                elif r.status_code == 404:
                    adata['gravatarHash'] = None
                    utils.p('gravatar for author not available.')
    except Exception as e:
        utils.p(e)
    utils.p('finish fetch')
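A minimal usage sketch (not from the original source): fetch_author blocks on network I/O, so the add-on would typically run it off Blender's main thread and let tasks_queue hand the result back. The author id and key below are hypothetical placeholders.

import threading

author_id = '12345'        # hypothetical author id
api_key = 'YOUR_API_KEY'   # hypothetical API key
threading.Thread(target=fetch_author, args=(author_id, api_key), daemon=True).start()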
def get_assets_search():
    bpy.app.debug_value = 2

    results = []
    preferences = bpy.context.preferences.addons['blenderkit'].preferences
    url = paths.get_api_url() + 'search/all'
    i = 0
    while url is not None:
        headers = utils.get_headers(preferences.api_key)
        print('fetching assets from assets endpoint')
        print(url)
        retries = 0
        while retries < 3:
            r = rerequests.get(url, headers=headers)

            try:
                adata = r.json()
            except Exception as e:
                print(e)
                print('failed to get next')
                if retries == 2:
                    url = None
                retries += 1
                continue
            url = adata.get('next')
            print(i)
            i += 1
            if adata.get('results') is not None:
                results.extend(adata['results'])
                print(f'fetched page {i}')
                break
            retries += 1

    fpath = assets_db_path()
    with open(fpath, 'w') as s:
        json.dump(results, s)
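The loop above follows cursor-style pagination: each page's JSON carries a 'next' URL that is None on the last page. A self-contained sketch of the same pattern with plain requests, assuming any endpoint that paginates this way (url and headers are placeholders):

import requests

def fetch_all_pages(url, headers, max_retries=3):
    results = []
    while url is not None:
        page = None
        for attempt in range(max_retries):
            try:
                page = requests.get(url, headers=headers).json()
                break
            except Exception as e:
                print(e)
        if page is None:
            break  # give up on this page after repeated failures
        results.extend(page.get('results', []))
        url = page.get('next')  # None on the last page ends the loop
    return results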
Example #3
def fetch_categories(API_key, force=False):
    url = paths.get_api_url() + 'categories/'

    headers = utils.get_headers(API_key)

    tempdir = paths.get_temp_dir()
    categories_filepath = os.path.join(tempdir, 'categories.json')
    if os.path.exists(categories_filepath):
        catfile_age = time.time() - os.path.getmtime(categories_filepath)
    else:
        catfile_age = 10000000

    # global catfetch_counter
    # catfetch_counter += 1
    # bk_logger.debug('fetching categories: ', catfetch_counter)
    # bk_logger.debug('age of cat file', catfile_age)
    try:
        # read categories only once per day maximum, or when forced to do so.
        if catfile_age > 86400 or force:
            bk_logger.debug('requesting categories from server')
            r = rerequests.get(url, headers=headers)
            rdata = r.json()
            categories = rdata['results']
            fix_category_counts(categories)
            # filter_categories(categories) #TODO this should filter categories for search, but not for upload. by now off.
            with open(categories_filepath, 'w', encoding='utf-8') as s:
                json.dump(categories, s, ensure_ascii=False, indent=4)
        tasks_queue.add_task((load_categories, ()))
    except Exception as e:
        bk_logger.debug('category fetching failed')
        bk_logger.exception(e)
        if not os.path.exists(categories_filepath):
            source_path = paths.get_addon_file(subpath='data' + os.sep +
                                               'categories.json')
            shutil.copy(source_path, categories_filepath)
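The 86400-second threshold is one day, so fetch_categories hits the server at most once a day unless force=True. A hypothetical threaded call (the key is a placeholder):

import threading

threading.Thread(target=fetch_categories, args=('YOUR_API_KEY',), kwargs={'force': False}, daemon=True).start()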
def get_rating(asset_id, headers):
    '''
    Retrieve ratings for an asset from the BlenderKit server. Can be run from a thread.

    Parameters
    ----------
    asset_id : str
        UUID of the asset whose ratings are requested.
    headers : dict
        Request headers, e.g. from utils.get_headers(api_key).

    Returns
    -------
    None; each type:value rating is stored locally through tasks_queue.
    '''
    url = paths.get_api_url() + 'assets/' + asset_id + '/rating/'
    params = {}
    r = rerequests.get(url, params=params, verify=True, headers=headers)
    if r is None:
        return
    if r.status_code == 200:
        rj = r.json()
        ratings = {}
        # store ratings - send them to task queue
        for r in rj['results']:
            ratings[r['ratingType']] = r['score']
            tasks_queue.add_task(
                (store_rating_local, (asset_id, r['ratingType'], r['score'])))
            # store_rating_local(asset_id, type = r['ratingType'], value = r['score'])

        if len(rj['results']) == 0:
            # store empty ratings too, so that server isn't checked repeatedly
            tasks_queue.add_task((store_rating_local_empty, (asset_id, )))
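A hypothetical caller for the function above; per the docstring it is safe to run from a worker thread, and the asset id and key below are placeholders:

import threading

headers = utils.get_headers('YOUR_API_KEY')  # hypothetical key
threading.Thread(target=get_rating, args=('some-asset-uuid', headers), daemon=True).start()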
Example #5
def request_profile(api_key):
    a_url = paths.get_api_url() + 'me/'
    headers = utils.get_headers(api_key)
    r = rerequests.get(a_url, headers=headers)
    adata = r.json()
    if adata.get('user') is None:
        utils.p(adata)
        utils.p('getting profile failed')
        return None
    return adata
def get_rating(asset_id):
    user_preferences = bpy.context.preferences.addons['blenderkit'].preferences
    api_key = user_preferences.api_key
    headers = utils.get_headers(api_key)
    url = paths.get_api_url() + 'assets/' + asset_id + '/rating/'
    rtypes = ['quality', 'working_hours']
    for rt in rtypes:
        params = {'rating_type': rt}
        r = rerequests.get(url, params=params, verify=True, headers=headers)
        print(r.text)
Example #7
def fetch_gravatar(adata):
    utils.p('fetch gravatar')
    if adata.get('gravatarHash') is not None:
        gravatar_path = paths.get_temp_dir(subdir='g/') + adata['gravatarHash'] + '.jpg'

        if os.path.exists(gravatar_path):
            tasks_queue.add_task((write_gravatar, (adata['id'], gravatar_path)))
            return

        url = "https://www.gravatar.com/avatar/" + adata['gravatarHash'] + '?d=404'
        r = rerequests.get(url, stream=False)
        if r.status_code == 200:
            with open(gravatar_path, 'wb') as f:
                f.write(r.content)
            tasks_queue.add_task((write_gravatar, (adata['id'], gravatar_path)))
        elif r.status_code == 404:
            adata['gravatarHash'] = None
            utils.p('gravatar for author not available.')
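The '?d=404' query parameter asks Gravatar to answer HTTP 404 instead of serving a default image when no avatar exists, which is exactly what the elif branch relies on. Classic Gravatar hashes are the MD5 of the trimmed, lowercased e-mail address; a self-contained sketch (the address is a placeholder):

import hashlib

def gravatar_hash(email):
    # classic Gravatar scheme: MD5 of the normalized address
    return hashlib.md5(email.strip().lower().encode('utf-8')).hexdigest()

url = 'https://www.gravatar.com/avatar/' + gravatar_hash('jane@example.com') + '?d=404'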
Example #8
def get_download_url(asset_data, scene_id, api_key, tcom=None):
    '''Retrieve the download URL. The server checks whether the user may download the item.'''
    mt = time.time()

    headers = utils.get_headers(api_key)

    data = {'scene_uuid': scene_id}
    r = None

    try:
        r = rerequests.get(asset_data['download_url'],
                           params=data,
                           headers=headers)
    except Exception as e:
        print(e)
        if tcom is not None:
            tcom.error = True
    if r is None:
        if tcom is not None:
            tcom.report = 'Connection Error'
            tcom.error = True
        return 'Connection Error'

    if r.status_code < 400:
        data = r.json()
        url = data['filePath']
        asset_data['url'] = url
        asset_data['file_name'] = paths.extract_filename_from_url(url)
        return True

    if r.status_code == 403:
        report = 'You need Full plan to get this item.'
        # r1 = 'All materials and brushes are available for free. Only users registered to Standard plan can use all models.'
        # tasks_queue.add_task((ui.add_report, (r1, 5, colors.RED)))
        if tcom is not None:
            tcom.report = report
            tcom.error = True

    elif r.status_code >= 500:
        utils.p(r.text)
        if tcom is not None:
            tcom.report = 'Server error'
            tcom.error = True
    return False
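A hypothetical caller: asset_data comes from search results and must already carry 'download_url'; on success the function fills in asset_data['url'] and asset_data['file_name'] and returns True, otherwise False or the string 'Connection Error'.

ok = get_download_url(asset_data, scene_id, api_key)  # values from earlier steps
if ok is True:
    print('downloading from', asset_data['url'])
else:
    print('download not available:', ok)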
Example #9
def fetch_categories(API_key):
    url = paths.get_api_url() + 'categories/'

    headers = utils.get_headers(API_key)

    tempdir = paths.get_temp_dir()
    categories_filepath = os.path.join(tempdir, 'categories.json')

    try:
        r = rerequests.get(url, headers=headers)
        rdata = r.json()
        categories = rdata['results']
        fix_category_counts(categories)
        # filter_categories(categories) #TODO this should filter categories for search, but not for upload. by now off.
        with open(categories_filepath, 'w') as s:
            json.dump(categories, s, indent=4)
        tasks_queue.add_task((load_categories, ()))
    except Exception as e:
        utils.p('category fetching failed')
        utils.p(e)
        if not os.path.exists(categories_filepath):
            source_path = paths.get_addon_file(subpath='data' + os.sep + 'categories.json')
            shutil.copy(source_path, categories_filepath)
Example #10
    def run(self):
        maxthreads = 50
        query = self.query
        params = self.params
        global reports

        t = time.time()
        mt('search thread started')
        tempdir = paths.get_temp_dir('%s_search' % query['asset_type'])
        json_filepath = os.path.join(
            tempdir, '%s_searchresult.json' % query['asset_type'])

        headers = utils.get_headers(params['api_key'])

        rdata = {}
        rdata['results'] = []

        if params['get_next']:
            with open(json_filepath, 'r') as infile:
                try:
                    origdata = json.load(infile)
                    urlquery = origdata['next']
                    # rparameters = {}
                    if urlquery is None:
                        return
                except Exception:
                    # if no cached search results exist on disk, skip next-page loading.
                    params['get_next'] = False
        if not params['get_next']:
            urlquery = self.query_to_url()
        try:
            utils.p(urlquery)
            r = rerequests.get(urlquery,
                               headers=headers)  # , params = rparameters)
            # print(r.url)
            reports = ''
            # utils.p(r.text)
        except requests.exceptions.RequestException as e:
            print(e)
            reports = str(e)
            # props.report = e
            return
        mt('response is back ')
        try:
            rdata = r.json()
        except Exception as inst:
            reports = r.text
            print(inst)

        mt('data parsed ')

        # filter results here:
        # todo remove this in future
        nresults = []
        for d in rdata.get('results', []):
            # TODO this code is for filtering brush types, should vanish after we implement filter in Elastic
            mode = None
            if query['asset_type'] == 'brush':
                for p in d['parameters']:
                    if p['parameterType'] == 'mode':
                        mode = p['value']
            if query['asset_type'] != 'brush' or (
                    query.get('mode') is not None and query['mode'] == mode):
                nresults.append(d)
        rdata['results'] = nresults

        # print('number of results: ', len(rdata.get('results', [])))
        if self.stopped():
            utils.p('stopping search : ' + str(query))
            return

        mt('search finished')
        i = 0

        thumb_small_urls = []
        thumb_small_filepaths = []
        thumb_full_urls = []
        thumb_full_filepaths = []
        # END OF PARSING
        for d in rdata.get('results', []):

            get_author(d)

            for f in d['files']:
                # TODO move validation of published assets to server, too many checks here.
                if (f['fileType'] == 'thumbnail'
                        and f['fileThumbnail'] is not None
                        and f['fileThumbnailLarge'] is not None):

                    thumb_small_urls.append(f['fileThumbnail'])
                    thumb_full_urls.append(f['fileThumbnailLarge'])

                    imgname = paths.extract_filename_from_url(
                        f['fileThumbnail'])
                    imgpath = os.path.join(tempdir, imgname)
                    thumb_small_filepaths.append(imgpath)

                    imgname = paths.extract_filename_from_url(
                        f['fileThumbnailLarge'])
                    imgpath = os.path.join(tempdir, imgname)
                    thumb_full_filepaths.append(imgpath)

        sml_thbs = zip(thumb_small_filepaths, thumb_small_urls)
        full_thbs = zip(thumb_full_filepaths, thumb_full_urls)

        # we save here because a missing thumbnail check is in the previous loop
        # we can also prepend previous results. These have downloaded thumbnails already...
        if params['get_next']:
            rdata['results'][0:0] = origdata['results']

        with open(json_filepath, 'w') as outfile:
            json.dump(rdata, outfile)

        killthreads_sml = []
        for k in thumb_sml_download_threads.keys():
            if k not in thumb_small_filepaths:
                killthreads_sml.append(k)  # do actual killing here?

        killthreads_full = []
        for k in thumb_full_download_threads.keys():
            if k not in thumb_full_filepaths:
                killthreads_full.append(k)  # do actual killing here?
        # TODO do the killing/ stopping here! remember threads might have finished inbetween!

        if self.stopped():
            utils.p('stopping search : ' + str(query))
            return

        # this loop handles downloading of small thumbnails
        for imgpath, url in sml_thbs:
            if imgpath not in thumb_sml_download_threads and not os.path.exists(
                    imgpath):
                thread = ThumbDownloader(url, imgpath)
                # thread = threading.Thread(target=download_thumbnail, args=([url, imgpath]),
                #                           daemon=True)
                thread.start()
                thumb_sml_download_threads[imgpath] = thread
                # threads.append(thread)

                if len(thumb_sml_download_threads) > maxthreads:
                    while len(thumb_sml_download_threads) > maxthreads:
                        # copy because this loop deletes finished threads from the dict
                        threads_copy = thumb_sml_download_threads.copy()
                        for tk, thread in threads_copy.items():
                            if not thread.is_alive():
                                thread.join()
                                del thumb_sml_download_threads[tk]
                                i += 1
        if self.stopped():
            utils.p('stopping search : ' + str(query))
            return
        while len(thumb_sml_download_threads) > 0:
            # copy because this loop deletes finished threads from the dict
            threads_copy = thumb_sml_download_threads.copy()
            for tk, thread in threads_copy.items():
                if not thread.is_alive():
                    thread.join()
                    del thumb_sml_download_threads[tk]
                    i += 1

        if self.stopped():
            utils.p('stopping search : ' + str(query))
            return

        # start downloading full thumbs in the end
        for imgpath, url in full_thbs:
            if imgpath not in thumb_full_download_threads and not os.path.exists(
                    imgpath):
                thread = ThumbDownloader(url, imgpath)
                # thread = threading.Thread(target=download_thumbnail, args=([url, imgpath]),
                #                           daemon=True)
                thread.start()
                thumb_full_download_threads[imgpath] = thread
        mt('thumbnails finished')
Example #11
    def run(self):
        r = rerequests.get(self.url, stream=False)
        if r.status_code == 200:
            with open(self.path, 'wb') as f:
                f.write(r.content)
    def run(self):
        maxthreads = 50
        query = self.query
        params = self.params
        global reports

        t = time.time()
        mt('search thread started')
        tempdir = paths.get_temp_dir('%s_search' % query['asset_type'])
        # json_filepath = os.path.join(tempdir, '%s_searchresult.json' % query['asset_type'])

        headers = utils.get_headers(params['api_key'])

        rdata = {}
        rdata['results'] = []

        if params['get_next']:
            urlquery = self.result['next']
        else:
            urlquery = self.query_to_url()

        try:
            utils.p(urlquery)
            r = rerequests.get(urlquery,
                               headers=headers)  # , params = rparameters)
            # print(r.url)
            reports = ''
            # utils.p(r.text)
        except requests.exceptions.RequestException as e:
            print(e)
            reports = str(e)
            # props.report = e
            return
        mt('response is back ')
        try:
            rdata = r.json()
        except Exception as inst:
            reports = r.text
            print(inst)

        mt('data parsed ')
        if not rdata.get('results'):
            utils.pprint(rdata)
            # if the result was converted to json and didn't return results,
            # it means it's a server error that has a clear message.
            # That's why it gets processed in the update timer, where it can be passed in messages to user.
            self.result = rdata
            return
        # print('number of results: ', len(rdata.get('results', [])))
        if self.stopped():
            utils.p('stopping search : ' + str(query))
            return

        mt('search finished')
        i = 0

        thumb_small_urls = []
        thumb_small_filepaths = []
        thumb_full_urls = []
        thumb_full_filepaths = []
        # END OF PARSING
        for d in rdata.get('results', []):

            get_author(d)

            for f in d['files']:
                # TODO move validation of published assets to server, too many checks here.
                if (f['fileType'] == 'thumbnail'
                        and f['fileThumbnail'] is not None
                        and f['fileThumbnailLarge'] is not None):

                    thumb_small_urls.append(f['fileThumbnail'])
                    thumb_full_urls.append(f['fileThumbnailLarge'])

                    imgname = paths.extract_filename_from_url(
                        f['fileThumbnail'])
                    imgpath = os.path.join(tempdir, imgname)
                    thumb_small_filepaths.append(imgpath)

                    imgname = paths.extract_filename_from_url(
                        f['fileThumbnailLarge'])
                    imgpath = os.path.join(tempdir, imgname)
                    thumb_full_filepaths.append(imgpath)

        sml_thbs = zip(thumb_small_filepaths, thumb_small_urls)
        full_thbs = zip(thumb_full_filepaths, thumb_full_urls)

        # we save here because a missing thumbnail check is in the previous loop
        # we can also prepend previous results. These have downloaded thumbnails already...
        if params['get_next']:
            rdata['results'][0:0] = self.result['results']
        self.result = rdata
        # with open(json_filepath, 'w') as outfile:
        #     json.dump(rdata, outfile)

        killthreads_sml = []
        for k in thumb_sml_download_threads.keys():
            if k not in thumb_small_filepaths:
                killthreads_sml.append(k)  # do actual killing here?

        killthreads_full = []
        for k in thumb_full_download_threads.keys():
            if k not in thumb_full_filepaths:
                killthreads_full.append(k)  # do actual killing here?
        # TODO do the killing/ stopping here! remember threads might have finished inbetween!

        if self.stopped():
            utils.p('stopping search : ' + str(query))
            return

        # this loop handles downloading of small thumbnails
        for imgpath, url in sml_thbs:
            if imgpath not in thumb_sml_download_threads and not os.path.exists(
                    imgpath):
                thread = ThumbDownloader(url, imgpath)
                # thread = threading.Thread(target=download_thumbnail, args=([url, imgpath]),
                #                           daemon=True)
                thread.start()
                thumb_sml_download_threads[imgpath] = thread
                # threads.append(thread)

                if len(thumb_sml_download_threads) > maxthreads:
                    while len(thumb_sml_download_threads) > maxthreads:
                        # copy because this loop deletes finished threads from the dict
                        threads_copy = thumb_sml_download_threads.copy()
                        for tk, thread in threads_copy.items():
                            if not thread.is_alive():
                                thread.join()
                                del thumb_sml_download_threads[tk]
                                i += 1
        if self.stopped():
            utils.p('stopping search : ' + str(query))
            return
        while len(thumb_sml_download_threads) > 0:
            # copy because this loop deletes finished threads from the dict
            threads_copy = thumb_sml_download_threads.copy()
            for tk, thread in threads_copy.items():
                if not thread.is_alive():
                    thread.join()
                    del thumb_sml_download_threads[tk]
                    i += 1

        if self.stopped():
            utils.p('stopping search : ' + str(query))
            return

        # start downloading full thumbs in the end
        for imgpath, url in full_thbs:
            if imgpath not in thumb_full_download_threads and not os.path.exists(
                    imgpath):
                thread = ThumbDownloader(url, imgpath)
                # thread = threading.Thread(target=download_thumbnail, args=([url, imgpath]),
                #                           daemon=True)
                thread.start()
                thumb_full_download_threads[imgpath] = thread
        mt('thumbnails finished')
def download_blenderkit_material(asset_ref):
    from blenderkit import paths, append_link, utils, version_checker, rerequests
    import requests
    import bpy

    def create_asset_data(rdata, asset_type):
        for r in rdata['results']:
            if r['assetType'] == asset_type and len(r['files']) > 0:
                durl, tname = None, None
                allthumbs = []
                for f in r['files']:
                    if f['fileType'] == 'thumbnail':
                        tname = paths.extract_filename_from_url(
                            f['fileThumbnailLarge'])
                        small_tname = paths.extract_filename_from_url(
                            f['fileThumbnail'])
                        allthumbs.append(
                            tname)  # TODO just first thumb is used now.
                    tdict = {}
                    for i, t in enumerate(allthumbs):
                        tdict['thumbnail_%i' % i] = t
                    if f['fileType'] == 'blend':
                        durl = f['downloadUrl'].split('?')[0]
                        # fname = paths.extract_filename_from_url(f['filePath'])
                if durl and tname:
                    tooltip = blenderkit.search.generate_tooltip(r)
                    r['author']['id'] = str(r['author']['id'])
                    asset_data = {
                        'thumbnail': tname,
                        'thumbnail_small': small_tname,
                        # 'thumbnails':allthumbs,
                        'download_url': durl,
                        'id': r['id'],
                        'asset_base_id': r['assetBaseId'],
                        'name': r['name'],
                        'asset_type': r['assetType'],
                        'tooltip': tooltip,
                        'tags': r['tags'],
                        'can_download': r.get('canDownload', True),
                        'verification_status': r['verificationStatus'],
                        'author_id': r['author']['id'],
                        # 'author': r['author']['firstName'] + ' ' + r['author']['lastName']
                        # 'description': r['description'],
                    }
                    asset_data['downloaded'] = 0
                    # parse extra params needed for blender here
                    params = utils.params_to_dict(r['parameters'])
                    if asset_type == 'model':
                        if params.get('boundBoxMinX') != None:
                            bbox = {
                                'bbox_min': (float(params['boundBoxMinX']),
                                             float(params['boundBoxMinY']),
                                             float(params['boundBoxMinZ'])),
                                'bbox_max': (float(params['boundBoxMaxX']),
                                             float(params['boundBoxMaxY']),
                                             float(params['boundBoxMaxZ']))
                            }
                        else:
                            bbox = {
                                'bbox_min': (-.5, -.5, 0),
                                'bbox_max': (.5, .5, 1)
                            }
                        asset_data.update(bbox)
                    if asset_type == 'material':
                        asset_data['texture_size_meters'] = params.get(
                            'textureSizeMeters', 1.0)
                    asset_data.update(tdict)
                    r.update(asset_data)

    # main
    asset_base_id_str, asset_type_str = asset_ref.split()
    asset_type = asset_type_str.split(':')[1]
    scene_id = blenderkit.download.get_scene_id()
    reqstr = '?query=%s+%s+order:_score' % (asset_base_id_str, asset_type_str)
    reqstr += '&addon_version=%s' % version_checker.get_addon_version()
    reqstr += '&scene_uuid=%s' % scene_id
    url = paths.get_api_url() + 'search/' + reqstr
    api_key = bpy.context.preferences.addons['blenderkit'].preferences.api_key
    headers = utils.get_headers(api_key)
    r = rerequests.get(url, headers=headers)
    rdata = r.json()
    create_asset_data(rdata, asset_type)
    asset_data = rdata['results'][0]
    has_url = blenderkit.download.get_download_url(asset_data, scene_id,
                                                   api_key)
    file_names = paths.get_download_filenames(asset_data)
    file_name = file_names[0]
    if not os.path.exists(file_name):
        with open(file_name, "wb") as f:
            print("Downloading %s" % file_name)
            response = requests.get(asset_data['url'], stream=True)
            total_length = response.headers.get('Content-Length')
            if total_length is None:  # no content length header
                f.write(response.content)
            else:
                dl = 0
                for data in response.iter_content(chunk_size=4096):
                    dl += len(data)
                    f.write(data)
    material = append_link.append_material(file_names[-1])
    return material
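The asset_ref argument is split on whitespace and its second token on ':', so it is expected to look like the sketch below (the UUID is a hypothetical placeholder):

asset_ref = 'asset_base_id:0f253696-8787-4dd3-b297-bdf0dfa893f3 asset_type:material'
material = download_blenderkit_material(asset_ref)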