Example 1
def fetch_author(a_id, api_key):
    utils.p('fetch author')
    try:
        a_url = paths.get_api_url() + 'accounts/' + a_id + '/'
        headers = utils.get_headers(api_key)
        r = requests.get(a_url, headers=headers)
        if r.status_code == 200:
            adata = r.json()
            if adata.get('id') is None:
                # not a valid author record; log the payload for debugging
                utils.p(adata)
            # utils.p(adata)
            tasks_queue.add_task((write_author, (a_id, adata)))
            if adata.get('gravatarHash') is not None:
                gravatar_path = paths.get_temp_dir(
                    subdir=None) + adata['gravatarHash'] + '.jpg'
                url = "https://www.gravatar.com/avatar/" + adata[
                    'gravatarHash'] + '?d=404'
                r = requests.get(url, stream=False)
                if r.status_code == 200:
                    with open(gravatar_path, 'wb') as f:
                        f.write(r.content)
                    adata['gravatarImg'] = gravatar_path
    except Exception as e:
        utils.p(e)
    utils.p('finish fetch')
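
fetch_author performs blocking network I/O, so callers would normally run it off Blender's main thread. A minimal usage sketch under that assumption (author_id here is an illustrative value, not taken from the examples):

import threading

# hypothetical caller: fetch an author profile without blocking the UI
thread = threading.Thread(target=fetch_author,
                          args=(author_id, api_key),
                          daemon=True)
thread.start()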
Example 2
def update_ratings_work_hours(self, context):
    user_preferences = bpy.context.preferences.addons['blenderkit'].preferences
    api_key = user_preferences.api_key
    headers = utils.get_headers(api_key)
    if not hasattr(self, 'rating_work_hours'):
        # first option is for rating of assets that are from scene
        asset = self.id_data
        bkit_ratings = asset.bkit_ratings
        asset_id = asset['asset_data']['id']
    else:
        # this part is for operator rating:
        bkit_ratings = self
        asset_id = self.asset_id

    if bkit_ratings.rating_work_hours > 0.45:
        url = paths.get_api_url() + f'assets/{asset_id}/rating/'

        store_rating_local(asset_id,
                           type='working_hours',
                           value=bkit_ratings.rating_work_hours)

        ratings = [('working_hours', round(bkit_ratings.rating_work_hours, 1))]
        tasks_queue.add_task(
            (send_rating_to_thread_work_hours, (url, ratings, headers)),
            wait=2.5,
            only_last=True)
Example 3
    def execute(self, context):
        user_preferences = bpy.context.preferences.addons[
            'blenderkit'].preferences
        api_key = user_preferences.api_key
        headers = utils.get_headers(api_key)

        url = paths.get_api_url() + f'assets/{self.asset_id}/rating/'

        rtgs = []

        if self.rating_quality_ui == '':
            self.rating_quality = 0
        else:
            self.rating_quality = int(self.rating_quality_ui)

        if self.rating_quality > 0.1:
            rtgs = (('quality', self.rating_quality), )
            tasks_queue.add_task(
                (send_rating_to_thread_quality, (url, rtgs, headers)),
                wait=2.5,
                only_last=True)

        if self.rating_work_hours > 0.1:
            rtgs = (('working_hours', round(self.rating_work_hours, 1)), )
            tasks_queue.add_task(
                (send_rating_to_thread_work_hours, (url, rtgs, headers)),
                wait=2.5,
                only_last=True)
        return {'FINISHED'}
Example 4
def download_file(asset_data):
    # this is a simple non-threaded way to download files for the background resolution generation tool
    file_name = paths.get_download_filenames(asset_data)[
        0]  # prefer global dir if possible.

    if check_existing(asset_data):
        # this sends the thread for processing, where another check should occur, since the file might be corrupted.
        utils.p('not downloading, already in db')
        return file_name
    preferences = bpy.context.preferences.addons['blenderkit'].preferences
    api_key = preferences.api_key

    with open(file_name, "wb") as f:
        print("Downloading %s" % file_name)
        headers = utils.get_headers(api_key)

        response = requests.get(asset_data['url'], stream=True)
        total_length = response.headers.get('Content-Length')

        if total_length is None:  # no content length header
            f.write(response.content)
        else:
            dl = 0
            for data in response.iter_content(chunk_size=4096):
                dl += len(data)
                print(dl)
                f.write(data)
    return file_name
Example 5
def fetch_categories(API_key, force=False):
    url = paths.get_api_url() + 'categories/'

    headers = utils.get_headers(API_key)

    tempdir = paths.get_temp_dir()
    categories_filepath = os.path.join(tempdir, 'categories.json')
    if os.path.exists(categories_filepath):
        catfile_age = time.time() - os.path.getmtime(categories_filepath)
    else:
        catfile_age = 10000000

    # global catfetch_counter
    # catfetch_counter += 1
    # bk_logger.debug('fetching categories: ', catfetch_counter)
    # bk_logger.debug('age of cat file', catfile_age)
    try:
        # read categories only once per day maximum, or when forced to do so.
        if catfile_age > 86400 or force:
            bk_logger.debug('requesting categories from server')
            r = rerequests.get(url, headers=headers)
            rdata = r.json()
            categories = rdata['results']
            fix_category_counts(categories)
            # filter_categories(categories) #TODO this should filter categories for search, but not for upload. by now off.
            with open(categories_filepath, 'w', encoding='utf-8') as s:
                json.dump(categories, s, ensure_ascii=False, indent=4)
        tasks_queue.add_task((load_categories, ()))
    except Exception as e:
        bk_logger.debug('category fetching failed')
        bk_logger.exception(e)
        if not os.path.exists(categories_filepath):
            source_path = paths.get_addon_file(subpath='data' + os.sep +
                                               'categories.json')
            shutil.copy(source_path, categories_filepath)
Example 6
def get_assets_search():
    bpy.app.debug_value = 2

    results = []
    preferences = bpy.context.preferences.addons['blenderkit'].preferences
    url = paths.get_api_url() + 'search/all'
    i = 0
    while url is not None:
        headers = utils.get_headers(preferences.api_key)
        print('fetching assets from assets endpoint')
        print(url)
        retries = 0
        while retries < 3:
            r = rerequests.get(url, headers=headers)

            adata = {}  # ensure adata exists even if the first json() call fails
            try:
                adata = r.json()
                url = adata.get('next')
                print(i)
                i += 1
            except Exception as e:
                print(e)
                print('failed to get next')
                if retries == 2:
                    url = None
            if adata.get('results') is not None:
                results.extend(adata['results'])
                retries = 3
            print(f'fetched page {i}')
            retries += 1

    fpath = assets_db_path()
    with open(fpath, 'w') as s:
        json.dump(results, s)
Example 7
    def run(self):
        '''try to download file from blenderkit'''
        asset_data = self.asset_data
        tcom = self.tcom
        scene_id = self.scene_id
        api_key = self.api_key

        # TODO get real link here...
        has_url = get_download_url(asset_data, scene_id, api_key, tcom=tcom)

        if not has_url:
            tasks_queue.add_task(
                (ui.add_report,
                 ('Failed to obtain download URL for %s.' % asset_data['name'],
                  5, colors.RED)))
            return
        if tcom.error:
            return
        # only now can we check if the file already exists. This should have 2 levels, for materials and for brushes
        # different than for the non-free content. 'delete' is set when this is called after failed append tries.
        if check_existing(asset_data) and not tcom.passargs.get('delete'):
            # this sends the thread for processing, where another check should occur, since the file might be corrupted.
            tcom.downloaded = 100
            utils.p('not downloading, trying to append again')
            return

        file_name = paths.get_download_filenames(asset_data)[
            0]  # prefer global dir if possible.
        # for k in asset_data:
        #    print(asset_data[k])
        if self.stopped():
            utils.p('stopping download: ' + asset_data['name'])
            return

        with open(file_name, "wb") as f:
            print("Downloading %s" % file_name)
            headers = utils.get_headers(api_key)

            response = requests.get(asset_data['url'], stream=True)
            total_length = response.headers.get('Content-Length')

            if total_length is None:  # no content length header
                f.write(response.content)
            else:
                tcom.file_size = int(total_length)
                dl = 0
                totdata = []
                for data in response.iter_content(
                        chunk_size=4096 *
                        32):  #crashed here... why? investigate:
                    dl += len(data)
                    tcom.downloaded = dl
                    tcom.progress = int(100 * tcom.downloaded / tcom.file_size)
                    f.write(data)
                    if self.stopped():
                        utils.p('stopping download: ' + asset_data['name'])
                        os.remove(file_name)
                        return
Example 8
def rerequest(method, url, recursion=0, **kwargs):
    # first get any additional args from kwargs
    immediate = False
    if kwargs.get('immediate'):
        immediate = kwargs['immediate']
        kwargs.pop('immediate')
    # first normal attempt
    try:
        response = requests.request(method, url, **kwargs)
    except Exception as e:
        print(e)
        return None

    bk_logger.debug(url + str(kwargs))
    bk_logger.debug(response.status_code)

    if response.status_code == 401:
        try:
            rdata = response.json()
        except:
            rdata = {}

        tasks_queue.add_task((ui.add_report, (method + ' request failed. ' + str(rdata.get('detail')),)))

        if rdata.get('detail') == 'Invalid token.':
            user_preferences = bpy.context.preferences.addons['blenderkit'].preferences
            if user_preferences.api_key != '':
                if user_preferences.enable_oauth and user_preferences.api_key_refresh != '':
                    tasks_queue.add_task((ui.add_report, (
                        'refreshing token. If this fails, please login in BlenderKit Login panel.', 10)))
                    refresh_url = paths.get_bkit_url()
                    auth_token, refresh_token, oauth_response = bkit_oauth.refresh_token(
                        user_preferences.api_key_refresh, refresh_url)

                    # bk_logger.debug(auth_token, refresh_token)
                    if auth_token is not None:
                        if immediate:
                            # this can write tokens occasionally into prefs. used e.g. in upload. Only possible
                            #  in non-threaded tasks
                            bpy.context.preferences.addons['blenderkit'].preferences.api_key = auth_token
                            bpy.context.preferences.addons['blenderkit'].preferences.api_key_refresh = refresh_token
                        else:
                            tasks_queue.add_task((bkit_oauth.write_tokens, (auth_token, refresh_token, oauth_response)))

                        kwargs['headers'] = utils.get_headers(auth_token)
                        response = requests.request(method, url, **kwargs)
                        bk_logger.debug('reresult %s', response.status_code)
                        if response.status_code >= 400:
                            bk_logger.debug('reresult %s', response.text)
                            tasks_queue.add_task((ui.add_report, (
                                response.text, 10)))

                    else:
                        tasks_queue.add_task((ui.add_report, (
                            'Refreshing token failed. Please login manually.', 10)))
                        # tasks_queue.add_task((bkit_oauth.write_tokens, ('', '', '')))
                        tasks_queue.add_task((bpy.ops.wm.blenderkit_login, ('INVOKE_DEFAULT',)), fake_context=True)
    return response
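
A usage sketch for rerequest: a 401 response with detail 'Invalid token.' triggers a token refresh and a single retry with fresh headers. The 'search/' endpoint below is illustrative, and immediate=True is assumed to be safe only in non-threaded code, since it writes tokens straight into preferences:

# hypothetical call site; any authenticated endpoint works the same way
response = rerequest('get', paths.get_api_url() + 'search/',
                     headers=utils.get_headers(api_key), immediate=True)
if response is not None and response.status_code == 200:
    results = response.json().get('results', [])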
Example 9
def request_profile(api_key):
    a_url = paths.get_api_url() + 'me/'
    headers = utils.get_headers(api_key)
    r = requests.get(a_url, headers=headers)
    adata = r.json()
    if adata.get('user') is None:
        utils.p(adata)
        utils.p('getting profile failed')
        return None
    return adata
Example 10
def patch_individual_metadata(asset_id, metadata_dict, api_key):
    upload_data = metadata_dict
    url = paths.get_api_url() + 'assets/' + str(asset_id) + '/'
    headers = utils.get_headers(api_key)
    try:
        r = rerequests.patch(url, json=upload_data, headers=headers, verify=True)  # files = files,
    except requests.exceptions.RequestException as e:
        print(e)
        return {'CANCELLED'}
    return {'FINISHED'}
Example 11
def get_rating(asset_id):
    user_preferences = bpy.context.preferences.addons['blenderkit'].preferences
    api_key = user_preferences.api_key
    headers = utils.get_headers(api_key)
    rl = paths.get_api_url() + 'assets/' + asset_id + '/rating/'
    rtypes = ['quality', 'working_hours']
    for rt in rtypes:
        params = {'rating_type': rt}
        r = rerequests.get(rl, params=params, verify=True, headers=headers)
        print(r.text)
Example 12
def upload_file(upload_data, f):
    headers = utils.get_headers(upload_data['token'])
    version_id = upload_data['id']
    bg_blender.progress(
        f"uploading {f['type']} {os.path.basename(f['file_path'])}")
    upload_info = {
        'assetId': version_id,
        'fileType': f['type'],
        'fileIndex': f['index'],
        'originalFilename': os.path.basename(f['file_path'])
    }
    upload_create_url = paths.get_api_url() + 'uploads/'
    upload = rerequests.post(upload_create_url,
                             json=upload_info,
                             headers=headers,
                             verify=True)
    upload = upload.json()
    #
    chunk_size = 1024 * 1024 * 2
    # utils.pprint(upload)
    # file gets uploaded here:
    uploaded = False
    # s3 upload is now the only option
    for a in range(0, 5):
        if not uploaded:
            try:
                upload_response = requests.put(upload['s3UploadUrl'],
                                               data=upload_in_chunks(
                                                   f['file_path'], chunk_size,
                                                   f['type']),
                                               stream=True,
                                               verify=True)

                if 250 > upload_response.status_code > 199:
                    uploaded = True
                else:
                    print(upload_response.text)
                    bg_blender.progress(f'Upload failed, retry. {a}')
            except Exception as e:
                print(e)
                bg_blender.progress('Upload %s failed, retrying' % f['type'])
                time.sleep(1)

            # confirm single file upload to bkit server
            print(upload)
            upload_done_url = paths.get_api_url() + 'uploads_s3/' + upload['id'] + '/upload-file/'
            upload_response = rerequests.post(upload_done_url,
                                              headers=headers,
                                              verify=True)

    bg_blender.progress('finished uploading')

    return uploaded
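
upload_in_chunks is referenced here (and again in the upload_files example below) but is not part of this listing. A minimal sketch of what such an iterable might look like, assuming its only job is to feed requests.put a stream of byte chunks; the real implementation presumably also reports per-chunk progress for the given file type:

def upload_in_chunks(file_path, chunk_size, file_type):
    # generator sketch: requests.put() accepts an iterable of byte
    # chunks and sends them using chunked transfer encoding
    with open(file_path, 'rb') as f:
        while True:
            data = f.read(chunk_size)
            if not data:
                break
            yield data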
Example 13
def verification_status_change_thread(asset_id, state, api_key):
    upload_data = {
        "verificationStatus": state
    }
    url = paths.get_api_url() + 'assets/' + str(asset_id) + '/'
    headers = utils.get_headers(api_key)
    try:
        r = rerequests.patch(url, json=upload_data, headers=headers, verify=True)  # files = files,
    except requests.exceptions.RequestException as e:
        print(e)
        return {'CANCELLED'}
    return {'FINISHED'}
Example 14
def fetch_author(a_id, api_key):
    utils.p('fetch author')
    try:
        a_url = paths.get_api_url() + 'accounts/' + a_id + '/'
        headers = utils.get_headers(api_key)
        r = requests.get(a_url, headers=headers)
        adata = r.json()
        if adata.get('id') is None:
            utils.p(adata)
        # utils.p(adata)
        tasks_queue.add_task((write_author, (a_id, adata)))
    except Exception as e:
        utils.p(e)
    utils.p('finish fetch')
Example 15
def rerequest(method, url, **kwargs):
    # first get any additional args from kwargs
    immediate = False
    if kwargs.get('immediate'):
        immediate = kwargs['immediate']
        kwargs.pop('immediate')
    # first normal attempt
    response = requests.request(method, url, **kwargs)

    utils.p(url)
    utils.p(response.status_code)

    if response.status_code == 401:
        try:
            rdata = response.json()
        except:
            rdata = {}

        tasks_queue.add_task(
            (ui.add_report,
             (method + ' request failed. ' + str(rdata.get('detail')), )))

        if rdata.get('detail') == 'Invalid token.':
            user_preferences = bpy.context.preferences.addons[
                'blenderkit'].preferences
            if user_preferences.api_key != '':
                if user_preferences.enable_oauth and user_preferences.api_key_refresh != '':
                    tasks_queue.add_task((ui.add_report, (
                        'refreshing token. If this fails, please login in BlenderKit Login panel.',
                        10)))
                    refresh_url = paths.get_bkit_url()
                    auth_token, refresh_token, oauth_response = bkit_oauth.refresh_token(
                        user_preferences.api_key_refresh, refresh_url)

                    # utils.p(auth_token, refresh_token)
                    if auth_token is not None:
                        if immediate:
                            # this can write tokens occasionally into prefs. used e.g. in upload. Only possible
                            #  in non-threaded tasks
                            bpy.context.preferences.addons[
                                'blenderkit'].preferences.api_key = auth_token
                            bpy.context.preferences.addons[
                                'blenderkit'].preferences.api_key_refresh = refresh_token

                        kwargs['headers'] = utils.get_headers(auth_token)
                        response = requests.request(method, url, **kwargs)
                        utils.p('reresult', response.status_code)
                        if response.status_code >= 400:
                            utils.p('reresult', response.text)
    return response
Example 16
def upload_files(filepath, upload_data, files):
    headers = utils.get_headers(upload_data['token'])

    version_id = upload_data['id']
    for f in files:
        bg_blender.progress('uploading %s' % f['type'])
        upload_info = {
            'assetId': version_id,
            'fileType': f['type'],
            'fileIndex': f['index'],
            'originalFilename': os.path.basename(f['file_path'])
        }
        upload_create_url = paths.get_bkit_url() + 'uploads/'
        upload = requests.post(upload_create_url, json=upload_info, headers=headers, verify=True)
        upload = upload.json()

        # upheaders = {
        #     "accept": "application/json",
        #     "Authorization": "Bearer %s" % upload_data['token'],
        #     "Content-Type": "multipart/form-data",
        #     "Content-Disposition": 'form-data; name="file"; filename=%s' % f['file_path']
        #
        # }
        chunk_size = 1024 * 256

        # file gets uploaded here:
        uploaded = False
        # s3 upload is now the only option
        for a in range(0, 20):
            if not uploaded:
                try:
                    upload_response = requests.put(upload['s3UploadUrl'],
                                                   data=upload_in_chunks(f['file_path'], chunk_size, f['type']),
                                                   stream=True, verify=True)

                    if upload_response.status_code == 200:
                        uploaded = True
                    else:
                        bg_blender.progress('Upload failed, retry.')
                except Exception as e:
                    bg_blender.progress('Upload %s failed, retrying' % f['type'])
                    time.sleep(1)

                # confirm single file upload to bkit server
                upload_done_url = paths.get_bkit_url() + 'uploads_s3/' + upload['id'] + '/upload-file/'
                upload_response = requests.post(upload_done_url, headers=headers, verify=True)

        bg_blender.progress('finished uploading')

    return {'FINISHED'}
Example 17
def verification_status_change(self, context, asset_id, state):
    user_preferences = bpy.context.preferences.addons['blenderkit'].preferences
    upload_data = {"verificationStatus": state}
    url = paths.get_api_url() + 'assets/' + str(asset_id) + '/'
    headers = utils.get_headers(user_preferences.api_key)
    try:
        r = requests.patch(url, json=upload_data, headers=headers,
                           verify=True)  # files = files,
        #print('changed status ')
        #print(r.text)
    except requests.exceptions.RequestException as e:
        print(e)
        return {'CANCELLED'}
    return {'FINISHED'}
Example 18
def update_ratings_work_hours(self, context):
    user_preferences = bpy.context.preferences.addons['blenderkit'].preferences
    api_key = user_preferences.api_key
    headers = utils.get_headers(api_key)
    asset = self.id_data
    bkit_ratings = asset.bkit_ratings
    url = paths.get_api_url() + 'assets/' + asset['asset_data']['id'] + '/rating/'

    if bkit_ratings.rating_work_hours > 0.1:  # the work-hours rating is what gets sent below
        ratings = [('working_hours', round(bkit_ratings.rating_work_hours, 1))]
        tasks_queue.add_task(
            (send_rating_to_thread_work_hours, (url, ratings, headers)),
            wait=1,
            only_last=True)
Example 19
def upload_rating(asset):
    user_preferences = bpy.context.preferences.addons['blenderkit'].preferences
    api_key = user_preferences.api_key
    headers = utils.get_headers(api_key)

    bkit_ratings = asset.bkit_ratings
    # print('rating asset', asset_data['name'], asset_data['assetBaseId'])
    url = paths.get_api_url() + 'assets/' + asset['asset_data']['id'] + '/rating/'

    ratings = []

    if bkit_ratings.rating_quality > 0.1:
        ratings = (('quality', bkit_ratings.rating_quality), )
        tasks_queue.add_task(
            (send_rating_to_thread_quality, (url, ratings, headers)),
            wait=2.5,
            only_last=True)
    if bkit_ratings.rating_work_hours > 0.1:
        ratings = (('working_hours', round(bkit_ratings.rating_work_hours,
                                           1)), )
        tasks_queue.add_task(
            (send_rating_to_thread_work_hours, (url, ratings, headers)),
            wait=2.5,
            only_last=True)

    thread = threading.Thread(target=upload_rating_thread,
                              args=(url, ratings, headers))
    thread.start()

    url = paths.get_api_url() + 'assets/' + asset['asset_data']['id'] + '/review'

    reviews = {
        'reviewText': bkit_ratings.rating_compliments,
        'reviewTextProblems': bkit_ratings.rating_problems,
    }
    if not (bkit_ratings.rating_compliments == ''
            and bkit_ratings.rating_problems == ''):
        thread = threading.Thread(target=upload_review_thread,
                                  args=(url, reviews, headers))
        thread.start()

    # the info that the user rated an item is stored in the scene
    s = bpy.context.scene
    s['assets rated'] = s.get('assets rated', {})
    if bkit_ratings.rating_quality > 0.1 and bkit_ratings.rating_work_hours > 0.1:
        s['assets rated'][asset['asset_data']['assetBaseId']] = True
Example 20
def update_ratings_work_hours(self, context):
    user_preferences = bpy.context.preferences.addons['blenderkit'].preferences
    api_key = user_preferences.api_key
    headers = utils.get_headers(api_key)
    asset = self.id_data
    if asset:
        bkit_ratings = asset.bkit_ratings
        url = paths.get_api_url() + 'assets/' + asset['asset_data']['id'] + '/rating/'
    else:
        # this part is for operator rating:
        bkit_ratings = self
        url = paths.get_api_url() + f'assets/{self.asset_id}/rating/'

    if bkit_ratings.rating_work_hours > 0.45:
        ratings = [('working_hours', round(bkit_ratings.rating_work_hours, 1))]
        tasks_queue.add_task((send_rating_to_thread_work_hours, (url, ratings, headers)), wait=2.5, only_last=True)
Example 21
def patch_asset_empty(asset_id, api_key):
    '''
        This function patches the asset with empty data so that it gets reindexed on the server.
        Should be removed once this is fixed on the server and
        the server is able to reindex after uploads of resolutions.
    '''
    upload_data = {
    }
    url = paths.get_api_url() + 'assets/' + str(asset_id) + '/'
    headers = utils.get_headers(api_key)
    try:
        r = rerequests.patch(url, json=upload_data, headers=headers, verify=True)  # files = files,
    except requests.exceptions.RequestException as e:
        print(e)
        return {'CANCELLED'}
    return {'FINISHED'}
Example 22
def get_download_url(asset_data, scene_id, api_key, tcom=None):
    '''Retrieves the download url. The server checks if the user can download the item.'''
    mt = time.time()

    headers = utils.get_headers(api_key)

    data = {'scene_uuid': scene_id}
    r = None
    try:
        r = requests.get(asset_data['download_url'],
                         params=data,
                         headers=headers)
    except Exception as e:
        print(e)
        if tcom is not None:
            tcom.error = True

    if r is None:
        tcom.report = 'Connection Error'
        tcom.error = True
        return 'Connection Error'

    if r.status_code < 400:
        data = r.json()
        url = data['filePath']
        asset_data['url'] = url
        asset_data['file_name'] = paths.extract_filename_from_url(url)
        return True

    if r.status_code == 403:
        r = 'You need the Standard plan to get this item.'
        tcom.report = r
        r1 = 'All materials and brushes are available for free. Only users registered to the Standard plan can use all models.'
        tasks_queue.add_task((ui.add_report, (r1, 5, colors.RED)))
        tcom.error = True

    if r.status_code == 401:
        tcom.report = 'Invalid API key'
        tcom.error = True
        return 'Invalid API key'
    elif r.status_code >= 500:
        tcom.report = 'Server error'
        tcom.error = True
    return False
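
tcom is a thread-communication object shared between the download thread and the UI; only its attributes appear in these examples. A minimal stand-in with the fields used above, offered as an assumption rather than the actual BlenderKit class:

class ThreadCom:
    # sketch of the fields read and written by the download code above
    def __init__(self):
        self.error = False
        self.report = ''
        self.downloaded = 0
        self.progress = 0
        self.file_size = 0
        self.passargs = {}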
Example 23
    def execute(self, context):
        user_preferences = bpy.context.preferences.addons[
            'blenderkit'].preferences
        api_key = user_preferences.api_key
        headers = utils.get_headers(api_key)

        url = paths.get_api_url() + f'assets/{self.asset_id}/rating/'

        rtgs = []

        # guard against an empty UI value; int('') would raise ValueError
        if self.rating_quality_ui == '':
            self.rating_quality = 0
        else:
            self.rating_quality = int(self.rating_quality_ui)

        if self.rating_quality > 0.1:
            rtgs.append(('quality', self.rating_quality))
        if self.rating_work_hours > 0.1:
            rtgs.append(('working_hours', round(self.rating_work_hours, 1)))

        thread = threading.Thread(target=upload_rating_thread,
                                  args=(url, rtgs, headers))
        thread.start()
        return {'FINISHED'}
Example 24
def get_download_url(asset_data, scene_id, api_key, tcom=None):
    '''Retrieves the download url. The server checks if the user can download the item.'''
    mt = time.time()

    headers = utils.get_headers(api_key)

    data = {'scene_uuid': scene_id}
    r = None
    try:
        r = requests.get(asset_data['download_url'],
                         params=data,
                         headers=headers)
    except Exception as e:
        print(e)
        if tcom is not None:
            tcom.error = True

    if r is None:
        tcom.report = 'Connection Error'
        tcom.error = True
        return 'Connection Error'
    if r.status_code < 400:
        data = r.json()
        url = data['filePath']
        asset_data['url'] = url
        asset_data['file_name'] = paths.extract_filename_from_url(url)
        return True

    if r.status_code == 403:
        tcom.report = 'Available only in higher plans.'
        tcom.error = True
        return 'Available only in higher plans.'
    if r.status_code == 401:
        tcom.report = 'Invalid API key'
        tcom.error = True
        return 'Invalid API key'
    elif r.status_code >= 500:
        tcom.report = 'Server error'
        tcom.error = True
Example 25
def mark_for_validation(self, context, asset_type):
    props = utils.get_upload_props()
    props.upload_state = 'marking for validation'
    user_preferences = bpy.context.preferences.addons['blenderkit'].preferences

    upload_data = {"verificationStatus": "ready"}

    url = paths.get_api_url() + 'assets/'

    headers = utils.get_headers(user_preferences.api_key)

    url += props.id + '/'

    try:
        r = requests.patch(url, json=upload_data, headers=headers,
                           verify=True)  # files = files,
        props.upload_state = 'marked for validation'
    except requests.exceptions.RequestException as e:
        props.upload_state = str(e)
        props.uploading = False
        return {'CANCELLED'}
    return {'FINISHED'}
Example 26
def fetch_categories(API_key):
    url = paths.get_api_url() + 'categories/'

    headers = utils.get_headers(API_key)

    tempdir = paths.get_temp_dir()
    categories_filepath = os.path.join(tempdir, 'categories.json')

    try:
        r = rerequests.get(url, headers=headers)
        rdata = r.json()
        categories = rdata['results']
        fix_category_counts(categories)
        # filter_categories(categories) #TODO this should filter categories for search, but not for upload. by now off.
        with open(categories_filepath, 'w') as s:
            json.dump(categories, s, indent=4)
        tasks_queue.add_task((load_categories, ()))
    except Exception as e:
        utils.p('category fetching failed')
        utils.p(e)
        if not os.path.exists(categories_filepath):
            source_path = paths.get_addon_file(subpath='data' + os.sep + 'categories.json')
            shutil.copy(source_path, categories_filepath)
Example 27
def fetch_categories(API_key):
    BLENDERKIT_API_MAIN = "https://www.blenderkit.com/api/v1/"

    url = paths.get_bkit_url() + 'categories/'

    headers = utils.get_headers(API_key)

    tempdir = paths.get_temp_dir()
    categories_filepath = os.path.join(tempdir, 'categories.json')

    try:
        r = requests.get(url, headers=headers)
        rdata = r.json()
        categories = rdata['results']
        fix_category_counts(categories)
        # filter_categories(categories) #TODO this should filter categories for search, but not for upload. by now off.
        with open(categories_filepath, 'w') as s:
            json.dump(categories, s, indent=4)
    except:
        # print('category fetching failed')
        if not os.path.exists(categories_filepath):
            source_path = paths.get_addon_file(subpath='data' + os.sep +
                                               'categories.json')
            shutil.copy(source_path, categories_filepath)
Example 28
def report_usages():
    '''report the usage of assets to the server.'''
    mt = time.time()
    user_preferences = bpy.context.preferences.addons['blenderkit'].preferences
    api_key = user_preferences.api_key
    sid = get_scene_id()
    headers = utils.get_headers(api_key)
    url = paths.get_api_url() + paths.BLENDERKIT_REPORT_URL

    assets = {}
    asset_obs = []
    scene = bpy.context.scene
    asset_usages = {}

    for ob in scene.collection.objects:
        if ob.get('asset_data') is not None:
            asset_obs.append(ob)

    for ob in asset_obs:
        asset_data = ob['asset_data']
        abid = asset_data['asset_base_id']

        if assets.get(abid) is None:
            asset_usages[abid] = {'count': 1}
            assets[abid] = asset_data
        else:
            asset_usages[abid]['count'] += 1

    # brushes
    for b in bpy.data.brushes:
        if b.get('asset_data') is not None:
            abid = b['asset_data']['asset_base_id']
            asset_usages[abid] = {'count': 1}
            assets[abid] = b['asset_data']
    # materials
    for ob in scene.collection.objects:
        for ms in ob.material_slots:
            m = ms.material

            if m is not None and m.get('asset_data') is not None:

                abid = m['asset_data']['asset_base_id']
                if assets.get(abid) is None:
                    asset_usages[abid] = {'count': 1}
                    assets[abid] = m['asset_data']
                else:
                    asset_usages[abid]['count'] += 1

    assets_list = []
    assets_reported = scene.get('assets reported', {})

    new_assets_count = 0
    for k in asset_usages.keys():
        if k not in assets_reported.keys():
            data = asset_usages[k]
            list_item = {
                'asset': k,
                'usageCount': data['count'],
                'proximitySet': data.get('proximity', [])
            }
            assets_list.append(list_item)
            new_assets_count += 1
            assets_reported[k] = True

    scene['assets reported'] = assets_reported

    if new_assets_count == 0:
        utils.p('no new assets were added')
        return
    usage_report = {
        'scene': sid,
        'reportType': 'save',
        'assetusageSet': assets_list
    }

    au = scene.get('assets used', {})
    ad = scene.get('assets deleted', {})

    ak = assets.keys()
    for k in au.keys():
        if k not in ak:
            ad[k] = au[k]
        else:
            if k in ad:
                ad.pop(k)

    # scene['assets used'] = {}
    for k in ak:  # rewrite assets used.
        scene['assets used'][k] = assets[k]

    ########### check ratings here too:
    scene['assets rated'] = scene.get('assets rated', {})
    for k in assets.keys():
        scene['assets rated'][k] = scene['assets rated'].get(k, False)
    thread = threading.Thread(target=utils.requests_post_thread,
                              args=(url, usage_report, headers))
    thread.start()
    mt = time.time() - mt
    print('report generation:                ', mt)
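
utils.requests_post_thread is only referenced here; given how it is handed to threading.Thread with (url, usage_report, headers), a sketch consistent with that call site might be (hypothetical):

def requests_post_thread(url, json_data, headers):
    # fire-and-forget POST on a worker thread so saving the scene stays responsive
    requests.post(url, json=json_data, headers=headers, verify=True)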
Example 29
def start_upload(self, context, asset_type, as_new, metadata_only):
    '''start upload process, by processing data'''
    props = utils.get_upload_props()
    storage_quota_ok = check_storage_quota(props)
    if not storage_quota_ok:
        self.report({'ERROR_INVALID_INPUT'}, props.report)
        return {'CANCELLED'}

    location = get_upload_location(props)
    props.upload_state = 'preparing upload'

    auto_fix(asset_type = asset_type)

    # do this for fixing long tags in some upload cases
    props.tags = props.tags[:]


    props.name = props.name.strip()
    # TODO  move this to separate function
    # check for missing metadata
    if asset_type == 'MODEL':
        get_missing_data_model(props)
    elif asset_type == 'SCENE':
        get_missing_data_scene(props)
    elif asset_type == 'MATERIAL':
        get_missing_data_material(props)
    elif asset_type == 'BRUSH':
        get_missing_data_brush(props)

    if props.report != '':
        self.report({'ERROR_INVALID_INPUT'}, props.report)
        return {'CANCELLED'}

    if as_new:
        props.asset_base_id = ''
        props.id = ''
    export_data, upload_data, eval_path_computing, eval_path_state, eval_path, props = get_upload_data(self, context,
                                                                                                       asset_type)
    # utils.pprint(upload_data)
    upload_data['parameters'] = params_to_dict(
        upload_data['parameters'])  # weird array conversion only for upload, not for tooltips.

    binary_path = bpy.app.binary_path
    script_path = os.path.dirname(os.path.realpath(__file__))
    basename, ext = os.path.splitext(bpy.data.filepath)
    # if not basename:
    #     basename = os.path.join(basename, "temp")
    if not ext:
        ext = ".blend"
    tempdir = tempfile.mkdtemp()
    source_filepath = os.path.join(tempdir, "export_blenderkit" + ext)
    clean_file_path = paths.get_clean_filepath()
    data = {
        'clean_file_path': clean_file_path,
        'source_filepath': source_filepath,
        'temp_dir': tempdir,
        'export_data': export_data,
        'upload_data': upload_data,
        'debug_value': bpy.app.debug_value,
    }
    datafile = os.path.join(tempdir, BLENDERKIT_EXPORT_DATA_FILE)

    # check if thumbnail exists:

    if not os.path.exists(export_data["thumbnail_path"]):
        props.upload_state = 'Thumbnail not found'
        props.uploading = False
        return {'CANCELLED'}

    # first upload metadata to server, so it can be saved inside the current file
    url = paths.get_api_url() + 'assets/'

    headers = utils.get_headers(upload_data['token'])

    # upload_data['license'] = 'ovejajojo'
    json_metadata = upload_data  # json.dumps(upload_data, ensure_ascii=False).encode('utf8')
    global reports
    if props.asset_base_id == '':
        try:
            r = requests.post(url, json=json_metadata, headers=headers, verify=True)  # files = files,
            props.upload_state = 'uploaded metadata'
            utils.p(r.text)
        except requests.exceptions.RequestException as e:
            print(e)
            props.upload_state = str(e)
            props.uploading = False
            return {'CANCELLED'}

    else:
        url += props.id + '/'
        try:
            if not metadata_only:
                json_metadata["verificationStatus"] = "uploading"
            r = requests.put(url, json=json_metadata, headers=headers, verify=True)  # files = files,
            props.upload_state = 'uploaded metadata'
            # parse the request
            # print('uploaded metadata')
            # print(r.text)
        except requests.exceptions.RequestException as e:
            print(e)
            props.upload_state = str(e)
            props.uploading = False
            return {'CANCELLED'}

    # props.upload_state = 'step 1'
    if metadata_only:
        props.uploading = False

        return {'FINISHED'}
    try:
        rj = r.json()
        if props.asset_base_id == '':
            props.asset_base_id = rj['assetBaseId']
            props.id = rj['id']

        upload_data['assetBaseId'] = props.asset_base_id
        upload_data['id'] = props.id

        bpy.ops.wm.save_mainfile()
        # fa
        props.uploading = True
        # save a copy of actual scene but don't interfere with the users models
        bpy.ops.wm.save_as_mainfile(filepath=source_filepath, compress=False, copy=True)

        with open(datafile, 'w') as s:
            json.dump(data, s)

        proc = subprocess.Popen([
            binary_path,
            "--background",
            "-noaudio",
            clean_file_path,
            "--python", os.path.join(script_path, "upload_bg.py"),
            "--", datafile  # ,filepath, tempdir
        ], bufsize=5000, stdout=subprocess.PIPE, stdin=subprocess.PIPE)

        bg_blender.add_bg_process(eval_path_computing=eval_path_computing, eval_path_state=eval_path_state,
                                  eval_path=eval_path, process_type='UPLOAD', process=proc, location=location)

    except Exception as e:
        props.upload_state = str(e)
        props.uploading = False
        return {'CANCELLED'}

    return {'FINISHED'}
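
The background Blender process receives the path to the JSON datafile after the '--' separator, which background Blender reserves for script arguments. A sketch of how upload_bg.py might read it back (hypothetical; the real script then performs the upload described by export_data and upload_data):

import json
import sys

# everything after '--' is passed through to the script untouched
datafile = sys.argv[sys.argv.index('--') + 1]
with open(datafile, 'r') as f:
    data = json.load(f)
upload_data = data['upload_data']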
Example 30
    def run(self):
        maxthreads = 300
        maximages = 50
        query = self.query
        params = self.params
        global reports

        t = time.time()
        mt('search thread started')
        tempdir = paths.get_temp_dir('%s_search' % query['asset_type'])
        json_filepath = os.path.join(
            tempdir, '%s_searchresult.json' % query['asset_type'])

        headers = utils.get_headers(params['api_key'])

        rdata = {}
        rdata['results'] = []

        if params['get_next']:
            with open(json_filepath, 'r') as infile:
                try:
                    origdata = json.load(infile)
                    urlquery = origdata['next']
                    if urlquery is None:
                        return
                except:
                    # in case no search results found on drive we don't do next page loading.
                    params['get_next'] = False
        if not params['get_next']:
            # build a new request
            url = paths.get_api_url() + 'search/'

            # build request manually
            # TODO use real queries
            requeststring = '?query=' + query['keywords'].lower() + '+'
            #
            for i, q in enumerate(query):
                requeststring += q + ':' + str(query[q]).lower()
                if i < len(query) - 1:
                    requeststring += '+'

            requeststring += '&addon_version=%s' % params['addon_version']
            if params.get('scene_uuid') is not None:
                requeststring += '&scene_uuid=%s' % params['scene_uuid']

            urlquery = url + requeststring

        try:
            utils.p(urlquery)
            r = requests.get(urlquery, headers=headers)
            reports = ''
            # utils.p(r.text)
        except requests.exceptions.RequestException as e:
            print(e)
            reports = e
            # props.report = e
            return
        mt('response is back ')
        try:
            rdata = r.json()
        except Exception as inst:
            reports = r.text
            print(inst)

        mt('data parsed ')

        # filter results here:
        # todo remove this in future
        nresults = []
        for d in rdata.get('results', []):
            # TODO this code is for filtering brush types, should vanish after we implement filter in Elastic
            mode = None
            if query['asset_type'] == 'brush':
                for p in d['parameters']:
                    if p['parameterType'] == 'mode':
                        mode = p['value']
            if query['asset_type'] != 'brush' or (
                    query.get('brushType') is not None
                    and query['brushType'] == mode):
                nresults.append(d)
        rdata['results'] = nresults

        # print('number of results: ', len(rdata.get('results', [])))
        if self.stopped():
            utils.p('stopping search : ' + query['keywords'])
            return

        mt('search finished')
        i = 0

        thumb_small_urls = []
        thumb_small_filepaths = []
        thumb_full_urls = []
        thumb_full_filepaths = []
        # END OF PARSING
        getting_authors = {}
        for d in rdata.get('results', []):
            if getting_authors.get(d['author']['id']) is None:
                get_author(d)
                getting_authors[d['author']['id']] = True

            for f in d['files']:
                # TODO move validation of published assets to server, too many checks here.
                if f['fileType'] == 'thumbnail' and f[
                        'fileThumbnail'] is not None and f[
                            'fileThumbnailLarge'] is not None:
                    if f['fileThumbnail'] is None:
                        f['fileThumbnail'] = 'NONE'
                    if f['fileThumbnailLarge'] is None:
                        f['fileThumbnailLarge'] = 'NONE'

                    thumb_small_urls.append(f['fileThumbnail'])
                    thumb_full_urls.append(f['fileThumbnailLarge'])

                    imgname = paths.extract_filename_from_url(
                        f['fileThumbnail'])
                    imgpath = os.path.join(tempdir, imgname)
                    thumb_small_filepaths.append(imgpath)

                    imgname = paths.extract_filename_from_url(
                        f['fileThumbnailLarge'])
                    imgpath = os.path.join(tempdir, imgname)
                    thumb_full_filepaths.append(imgpath)

        sml_thbs = zip(thumb_small_filepaths, thumb_small_urls)
        full_thbs = zip(thumb_full_filepaths, thumb_full_urls)

        # we save here because a missing thumbnail check is in the previous loop
        # we can also prepend previous results. These already have thumbnails downloaded...
        if params['get_next']:
            rdata['results'][0:0] = origdata['results']

        with open(json_filepath, 'w') as outfile:
            json.dump(rdata, outfile)

        killthreads_sml = []
        for k in thumb_sml_download_threads.keys():
            if k not in thumb_small_filepaths:
                killthreads_sml.append(k)  # do actual killing here?

        killthreads_full = []
        for k in thumb_full_download_threads.keys():
            if k not in thumb_full_filepaths:
                killthreads_full.append(k)  # do actual killing here?
        # TODO do the killing/stopping here! remember threads might have finished in between!

        if self.stopped():
            utils.p('stopping search : ' + query['keywords'])
            return

        # this loop handles downloading of small thumbnails
        for imgpath, url in sml_thbs:
            if imgpath not in thumb_sml_download_threads and not os.path.exists(
                    imgpath):
                thread = ThumbDownloader(url, imgpath)
                # thread = threading.Thread(target=download_thumbnail, args=([url, imgpath]),
                #                           daemon=True)
                thread.start()
                thumb_sml_download_threads[imgpath] = thread
                # threads.append(thread)

                if len(thumb_sml_download_threads) > maxthreads:
                    while len(thumb_sml_download_threads) > maxthreads:
                        threads_copy = thumb_sml_download_threads.copy()  # the for loop can erase items
                        for tk, thread in threads_copy.items():
                            if not thread.is_alive():
                                thread.join()
                                # utils.p(x)
                                del thumb_sml_download_threads[tk]
                                # utils.p('fetched thumbnail ', i)
                                i += 1
        if self.stopped():
            utils.p('stopping search : ' + query['keywords'])
            return
        idx = 0
        while len(thumb_sml_download_threads) > 0:
            threads_copy = thumb_sml_download_threads.copy()  # the for loop can erase items
            for tk, thread in threads_copy.items():
                if not thread.is_alive():
                    thread.join()
                    del thumb_sml_download_threads[tk]
                    i += 1

        if self.stopped():
            utils.p('stopping search : ' + query['keywords'])
            return

        # start downloading full thumbs in the end
        for imgpath, url in full_thbs:
            if imgpath not in thumb_full_download_threads and not os.path.exists(
                    imgpath):
                thread = ThumbDownloader(url, imgpath)
                # thread = threading.Thread(target=download_thumbnail, args=([url, imgpath]),
                #                           daemon=True)
                thread.start()
                thumb_full_download_threads[imgpath] = thread
        mt('thumbnails finished')
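
ThumbDownloader is used above but not included in this listing. A minimal sketch of a daemon thread that downloads a single thumbnail, mirroring the gravatar download in Example 1 (an assumption, not the actual class):

import threading

import requests


class ThumbDownloader(threading.Thread):
    def __init__(self, url, path):
        super().__init__(daemon=True)
        self.url = url
        self.path = path

    def run(self):
        # fetch one thumbnail to disk; the 200 check mirrors Example 1
        r = requests.get(self.url)
        if r.status_code == 200:
            with open(self.path, 'wb') as f:
                f.write(r.content)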