def send_message(self, message):
    message = str(message).replace("'", "")
    # this adds a UI report, but also writes above the upload panel fields
    tasks_queue.add_task((ui.add_report, (message,)))
    estring = f"{self.export_data['eval_path_state']} = '{message}'"
    tasks_queue.add_task((exec, (estring,)))
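# Every snippet in this section hands work off through tasks_queue.add_task,
# whose definition is not shown here. A minimal sketch of the assumed
# interface, inferred from the call sites below (the real
# blenderkit.tasks_queue module may differ):
import queue

task_queue = queue.Queue()  # hypothetical module-level queue, drained by a main-thread timer

def add_task(task, wait=0, only_last=False):
    # 'task' is a (callable, args_tuple) pair; 'wait' delays execution and
    # 'only_last' keeps only the newest queued task of this kind, matching
    # the keyword arguments used at the call sites in this section.
    task_queue.put((task, wait, only_last))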
def get_temp_dir(subdir=None):
    user_preferences = bpy.context.preferences.addons['blenderkit'].preferences
    # tempdir = user_preferences.temp_dir
    tempdir = os.path.join(tempfile.gettempdir(), 'bkit_temp')
    if tempdir.startswith('//'):
        tempdir = bpy.path.abspath(tempdir)
    try:
        if not os.path.exists(tempdir):
            os.makedirs(tempdir)
        if subdir is not None:
            tempdir = os.path.join(tempdir, subdir)
            if not os.path.exists(tempdir):
                os.makedirs(tempdir)
        cleanup_old_folders()
    except Exception:
        tasks_queue.add_task(
            (ui.add_report, ('Cache directory not found. Resetting cache folder path.',)))
        p = default_global_dict()
        if p == user_preferences.global_dir:
            message = ('Global dir was already default; please set a global directory '
                       'in the addon preferences to a directory where you have write permissions.')
            tasks_queue.add_task((ui.add_report, (message,)))
            return None
        user_preferences.global_dir = p
        tempdir = get_temp_dir(subdir=subdir)
    return tempdir
def get_rating(asset_id, headers):
    '''
    Retrieve ratings from the BlenderKit server. Can be run from a thread.

    Parameters
    ----------
    asset_id
    headers

    Returns
    -------
    ratings - dict of type:value ratings
    '''
    url = paths.get_api_url() + 'assets/' + asset_id + '/rating/'
    params = {}
    r = rerequests.get(url, params=params, verify=True, headers=headers)
    if r is None:
        return
    if r.status_code == 200:
        rj = r.json()
        ratings = {}
        # store ratings - send them to the task queue
        for rating in rj['results']:
            ratings[rating['ratingType']] = rating['score']
            tasks_queue.add_task(
                (store_rating_local, (asset_id, rating['ratingType'], rating['score'])))
        if len(rj['results']) == 0:
            # store empty ratings too, so that the server isn't checked repeatedly
            tasks_queue.add_task((store_rating_local_empty, (asset_id,)))
def update_ratings_work_hours(self, context):
    user_preferences = bpy.context.preferences.addons['blenderkit'].preferences
    api_key = user_preferences.api_key
    headers = utils.get_headers(api_key)
    if not hasattr(self, 'rating_work_hours'):
        # the first branch is for rating assets that are in the scene
        asset = self.id_data
        bkit_ratings = asset.bkit_ratings
        asset_id = asset['asset_data']['id']
    else:
        # this branch is for operator rating:
        bkit_ratings = self
        asset_id = self.asset_id
    if bkit_ratings.rating_work_hours > 0.45:
        url = paths.get_api_url() + f'assets/{asset_id}/rating/'
        store_rating_local(asset_id, type='working_hours',
                           value=bkit_ratings.rating_work_hours)
        ratings = [('working_hours', round(bkit_ratings.rating_work_hours, 1))]
        tasks_queue.add_task(
            (send_rating_to_thread_work_hours, (url, ratings, headers)),
            wait=2.5, only_last=True)
def execute(self, context):
    user_preferences = bpy.context.preferences.addons['blenderkit'].preferences
    props = bpy.context.scene.blenderkitUI
    if self.subcategory1 != 'NONE':
        category = self.subcategory1
    elif self.subcategory != 'NONE':
        category = self.subcategory
    else:
        category = self.category
    utils.update_tags(self, context)
    mdict = {
        'category': category,
        'displayName': self.name,
        'description': self.description,
        'tags': comma2array(self.tags),
        'isPrivate': self.is_private == 'PRIVATE',
        'license': self.license,
    }
    thread = threading.Thread(target=patch_individual_metadata,
                              args=(self.asset_id, mdict, user_preferences.api_key))
    thread.start()
    tasks_queue.add_task((ui.add_report,
                          (f'Uploading metadata for {self.name}. '
                           f'Refresh search results to see that the changes applied correctly.', 8,)))
    return {'FINISHED'}
def fetch_categories(API_key, force=False):
    url = paths.get_api_url() + 'categories/'
    headers = utils.get_headers(API_key)
    tempdir = paths.get_temp_dir()
    categories_filepath = os.path.join(tempdir, 'categories.json')
    if os.path.exists(categories_filepath):
        catfile_age = time.time() - os.path.getmtime(categories_filepath)
    else:
        catfile_age = 10000000
    try:
        # refresh categories at most once per day, or when forced to do so
        if catfile_age > 86400 or force:
            bk_logger.debug('requesting categories from server')
            r = rerequests.get(url, headers=headers)
            rdata = r.json()
            categories = rdata['results']
            fix_category_counts(categories)
            # filter_categories(categories)  # TODO: should filter categories for search, but not for upload; off for now.
            with open(categories_filepath, 'w', encoding='utf-8') as s:
                json.dump(categories, s, ensure_ascii=False, indent=4)
            tasks_queue.add_task((load_categories, ()))
    except Exception as e:
        bk_logger.debug('category fetching failed')
        bk_logger.exception(e)
        if not os.path.exists(categories_filepath):
            source_path = paths.get_addon_file(subpath='data' + os.sep + 'categories.json')
            shutil.copy(source_path, categories_filepath)
def fetch_author(a_id, api_key):
    utils.p('fetch author')
    try:
        a_url = paths.get_api_url() + 'accounts/' + a_id + '/'
        headers = utils.get_headers(api_key)
        r = requests.get(a_url, headers=headers)
        if r.status_code == 200:
            adata = r.json()
            if 'id' not in adata:  # hasattr() never matches on a dict; check the key instead
                utils.p(adata)
            tasks_queue.add_task((write_author, (a_id, adata)))
            if adata.get('gravatarHash') is not None:
                gravatar_path = paths.get_temp_dir(subdir=None) + adata['gravatarHash'] + '.jpg'
                url = "https://www.gravatar.com/avatar/" + adata['gravatarHash'] + '?d=404'
                r = requests.get(url, stream=False)
                if r.status_code == 200:
                    with open(gravatar_path, 'wb') as f:
                        f.write(r.content)
                    adata['gravatarImg'] = gravatar_path
    except Exception as e:
        utils.p(e)
    utils.p('finish fetch')
def refresh_token(api_key_refresh):
    authenticator = oauth.SimpleOAuthAuthenticator(
        server_url=paths.get_bkit_url(), client_id=CLIENT_ID, ports=PORTS)
    auth_token, refresh_token = authenticator.get_refreshed_token(api_key_refresh)
    if auth_token is not None and refresh_token is not None:
        tasks_queue.add_task((write_tokens, (auth_token, refresh_token)))
def execute(self, context):
    user_preferences = bpy.context.preferences.addons['blenderkit'].preferences
    api_key = user_preferences.api_key
    headers = utils.get_headers(api_key)
    url = paths.get_api_url() + f'assets/{self.asset_id}/rating/'
    rtgs = []
    if self.rating_quality_ui == '':
        self.rating_quality = 0
    else:
        self.rating_quality = int(self.rating_quality_ui)
    if self.rating_quality > 0.1:
        rtgs = (('quality', self.rating_quality),)
        tasks_queue.add_task(
            (send_rating_to_thread_quality, (url, rtgs, headers)),
            wait=2.5, only_last=True)
    if self.rating_work_hours > 0.1:
        rtgs = (('working_hours', round(self.rating_work_hours, 1)),)
        tasks_queue.add_task(
            (send_rating_to_thread_work_hours, (url, rtgs, headers)),
            wait=2.5, only_last=True)
    return {'FINISHED'}
def login(signup, url, r_url):
    authenticator = oauth.SimpleOAuthAuthenticator(
        server_url=url, client_id=CLIENT_ID, ports=PORTS)
    auth_token, refresh_token = authenticator.get_new_token(
        register=signup, redirect_url=r_url)
    utils.p('tokens retrieved')
    tasks_queue.add_task((write_tokens, (auth_token, refresh_token)))
def fetch_profile(api_key):
    utils.p('fetch profile')
    try:
        adata = request_profile(api_key)
        if adata is not None:
            tasks_queue.add_task((write_profile, (adata,)))
    except Exception as e:
        utils.p(e)
def run(self):
    '''Try to download a file from BlenderKit.'''
    asset_data = self.asset_data
    tcom = self.tcom
    scene_id = self.scene_id
    api_key = self.api_key
    # TODO get real link here...
    has_url = get_download_url(asset_data, scene_id, api_key, tcom=tcom)
    if not has_url:
        tasks_queue.add_task(
            (ui.add_report,
             ('Failed to obtain download URL for %s.' % asset_data['name'], 5, colors.RED)))
        return
    if tcom.error:
        return
    # Only now can we check whether the file already exists. This should have two levels,
    # for materials and for brushes, different from the non-free content.
    # 'delete' is set when called after failed append tries.
    if check_existing(asset_data) and not tcom.passargs.get('delete'):
        # This sends the thread for processing, where another check should occur,
        # since the file might be corrupted.
        tcom.downloaded = 100
        utils.p('not downloading, trying to append again')
        return
    file_name = paths.get_download_filenames(asset_data)[0]  # prefer the global dir if possible
    if self.stopped():
        utils.p('stopping download: ' + asset_data['name'])
        return
    with open(file_name, "wb") as f:
        print("Downloading %s" % file_name)
        headers = utils.get_headers(api_key)
        response = requests.get(asset_data['url'], stream=True)
        total_length = response.headers.get('Content-Length')
        if total_length is None:  # no Content-Length header
            f.write(response.content)
        else:
            tcom.file_size = int(total_length)
            dl = 0
            for data in response.iter_content(chunk_size=4096 * 32):  # crashed here... why? investigate
                dl += len(data)
                tcom.downloaded = dl
                tcom.progress = int(100 * tcom.downloaded / tcom.file_size)
                f.write(data)
                if self.stopped():
                    utils.p('stopping download: ' + asset_data['name'])
                    os.remove(file_name)
                    return
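# 'tcom' in run() above is a thread-communication object shared between the
# download thread and the UI. A minimal sketch of the assumed shape - the
# attribute names are inferred from the accesses in run() and
# get_download_url(), not taken verbatim from the BlenderKit source:
class ThreadCom:
    def __init__(self):
        self.file_size = 0   # filled from the Content-Length header
        self.downloaded = 0  # bytes written so far
        self.progress = 0    # integer percentage for the progress bar
        self.report = ''     # message surfaced to the UI
        self.error = False   # set on any failure so callers can bail out
        self.passargs = {}   # extra flags, e.g. 'delete' after failed appends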
def upload_files(upload_data, files):
    '''Upload several files in one run.'''
    uploaded_all = True
    for f in files:
        uploaded = upload_file(upload_data, f)
        if not uploaded:
            uploaded_all = False
    if uploaded_all:  # only report success when every file made it
        tasks_queue.add_task(
            (ui.add_report, (f"Uploaded all files for asset {upload_data['name']}",)))
    return uploaded_all
def refresh_token(api_key_refresh, url):
    authenticator = oauth.SimpleOAuthAuthenticator(
        server_url=url, client_id=CLIENT_ID, ports=PORTS)
    auth_token, refresh_token, oauth_response = authenticator.get_refreshed_token(api_key_refresh)
    if auth_token is not None and refresh_token is not None:
        tasks_queue.add_task(
            (write_tokens, (auth_token, refresh_token, oauth_response)))
    return auth_token, refresh_token, oauth_response
def get_texture_ui(tpath, iname):
    tex = bpy.data.textures.get(iname)
    if tpath.startswith('//'):
        tpath = bpy.path.abspath(tpath)
    if not tex or not tex.image or tex.image.filepath != tpath:
        tasks_queue.add_task((utils.get_hidden_image, (tpath, iname)), only_last=True)
        tasks_queue.add_task((utils.get_hidden_texture, (iname,)), only_last=True)
        return None
    return tex
def rerequest(method, url, **kwargs):
    # pull any additional args out of kwargs first
    immediate = False
    if kwargs.get('immediate'):
        immediate = kwargs['immediate']
        kwargs.pop('immediate')
    # first, a normal attempt
    response = requests.request(method, url, **kwargs)
    utils.p(url)
    utils.p(response.status_code)
    if response.status_code == 401:
        try:
            rdata = response.json()
        except Exception:
            rdata = {}
        tasks_queue.add_task(
            (ui.add_report, (method + ' request failed: ' + str(rdata.get('detail')),)))
        if rdata.get('detail') == 'Invalid token.':
            user_preferences = bpy.context.preferences.addons['blenderkit'].preferences
            if user_preferences.api_key != '':
                if user_preferences.enable_oauth and user_preferences.api_key_refresh != '':
                    tasks_queue.add_task((ui.add_report, (
                        'Refreshing token. If this fails, please log in via the BlenderKit Login panel.', 10)))
                    refresh_url = paths.get_bkit_url()
                    auth_token, refresh_token, oauth_response = bkit_oauth.refresh_token(
                        user_preferences.api_key_refresh, refresh_url)
                    if auth_token is not None:
                        if immediate:
                            # This can occasionally write tokens into preferences, used e.g. in upload.
                            # Only possible in non-threaded tasks.
                            bpy.context.preferences.addons['blenderkit'].preferences.api_key = auth_token
                            bpy.context.preferences.addons['blenderkit'].preferences.api_key_refresh = refresh_token
                        kwargs['headers'] = utils.get_headers(auth_token)
                        response = requests.request(method, url, **kwargs)
                        utils.p('reresult', response.status_code)
                        if response.status_code >= 400:
                            utils.p('reresult', response.text)
    return response
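# rerequests.get / rerequests.post, called throughout these snippets, are
# assumed to be thin wrappers around rerequest() above; a sketch:
def get(url, **kwargs):
    return rerequest('get', url, **kwargs)

def post(url, **kwargs):
    return rerequest('post', url, **kwargs)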
def fetch_author(a_id, api_key):
    utils.p('fetch author')
    try:
        a_url = paths.get_api_url() + 'accounts/' + a_id + '/'
        headers = utils.get_headers(api_key)
        r = requests.get(a_url, headers=headers)
        adata = r.json()
        if 'id' not in adata:  # hasattr() never matches on a dict; check the key instead
            utils.p(adata)
        tasks_queue.add_task((write_author, (a_id, adata)))
    except Exception as e:
        utils.p(e)
    utils.p('finish fetch')
def __iter__(self):
    with open(self.filename, 'rb') as file:
        while True:
            data = file.read(self.chunksize)
            if not data:
                sys.stderr.write("\n")
                break
            self.readsofar += len(data)
            percent = self.readsofar * 1e2 / self.totalsize
            tasks_queue.add_task(
                (ui.add_report, (f"Uploading {self.report_name} {percent:.0f}%",)))
            yield data
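# __iter__ above belongs to a chunked-upload wrapper class whose definition is
# not shown. A minimal sketch of the assumed class, with attribute names taken
# from __iter__ and from the upload_in_chunks(...) call in upload_file() below:
import os

class upload_in_chunks:
    def __init__(self, filename, chunksize=1024 * 1024 * 2, report_name='file'):
        self.filename = filename
        self.chunksize = chunksize
        self.totalsize = os.path.getsize(filename)
        self.readsofar = 0
        self.report_name = report_name

    def __len__(self):
        # requests uses __len__ to set the Content-Length header when the
        # object is passed as the 'data' argument of requests.put
        return self.totalsize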
def update_ratings_work_hours(self, context):
    user_preferences = bpy.context.preferences.addons['blenderkit'].preferences
    api_key = user_preferences.api_key
    headers = utils.get_headers(api_key)
    asset = self.id_data
    bkit_ratings = asset.bkit_ratings
    url = paths.get_api_url() + 'assets/' + asset['asset_data']['id'] + '/rating/'
    # guard on the work-hours rating (the original checked rating_quality,
    # which looks like a copy-paste slip in a work-hours updater)
    if bkit_ratings.rating_work_hours > 0.1:
        ratings = [('working_hours', round(bkit_ratings.rating_work_hours, 1))]
        tasks_queue.add_task(
            (send_rating_to_thread_work_hours, (url, ratings, headers)),
            wait=1, only_last=True)
def upload_rating(asset):
    user_preferences = bpy.context.preferences.addons['blenderkit'].preferences
    api_key = user_preferences.api_key
    headers = utils.get_headers(api_key)
    bkit_ratings = asset.bkit_ratings
    url = paths.get_api_url() + 'assets/' + asset['asset_data']['id'] + '/rating/'

    ratings = []
    if bkit_ratings.rating_quality > 0.1:
        ratings = (('quality', bkit_ratings.rating_quality),)
        tasks_queue.add_task(
            (send_rating_to_thread_quality, (url, ratings, headers)),
            wait=2.5, only_last=True)
    if bkit_ratings.rating_work_hours > 0.1:
        ratings = (('working_hours', round(bkit_ratings.rating_work_hours, 1)),)
        tasks_queue.add_task(
            (send_rating_to_thread_work_hours, (url, ratings, headers)),
            wait=2.5, only_last=True)
    thread = threading.Thread(target=upload_rating_thread, args=(url, ratings, headers))
    thread.start()

    url = paths.get_api_url() + 'assets/' + asset['asset_data']['id'] + '/review'
    reviews = {
        'reviewText': bkit_ratings.rating_compliments,
        'reviewTextProblems': bkit_ratings.rating_problems,
    }
    # the second condition checked rating_compliments twice in the original; it should cover both fields
    if not (bkit_ratings.rating_compliments == '' and bkit_ratings.rating_problems == ''):
        thread = threading.Thread(target=upload_review_thread, args=(url, reviews, headers))
        thread.start()

    # the info that the user rated an item is stored in the scene
    s = bpy.context.scene
    s['assets rated'] = s.get('assets rated', {})
    if bkit_ratings.rating_quality > 0.1 and bkit_ratings.rating_work_hours > 0.1:
        s['assets rated'][asset['asset_data']['assetBaseId']] = True
def update_ratings_work_hours(self, context):
    user_preferences = bpy.context.preferences.addons['blenderkit'].preferences
    api_key = user_preferences.api_key
    headers = utils.get_headers(api_key)
    asset = self.id_data
    if asset:
        bkit_ratings = asset.bkit_ratings
        url = paths.get_api_url() + 'assets/' + asset['asset_data']['id'] + '/rating/'
    else:
        # this branch is for operator rating:
        bkit_ratings = self
        url = paths.get_api_url() + f'assets/{self.asset_id}/rating/'
    if bkit_ratings.rating_work_hours > 0.45:
        ratings = [('working_hours', round(bkit_ratings.rating_work_hours, 1))]
        tasks_queue.add_task(
            (send_rating_to_thread_work_hours, (url, ratings, headers)),
            wait=2.5, only_last=True)
def fetch_gravatar(adata):
    utils.p('fetch gravatar')
    if adata.get('gravatarHash') is not None:
        gravatar_path = paths.get_temp_dir(subdir='g/') + adata['gravatarHash'] + '.jpg'
        if os.path.exists(gravatar_path):
            tasks_queue.add_task((write_gravatar, (adata['id'], gravatar_path)))
            return
        url = "https://www.gravatar.com/avatar/" + adata['gravatarHash'] + '?d=404'
        r = rerequests.get(url, stream=False)
        if r.status_code == 200:
            with open(gravatar_path, 'wb') as f:
                f.write(r.content)
            tasks_queue.add_task((write_gravatar, (adata['id'], gravatar_path)))
        elif r.status_code == 404:  # status_code is an int; the original compared against '404'
            adata['gravatarHash'] = None
            utils.p('gravatar for author not available.')
def get_download_url(asset_data, scene_id, api_key, tcom=None):
    '''Retrieve the download url. The server checks whether the user can download the item.'''
    mt = time.time()
    headers = utils.get_headers(api_key)
    data = {'scene_uuid': scene_id}
    r = None
    try:
        r = requests.get(asset_data['download_url'], params=data, headers=headers)
    except Exception as e:
        print(e)
        if tcom is not None:
            tcom.error = True
    if r is None:
        tcom.report = 'Connection Error'
        tcom.error = True
        return 'Connection Error'
    if r.status_code < 400:
        data = r.json()
        url = data['filePath']
        asset_data['url'] = url
        asset_data['file_name'] = paths.extract_filename_from_url(url)
        return True
    if r.status_code == 403:
        # don't overwrite the response object here, or the checks below would break
        report = 'You need the Standard plan to get this item.'
        tcom.report = report
        r1 = ('All materials and brushes are available for free. '
              'Only users registered to the Standard plan can use all models.')
        tasks_queue.add_task((ui.add_report, (r1, 5, colors.RED)))
        tcom.error = True
    elif r.status_code == 401:
        tcom.report = 'Invalid API key'
        tcom.error = True
        return 'Invalid API key'
    elif r.status_code >= 500:
        tcom.report = 'Server error'
        tcom.error = True
    return False
def upload_file(upload_data, f):
    headers = utils.get_headers(upload_data['token'])
    version_id = upload_data['id']
    message = f"uploading {f['type']} {os.path.basename(f['file_path'])}"
    tasks_queue.add_task((ui.add_report, (message,)))
    upload_info = {
        'assetId': version_id,
        'fileType': f['type'],
        'fileIndex': f['index'],
        'originalFilename': os.path.basename(f['file_path'])
    }
    upload_create_url = paths.get_api_url() + 'uploads/'
    upload = rerequests.post(upload_create_url, json=upload_info, headers=headers, verify=True)
    upload = upload.json()

    chunk_size = 1024 * 1024 * 2  # was commented out in the original, but is used below
    # the file gets uploaded here; S3 upload is now the only option
    uploaded = False
    for attempt in range(5):
        if uploaded:
            break
        try:
            upload_response = requests.put(
                upload['s3UploadUrl'],
                data=upload_in_chunks(f['file_path'], chunk_size, f['type']),
                stream=True, verify=True)
            if 250 > upload_response.status_code > 199:
                uploaded = True
            else:
                print(upload_response.text)
                message = f"Upload failed, retrying. File: {f['type']} {os.path.basename(f['file_path'])}"
                tasks_queue.add_task((ui.add_report, (message,)))
        except Exception as e:
            print(e)
            message = f"Upload failed, retrying. File: {f['type']} {os.path.basename(f['file_path'])}"
            tasks_queue.add_task((ui.add_report, (message,)))
            time.sleep(1)

    # confirm the single file upload to the bkit server
    print(upload)
    upload_done_url = paths.get_api_url() + 'uploads_s3/' + upload['id'] + '/upload-file/'
    upload_response = rerequests.post(upload_done_url, headers=headers, verify=True)
    tasks_queue.add_task(
        (ui.add_report, (f"Finished file upload: {os.path.basename(f['file_path'])}",)))
    return uploaded
def fetch_categories(API_key):
    url = paths.get_api_url() + 'categories/'
    headers = utils.get_headers(API_key)
    tempdir = paths.get_temp_dir()
    categories_filepath = os.path.join(tempdir, 'categories.json')
    try:
        r = rerequests.get(url, headers=headers)
        rdata = r.json()
        categories = rdata['results']
        fix_category_counts(categories)
        # filter_categories(categories)  # TODO: should filter categories for search, but not for upload; off for now.
        with open(categories_filepath, 'w') as s:
            json.dump(categories, s, indent=4)
        tasks_queue.add_task((load_categories, ()))
    except Exception as e:
        utils.p('category fetching failed')
        utils.p(e)
        if not os.path.exists(categories_filepath):
            source_path = paths.get_addon_file(subpath='data' + os.sep + 'categories.json')
            shutil.copy(source_path, categories_filepath)
def login(signup, url, r_url, authenticator):
    auth_token, refresh_token = authenticator.get_new_token(
        register=signup, redirect_url=r_url)
    utils.p('tokens retrieved')
    tasks_queue.add_task((write_tokens, (auth_token, refresh_token)))
def timer_update():
    # this runs the first search after opening Blender, showing the latest assets
    global first_time
    preferences = bpy.context.preferences.addons['blenderkit'].preferences
    if first_time:
        first_time = False
        if preferences.show_on_start:
            # TODO: check if there are results and only open the asset bar in that case, instead of searching.
            search()
        if preferences.tips_on_start:
            utils.get_largest_area()
            ui.update_ui_size(ui.active_area, ui.active_region)
            ui.add_report(text='BlenderKit Tip: ' + random.choice(rtips),
                          timeout=12, color=colors.GREEN)
        return 3.0

    global search_threads
    if len(search_threads) == 0:
        return 1.0

    # don't do anything while dragging - this could switch the asset during the drag
    # and make the results list length different, causing a lot of trouble
    if bpy.context.scene.blenderkitUI.dragging:
        return 0.5

    for thread in search_threads:
        # TODO this doesn't check all processes when one gets removed,
        # but most of the time only one is running anyway
        if not thread[0].is_alive():
            search_threads.remove(thread)
            scene = bpy.context.scene
            # these lines update the previews enum and set the first result as active
            s = bpy.context.scene
            asset_type = thread[2]
            if asset_type == 'model':
                props = scene.blenderkit_models
            if asset_type == 'scene':
                props = scene.blenderkit_scene
            if asset_type == 'material':
                props = scene.blenderkit_mat
            if asset_type == 'brush':
                props = scene.blenderkit_brush
            search_name = f'bkit {asset_type} search'
            s[search_name] = []

            global reports
            if reports != '':
                props.report = str(reports)
                return .2

            rdata = thread[0].result
            result_field = []
            ok, error = check_errors(rdata)
            if ok:
                bpy.ops.object.run_assetbar_fix_context()
                for r in rdata['results']:
                    asset_data = parse_result(r)
                    if asset_data is not None:
                        result_field.append(asset_data)
                s[search_name] = result_field
                s['search results'] = result_field
                s[search_name + ' orig'] = copy.deepcopy(rdata)
                s['search results orig'] = s[search_name + ' orig']
                load_previews()
                ui_props = bpy.context.scene.blenderkitUI
                if len(result_field) < ui_props.scrolloffset:
                    ui_props.scrolloffset = 0
                props.is_searching = False
                props.search_error = False
                props.report = 'Found %i results. ' % (s['search results orig']['count'])
                if len(s['search results']) == 0:
                    tasks_queue.add_task((ui.add_report, ('No matching results found.',)))
            else:
                print('error', error)
                props.report = error
                props.search_error = True
    mt('preview loading finished')
    return .3
def search(category='', get_next=False, author_id=''):
    '''Initialize searching.'''
    global search_start_time
    user_preferences = bpy.context.preferences.addons['blenderkit'].preferences
    search_start_time = time.time()
    mt('start')
    scene = bpy.context.scene
    uiprops = scene.blenderkitUI

    if uiprops.asset_type == 'MODEL':
        if not hasattr(scene, 'blenderkit_models'):
            return
        props = scene.blenderkit_models
        query = build_query_model()
    if uiprops.asset_type == 'SCENE':
        if not hasattr(scene, 'blenderkit_scene'):
            return
        props = scene.blenderkit_scene
        query = build_query_scene()
    if uiprops.asset_type == 'MATERIAL':
        if not hasattr(scene, 'blenderkit_mat'):
            return
        props = scene.blenderkit_mat
        query = build_query_material()
    if uiprops.asset_type == 'TEXTURE':
        if not hasattr(scene, 'blenderkit_tex'):
            return
        # props = scene.blenderkit_tex
        # query = build_query_texture()
    if uiprops.asset_type == 'BRUSH':
        if not hasattr(scene, 'blenderkit_brush'):
            return
        props = scene.blenderkit_brush
        query = build_query_brush()

    if props.is_searching and get_next:
        return
    if category != '':
        query['category_subtree'] = category
    if author_id != '':
        query['author_id'] = author_id

    props.is_searching = True
    params = {
        'scene_uuid': bpy.context.scene.get('uuid', None),
        'addon_version': version_checker.get_addon_version(),
        'api_key': user_preferences.api_key,
        'get_next': get_next
    }
    add_search_process(query, params)
    tasks_queue.add_task((ui.add_report, ('BlenderKit searching....', 2)))
    props.report = 'BlenderKit searching....'
def timer_update():  # TODO might get moved to handle all blenderkit stuff
    global search_threads
    # don't do anything while dragging - this could switch the asset type during the drag
    # and make the results list length different, causing a lot of trouble
    if len(search_threads) == 0 or bpy.context.scene.blenderkitUI.dragging:
        return 1
    for thread in search_threads:
        # TODO this doesn't check all processes on removal; mostly only one is running anyway
        if not thread[0].is_alive():
            search_threads.remove(thread)
            icons_dir = thread[1]  # was commented out in the original, but is used below
            scene = bpy.context.scene
            # these lines update the previews enum and set the first result as active
            s = bpy.context.scene
            asset_type = thread[2]
            if asset_type == 'model':
                props = scene.blenderkit_models
                json_filepath = os.path.join(icons_dir, 'model_searchresult.json')
                search_name = 'bkit model search'
            if asset_type == 'scene':
                props = scene.blenderkit_scene
                json_filepath = os.path.join(icons_dir, 'scene_searchresult.json')
                search_name = 'bkit scene search'
            if asset_type == 'material':
                props = scene.blenderkit_mat
                json_filepath = os.path.join(icons_dir, 'material_searchresult.json')
                search_name = 'bkit material search'
            if asset_type == 'brush':
                props = scene.blenderkit_brush
                json_filepath = os.path.join(icons_dir, 'brush_searchresult.json')
                search_name = 'bkit brush search'
            s[search_name] = []

            global reports
            if reports != '':
                props.report = str(reports)
                return .2

            with open(json_filepath, 'r') as data_file:
                rdata = json.load(data_file)
            result_field = []
            ok, error = check_errors(rdata)
            if ok:
                for r in rdata['results']:
                    if r['assetType'] == asset_type and len(r['files']) > 0:
                        tname = None
                        small_tname = None
                        allthumbs = []
                        durl = None
                        for f in r['files']:
                            if f['fileType'] == 'thumbnail':
                                tname = paths.extract_filename_from_url(f['fileThumbnailLarge'])
                                small_tname = paths.extract_filename_from_url(f['fileThumbnail'])
                                allthumbs.append(tname)  # TODO just the first thumbnail is used now
                            if f['fileType'] == 'blend':
                                durl = f['downloadUrl'].split('?')[0]
                        tdict = {}
                        for i, t in enumerate(allthumbs):
                            tdict['thumbnail_%i' % i] = t  # the original never interpolated %i
                        if durl and tname:
                            tooltip = generate_tooltip(r)
                            asset_data = {
                                'thumbnail': tname,
                                'thumbnail_small': small_tname,
                                'download_url': durl,
                                'id': r['id'],
                                'asset_base_id': r['assetBaseId'],
                                'name': r['name'],
                                'asset_type': r['assetType'],
                                'tooltip': tooltip,
                                'tags': r['tags'],
                                'can_download': r.get('canDownload', True),
                                'verification_status': r['verificationStatus'],
                                'author_id': str(r['author']['id'])
                            }
                            asset_data['downloaded'] = 0
                            # parse the extra params needed for Blender here
                            params = params_to_dict(r['parameters'])
                            if asset_type == 'model':
                                if params.get('boundBoxMinX') is not None:
                                    bbox = {
                                        'bbox_min': (float(params['boundBoxMinX']),
                                                     float(params['boundBoxMinY']),
                                                     float(params['boundBoxMinZ'])),
                                        'bbox_max': (float(params['boundBoxMaxX']),
                                                     float(params['boundBoxMaxY']),
                                                     float(params['boundBoxMaxZ']))
                                    }
                                else:
                                    bbox = {
                                        'bbox_min': (-.5, -.5, 0),
                                        'bbox_max': (.5, .5, 1)
                                    }
                                asset_data.update(bbox)
                            if asset_type == 'material':
                                asset_data['texture_size_meters'] = params.get('textureSizeMeters', 1.0)
                            asset_data.update(tdict)
                            if r['assetBaseId'] in scene.get('assets used', {}).keys():
                                asset_data['downloaded'] = 100
                            result_field.append(asset_data)
                s[search_name] = result_field
                s['search results'] = result_field
                s[search_name + ' orig'] = rdata
                s['search results orig'] = rdata
                load_previews()
                ui_props = bpy.context.scene.blenderkitUI
                if len(result_field) < ui_props.scrolloffset:
                    ui_props.scrolloffset = 0
                props.is_searching = False
                props.search_error = False
                props.report = 'Open the asset bar to see %i results. ' % len(s['search results'])
                if len(s['search results']) == 0:
                    tasks_queue.add_task((ui.add_report, ('No matching results found.',)))
            else:
                print('error', error)
                props.report = error
                props.search_error = True
    mt('preview loading finished')
    return .2
def timer_update():
    # this runs the first search after opening Blender, showing the latest assets
    global first_time
    preferences = bpy.context.preferences.addons['blenderkit'].preferences
    if first_time:
        first_time = False
        if preferences.show_on_start or preferences.first_run:
            # TODO: check if there are results and only open the asset bar in that case, instead of searching.
            search()
            preferences.first_run = False
        if preferences.tips_on_start:
            ui.get_largest_3dview()
            ui.update_ui_size(ui.active_area, ui.active_region)
            ui.add_report(text='BlenderKit Tip: ' + random.choice(rtips),
                          timeout=12, color=colors.GREEN)
        check_clipboard()

    global search_threads
    # don't do anything while dragging - this could switch the asset during the drag
    # and make the results list length different, causing a lot of trouble
    if len(search_threads) == 0 or bpy.context.scene.blenderkitUI.dragging:
        return 1
    for thread in search_threads:
        # TODO this doesn't check all processes when one gets removed,
        # but most of the time only one is running anyway
        if not thread[0].is_alive():
            search_threads.remove(thread)
            icons_dir = thread[1]  # was commented out in the original, but is used below
            scene = bpy.context.scene
            # these lines update the previews enum and set the first result as active
            s = bpy.context.scene
            asset_type = thread[2]
            if asset_type == 'model':
                props = scene.blenderkit_models
                json_filepath = os.path.join(icons_dir, 'model_searchresult.json')
                search_name = 'bkit model search'
            if asset_type == 'scene':
                props = scene.blenderkit_scene
                json_filepath = os.path.join(icons_dir, 'scene_searchresult.json')
                search_name = 'bkit scene search'
            if asset_type == 'material':
                props = scene.blenderkit_mat
                json_filepath = os.path.join(icons_dir, 'material_searchresult.json')
                search_name = 'bkit material search'
            if asset_type == 'brush':
                props = scene.blenderkit_brush
                json_filepath = os.path.join(icons_dir, 'brush_searchresult.json')
                search_name = 'bkit brush search'
            s[search_name] = []

            global reports
            if reports != '':
                props.report = str(reports)
                return .2

            with open(json_filepath, 'r') as data_file:
                rdata = json.load(data_file)
            result_field = []
            ok, error = check_errors(rdata)
            if ok:
                bpy.ops.object.run_assetbar_fix_context()
                for r in rdata['results']:
                    # TODO remove this fix when filesSize is fixed.
                    # This is a temporary fix for too-big numbers coming from the server.
                    try:
                        r['filesSize'] = int(r['filesSize'] / 1024)
                    except Exception:
                        utils.p('asset with no files-size')
                    if r['assetType'] == asset_type and len(r['files']) > 0:
                        tname = None
                        small_tname = None
                        allthumbs = []
                        durl = None
                        for f in r['files']:
                            if f['fileType'] == 'thumbnail':
                                tname = paths.extract_filename_from_url(f['fileThumbnailLarge'])
                                small_tname = paths.extract_filename_from_url(f['fileThumbnail'])
                                allthumbs.append(tname)  # TODO just the first thumbnail is used now
                            if f['fileType'] == 'blend':
                                durl = f['downloadUrl'].split('?')[0]
                        tdict = {}
                        for i, t in enumerate(allthumbs):
                            tdict['thumbnail_%i' % i] = t  # the original never interpolated %i
                        if durl and tname:
                            tooltip = generate_tooltip(r)
                            # for some reason, the id was still an int on some occasions; investigate this
                            r['author']['id'] = str(r['author']['id'])
                            asset_data = {
                                'thumbnail': tname,
                                'thumbnail_small': small_tname,
                                'download_url': durl,
                                'id': r['id'],
                                'asset_base_id': r['assetBaseId'],
                                'name': r['name'],
                                'asset_type': r['assetType'],
                                'tooltip': tooltip,
                                'tags': r['tags'],
                                'can_download': r.get('canDownload', True),
                                'verification_status': r['verificationStatus'],
                                'author_id': r['author']['id'],
                            }
                            asset_data['downloaded'] = 0
                            # parse the extra params needed for Blender here
                            params = utils.params_to_dict(r['parameters'])
                            if asset_type == 'model':
                                if params.get('boundBoxMinX') is not None:
                                    bbox = {
                                        'bbox_min': (float(params['boundBoxMinX']),
                                                     float(params['boundBoxMinY']),
                                                     float(params['boundBoxMinZ'])),
                                        'bbox_max': (float(params['boundBoxMaxX']),
                                                     float(params['boundBoxMaxY']),
                                                     float(params['boundBoxMaxZ']))
                                    }
                                else:
                                    bbox = {
                                        'bbox_min': (-.5, -.5, 0),
                                        'bbox_max': (.5, .5, 1)
                                    }
                                asset_data.update(bbox)
                            if asset_type == 'material':
                                asset_data['texture_size_meters'] = params.get('textureSizeMeters', 1.0)
                            asset_data.update(tdict)
                            if r['assetBaseId'] in scene.get('assets used', {}).keys():
                                asset_data['downloaded'] = 100
                            result_field.append(asset_data)
                s[search_name] = result_field
                s['search results'] = result_field
                s[search_name + ' orig'] = rdata
                s['search results orig'] = rdata
                load_previews()
                ui_props = bpy.context.scene.blenderkitUI
                if len(result_field) < ui_props.scrolloffset:
                    ui_props.scrolloffset = 0
                props.is_searching = False
                props.search_error = False
                props.report = 'Found %i results. ' % (s['search results orig']['count'])
                if len(s['search results']) == 0:
                    tasks_queue.add_task((ui.add_report, ('No matching results found.',)))
            else:
                print('error', error)
                props.report = error
                props.search_error = True
    mt('preview loading finished')
    return .3