def download_file(asset_data):
    # A simple, non-threaded way to download files, used by the background
    # resolution generation tool.
    file_name = paths.get_download_filenames(asset_data)[0]  # prefer global dir if possible.

    if check_existing(asset_data):
        # This sends the thread for processing, where another check should occur,
        # since the file might be corrupted.
        utils.p('not downloading, already in db')
        return file_name
    preferences = bpy.context.preferences.addons['blenderkit'].preferences
    api_key = preferences.api_key

    with open(file_name, "wb") as f:
        print("Downloading %s" % file_name)
        headers = utils.get_headers(api_key)

        response = requests.get(asset_data['url'], stream=True)
        total_length = response.headers.get('Content-Length')

        if total_length is None:  # no content length header
            f.write(response.content)
        else:
            dl = 0
            for data in response.iter_content(chunk_size=4096):
                dl += len(data)
                print(dl)
                f.write(data)
    return file_name
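# A minimal usage sketch for download_file above. The asset_data dict is
# hypothetical: a real one comes from the search API and also carries the keys
# that check_existing() and paths.get_download_filenames() rely on (name, id,
# assetType, ...), and 'url' must already point at the resolved file.
def example_background_download(asset_data):
    file_name = download_file(asset_data)
    print('asset stored at', file_name)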
def get_download_filepaths(asset_data, resolution='blend', can_return_others=False):
    '''Get all possible paths of the asset and resolution. Usually global and local directory.'''
    dirs = get_download_dirs(asset_data['assetType'])
    res_file, resolution = get_res_file(asset_data, resolution, find_closest_with_url=can_return_others)
    name_slug = slugify(asset_data['name'])
    asset_folder_name = f"{name_slug}_{asset_data['id']}"
    # utils.pprint('get download filenames ', dict(res_file))
    file_names = []

    if not res_file:
        return file_names
    # fn = asset_data['file_name'].replace('blend_', '')
    if res_file.get('url') is not None:
        # Tweak the names a bit: remove resolution and blend words in names.
        fn = extract_filename_from_url(res_file['url'])
        n = server_2_local_filename(asset_data, fn)
        for d in dirs:
            asset_folder_path = os.path.join(d, asset_folder_name)

            if not os.path.exists(asset_folder_path):
                os.makedirs(asset_folder_path)

            file_name = os.path.join(asset_folder_path, n)
            file_names.append(file_name)

    utils.p('file paths', file_names)
    return file_names
def modelProxy():
    s = bpy.context.scene
    ao = bpy.context.active_object
    if utils.is_linked_asset(ao):
        utils.activate(ao)

        g = ao.instance_collection

        rigs = []
        for ob in g.objects:
            if ob.type == 'ARMATURE':
                rigs.append(ob)

        if len(rigs) == 1:
            ao.instance_collection = None
            bpy.ops.object.duplicate()
            new_ao = bpy.context.view_layer.objects.active
            new_ao.instance_collection = g
            new_ao.empty_display_type = 'SPHERE'
            new_ao.empty_display_size *= 0.1

            bpy.ops.object.proxy_make(object=rigs[0].name)
            proxy = bpy.context.active_object
            bpy.context.view_layer.objects.active = ao
            ao.select_set(True)
            new_ao.select_set(True)
            new_ao.use_extra_recalc_object = True
            new_ao.use_extra_recalc_data = True

            bpy.ops.object.parent_set(type='OBJECT', keep_transform=True)
            return True
        else:  # TODO report this to ui
            utils.p('not sure what to proxify')
    return False
def fetch_author(a_id, api_key):
    utils.p('fetch author')
    try:
        a_url = paths.get_api_url() + 'accounts/' + a_id + '/'
        headers = utils.get_headers(api_key)
        r = rerequests.get(a_url, headers=headers)
        if r.status_code == 200:
            adata = r.json()
            if adata.get('id') is None:  # adata is a dict, so hasattr(adata, 'id') would always be False
                utils.p(adata)
            # utils.p(adata)
            tasks_queue.add_task((write_author, (a_id, adata)))
            if adata.get('gravatarHash') is not None:
                gravatar_path = paths.get_temp_dir(subdir='g/') + adata['gravatarHash'] + '.jpg'
                url = "https://www.gravatar.com/avatar/" + adata['gravatarHash'] + '?d=404'
                r = rerequests.get(url, stream=False)
                if r.status_code == 200:
                    with open(gravatar_path, 'wb') as f:
                        f.write(r.content)
                    adata['gravatarImg'] = gravatar_path
                elif r.status_code == 404:  # status_code is an int, not a string
                    adata['gravatarHash'] = None
                    utils.p('gravatar for author not available.')
    except Exception as e:
        utils.p(e)
    utils.p('finish fetch')
def search_update(self, context):
    utils.p('search updater')
    # if self.search_keywords != '':
    ui_props = bpy.context.scene.blenderkitUI
    if ui_props.down_up != 'SEARCH':
        ui_props.down_up = 'SEARCH'

    # Here we tweak the input if it comes from the clipboard: we need to strip
    # the asset type from the keywords and set it in the UI instead.
    sprops = utils.get_search_props()
    instr = 'asset_base_id:'
    atstr = 'asset_type:'
    kwds = sprops.search_keywords
    idi = kwds.find(instr)
    ati = kwds.find(atstr)
    # If the asset type isn't there anymore, this update function was
    # triggered by its own last iteration and needs to cancel.
    if idi > -1 and ati == -1:
        return
    if ati > -1:
        at = kwds[ati:].lower()
        # uncertain length of the remaining string - find a better method to check the presence of asset type
        if at.find('model') > -1:
            ui_props.asset_type = 'MODEL'
        elif at.find('material') > -1:
            ui_props.asset_type = 'MATERIAL'
        elif at.find('brush') > -1:
            ui_props.asset_type = 'BRUSH'
        # Now we trim anything extra from the pasted input. This is also how
        # this function recognizes that it has already parsed the clipboard.
        # The search props can have changed, so fetch them again before writing
        # back; this covers the case where the user pastes manually into Blender.
        sprops = utils.get_search_props()
        sprops.search_keywords = kwds[:ati].rstrip()
    search()
def download(asset_data, **kwargs):
    '''Start the download thread.'''
    user_preferences = bpy.context.preferences.addons['blenderkit'].preferences
    api_key = user_preferences.api_key
    scene_id = get_scene_id()

    tcom = ThreadCom()
    tcom.passargs = kwargs

    if kwargs.get('retry_counter', 0) > 3:
        sprops = utils.get_search_props()
        report = f"Maximum retries exceeded for {asset_data['name']}"
        sprops.report = report
        ui.add_report(report, 5, colors.RED)
        utils.p(sprops.report)
        return

    # Incoming data can be either a dict straight from python, or a blender
    # ID property (when recovering failed downloads on reload).
    if isinstance(asset_data, dict):
        asset_data = copy.deepcopy(asset_data)
    else:
        asset_data = asset_data.to_dict()
    readthread = Downloader(asset_data, tcom, scene_id, api_key)
    readthread.start()

    global download_threads
    download_threads.append([readthread, asset_data, tcom])
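# A sketch of how the retry_counter guard above is meant to be driven: a failed
# download can re-enter download() with the counter bumped, and the function
# gives up after three retries. Hypothetical helper, not part of the addon.
def retry_download(asset_data, **kwargs):
    kwargs['retry_counter'] = kwargs.get('retry_counter', 0) + 1
    download(asset_data, **kwargs)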
def search_update(self, context):
    utils.p('search updater')
    # if self.search_keywords != '':
    ui_props = bpy.context.scene.blenderkitUI
    if ui_props.down_up != 'SEARCH':
        ui_props.down_up = 'SEARCH'
    search()
def refresh_token_timer():
    '''This timer runs every time the token needs a refresh. It refreshes
    tokens and also categories.'''
    utils.p('refresh timer')
    user_preferences = bpy.context.preferences.addons['blenderkit'].preferences
    fetch_server_data()
    categories.load_categories()

    return max(3600, user_preferences.api_key_life - 3600)
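# How a timer like this is typically wired up: bpy.app.timers calls the
# function after first_interval and re-schedules it after however many seconds
# it returns, so refresh_token_timer re-arms itself an hour before expiry.
# A minimal sketch; the persistent flag (survive file loads) is an assumption
# about the addon's registration code.
import bpy

def register_timers():
    bpy.app.timers.register(refresh_token_timer, first_interval=5, persistent=True)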
def fetch_profile(api_key):
    utils.p('fetch profile')
    try:
        adata = request_profile(api_key)
        if adata is not None:
            tasks_queue.add_task((write_profile, (adata,)))
    except Exception as e:
        utils.p(e)
def login(signup, url, r_url):
    authenticator = oauth.SimpleOAuthAuthenticator(server_url=url, client_id=CLIENT_ID, ports=PORTS)
    auth_token, refresh_token = authenticator.get_new_token(register=signup, redirect_url=r_url)
    utils.p('tokens retrieved')
    tasks_queue.add_task((write_tokens, (auth_token, refresh_token)))
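# login() blocks until the OAuth redirect arrives, so it is meant to run off
# the main thread. A minimal sketch of launching it; paths.get_bkit_url() is
# the same helper the token refresh code below uses, and the redirect URL here
# is a placeholder assumption.
import threading

def login_thread(signup=False):
    url = paths.get_bkit_url()
    r_url = url  # placeholder redirect target
    thread = threading.Thread(target=login, args=(signup, url, r_url), daemon=True)
    thread.start()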
def request_profile(api_key):
    a_url = paths.get_api_url() + 'me/'
    headers = utils.get_headers(api_key)
    r = requests.get(a_url, headers=headers)
    adata = r.json()
    if adata.get('user') is None:
        utils.p(adata)
        utils.p('getting profile failed')
        return None
    return adata
def write_tokens(auth_token, refresh_token):
    utils.p('writing tokens')
    preferences = bpy.context.preferences.addons['blenderkit'].preferences
    preferences.api_key_refresh = refresh_token
    preferences.api_key = auth_token
    preferences.login_attempt = False
    props = utils.get_search_props()
    props.report = 'Login success!'
    search.get_profile()
    categories.fetch_categories_thread(auth_token)
def search_by_author(self, asset_index):
    sr = bpy.context.scene['search results']
    asset_data = sr[asset_index]
    a = asset_data['author']['id']
    if a is not None:
        sprops = utils.get_search_props()
        sprops.search_keywords = ''
        sprops.search_verification_status = 'ALL'
        utils.p('author:', a)
        search.search(author_id=a)
    return True
def check_errors(rdata):
    if rdata.get('statusCode') == 401:
        utils.p(rdata)
        if rdata.get('detail') == 'Invalid token.':
            user_preferences = bpy.context.preferences.addons['blenderkit'].preferences
            if user_preferences.api_key != '':
                if user_preferences.enable_oauth:
                    bkit_oauth.refresh_token_thread()
                return False, rdata.get('detail')
        return False, 'Missing or wrong api_key in addon preferences'
    return True, ''
def write_profile(adata):
    utils.p('writing profile')
    user = adata['user']
    # Convert to MiB here; the raw byte counts overflow Blender's 32-bit
    # integer ID properties.
    if user.get('sumAssetFilesSize') is not None:
        user['sumAssetFilesSize'] /= (1024 * 1024)
    if user.get('sumPrivateAssetFilesSize') is not None:
        user['sumPrivateAssetFilesSize'] /= (1024 * 1024)
    if user.get('remainingPrivateQuota') is not None:
        user['remainingPrivateQuota'] /= (1024 * 1024)

    bpy.context.window_manager['bkit profile'] = adata
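# Reading the cached profile back is the counterpart of the write above; a
# minimal sketch. The 'bkit profile' key matches write_profile; returning None
# when nothing has been fetched yet is an assumption about how callers cope.
def get_cached_profile():
    return bpy.context.window_manager.get('bkit profile')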
def check_errors(rdata):
    if rdata.get('statusCode') == 401:
        utils.p(rdata)
        if rdata.get('detail') == 'Invalid token.':
            user_preferences = bpy.context.preferences.addons['blenderkit'].preferences
            if user_preferences.api_key != '':
                if user_preferences.enable_oauth:
                    bkit_oauth.refresh_token_thread()
                return False, rdata.get('detail')
        return False, 'Use login panel to connect your profile.'
    return True, ''
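# A hedged sketch of how check_errors is consumed: rdata is the parsed JSON of
# a server response, and the 'results' key matches the category-fetching code
# below. The fallback handling is an assumption.
def example_handle_response(rdata):
    ok, error = check_errors(rdata)
    if not ok:
        utils.p(error)
        return None
    return rdata.get('results')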
def write_tokens(auth_token, refresh_token, oauth_response):
    utils.p('writing tokens')
    preferences = bpy.context.preferences.addons['blenderkit'].preferences
    preferences.api_key_refresh = refresh_token
    preferences.api_key = auth_token
    preferences.api_key_timeout = time.time() + oauth_response['expires_in']
    preferences.api_key_life = oauth_response['expires_in']
    preferences.login_attempt = False
    preferences.refresh_in_progress = False
    props = utils.get_search_props()
    if props is not None:
        props.report = ''
    ui.add_report('BlenderKit Re-Login success')
    search.get_profile()
    categories.fetch_categories_thread(auth_token)
def run(self):
    '''Try to download file from blenderkit.'''
    asset_data = self.asset_data
    tcom = self.tcom
    scene_id = self.scene_id
    api_key = self.api_key

    # TODO get real link here...
    get_download_url(asset_data, scene_id, api_key, tcom=tcom)
    if tcom.error:
        return

    # Only now can we check if the file already exists. This should have two
    # levels: one for materials and brushes, another for the non-free content.
    # 'delete' is set when this is called after failed append tries.
    if check_existing(asset_data) and not tcom.passargs.get('delete'):
        # This sends the thread for processing, where another check should occur,
        # since the file might be corrupted.
        tcom.downloaded = 100
        print('not downloading, trying to append again')
        return

    file_name = paths.get_download_filenames(asset_data)[0]  # prefer global dir if possible.
    # for k in asset_data:
    #     print(asset_data[k])
    if self.stopped():
        utils.p('stopping download: ' + asset_data['name'])
        return

    with open(file_name, "wb") as f:
        print("Downloading %s" % file_name)
        headers = utils.get_headers(api_key)

        response = requests.get(asset_data['url'], stream=True)
        total_length = response.headers.get('Content-Length')

        if total_length is None:  # no content length header
            f.write(response.content)
        else:
            tcom.file_size = int(total_length)
            dl = 0
            for data in response.iter_content(chunk_size=4096):
                dl += len(data)
                tcom.downloaded = dl
                tcom.progress = int(100 * tcom.downloaded / tcom.file_size)
                f.write(data)
                if self.stopped():
                    utils.p('stopping download: ' + asset_data['name'])
                    f.close()
                    os.remove(file_name)
                    return
def fetch_gravatar(adata):
    utils.p('fetch gravatar')
    if adata.get('gravatarHash') is not None:
        gravatar_path = paths.get_temp_dir(subdir='g/') + adata['gravatarHash'] + '.jpg'

        if os.path.exists(gravatar_path):
            tasks_queue.add_task((write_gravatar, (adata['id'], gravatar_path)))
            return

        url = "https://www.gravatar.com/avatar/" + adata['gravatarHash'] + '?d=404'
        r = rerequests.get(url, stream=False)
        if r.status_code == 200:
            with open(gravatar_path, 'wb') as f:
                f.write(r.content)
            tasks_queue.add_task((write_gravatar, (adata['id'], gravatar_path)))
        elif r.status_code == 404:  # status_code is an int, not a string
            adata['gravatarHash'] = None
            utils.p('gravatar for author not available.')
def get_download_url(asset_data, scene_id, api_key, tcom=None):
    '''Retrieve the download url. The server checks if the user can download the item.'''
    mt = time.time()

    headers = utils.get_headers(api_key)

    data = {'scene_uuid': scene_id}
    r = None
    try:
        r = rerequests.get(asset_data['download_url'], params=data, headers=headers)
    except Exception as e:
        print(e)
        if tcom is not None:
            tcom.error = True
    if r is None:
        if tcom is not None:
            tcom.report = 'Connection Error'
            tcom.error = True
        return 'Connection Error'

    if r.status_code < 400:
        data = r.json()
        url = data['filePath']
        asset_data['url'] = url
        asset_data['file_name'] = paths.extract_filename_from_url(url)
        return True

    if r.status_code == 403:
        report = 'You need Full plan to get this item.'  # renamed from r to avoid shadowing the response
        # r1 = 'All materials and brushes are available for free. Only users registered to Standard plan can use all models.'
        # tasks_queue.add_task((ui.add_report, (r1, 5, colors.RED)))
        if tcom is not None:
            tcom.report = report
            tcom.error = True
    elif r.status_code >= 500:
        utils.p(r.text)
        if tcom is not None:
            tcom.report = 'Server error'
            tcom.error = True
    return False
def rerequest(method, url, **kwargs):
    # pull our own options out of kwargs before passing them on to requests
    immediate = kwargs.pop('immediate', False)

    # first normal attempt
    response = requests.request(method, url, **kwargs)

    utils.p(url, kwargs)
    utils.p(response.status_code)

    if response.status_code == 401:
        try:
            rdata = response.json()
        except:
            rdata = {}

        tasks_queue.add_task(
            (ui.add_report, (method + ' request Failed.' + str(rdata.get('detail')),)))

        if rdata.get('detail') == 'Invalid token.':
            user_preferences = bpy.context.preferences.addons['blenderkit'].preferences
            if user_preferences.api_key != '':
                if user_preferences.enable_oauth and user_preferences.api_key_refresh != '':
                    tasks_queue.add_task((ui.add_report, (
                        'refreshing token. If this fails, please login in BlenderKit Login panel.', 10)))
                    refresh_url = paths.get_bkit_url()
                    auth_token, refresh_token, oauth_response = bkit_oauth.refresh_token(
                        user_preferences.api_key_refresh, refresh_url)

                    # utils.p(auth_token, refresh_token)
                    if auth_token is not None:
                        if immediate:
                            # This can write tokens occasionally into prefs, used e.g. in upload.
                            # Only possible in non-threaded tasks.
                            bpy.context.preferences.addons['blenderkit'].preferences.api_key = auth_token
                            bpy.context.preferences.addons['blenderkit'].preferences.api_key_refresh = refresh_token
                        else:
                            tasks_queue.add_task(
                                (bkit_oauth.write_tokens, (auth_token, refresh_token, oauth_response)))

                        kwargs['headers'] = utils.get_headers(auth_token)
                        response = requests.request(method, url, **kwargs)
                        utils.p('reresult', response.status_code)
                        if response.status_code >= 400:
                            utils.p('reresult', response.text)
    return response
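# The call sites above use rerequests.get(...) and rerequests.put(...); a
# minimal sketch of those convenience wrappers around rerequest(). The names
# match the visible call sites, the implementation itself is an assumption.
def get(url, **kwargs):
    return rerequest('GET', url, **kwargs)

def put(url, **kwargs):
    return rerequest('PUT', url, **kwargs)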
def upload_rating_thread(url, ratings, headers):
    '''Upload rating thread function, disconnected from blender data.'''
    utils.p('upload rating', url, ratings)
    for rating_name, score in ratings:
        if score != -1 and score != 0:
            rating_url = url + rating_name + '/'
            data = {
                "score": score,
                # TODO this kind of mixing is too much. Should have 2 bkit structures: upload, use.
            }
            try:
                r = rerequests.put(rating_url, data=data, verify=True, headers=headers)
            except requests.exceptions.RequestException as e:
                print('ratings upload failed: %s' % str(e))
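# A hedged usage sketch for upload_rating_thread. The rating names 'quality'
# and 'working_hours' and the trailing 'rating/' path segment are assumptions
# about the server API; scores of -1 or 0 are skipped by the loop above, so
# unrated fields can be passed through safely.
def example_upload_ratings(asset_url, api_key):
    headers = utils.get_headers(api_key)
    ratings = [('quality', 8), ('working_hours', 2)]
    upload_rating_thread(asset_url + 'rating/', ratings, headers)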
def check_existing(asset_data):
    '''Check if the object exists on the hard drive.'''
    fexists = False
    file_names = paths.get_download_filenames(asset_data)
    utils.p('check if file already exists')
    if len(file_names) == 2:
        # TODO this should also check for failed or running downloads.
        # If a download is running, assign just the running thread. If the download
        # isn't running but the file has the wrong size, delete the file and restart
        # (or continue the download, if possible).
        if os.path.isfile(file_names[0]) and not os.path.isfile(file_names[1]):
            shutil.copy(file_names[0], file_names[1])
        elif not os.path.isfile(file_names[0]) and os.path.isfile(file_names[1]):
            # only in case of changed settings or deleted/moved global directory.
            shutil.copy(file_names[1], file_names[0])

    if len(file_names) > 0 and os.path.isfile(file_names[0]):
        fexists = True
    return fexists
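# The TODO above asks for a size check against partial or corrupted downloads.
# A hypothetical sketch of such a check; the expected size would have to come
# from the server metadata, which is an assumption, not a confirmed API field.
import os

def file_size_matches(file_path, expected_size):
    '''Return True when the file on disk has the expected size in bytes.'''
    if not os.path.isfile(file_path):
        return False
    return os.path.getsize(file_path) == expected_size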
def fetch_categories(API_key, force=False):
    url = paths.get_api_url() + 'categories/'

    headers = utils.get_headers(API_key)

    tempdir = paths.get_temp_dir()
    categories_filepath = os.path.join(tempdir, 'categories.json')
    # A missing cache file would crash os.path.getmtime; treat it as stale.
    if os.path.exists(categories_filepath):
        catfile_age = time.time() - os.path.getmtime(categories_filepath)
    else:
        catfile_age = float('inf')

    # global catfetch_counter
    # catfetch_counter += 1
    # utils.p('fetching categories: ', catfetch_counter)
    # utils.p('age of cat file', catfile_age)
    try:
        # Read categories only once per day at most, or when forced to do so.
        if catfile_age > 86400 or force:
            utils.p('requesting categories')
            r = rerequests.get(url, headers=headers)
            rdata = r.json()
            categories = rdata['results']
            fix_category_counts(categories)
            # filter_categories(categories)  # TODO should filter categories for search, but not for upload; off for now.
            with open(categories_filepath, 'w') as s:
                json.dump(categories, s, indent=4)
            tasks_queue.add_task((load_categories, ()))
    except Exception as e:
        utils.p('category fetching failed')
        utils.p(e)
        if not os.path.exists(categories_filepath):
            source_path = paths.get_addon_file(subpath='data' + os.sep + 'categories.json')
            shutil.copy(source_path, categories_filepath)
def fetch_author(a_id, api_key):
    utils.p('fetch author')
    try:
        a_url = paths.get_api_url() + 'accounts/' + a_id + '/'
        headers = utils.get_headers(api_key)
        r = requests.get(a_url, headers=headers)
        adata = r.json()
        if adata.get('id') is None:  # adata is a dict, so hasattr(adata, 'id') would always be False
            utils.p(adata)
        # utils.p(adata)
        tasks_queue.add_task((write_author, (a_id, adata)))
    except Exception as e:
        utils.p(e)
    utils.p('finish fetch')
def fetch_categories(API_key):
    url = paths.get_api_url() + 'categories/'

    headers = utils.get_headers(API_key)

    tempdir = paths.get_temp_dir()
    categories_filepath = os.path.join(tempdir, 'categories.json')

    try:
        r = rerequests.get(url, headers=headers)
        rdata = r.json()
        categories = rdata['results']
        fix_category_counts(categories)
        # filter_categories(categories)  # TODO should filter categories for search, but not for upload; off for now.
        with open(categories_filepath, 'w') as s:
            json.dump(categories, s, indent=4)
        tasks_queue.add_task((load_categories, ()))
    except Exception as e:
        utils.p('category fetching failed')
        utils.p(e)
        if not os.path.exists(categories_filepath):
            source_path = paths.get_addon_file(subpath='data' + os.sep + 'categories.json')
            shutil.copy(source_path, categories_filepath)
def try_finished_append(asset_data, **kwargs):  # location=None, material_target=None):
    '''Try to append an asset; if that fails, delete the source files.
    A failure probably means a bad download, so the download should restart.'''
    file_names = paths.get_download_filenames(asset_data)
    done = False
    utils.p('try to append already existing asset')
    if len(file_names) > 0:
        if os.path.isfile(file_names[-1]):
            kwargs['name'] = asset_data['name']
            try:
                append_asset(asset_data, **kwargs)
                done = True
            except Exception as e:
                print(e)
                for f in file_names:
                    try:
                        os.remove(f)
                    except Exception as e:
                        print(e)
                done = False
    return done
def queue_worker():
    # utils.p('timer queue worker')
    time_step = 2.0
    q = get_queue()

    back_to_queue = []  # delayed events
    stashed = {}
    # First round: collect tasks that are supposed to be stashed and run only
    # once (the only_last option). Stashing finds tasks with the only_last
    # property and the same command and executes only the last one.
    while not q.empty():
        task = q.get()
        if task.only_last:
            # The stash key combines the command with its first argument. By now
            # stashing is only used for ratings, where the first argument is the url.
            # This enables fast rating of multiple assets while allowing a larger
            # delay for uploading the ratings, and avoids duplicate request errors
            # on the server.
            stashed[str(task.command) + str(task.arguments[0])] = task
        else:
            back_to_queue.append(task)

    if len(stashed.keys()) > 1:
        print(stashed)
    # return non-stashed tasks to the queue
    for task in back_to_queue:
        q.put(task)
    # return stashed tasks to the queue
    for k in stashed.keys():
        q.put(stashed[k])

    # Second round: execute tasks, or put still-waiting tasks back.
    back_to_queue = []
    while not q.empty():
        # print('window manager', bpy.context.window_manager)
        task = q.get()
        if task.wait > 0:
            task.wait -= time_step
            back_to_queue.append(task)
        else:
            utils.p('as a task: ')
            utils.p(task.command, task.arguments)
            try:
                if task.fake_context:
                    fc = utils.get_fake_context(bpy.context, area_type=task.fake_context_area)
                    task.command(fc, *task.arguments)
                else:
                    task.command(*task.arguments)
            except Exception as e:
                utils.p('task failed:')
                print(e)
    for task in back_to_queue:
        q.put(task)
    return 2.0
def queue_worker():
    q = get_queue()
    while not q.empty():
        utils.p('as a task: ')
        # print('window manager', bpy.context.window_manager)
        task = q.get()
        utils.p(task)
        try:
            task[0](*task[1])
        except Exception as e:
            utils.p('task failed:')
            print(e)
    return 2.0
def queue_worker():
    time_step = 2.0
    q = get_queue()

    back_to_queue = []  # delayed events
    stashed = {}
    # First round: collect tasks that are supposed to be stashed and run only
    # once (the only_last option). Stashing finds tasks with the only_last
    # property and the same command and executes only the last one.
    while not q.empty():
        task = q.get()
        if task.only_last:
            stashed[task.command] = task
        else:
            back_to_queue.append(task)
    # return non-stashed tasks to the queue
    for task in back_to_queue:
        q.put(task)
    # return stashed tasks to the queue
    for k in stashed.keys():
        q.put(stashed[k])

    # Second round: execute tasks, or put still-waiting tasks back.
    back_to_queue = []
    while not q.empty():
        # print('window manager', bpy.context.window_manager)
        task = q.get()
        if task.wait > 0:
            task.wait -= time_step
            back_to_queue.append(task)
        else:
            utils.p('as a task: ')
            utils.p(task.command, task.arguments)
            try:
                if task.fake_context:
                    fc = utils.get_fake_context(bpy.context, area_type=task.fake_context_area)
                    task.command(fc, *task.arguments)
                else:
                    task.command(*task.arguments)
            except Exception as e:
                utils.p('task failed:')
                print(e)
    for task in back_to_queue:
        q.put(task)
    return 2.0
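# The queue workers above expect a module-level queue behind get_queue() and,
# in the two stashing variants, task objects with command, arguments, wait,
# only_last, fake_context and fake_context_area attributes (the simplest
# worker instead unpacks a plain (command, arguments) tuple, which is the form
# the add_task call sites above pass in). A minimal sketch of that plumbing,
# assuming a standard FIFO queue; the real module may differ.
import queue

class Task:
    def __init__(self, command, arguments=(), wait=0, only_last=False,
                 fake_context=False, fake_context_area='VIEW_3D'):
        self.command = command              # callable executed on the main thread
        self.arguments = arguments          # positional args for command
        self.wait = wait                    # seconds to delay before execution
        self.only_last = only_last          # keep only the newest task per stash key
        self.fake_context = fake_context    # build a fake context for operators
        self.fake_context_area = fake_context_area

_queue = queue.Queue()

def get_queue():
    return _queue

def add_task(task, wait=0, only_last=False, fake_context=False,
             fake_context_area='VIEW_3D'):
    # call sites above pass a (command, arguments) tuple
    command, arguments = task
    _queue.put(Task(command, arguments, wait=wait, only_last=only_last,
                    fake_context=fake_context, fake_context_area=fake_context_area))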