Code example #1
def fetch_categories(API_key, force=False):
    url = paths.get_api_url() + 'categories/'

    headers = utils.get_headers(API_key)

    tempdir = paths.get_temp_dir()
    categories_filepath = os.path.join(tempdir, 'categories.json')
    if os.path.exists(categories_filepath):
        catfile_age = time.time() - os.path.getmtime(categories_filepath)
    else:
        catfile_age = 10000000

    # global catfetch_counter
    # catfetch_counter += 1
    # bk_logger.debug('fetching categories: ', catfetch_counter)
    # bk_logger.debug('age of cat file', catfile_age)
    try:
        # read categories only once per day maximum, or when forced to do so.
        if catfile_age > 86400 or force:
            bk_logger.debug('requesting categories from server')
            r = rerequests.get(url, headers=headers)
            rdata = r.json()
            categories = rdata['results']
            fix_category_counts(categories)
            # filter_categories(categories) #TODO this should filter categories for search, but not for upload. by now off.
            with open(categories_filepath, 'w', encoding='utf-8') as s:
                json.dump(categories, s, ensure_ascii=False, indent=4)
        tasks_queue.add_task((load_categories, ()))
    except Exception as e:
        bk_logger.debug('category fetching failed')
        bk_logger.exception(e)
        if not os.path.exists(categories_filepath):
            source_path = paths.get_addon_file(subpath='data' + os.sep +
                                               'categories.json')
            shutil.copy(source_path, categories_filepath)
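The request in fetch_categories() above blocks until the server answers, so a caller would normally push it off Blender's main thread and let tasks_queue hand the parsed result back via load_categories(). A minimal caller sketch; the helper name start_category_fetch is hypothetical and not part of the add-on:

import threading

def start_category_fetch(api_key, force=False):
    # run the blocking HTTP request in a daemon thread so Blender's UI stays responsive
    thread = threading.Thread(target=fetch_categories,
                              args=(api_key,),
                              kwargs={'force': force},
                              daemon=True)
    thread.start()
    return thread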
Code example #2
File: categories.py, Project: sambler/myblenderaddons
def fetch_categories(API_key):
    BLENDERKIT_API_MAIN = "https://www.blenderkit.com/api/v1/"

    url = paths.get_bkit_url() + 'categories/'

    headers = {
        "accept": "application/json",
        "Authorization": "Bearer %s" % API_key
    }
    tempdir = paths.get_temp_dir()
    categories_filepath = os.path.join(tempdir, 'categories.json')

    try:
        r = requests.get(url, headers=headers)
        rdata = r.json()
        categories = rdata['results']
        fix_category_counts(categories)
        # filter_categories(categories) #TODO this should filter categories for search, but not for upload. by now off.
        with open(categories_filepath, 'w') as s:
            json.dump(categories, s, indent=4)
    except:
        # print('category fetching failed')
        if not os.path.exists(categories_filepath):
            source_path = paths.get_addon_file(subpath='data' + os.sep + 'categories.json')
            shutil.copy(source_path, categories_filepath)
Code example #3
def fetch_categories(API_key):
    BLENDERKIT_API_MAIN = "https://www.blenderkit.com/api/v1/"

    url = paths.get_bkit_url() + 'categories/'

    headers = {
        "accept": "application/json",
        "Authorization": "Bearer %s" % API_key
    }
    tempdir = paths.get_temp_dir()
    categories_filepath = os.path.join(tempdir, 'categories.json')

    try:
        r = requests.get(url, headers=headers)
        rdata = r.json()
        categories = rdata['results']
        fix_category_counts(categories)
        # filter_categories(categories) #TODO this should filter categories for search, but not for upload. by now off.
        with open(categories_filepath, 'w') as s:
            json.dump(categories, s, indent=4)
    except:
        print('category fetching failed')
        if not os.path.exists(categories_filepath):
            source_path = paths.get_addon_file(subpath='data' + os.sep +
                                               'categories.json')
            shutil.copy(source_path, categories_filepath)
Code example #4
def fetch_author(a_id, api_key):
    utils.p('fetch author')
    try:
        a_url = paths.get_api_url() + 'accounts/' + a_id + '/'
        headers = utils.get_headers(api_key)
        r = requests.get(a_url, headers=headers)
        if r.status_code == 200:
            adata = r.json()
            if 'id' not in adata:  # adata is a plain dict, so test for the key directly
                utils.p(adata)
            # utils.p(adata)
            tasks_queue.add_task((write_author, (a_id, adata)))
            if adata.get('gravatarHash') is not None:
                gravatar_path = paths.get_temp_dir(
                    subdir=None) + adata['gravatarHash'] + '.jpg'
                url = "https://www.gravatar.com/avatar/" + adata[
                    'gravatarHash'] + '?d=404'
                r = requests.get(url, stream=False)
                if r.status_code == 200:
                    with open(gravatar_path, 'wb') as f:
                        f.write(r.content)
                    adata['gravatarImg'] = gravatar_path
    except Exception as e:
        utils.p(e)
    utils.p('finish fetch')
Code example #5
File: search.py, Project: silexcorp/blender-addons
def load_previews():

    scene = bpy.context.scene
    # FIRST START SEARCH
    props = scene.blenderkitUI
    directory = paths.get_temp_dir('%s_search' % props.asset_type.lower())
    s = bpy.context.scene
    results = s.get('search results')
    #
    if results is not None:
        inames = []
        tpaths = []

        i = 0
        for r in results:

            tpath = os.path.join(directory, r['thumbnail_small'])

            iname = utils.previmg_name(i)

            if os.path.exists(tpath):  # sometimes we are unlucky...
                img = bpy.data.images.get(iname)
                if img is None:
                    img = bpy.data.images.load(tpath)
                    img.name = iname
                elif img.filepath != tpath:
                    # had to add this check for autopacking files...
                    if img.packed_file is not None:
                        img.unpack(method='USE_ORIGINAL')
                    img.filepath = tpath
                    img.reload()
                img.colorspace_settings.name = 'sRGB'
            i += 1
Code example #6
def copy_categories():
    # copy the bundled categories file into the temp dir on first run only
    tempdir = paths.get_temp_dir()
    categories_filepath = os.path.join(tempdir, 'categories.json')
    if not os.path.exists(categories_filepath):
        source_path = paths.get_addon_file(subpath='data' + os.sep + 'categories.json')
        # print('attempt to copy categories from: %s to %s' % (categories_filepath, source_path))
        try:
            shutil.copy(source_path, categories_filepath)
        except:
            print("couldn't copy categories file")
Code example #7
File: categories.py, Project: sambler/myblenderaddons
def copy_categories():
    # copy the bundled categories file into the temp dir on first run only
    tempdir = paths.get_temp_dir()
    categories_filepath = os.path.join(tempdir, 'categories.json')
    if not os.path.exists(categories_filepath):
        source_path = paths.get_addon_file(subpath='data' + os.sep + 'categories.json')
        # print('attempt to copy categories from: %s to %s' % (categories_filepath, source_path))
        try:
            shutil.copy(source_path, categories_filepath)
        except:
            print("couldn't copy categories file")
Code example #8
def load_categories():
    categories.copy_categories()
    tempdir = paths.get_temp_dir()
    categories_filepath = os.path.join(tempdir, 'categories.json')

    wm = bpy.context.window_manager
    with open(categories_filepath, 'r') as catfile:
        wm['bkit_categories'] = json.load(catfile)

    wm['active_category'] = {
        'MODEL': ['model'],
        'SCENE': ['scene'],
        'MATERIAL': ['material'],
        'BRUSH': ['brush'],
    }
Code example #9
def add_search_process(query, params):
    global search_threads

    while (len(search_threads) > 0):
        old_thread = search_threads.pop(0)
        old_thread[0].stop()
        # TODO CARE HERE FOR ALSO KILLING THE THREADS...AT LEAST NOW SEARCH DONE FIRST WON'T REWRITE AN OLDER ONE

    tempdir = paths.get_temp_dir('%s_search' % query['asset_type'])
    thread = Searcher(query, params)
    thread.start()

    search_threads.append([thread, tempdir, query['asset_type']])

    mt('thread started')
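A hypothetical call of add_search_process(); the dictionary keys are the ones Searcher.run() (code examples #22 and #24 below) actually reads, and the values are purely illustrative:

query = {
    'asset_type': 'model',      # used for the temp dir and the request string
    'keywords': 'chair',
}
params = {
    'api_key': '',              # assumption: empty string when not logged in
    'addon_version': '1.0.30',
    'get_next': False,          # True would load the next result page instead
    'scene_uuid': None,         # only appended to the query when not None
}
add_search_process(query, params)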
Code example #10
File: utils.py, Project: sambler/myblenderaddons
def load_categories():
    categories.copy_categories()
    tempdir = paths.get_temp_dir()
    categories_filepath = os.path.join(tempdir, 'categories.json')

    wm = bpy.context.window_manager
    with open(categories_filepath, 'r') as catfile:
        wm['bkit_categories'] = json.load(catfile)

    wm['active_category'] = {
        'MODEL': ['model'],
        'SCENE': ['scene'],
        'MATERIAL': ['material'],
        'BRUSH': ['brush'],
    }
Code example #11
File: ui_panels.py, Project: ringsce/Rings3D
def draw_rating_asset(self, context, asset):
    layout = self.layout
    col = layout.box()
    # split = layout.split(factor=0.5)
    # col1 = split.column()
    # col2 = split.column()
    directory = paths.get_temp_dir('%s_search' % asset['asset_data']['assetType'])
    tpath = os.path.join(directory, asset['asset_data']['thumbnail_small'])
    for image in bpy.data.images:
        if image.filepath == tpath:
            # split = row.split(factor=1.0, align=False)
            col.template_icon(icon_value=image.preview.icon_id, scale=6.0)
            break
        # layout.label(text = '', icon_value=image.preview.icon_id, scale = 10)
    col.label(text=asset.name)
    draw_ratings(col, context, asset=asset)
Code example #12
File: search.py, Project: sambler/myblenderaddons
def add_search_process(query, params):
    global search_threads

    while (len(search_threads) > 0):
        old_thread = search_threads.pop(0)
        old_thread[0].stop()
        # TODO CARE HERE FOR ALSO KILLING THE THREADS...AT LEAST NOW SEARCH DONE FIRST WON'T REWRITE AN OLDER ONE

    tempdir = paths.get_temp_dir('%s_search' % query['asset_type'])
    thread = Searcher(query, params)
    # thread = threading.Thread(target=Searcher, args=([query]), daemon=True)
    thread.start()

    search_threads.append([thread, tempdir, query['asset_type']])

    mt('thread started')
Code example #13
def fetch_gravatar(adata):
    utils.p('fetch gravatar')
    if adata.get('gravatarHash') is not None:
        gravatar_path = paths.get_temp_dir(subdir='g/') + adata['gravatarHash'] + '.jpg'

        if os.path.exists(gravatar_path):
            tasks_queue.add_task((write_gravatar, (adata['id'], gravatar_path)))
            return

        url = "https://www.gravatar.com/avatar/" + adata['gravatarHash'] + '?d=404'
        r = rerequests.get(url, stream=False)
        if r.status_code == 200:
            with open(gravatar_path, 'wb') as f:
                f.write(r.content)
            tasks_queue.add_task((write_gravatar, (adata['id'], gravatar_path)))
        elif r.status_code == 404:  # status_code is an int, not a string
            adata['gravatarHash'] = None
            utils.p('gravatar for author not available.')
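A hypothetical invocation of fetch_gravatar(); the function only reads adata['gravatarHash'] and adata['id'], so an illustrative author dict is enough:

adata = {
    'id': 12345,                                         # illustrative author id
    'gravatarHash': '0bc83cb571cd1c50ba6f3e8a78ef1346',  # illustrative md5 hash
}
fetch_gravatar(adata)  # downloads the avatar to the temp dir and queues write_gravatar()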
Code example #14
def check_version(url, api_key, module):
    headers = {
        "accept": "application/json",
        "Authorization": "Bearer %s" % api_key
    }

    print('checking online version of module %s' % str(module.bl_info['name']))
    try:
        r = requests.get(url, headers=headers)
        data = r.json()
        ver_online = {'addonVersion2.8': data['addonVersion']}
        tempdir = paths.get_temp_dir()

        ver_filepath = os.path.join(tempdir, 'addon_version.json')
        with open(ver_filepath, 'w') as s:
            json.dump(ver_online, s, indent=4)
    except:
        print("couldn't check online for version updates")
Code example #15
def load_categories():
    copy_categories()
    tempdir = paths.get_temp_dir()
    categories_filepath = os.path.join(tempdir, 'categories.json')

    wm = bpy.context.window_manager
    try:
        with open(categories_filepath, 'r', encoding='utf-8') as catfile:
            wm['bkit_categories'] = json.load(catfile)

        wm['active_category'] = {
            'MODEL': ['model'],
            'SCENE': ['scene'],
            'HDR': ['hdr'],
            'MATERIAL': ['material'],
            'BRUSH': ['brush'],
        }
    except:
        print('categories failed to read')
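Once load_categories() has run, the category tree lives on the window manager as a custom property. A read-back sketch, assuming each entry keeps the 'name' key of the server JSON:

import bpy

wm = bpy.context.window_manager
for cat in wm.get('bkit_categories', []):
    # each entry mirrors one top-level category from categories.json
    print(cat['name'])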
Code example #16
def compare_versions(module):
    try:
        ver_local = module.bl_info['version']
        ver_local_float = ver_local[0] + .01 * ver_local[1] + .0001 * ver_local[2]

        tempdir = paths.get_temp_dir()
        ver_filepath = os.path.join(tempdir, 'addon_version.json')
        with open(ver_filepath, 'r') as s:
            data = json.load(s)

        ver_online = data['addonVersion2.8'].split('.')
        ver_online_float = int(ver_online[0]) + .01 * int(ver_online[1]) + .0001 * int(ver_online[2])

        # print('versions: installed-%s, online-%s' % (str(ver_local_float), str(ver_online_float)))
        if ver_online_float > ver_local_float:
            return True
    except:
        print("couldn't compare addon versions")
    return False
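A worked example of the flattening used by compare_versions() above, with illustrative values: the local tuple (1, 0, 30) becomes 1 + .01*0 + .0001*30 = 1.003, an online string '1.0.31' becomes 1.0031, so the online build is reported as newer. The scheme assumes every version component stays below 100:

ver_local = (1, 0, 30)                    # bl_info['version'] style tuple
ver_local_float = ver_local[0] + .01 * ver_local[1] + .0001 * ver_local[2]

ver_online = '1.0.31'.split('.')          # 'addonVersion2.8' style string
ver_online_float = int(ver_online[0]) + .01 * int(ver_online[1]) + .0001 * int(ver_online[2])

print(ver_online_float > ver_local_float)  # True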
Code example #17
def check_version(url, api_key, module):
    headers = {
        "accept": "application/json",
        "Authorization": "Bearer %s" % api_key}

    print('checking online version of module %s' % str(module.bl_info['name']))
    try:
        r = requests.get(url, headers=headers)
        data = r.json()
        ver_online = {
            'addonVersion2.8': data['addonVersion']
        }
        tempdir = paths.get_temp_dir()

        ver_filepath = os.path.join(tempdir, 'addon_version.json')
        with open(ver_filepath, 'w') as s:
            json.dump(ver_online, s, indent=4)
    except:
        print("couldn't check online for version updates")
Code example #18
def load_previews():
    mappingdict = {
        'MODEL': 'model',
        'SCENE': 'scene',
        'MATERIAL': 'material',
        'TEXTURE': 'texture',
        'BRUSH': 'brush'
    }
    scene = bpy.context.scene
    # FIRST START SEARCH
    props = scene.blenderkitUI

    directory = paths.get_temp_dir('%s_search' % mappingdict[props.asset_type])
    s = bpy.context.scene
    results = s.get('search results')
    #
    if results is not None:
        inames = []
        tpaths = []

        i = 0
        for r in results:

            tpath = os.path.join(directory, r['thumbnail_small'])

            iname = utils.previmg_name(i)

            if os.path.exists(tpath):  # sometimes we are unlucky...
                img = bpy.data.images.get(iname)
                if img is None:
                    img = bpy.data.images.load(tpath)
                    img.name = iname
                elif img.filepath != tpath:
                    # had to add this check for autopacking files...
                    if img.packed_file is not None:
                        img.unpack(method='USE_ORIGINAL')
                    img.filepath = tpath
                    img.reload()

            i += 1
    print('previews loaded')
Code example #19
def compare_versions(module):
    try:
        ver_local = module.bl_info['version']
        ver_local_float = ver_local[
            0] + .01 * ver_local[1] + .0001 * ver_local[2]

        tempdir = paths.get_temp_dir()
        ver_filepath = os.path.join(tempdir, 'addon_version.json')
        with open(ver_filepath, 'r') as s:
            data = json.load(s)

        ver_online = data['addonVersion2.8'].split('.')
        ver_online_float = int(ver_online[0]) + .01 * int(
            ver_online[1]) + .0001 * int(ver_online[2])

        # print('versions: installed-%s, online-%s' % (str(ver_local_float), str(ver_online_float)))
        if ver_online_float > ver_local_float:
            return True
    except:
        print("couldn't compare addon versions")
    return False
Code example #20
File: search.py, Project: sambler/myblenderaddons
def load_previews():
    mappingdict = {
        'MODEL': 'model',
        'SCENE': 'scene',
        'MATERIAL': 'material',
        'TEXTURE': 'texture',
        'BRUSH': 'brush'
    }
    scene = bpy.context.scene
    # FIRST START SEARCH
    props = scene.blenderkitUI

    directory = paths.get_temp_dir('%s_search' % mappingdict[props.asset_type])
    s = bpy.context.scene
    results = s.get('search results')
    #
    if results is not None:
        inames = []
        tpaths = []

        i = 0
        for r in results:

            tpath = os.path.join(directory, r['thumbnail_small'])

            iname = utils.previmg_name(i)

            if os.path.exists(tpath):  # sometimes we are unlucky...
                img = bpy.data.images.get(iname)
                if img is None:
                    img = bpy.data.images.load(tpath)
                    img.name = iname
                elif img.filepath != tpath:
                    # had to add this check for autopacking files...
                    if img.packed_file is not None:
                        img.unpack(method='USE_ORIGINAL')
                    img.filepath = tpath
                    img.reload()

            i += 1
Code example #21
def fetch_categories(API_key):
    url = paths.get_api_url() + 'categories/'

    headers = utils.get_headers(API_key)

    tempdir = paths.get_temp_dir()
    categories_filepath = os.path.join(tempdir, 'categories.json')

    try:
        r = rerequests.get(url, headers=headers)
        rdata = r.json()
        categories = rdata['results']
        fix_category_counts(categories)
        # filter_categories(categories) #TODO this should filter categories for search, but not for upload. by now off.
        with open(categories_filepath, 'w') as s:
            json.dump(categories, s, indent=4)
        tasks_queue.add_task((load_categories, ()))
    except Exception as e:
        utils.p('category fetching failed')
        utils.p(e)
        if not os.path.exists(categories_filepath):
            source_path = paths.get_addon_file(subpath='data' + os.sep + 'categories.json')
            shutil.copy(source_path, categories_filepath)
Code example #22
    def run(self):
        maxthreads = 300
        maximages = 50
        query = self.query
        params = self.params
        global reports

        t = time.time()
        mt('search thread started')
        tempdir = paths.get_temp_dir('%s_search' % query['asset_type'])
        json_filepath = os.path.join(
            tempdir, '%s_searchresult.json' % query['asset_type'])

        headers = utils.get_headers(params['api_key'])

        rdata = {}
        rdata['results'] = []

        if params['get_next']:
            with open(json_filepath, 'r') as infile:
                try:
                    origdata = json.load(infile)
                    urlquery = origdata['next']
                    if urlquery == None:
                        return
                except:
                    # in case no search results found on drive we don't do next page loading.
                    params['get_next'] = False
        if not params['get_next']:
            # build a new request
            url = paths.get_api_url() + 'search/'

            # build request manually
            # TODO use real queries
            requeststring = '?query=' + query['keywords'].lower() + '+'
            #
            for i, q in enumerate(query):
                requeststring += q + ':' + str(query[q]).lower()
                if i < len(query) - 1:
                    requeststring += '+'

            requeststring += '&addon_version=%s' % params['addon_version']
            if params.get('scene_uuid') is not None:
                requeststring += '&scene_uuid=%s' % params['scene_uuid']

            urlquery = url + requeststring

        try:
            utils.p(urlquery)
            r = requests.get(urlquery, headers=headers)
            reports = ''
            # utils.p(r.text)
        except requests.exceptions.RequestException as e:
            print(e)
            reports = e
            # props.report = e
            return
        mt('response is back ')
        try:
            rdata = r.json()
        except Exception as inst:
            reports = r.text
            print(inst)

        mt('data parsed ')

        # filter results here:
        # todo remove this in future
        nresults = []
        for d in rdata.get('results', []):
            # TODO this code is for filtering brush types, should vanish after we implement filter in Elastic
            mode = None
            if query['asset_type'] == 'brush':
                for p in d['parameters']:
                    if p['parameterType'] == 'mode':
                        mode = p['value']
            if query['asset_type'] != 'brush' or (
                    query.get('brushType') is not None
                    and query['brushType'] == mode):
                nresults.append(d)
        rdata['results'] = nresults

        # print('number of results: ', len(rdata.get('results', [])))
        if self.stopped():
            utils.p('stopping search : ' + query['keywords'])
            return

        mt('search finished')
        i = 0

        thumb_small_urls = []
        thumb_small_filepaths = []
        thumb_full_urls = []
        thumb_full_filepaths = []
        # END OF PARSING
        getting_authors = {}
        for d in rdata.get('results', []):
            if getting_authors.get(d['author']['id']) is None:
                get_author(d)
                getting_authors[d['author']['id']] = True

            for f in d['files']:
                # TODO move validation of published assets to server, too many checks here.
                if f['fileType'] == 'thumbnail' and f[
                        'fileThumbnail'] != None and f[
                            'fileThumbnailLarge'] != None:
                    if f['fileThumbnail'] == None:
                        f['fileThumbnail'] = 'NONE'
                    if f['fileThumbnailLarge'] == None:
                        f['fileThumbnailLarge'] = 'NONE'

                    thumb_small_urls.append(f['fileThumbnail'])
                    thumb_full_urls.append(f['fileThumbnailLarge'])

                    imgname = paths.extract_filename_from_url(
                        f['fileThumbnail'])
                    imgpath = os.path.join(tempdir, imgname)
                    thumb_small_filepaths.append(imgpath)

                    imgname = paths.extract_filename_from_url(
                        f['fileThumbnailLarge'])
                    imgpath = os.path.join(tempdir, imgname)
                    thumb_full_filepaths.append(imgpath)

        sml_thbs = zip(thumb_small_filepaths, thumb_small_urls)
        full_thbs = zip(thumb_full_filepaths, thumb_full_urls)

        # we save here because a missing thumbnail check is in the previous loop
        # we can also prepend previous results. These already have thumbnails downloaded...
        if params['get_next']:
            rdata['results'][0:0] = origdata['results']

        with open(json_filepath, 'w') as outfile:
            json.dump(rdata, outfile)

        killthreads_sml = []
        for k in thumb_sml_download_threads.keys():
            if k not in thumb_small_filepaths:
                killthreads_sml.append(k)  # do actual killing here?

        killthreads_full = []
        for k in thumb_full_download_threads.keys():
            if k not in thumb_full_filepaths:
                killthreads_full.append(k)  # do actual killing here?
        # TODO do the killing/ stopping here! remember threads might have finished inbetween!

        if self.stopped():
            utils.p('stopping search : ' + query['keywords'])
            return

        # this loop handles downloading of small thumbnails
        for imgpath, url in sml_thbs:
            if imgpath not in thumb_sml_download_threads and not os.path.exists(
                    imgpath):
                thread = ThumbDownloader(url, imgpath)
                # thread = threading.Thread(target=download_thumbnail, args=([url, imgpath]),
                #                           daemon=True)
                thread.start()
                thumb_sml_download_threads[imgpath] = thread
                # threads.append(thread)

                if len(thumb_sml_download_threads) > maxthreads:
                    while len(thumb_sml_download_threads) > maxthreads:
                        threads_copy = thumb_sml_download_threads.copy(
                        )  # because for loop can erase some of the items.
                        for tk, thread in threads_copy.items():
                            if not thread.is_alive():
                                thread.join()
                                # utils.p(x)
                                del (thumb_sml_download_threads[tk])
                                # utils.p('fetched thumbnail ', i)
                                i += 1
        if self.stopped():
            utils.p('stopping search : ' + query['keywords'])
            return
        idx = 0
        while len(thumb_sml_download_threads) > 0:
            threads_copy = thumb_sml_download_threads.copy(
            )  # because for loop can erase some of the items.
            for tk, thread in threads_copy.items():
                if not thread.is_alive():
                    thread.join()
                    del (thumb_sml_download_threads[tk])
                    i += 1

        if self.stopped():
            utils.p('stopping search : ' + query['keywords'])
            return

        # start downloading full thumbs in the end
        for imgpath, url in full_thbs:
            if imgpath not in thumb_full_download_threads and not os.path.exists(
                    imgpath):
                thread = ThumbDownloader(url, imgpath)
                # thread = threading.Thread(target=download_thumbnail, args=([url, imgpath]),
                #                           daemon=True)
                thread.start()
                thumb_full_download_threads[imgpath] = thread
        mt('thumbnails finished')
Code example #23
File: download.py, Project: hana3d/blender2u
def append_asset2(asset_data, **kwargs):
    '''Link asset to the scene'''

    file_names = paths.get_download_filenames(asset_data)
    scene = bpy.context.scene

    user_preferences = bpy.context.preferences.addons['blenderkit'].preferences

    if user_preferences.api_key == '':
        user_preferences.asset_counter += 1

    if asset_data['asset_type'] == 'scene':
        scene = append_link.append_scene(file_names[0],
                                         link=False,
                                         fake_user=False)
        parent = scene

    if asset_data['asset_type'] == 'model':
        s = bpy.context.scene
        downloaders = kwargs.get('downloaders')
        s = bpy.context.scene
        sprops = s.blenderkit_models
        if sprops.append_method == 'LINK_COLLECTION':
            sprops.append_link = 'LINK'
            sprops.import_as = 'GROUP'
        else:
            sprops.append_link = 'APPEND'
            sprops.import_as = 'INDIVIDUAL'

        al = sprops.append_link
        ain = asset_in_scene(asset_data)
        if ain is not False:
            if ain == 'LINKED':
                al = 'LINK'
            else:
                al = 'APPEND'

        link = al == 'LINK'
        if downloaders:
            for downloader in downloaders:
                if link is True:
                    parent, newobs = append_link.link_collection(
                        file_names[-1],
                        location=downloader['location'],
                        rotation=downloader['rotation'],
                        link=link,
                        name=asset_data['name'],
                        parent=kwargs.get('parent'))
                else:
                    parent, newobs = append_link.append_objects(
                        file_names[-1],
                        location=downloader['location'],
                        rotation=downloader['rotation'],
                        link=link,
                        name=asset_data['name'],
                        parent=kwargs.get('parent'))

                if parent.type == 'EMPTY' and link:
                    bmin = asset_data['bbox_min']
                    bmax = asset_data['bbox_max']
                    size_min = min(1.0, (bmax[0] - bmin[0] + bmax[1] -
                                         bmin[1] + bmax[2] - bmin[2]) / 3)
                    parent.empty_display_size = size_min

        elif kwargs.get('model_location') is not None:
            if link is True:
                parent, newobs = append_link.link_collection(
                    file_names[-1],
                    location=kwargs['model_location'],
                    rotation=kwargs['model_rotation'],
                    link=link,
                    name=asset_data['name'],
                    parent=kwargs.get('parent'))
            else:
                parent, newobs = append_link.append_objects(
                    file_names[-1],
                    location=kwargs['model_location'],
                    rotation=kwargs['model_rotation'],
                    link=link,
                    parent=kwargs.get('parent'))
            if parent.type == 'EMPTY' and link:
                bmin = asset_data['bbox_min']
                bmax = asset_data['bbox_max']
                size_min = min(1.0, (bmax[0] - bmin[0] + bmax[1] - bmin[1] +
                                     bmax[2] - bmin[2]) / 3)
                parent.empty_display_size = size_min

        if link:
            group = parent.instance_collection

            lib = group.library
            lib['asset_data'] = asset_data

    elif asset_data['asset_type'] == 'brush':

        inscene = False
        for b in bpy.data.brushes:
            if b.blenderkit.id == asset_data['id']:
                inscene = True
                brush = b
                break
        if not inscene:
            brush = append_link.append_brush(file_names[-1],
                                             link=False,
                                             fake_user=False)

            thumbnail_name = asset_data['thumbnail'].split(os.sep)[-1]
            tempdir = paths.get_temp_dir('brush_search')
            thumbpath = os.path.join(tempdir, thumbnail_name)
            asset_thumbs_dir = paths.get_download_dirs('brush')[0]
            asset_thumb_path = os.path.join(asset_thumbs_dir, thumbnail_name)
            shutil.copy(thumbpath, asset_thumb_path)
            brush.icon_filepath = asset_thumb_path

        if bpy.context.view_layer.objects.active.mode == 'SCULPT':
            bpy.context.tool_settings.sculpt.brush = brush
        elif bpy.context.view_layer.objects.active.mode == 'TEXTURE_PAINT':
            bpy.context.tool_settings.image_paint.brush = brush

        parent = brush

    elif asset_data['asset_type'] == 'material':
        inscene = False
        for m in bpy.data.materials:
            if m.blenderkit.id == asset_data['id']:
                inscene = True
                material = m
                break
        if not inscene:
            material = append_link.append_material(file_names[-1],
                                                   link=False,
                                                   fake_user=False)
        target_object = bpy.data.objects[kwargs['target_object']]

        if len(target_object.material_slots) == 0:
            target_object.data.materials.append(material)
        else:
            target_object.material_slots[
                kwargs['material_target_slot']].material = material

        parent = material

    scene['assets used'] = scene.get('assets used', {})
    scene['assets used'][asset_data['asset_base_id']] = asset_data.copy()

    scene['assets rated'] = scene.get('assets rated', {})

    id = asset_data['asset_base_id']
    scene['assets rated'][id] = scene['assets rated'].get(id, False)

    parent['asset_data'] = asset_data

    if hasattr(parent.blenderkit, 'tags') and 'tags' in asset_data:
        if 'non-manifold' in asset_data['tags']:  # guard: remove() raises if the tag is absent
            asset_data['tags'].remove('non-manifold')
        parent.blenderkit.tags = ','.join(asset_data['tags'])
    if hasattr(parent.blenderkit,
               'description') and 'description' in asset_data:
        if asset_data['description'] is not None:
            parent.blenderkit.description = asset_data['description']
    if hasattr(parent.blenderkit, 'custom_props') and 'metadata' in asset_data:
        if 'product_info' in asset_data['metadata']:
            product_info = asset_data['metadata'].pop('product_info')
            clients = []
            skus = []
            for client_sku in product_info:
                clients.append(client_sku['client'])
                skus.append(client_sku['sku'])
            if hasattr(parent.blenderkit, 'client') and hasattr(
                    parent.blenderkit, 'sku'):
                parent.blenderkit.client = ','.join(clients)
                parent.blenderkit.sku = ','.join(skus)
            else:
                parent.blenderkit.custom_props['client'] = ','.join(clients)
                parent.blenderkit.custom_props['sku'] = ','.join(skus)

        for key, value in asset_data['metadata'].items():
            parent.blenderkit.custom_props[key] = value

    bpy.ops.wm.undo_push_context(message='add %s to scene' %
                                 asset_data['name'])
Code example #24
File: search.py, Project: sambler/myblenderaddons
    def run(self):
        maxthreads = 300
        maximages = 50
        query = self.query
        params = self.params
        global reports

        t = time.time()
        mt('search thread started')
        tempdir = paths.get_temp_dir('%s_search' % query['asset_type'])
        json_filepath = os.path.join(tempdir, '%s_searchresult.json' % query['asset_type'])

        if query['token'] != '':
            headers = {
                "accept": "application/json",
                "Authorization": "Bearer %s" % query['token'],
                # "Content-Type": "application/json",
            }
        else:
            headers = {
                "accept": "application/json",
                # "Content-Type": "application/json",
            }
        rdata = {}
        rdata['results'] = []

        if params['get_next']:
            with open(json_filepath, 'r') as infile:
                try:
                    origdata = json.load(infile)
                    urlquery = origdata['next']
                    if urlquery == None:
                        return
                except:
                    # in case no search results found on drive we don't do next page loading.
                    params['get_next'] = False
        if not params['get_next']:
            # build a new request
            url = paths.get_bkit_url() + 'search/'

            nquery = {
                # 'tags': query['keywords'],
                'asset_type': query['asset_type'],
            }
            if query.get('category'):
                nquery['category_subtree'] = query['category']

            # build request manually
            # TODO use real queries
            requeststring = '?query=' + query['keywords'].lower() + '+'
            #
            for i, q in enumerate(nquery):
                requeststring += q + ':' + str(nquery[q])
                if i < len(nquery) - 1:
                    requeststring += '+'

            requeststring += '&addon_version=%s' % params['addon_version']
            if params.get('scene_uuid') is not None:
                requeststring += '&scene_uuid=%s' % params['scene_uuid']

            urlquery = url + requeststring

        try:
            # print(urlquery)
            r = requests.get(urlquery, headers=headers)
            reports = ''
            # print(r.text)
        except requests.exceptions.RequestException as e:
            print(e)
            reports = e
            # props.report = e
            return
        mt('response is back ')
        try:
            rdata = r.json()
        except Exception as inst:
            reports = r.text
            print(inst)

        mt('data parsed ')

        # filter results here:
        # todo remove this in future
        nresults = []
        for d in rdata.get('results', []):
            # TODO this code is for filtering brush types, should vanish after we implement filter in Elastic
            mode = None
            if query['asset_type'] == 'brush':
                for p in d['parameters']:
                    if p['parameterType'] == 'mode':
                        mode = p['value']
            if query['asset_type'] != 'brush' or (
                    query.get('brushType') is not None and query['brushType'] == mode):
                nresults.append(d)
        rdata['results'] = nresults

        # print('number of results: ', len(rdata.get('results', [])))
        if self.stopped():
            print('stopping search : ' + query['keywords'])
            return

        mt('search finished')
        i = 0

        thumb_small_urls = []
        thumb_small_filepaths = []
        thumb_full_urls = []
        thumb_full_filepaths = []
        # END OF PARSING
        for d in rdata.get('results', []):
            for f in d['files']:
                # TODO move validation of published assets to server, too many checks here.
                if f['fileType'] == 'thumbnail' and f['fileThumbnail'] != None and f['fileThumbnailLarge'] != None:
                    if f['fileThumbnail'] == None:
                        f['fileThumbnail'] = 'NONE'
                    if f['fileThumbnailLarge'] == None:
                        f['fileThumbnailLarge'] = 'NONE'

                    thumb_small_urls.append(f['fileThumbnail'])
                    thumb_full_urls.append(f['fileThumbnailLarge'])

                    imgname = paths.extract_filename_from_url(f['fileThumbnail'])
                    imgpath = os.path.join(tempdir, imgname)
                    thumb_small_filepaths.append(imgpath)

                    imgname = paths.extract_filename_from_url(f['fileThumbnailLarge'])
                    imgpath = os.path.join(tempdir, imgname)
                    thumb_full_filepaths.append(imgpath)

        sml_thbs = zip(thumb_small_filepaths, thumb_small_urls)
        full_thbs = zip(thumb_full_filepaths, thumb_full_urls)

        # we save here because a missing thumbnail check is in the previous loop
        # we can also prepend previous results. These already have thumbnails downloaded...
        if params['get_next']:
            rdata['results'][0:0] = origdata['results']

        with open(json_filepath, 'w') as outfile:
            json.dump(rdata, outfile)

        killthreads_sml = []
        for k in thumb_sml_download_threads.keys():
            if k not in thumb_small_filepaths:
                killthreads_sml.append(k)  # do actual killing here?

        killthreads_full = []
        for k in thumb_full_download_threads.keys():
            if k not in thumb_full_filepaths:
                killthreads_full.append(k)  # do actual killing here?
        # TODO do the killing/ stopping here! remember threads might have finished inbetween!

        if self.stopped():
            print('stopping search : ' + query['keywords'])
            return

        # this loop handles downloading of small thumbnails
        for imgpath, url in sml_thbs:
            if imgpath not in thumb_sml_download_threads and not os.path.exists(imgpath):
                thread = ThumbDownloader(url, imgpath)
                # thread = threading.Thread(target=download_thumbnail, args=([url, imgpath]),
                #                           daemon=True)
                thread.start()
                thumb_sml_download_threads[imgpath] = thread
                # threads.append(thread)

                if len(thumb_sml_download_threads) > maxthreads:
                    while len(thumb_sml_download_threads) > maxthreads:
                        threads_copy = thumb_sml_download_threads.copy()  # because for loop can erase some of the items.
                        for tk, thread in threads_copy.items():
                            if not thread.is_alive():
                                thread.join()
                                # print(x)
                                del (thumb_sml_download_threads[tk])
                                # print('fetched thumbnail ', i)
                                i += 1
        if self.stopped():
            print('stopping search : ' + query['keywords'])
            return
        idx = 0
        while len(thumb_sml_download_threads) > 0:
            threads_copy = thumb_sml_download_threads.copy()  # because for loop can erase some of the items.
            for tk, thread in threads_copy.items():
                if not thread.is_alive():
                    thread.join()
                    del (thumb_sml_download_threads[tk])
                    i += 1

        if self.stopped():
            print('stopping search : ' + query['keywords'])
            return

        # start downloading full thumbs in the end
        for imgpath, url in full_thbs:
            if imgpath not in thumb_full_download_threads and not os.path.exists(imgpath):
                thread = ThumbDownloader(url, imgpath)
                # thread = threading.Thread(target=download_thumbnail, args=([url, imgpath]),
                #                           daemon=True)
                thread.start()
                thumb_full_download_threads[imgpath] = thread
        mt('thumbnails finished')
Code example #25
def append_asset(asset_data, **kwargs):  # downloaders=[], location=None,
    '''Link asset to the scene'''

    file_names = paths.get_download_filenames(asset_data)
    props = None
    #####
    # how to do particle  drop:
    # link the group we are interested in( there are more groups in File!!!! , have to get the correct one!)
    #
    scene = bpy.context.scene

    user_preferences = bpy.context.preferences.addons['blenderkit'].preferences

    if user_preferences.api_key == '':
        user_preferences.asset_counter += 1

    if asset_data['asset_type'] == 'scene':
        scene = append_link.append_scene(file_names[0],
                                         link=False,
                                         fake_user=False)
        props = scene.blenderkit
        parent = scene

    if asset_data['asset_type'] == 'model':
        s = bpy.context.scene
        downloaders = kwargs.get('downloaders')
        s = bpy.context.scene
        sprops = s.blenderkit_models
        # TODO this is here because combinations of linking objects or appending groups are rather not-usefull
        if sprops.append_method == 'LINK_COLLECTION':
            sprops.append_link = 'LINK'
            sprops.import_as = 'GROUP'
        else:
            sprops.append_link = 'APPEND'
            sprops.import_as = 'INDIVIDUAL'

        #copy for override
        al = sprops.append_link
        import_as = sprops.import_as
        # set consistency for objects already in scene, otherwise this literally breaks blender :)
        ain = asset_in_scene(asset_data)
        #override based on history
        if ain is not False:
            if ain == 'LINKED':
                al = 'LINK'
                import_as = 'GROUP'
            else:
                al = 'APPEND'
                import_as = 'INDIVIDUAL'

        # first get conditions for append link
        link = al == 'LINK'
        # then append link
        if downloaders:
            for downloader in downloaders:
                # this cares for adding particle systems directly to target mesh, but I had to block it now,
                # because of the sluggishnes of it. Possibly re-enable when it's possible to do this faster?
                if 0:  # 'particle_plants' in asset_data['tags']:
                    append_link.append_particle_system(
                        file_names[-1],
                        target_object=kwargs['target_object'],
                        rotation=downloader['rotation'],
                        link=False,
                        name=asset_data['name'])
                    return

                if sprops.import_as == 'GROUP':
                    parent, newobs = append_link.link_collection(
                        file_names[-1],
                        location=downloader['location'],
                        rotation=downloader['rotation'],
                        link=link,
                        name=asset_data['name'],
                        parent=kwargs.get('parent'))

                else:
                    parent, newobs = append_link.append_objects(
                        file_names[-1],
                        location=downloader['location'],
                        rotation=downloader['rotation'],
                        link=link,
                        name=asset_data['name'],
                        parent=kwargs.get('parent'))
                if parent.type == 'EMPTY' and link:
                    bmin = asset_data['bbox_min']
                    bmax = asset_data['bbox_max']
                    size_min = min(1.0, (bmax[0] - bmin[0] + bmax[1] -
                                         bmin[1] + bmax[2] - bmin[2]) / 3)
                    parent.empty_display_size = size_min

        elif kwargs.get('model_location') is not None:
            if sprops.import_as == 'GROUP':
                parent, newobs = append_link.link_collection(
                    file_names[-1],
                    location=kwargs['model_location'],
                    rotation=kwargs['model_rotation'],
                    link=link,
                    name=asset_data['name'],
                    parent=kwargs.get('parent'))
            else:
                parent, newobs = append_link.append_objects(
                    file_names[-1],
                    location=kwargs['model_location'],
                    rotation=kwargs['model_rotation'],
                    link=link,
                    parent=kwargs.get('parent'))
            if parent.type == 'EMPTY' and link:
                bmin = asset_data['bbox_min']
                bmax = asset_data['bbox_max']
                size_min = min(1.0, (bmax[0] - bmin[0] + bmax[1] - bmin[1] +
                                     bmax[2] - bmin[2]) / 3)
                parent.empty_display_size = size_min

        if link:
            group = parent.instance_collection

            lib = group.library
            lib['asset_data'] = asset_data

    elif asset_data['asset_type'] == 'brush':

        # TODO if already in scene, should avoid reappending.
        inscene = False
        for b in bpy.data.brushes:

            if b.blenderkit.id == asset_data['id']:
                inscene = True
                brush = b
                break
        if not inscene:
            brush = append_link.append_brush(file_names[-1],
                                             link=False,
                                             fake_user=False)

            thumbnail_name = asset_data['thumbnail'].split(os.sep)[-1]
            tempdir = paths.get_temp_dir('brush_search')
            thumbpath = os.path.join(tempdir, thumbnail_name)
            asset_thumbs_dir = paths.get_download_dirs('brush')[0]
            asset_thumb_path = os.path.join(asset_thumbs_dir, thumbnail_name)
            shutil.copy(thumbpath, asset_thumb_path)
            brush.icon_filepath = asset_thumb_path

        if bpy.context.view_layer.objects.active.mode == 'SCULPT':
            bpy.context.tool_settings.sculpt.brush = brush
        elif bpy.context.view_layer.objects.active.mode == 'TEXTURE_PAINT':  # could be just else, but for future possible more types...
            bpy.context.tool_settings.image_paint.brush = brush
        # TODO set brush by by asset data(user can be downloading while switching modes.)

        # bpy.context.tool_settings.image_paint.brush = brush
        props = brush.blenderkit
        parent = brush

    elif asset_data['asset_type'] == 'material':
        inscene = False
        for m in bpy.data.materials:
            if m.blenderkit.id == asset_data['id']:
                inscene = True
                material = m
                break
        if not inscene:
            material = append_link.append_material(file_names[-1],
                                                   link=False,
                                                   fake_user=False)
        target_object = bpy.data.objects[kwargs['target_object']]

        if len(target_object.material_slots) == 0:
            target_object.data.materials.append(material)
        else:
            target_object.material_slots[
                kwargs['material_target_slot']].material = material

        parent = material

    scene['assets used'] = scene.get('assets used', {})
    scene['assets used'][asset_data['asset_base_id']] = asset_data.copy()

    scene['assets rated'] = scene.get('assets rated', {})

    id = asset_data['asset_base_id']
    scene['assets rated'][id] = scene['assets rated'].get(id, False)

    parent[
        'asset_data'] = asset_data  # TODO remove this??? should write to blenderkit Props?
Code example #26
File: search.py, Project: silexcorp/blender-addons
    def run(self):
        maxthreads = 50
        query = self.query
        params = self.params
        global reports

        t = time.time()
        mt('search thread started')
        tempdir = paths.get_temp_dir('%s_search' % query['asset_type'])
        # json_filepath = os.path.join(tempdir, '%s_searchresult.json' % query['asset_type'])

        headers = utils.get_headers(params['api_key'])

        rdata = {}
        rdata['results'] = []

        if params['get_next']:
            urlquery = self.result['next']
        if not params['get_next']:
            urlquery = self.query_to_url()

        try:
            utils.p(urlquery)
            r = rerequests.get(urlquery,
                               headers=headers)  # , params = rparameters)
            # print(r.url)
            reports = ''
            # utils.p(r.text)
        except requests.exceptions.RequestException as e:
            print(e)
            reports = e
            # props.report = e
            return
        mt('response is back ')
        try:
            rdata = r.json()
        except Exception as inst:
            reports = r.text
            print(inst)

        mt('data parsed ')

        # print('number of results: ', len(rdata.get('results', [])))
        if self.stopped():
            utils.p('stopping search : ' + str(query))
            return

        mt('search finished')
        i = 0

        thumb_small_urls = []
        thumb_small_filepaths = []
        thumb_full_urls = []
        thumb_full_filepaths = []
        # END OF PARSING
        for d in rdata.get('results', []):

            get_author(d)

            for f in d['files']:
                # TODO move validation of published assets to server, too many checks here.
                if f['fileType'] == 'thumbnail' and f[
                        'fileThumbnail'] != None and f[
                            'fileThumbnailLarge'] != None:
                    if f['fileThumbnail'] == None:
                        f['fileThumbnail'] = 'NONE'
                    if f['fileThumbnailLarge'] == None:
                        f['fileThumbnailLarge'] = 'NONE'

                    thumb_small_urls.append(f['fileThumbnail'])
                    thumb_full_urls.append(f['fileThumbnailLarge'])

                    imgname = paths.extract_filename_from_url(
                        f['fileThumbnail'])
                    imgpath = os.path.join(tempdir, imgname)
                    thumb_small_filepaths.append(imgpath)

                    imgname = paths.extract_filename_from_url(
                        f['fileThumbnailLarge'])
                    imgpath = os.path.join(tempdir, imgname)
                    thumb_full_filepaths.append(imgpath)

        sml_thbs = zip(thumb_small_filepaths, thumb_small_urls)
        full_thbs = zip(thumb_full_filepaths, thumb_full_urls)

        # we save here because a missing thumbnail check is in the previous loop
        # we can also prepend previous results. These have downloaded thumbnails already...
        if params['get_next']:
            rdata['results'][0:0] = self.result['results']
        self.result = rdata
        # with open(json_filepath, 'w') as outfile:
        #     json.dump(rdata, outfile)

        killthreads_sml = []
        for k in thumb_sml_download_threads.keys():
            if k not in thumb_small_filepaths:
                killthreads_sml.append(k)  # do actual killing here?

        killthreads_full = []
        for k in thumb_full_download_threads.keys():
            if k not in thumb_full_filepaths:
                killthreads_full.append(k)  # do actual killing here?
        # TODO do the killing/ stopping here! remember threads might have finished inbetween!

        if self.stopped():
            utils.p('stopping search : ' + str(query))
            return

        # this loop handles downloading of small thumbnails
        for imgpath, url in sml_thbs:
            if imgpath not in thumb_sml_download_threads and not os.path.exists(
                    imgpath):
                thread = ThumbDownloader(url, imgpath)
                # thread = threading.Thread(target=download_thumbnail, args=([url, imgpath]),
                #                           daemon=True)
                thread.start()
                thumb_sml_download_threads[imgpath] = thread
                # threads.append(thread)

                if len(thumb_sml_download_threads) > maxthreads:
                    while len(thumb_sml_download_threads) > maxthreads:
                        threads_copy = thumb_sml_download_threads.copy(
                        )  # because for loop can erase some of the items.
                        for tk, thread in threads_copy.items():
                            if not thread.is_alive():
                                thread.join()
                                # utils.p(x)
                                del (thumb_sml_download_threads[tk])
                                # utils.p('fetched thumbnail ', i)
                                i += 1
        if self.stopped():
            utils.p('stopping search : ' + str(query))
            return
        idx = 0
        while len(thumb_sml_download_threads) > 0:
            threads_copy = thumb_sml_download_threads.copy(
            )  # because for loop can erase some of the items.
            for tk, thread in threads_copy.items():
                if not thread.is_alive():
                    thread.join()
                    del (thumb_sml_download_threads[tk])
                    i += 1

        if self.stopped():
            utils.p('stopping search : ' + str(query))
            return

        # start downloading full thumbs in the end
        for imgpath, url in full_thbs:
            if imgpath not in thumb_full_download_threads and not os.path.exists(
                    imgpath):
                thread = ThumbDownloader(url, imgpath)
                # thread = threading.Thread(target=download_thumbnail, args=([url, imgpath]),
                #                           daemon=True)
                thread.start()
                thumb_full_download_threads[imgpath] = thread
        mt('thumbnails finished')