Example #1
0
def get_download_url(asset_data, scene_id, api_key, tcom=None):
    '''Retrieve the download url. The server checks if user can download the item.

    On success, fills asset_data['url'] and asset_data['file_name'] and
    returns True. Returns an error-message string for connection/auth
    failures, False for other failures. Progress/errors are reported via
    `tcom` when one is supplied.
    '''
    mt = time.time()

    headers = utils.get_headers(api_key)

    data = {'scene_uuid': scene_id}
    r = None
    try:
        r = requests.get(asset_data['download_url'],
                         params=data,
                         headers=headers)
    except Exception as e:
        print(e)
        if tcom is not None:
            tcom.error = True

    if r is None:
        # the request never completed - report and bail out.
        if tcom is not None:
            tcom.report = 'Connection Error'
            tcom.error = True
        return 'Connection Error'

    if r.status_code < 400:
        data = r.json()
        url = data['filePath']
        asset_data['url'] = url
        asset_data['file_name'] = paths.extract_filename_from_url(url)
        return True

    if r.status_code == 403:
        # Use a separate variable for the message: the original code rebound
        # `r` to a string here, which crashed the status_code checks below.
        report = 'You need Standard plan to get this item.'
        if tcom is not None:
            tcom.report = report
            tcom.error = True
        r1 = 'All materials and brushes are available for free. Only users registered to Standard plan can use all models.'
        tasks_queue.add_task((ui.add_report, (r1, 5, colors.RED)))

    if r.status_code == 401:
        if tcom is not None:
            tcom.report = 'Invalid API key'
            tcom.error = True
        return 'Invalid API key'
    elif r.status_code >= 500:
        if tcom is not None:
            tcom.report = 'Server error'
            tcom.error = True
    return False
def get_download_url(asset_data, scene_id, api_key, tcom=None):
    '''Retrieve the download url. The server checks if user can download the item.

    On success, fills asset_data['url'] and asset_data['file_name'] and
    returns True. Returns an error-message string for known failures and
    False otherwise. Progress/errors are reported via `tcom` when supplied.
    '''
    mt = time.time()
    headers = {
        "accept": "application/json",
        "Authorization": "Bearer %s" % api_key,
    }
    data = {
        'scene_uuid': scene_id
    }
    r = None
    try:
        r = requests.get(asset_data['download_url'], params=data, headers=headers)
        # NOTE(review): `fprint` is not defined anywhere in this module's
        # visible scope. If it is not a project helper it raises NameError,
        # which the except below swallows while wrongly setting tcom.error.
        # Verify whether this was meant to be `print`.
        fprint(r.text)
    except Exception as e:
        print(e)
        if tcom is not None:
            tcom.error = True

    if r is None:
        # the request never completed - report and bail out.
        if tcom is not None:
            tcom.report = 'Connection Error'
            tcom.error = True
        return 'Connection Error'
    if r.status_code < 400:
        data = r.json()
        url = data['filePath']
        asset_data['url'] = url
        asset_data['file_name'] = paths.extract_filename_from_url(url)
        return True

    if r.status_code == 403:
        if tcom is not None:
            tcom.report = 'Available only in higher plans.'
            tcom.error = True
        return 'Available only in higher plans.'
    if r.status_code == 401:
        if tcom is not None:
            tcom.report = 'Invalid API key'
            tcom.error = True
        return 'Invalid API key'
    elif r.status_code >= 500:
        if tcom is not None:
            tcom.report = 'Server error'
            tcom.error = True
    # fall through for any other 4xx: signal failure explicitly instead of
    # implicitly returning None.
    return False
Example #3
0
    def run(self):
        '''Execute one search request and fetch result thumbnails.

        Thread body: builds a new search query (or continues paging via the
        'next' url stored with previous results), writes the result page to a
        json file in the temp dir, filters brush results, then spawns
        ThumbDownloader threads for small and large thumbnails. Checks
        self.stopped() at several points so the search can be aborted.
        '''
        maxthreads = 300  # cap on concurrently running thumbnail downloads
        maximages = 50  # NOTE(review): unused in this method - verify before removing
        query = self.query
        params = self.params
        global reports

        t = time.time()
        mt('search thread started')
        tempdir = paths.get_temp_dir('%s_search' % query['asset_type'])
        json_filepath = os.path.join(
            tempdir, '%s_searchresult.json' % query['asset_type'])

        headers = utils.get_headers(params['api_key'])

        rdata = {}
        rdata['results'] = []

        if params['get_next']:
            # continue paging: reuse the 'next' url saved with the previous results.
            with open(json_filepath, 'r') as infile:
                try:
                    origdata = json.load(infile)
                    urlquery = origdata['next']
                    if urlquery == None:
                        return
                except:
                    # in case no search results found on drive we don't do next page loading.
                    params['get_next'] = False
        if not params['get_next']:
            # build a new request
            url = paths.get_api_url() + 'search/'

            # build request manually
            # TODO use real queries
            requeststring = '?query=' + query['keywords'].lower() + '+'
            #
            for i, q in enumerate(query):
                requeststring += q + ':' + str(query[q]).lower()
                if i < len(query) - 1:
                    requeststring += '+'

            requeststring += '&addon_version=%s' % params['addon_version']
            if params.get('scene_uuid') is not None:
                requeststring += '&scene_uuid=%s' % params['scene_uuid']

            urlquery = url + requeststring

        try:
            utils.p(urlquery)
            r = requests.get(urlquery, headers=headers)
            reports = ''
            # utils.p(r.text)
        except requests.exceptions.RequestException as e:
            print(e)
            reports = e
            # props.report = e
            return
        mt('response is back ')
        try:
            rdata = r.json()
        except Exception as inst:
            # keep the raw body so the timer can surface it to the user
            reports = r.text
            print(inst)

        mt('data parsed ')

        # filter results here:
        # todo remove this in future
        nresults = []
        for d in rdata.get('results', []):
            # TODO this code is for filtering brush types, should vanish after we implement filter in Elastic
            mode = None
            if query['asset_type'] == 'brush':
                for p in d['parameters']:
                    if p['parameterType'] == 'mode':
                        mode = p['value']
            # NOTE(review): the parenthesization compares a boolean
            # (brushType present and truthy) against `mode`; it looks like
            # `query['brushType'] == mode` was intended - confirm before fixing.
            if query['asset_type'] != 'brush' or (
                    query.get('brushType') != None
                    and query['brushType']) == mode:
                nresults.append(d)
        rdata['results'] = nresults

        # print('number of results: ', len(rdata.get('results', [])))
        if self.stopped():
            utils.p('stopping search : ' + query['keywords'])
            return

        mt('search finished')
        i = 0

        thumb_small_urls = []
        thumb_small_filepaths = []
        thumb_full_urls = []
        thumb_full_filepaths = []
        # END OF PARSING
        getting_authors = {}
        for d in rdata.get('results', []):
            # fetch each author's profile only once per search
            if getting_authors.get(d['author']['id']) is None:
                get_author(d)
                getting_authors[d['author']['id']] = True

            for f in d['files']:
                # TODO move validation of published assets to server, too manmy checks here.
                if f['fileType'] == 'thumbnail' and f[
                        'fileThumbnail'] != None and f[
                            'fileThumbnailLarge'] != None:
                    # NOTE(review): both branches below are unreachable - the
                    # condition above already requires these to be non-None.
                    if f['fileThumbnail'] == None:
                        f['fileThumbnail'] = 'NONE'
                    if f['fileThumbnailLarge'] == None:
                        f['fileThumbnailLarge'] = 'NONE'

                    thumb_small_urls.append(f['fileThumbnail'])
                    thumb_full_urls.append(f['fileThumbnailLarge'])

                    imgname = paths.extract_filename_from_url(
                        f['fileThumbnail'])
                    imgpath = os.path.join(tempdir, imgname)
                    thumb_small_filepaths.append(imgpath)

                    imgname = paths.extract_filename_from_url(
                        f['fileThumbnailLarge'])
                    imgpath = os.path.join(tempdir, imgname)
                    thumb_full_filepaths.append(imgpath)

        sml_thbs = zip(thumb_small_filepaths, thumb_small_urls)
        full_thbs = zip(thumb_full_filepaths, thumb_full_urls)

        # we save here because a missing thumbnail check is in the previous loop
        # we can also prepend previous results. These have allready thumbnails downloaded...
        if params['get_next']:
            rdata['results'][0:0] = origdata['results']

        with open(json_filepath, 'w') as outfile:
            json.dump(rdata, outfile)

        killthreads_sml = []
        for k in thumb_sml_download_threads.keys():
            if k not in thumb_small_filepaths:
                killthreads_sml.append(k)  # do actual killing here?

        killthreads_full = []
        for k in thumb_full_download_threads.keys():
            if k not in thumb_full_filepaths:
                killthreads_full.append(k)  # do actual killing here?
        # TODO do the killing/ stopping here! remember threads might have finished inbetween!

        if self.stopped():
            utils.p('stopping search : ' + query['keywords'])
            return

        # this loop handles downloading of small thumbnails
        for imgpath, url in sml_thbs:
            if imgpath not in thumb_sml_download_threads and not os.path.exists(
                    imgpath):
                thread = ThumbDownloader(url, imgpath)
                # thread = threading.Thread(target=download_thumbnail, args=([url, imgpath]),
                #                           daemon=True)
                thread.start()
                thumb_sml_download_threads[imgpath] = thread
                # threads.append(thread)

                # throttle: busy-wait until the pool drops back under maxthreads
                if len(thumb_sml_download_threads) > maxthreads:
                    while len(thumb_sml_download_threads) > maxthreads:
                        threads_copy = thumb_sml_download_threads.copy(
                        )  # because for loop can erase some of the items.
                        for tk, thread in threads_copy.items():
                            if not thread.is_alive():
                                thread.join()
                                # utils.p(x)
                                del (thumb_sml_download_threads[tk])
                                # utils.p('fetched thumbnail ', i)
                                i += 1
        if self.stopped():
            utils.p('stopping search : ' + query['keywords'])
            return
        idx = 0
        # busy-wait for every remaining small-thumbnail download to finish
        while len(thumb_sml_download_threads) > 0:
            threads_copy = thumb_sml_download_threads.copy(
            )  # because for loop can erase some of the items.
            for tk, thread in threads_copy.items():
                if not thread.is_alive():
                    thread.join()
                    del (thumb_sml_download_threads[tk])
                    i += 1

        if self.stopped():
            utils.p('stopping search : ' + query['keywords'])
            return

        # start downloading full thumbs in the end
        for imgpath, url in full_thbs:
            if imgpath not in thumb_full_download_threads and not os.path.exists(
                    imgpath):
                thread = ThumbDownloader(url, imgpath)
                # thread = threading.Thread(target=download_thumbnail, args=([url, imgpath]),
                #                           daemon=True)
                thread.start()
                thumb_full_download_threads[imgpath] = thread
        mt('thumbnails finished')
Example #4
0
def timer_update():  # TODO might get moved to handle all blenderkit stuff.
    '''Poll finished search threads and publish their results to the scene.

    For each finished thread: loads its search-result json, validates it,
    converts each result into an asset_data dict, and stores the list in
    scene properties used by the asset bar. Returns the delay in seconds
    until the next timer call (1 while idle/dragging, .2 otherwise).
    '''
    global search_threads
    # don't do anything while dragging - this could switch asset type during drag, and make results list length different,
    # causing a lot of trouble literally.
    if len(search_threads) == 0 or bpy.context.scene.blenderkitUI.dragging:
        return 1
    for thread in search_threads:  # TODO this doesn't check all processess when removal... mostly 1 process will be running however.

        if not thread[0].is_alive():
            search_threads.remove(thread)  #
            icons_dir = thread[1]
            scene = bpy.context.scene
            # these 2 lines should update the previews enum and set the first result as active.
            s = bpy.context.scene
            asset_type = thread[2]
            if asset_type == 'model':
                props = scene.blenderkit_models
                json_filepath = os.path.join(icons_dir,
                                             'model_searchresult.json')
                search_name = 'bkit model search'
            if asset_type == 'scene':
                props = scene.blenderkit_scene
                json_filepath = os.path.join(icons_dir,
                                             'scene_searchresult.json')
                search_name = 'bkit scene search'
            if asset_type == 'material':
                props = scene.blenderkit_mat
                json_filepath = os.path.join(icons_dir,
                                             'material_searchresult.json')
                search_name = 'bkit material search'
            if asset_type == 'brush':
                props = scene.blenderkit_brush
                json_filepath = os.path.join(icons_dir,
                                             'brush_searchresult.json')
                search_name = 'bkit brush search'

            s[search_name] = []

            global reports
            if reports != '':
                props.report = str(reports)
                return .2
            with open(json_filepath, 'r') as data_file:
                rdata = json.load(data_file)

            result_field = []
            ok, error = check_errors(rdata)
            if ok:

                for r in rdata['results']:

                    if r['assetType'] == asset_type:
                        # utils.pprint(r)
                        if len(r['files']) > 0:
                            furl = None
                            tname = None
                            allthumbs = []
                            durl, tname = None, None
                            for f in r['files']:
                                if f['fileType'] == 'thumbnail':
                                    tname = paths.extract_filename_from_url(
                                        f['fileThumbnailLarge'])
                                    small_tname = paths.extract_filename_from_url(
                                        f['fileThumbnail'])
                                    allthumbs.append(
                                        tname
                                    )  # TODO just first thumb is used now.

                                tdict = {}
                                for i, t in enumerate(allthumbs):
                                    # bugfix: format the index into the key -
                                    # the original literal 'thumbnail_%i' key
                                    # was overwritten on every iteration.
                                    tdict['thumbnail_%i' % i] = t
                                if f['fileType'] == 'blend':
                                    durl = f['downloadUrl'].split('?')[0]
                                    # fname = paths.extract_filename_from_url(f['filePath'])
                            if durl and tname:

                                tooltip = generate_tooltip(r)
                                # utils.pprint(print(r))
                                asset_data = {
                                    'thumbnail': tname,
                                    'thumbnail_small': small_tname,
                                    # 'thumbnails':allthumbs,
                                    'download_url': durl,
                                    'id': r['id'],
                                    'asset_base_id': r['assetBaseId'],
                                    'name': r['name'],
                                    'asset_type': r['assetType'],
                                    'tooltip': tooltip,
                                    'tags': r['tags'],
                                    'can_download': r.get('canDownload', True),
                                    'verification_status':
                                    r['verificationStatus'],
                                    'author_id': str(r['author']['id'])
                                    # 'author': r['author']['firstName'] + ' ' + r['author']['lastName']
                                    # 'description': r['description'],
                                    # 'author': r['description'],
                                }
                                asset_data['downloaded'] = 0

                                # parse extra params needed for blender here
                                params = params_to_dict(r['parameters'])

                                if asset_type == 'model':
                                    if params.get('boundBoxMinX') != None:
                                        bbox = {
                                            'bbox_min':
                                            (float(params['boundBoxMinX']),
                                             float(params['boundBoxMinY']),
                                             float(params['boundBoxMinZ'])),
                                            'bbox_max':
                                            (float(params['boundBoxMaxX']),
                                             float(params['boundBoxMaxY']),
                                             float(params['boundBoxMaxZ']))
                                        }

                                    else:
                                        # fallback unit-ish bbox when the server sent none
                                        bbox = {
                                            'bbox_min': (-.5, -.5, 0),
                                            'bbox_max': (.5, .5, 1)
                                        }
                                    asset_data.update(bbox)
                                if asset_type == 'material':
                                    asset_data[
                                        'texture_size_meters'] = params.get(
                                            'textureSizeMeters', 1.0)

                                asset_data.update(tdict)
                                if r['assetBaseId'] in scene.get(
                                        'assets used', {}).keys():
                                    asset_data['downloaded'] = 100

                                result_field.append(asset_data)

                                # results = rdata['results']
                s[search_name] = result_field
                s['search results'] = result_field
                s[search_name + ' orig'] = rdata
                s['search results orig'] = rdata
                load_previews()
                ui_props = bpy.context.scene.blenderkitUI
                if len(result_field) < ui_props.scrolloffset:
                    ui_props.scrolloffset = 0
                props.is_searching = False
                props.search_error = False
                props.report = 'Open assetbar to see %i results. ' % len(
                    s['search results'])
                if len(s['search results']) == 0:
                    tasks_queue.add_task(
                        (ui.add_report, ('No matching results found.', )))

            # (rdata['next'])
            # if rdata['next'] != None:
            # search(False, get_next = True)
            else:
                print('error', error)
                props.report = error
                props.search_error = True

            # print('finished search thread')
            mt('preview loading finished')
    return .2
Example #5
0
def timer_update():
    '''First-run setup, clipboard check, and processing of finished search threads.

    On the very first call it triggers an initial search and shows a tip;
    afterwards it polls search threads, loads finished results from json,
    builds asset_data dicts and publishes them to scene properties. Returns
    the delay in seconds until the next timer call.
    '''
    # this makes a first search after opening blender. showing latest assets.
    global first_time
    preferences = bpy.context.preferences.addons['blenderkit'].preferences
    if first_time:  # first time
        first_time = False
        if preferences.show_on_start or preferences.first_run:
            # TODO here it should check if there are some results, and only open assetbar if this is the case, not search.
            #if bpy.context.scene.get('search results') is None:
            search()
            preferences.first_run = False
        if preferences.tips_on_start:
            ui.get_largest_3dview()
            ui.update_ui_size(ui.active_area, ui.active_region)
            ui.add_report(text='BlenderKit Tip: ' + random.choice(rtips),
                          timeout=12,
                          color=colors.GREEN)

    check_clipboard()

    global search_threads
    # don't do anything while dragging - this could switch asset during drag, and make results list length different,
    # causing a lot of trouble.
    if len(search_threads) == 0 or bpy.context.scene.blenderkitUI.dragging:
        return 1
    for thread in search_threads:
        # TODO this doesn't check all processes when one gets removed,
        # but most of the time only one is running anyway
        if not thread[0].is_alive():
            search_threads.remove(thread)  #
            icons_dir = thread[1]
            scene = bpy.context.scene
            # these 2 lines should update the previews enum and set the first result as active.
            s = bpy.context.scene
            asset_type = thread[2]
            if asset_type == 'model':
                props = scene.blenderkit_models
                json_filepath = os.path.join(icons_dir,
                                             'model_searchresult.json')
                search_name = 'bkit model search'
            if asset_type == 'scene':
                props = scene.blenderkit_scene
                json_filepath = os.path.join(icons_dir,
                                             'scene_searchresult.json')
                search_name = 'bkit scene search'
            if asset_type == 'material':
                props = scene.blenderkit_mat
                json_filepath = os.path.join(icons_dir,
                                             'material_searchresult.json')
                search_name = 'bkit material search'
            if asset_type == 'brush':
                props = scene.blenderkit_brush
                json_filepath = os.path.join(icons_dir,
                                             'brush_searchresult.json')
                search_name = 'bkit brush search'

            s[search_name] = []

            global reports
            if reports != '':
                props.report = str(reports)
                return .2
            with open(json_filepath, 'r') as data_file:
                rdata = json.load(data_file)

            result_field = []
            ok, error = check_errors(rdata)
            if ok:
                bpy.ops.object.run_assetbar_fix_context()
                for r in rdata['results']:
                    # TODO remove this fix when filesSize is fixed.
                    # this is a temporary fix for too big numbers from the server.
                    try:
                        r['filesSize'] = int(r['filesSize'] / 1024)
                    except:
                        utils.p('asset with no files-size')
                    if r['assetType'] == asset_type:
                        if len(r['files']) > 0:
                            furl = None
                            tname = None
                            allthumbs = []
                            durl, tname = None, None
                            for f in r['files']:
                                if f['fileType'] == 'thumbnail':
                                    tname = paths.extract_filename_from_url(
                                        f['fileThumbnailLarge'])
                                    small_tname = paths.extract_filename_from_url(
                                        f['fileThumbnail'])
                                    allthumbs.append(
                                        tname
                                    )  # TODO just first thumb is used now.

                                tdict = {}
                                for i, t in enumerate(allthumbs):
                                    # bugfix: format the index into the key -
                                    # the original literal 'thumbnail_%i' key
                                    # was overwritten on every iteration.
                                    tdict['thumbnail_%i' % i] = t
                                if f['fileType'] == 'blend':
                                    durl = f['downloadUrl'].split('?')[0]
                                    # fname = paths.extract_filename_from_url(f['filePath'])
                            if durl and tname:

                                tooltip = generate_tooltip(r)
                                #for some reason, the id was still int on some occurances. investigate this.
                                r['author']['id'] = str(r['author']['id'])

                                asset_data = {
                                    'thumbnail': tname,
                                    'thumbnail_small': small_tname,
                                    # 'thumbnails':allthumbs,
                                    'download_url': durl,
                                    'id': r['id'],
                                    'asset_base_id': r['assetBaseId'],
                                    'name': r['name'],
                                    'asset_type': r['assetType'],
                                    'tooltip': tooltip,
                                    'tags': r['tags'],
                                    'can_download': r.get('canDownload', True),
                                    'verification_status':
                                    r['verificationStatus'],
                                    'author_id': r['author']['id'],
                                    # 'author': r['author']['firstName'] + ' ' + r['author']['lastName']
                                    # 'description': r['description'],
                                }
                                asset_data['downloaded'] = 0

                                # parse extra params needed for blender here
                                params = utils.params_to_dict(r['parameters'])

                                if asset_type == 'model':
                                    if params.get('boundBoxMinX') != None:
                                        bbox = {
                                            'bbox_min':
                                            (float(params['boundBoxMinX']),
                                             float(params['boundBoxMinY']),
                                             float(params['boundBoxMinZ'])),
                                            'bbox_max':
                                            (float(params['boundBoxMaxX']),
                                             float(params['boundBoxMaxY']),
                                             float(params['boundBoxMaxZ']))
                                        }

                                    else:
                                        # fallback unit-ish bbox when the server sent none
                                        bbox = {
                                            'bbox_min': (-.5, -.5, 0),
                                            'bbox_max': (.5, .5, 1)
                                        }
                                    asset_data.update(bbox)
                                if asset_type == 'material':
                                    asset_data[
                                        'texture_size_meters'] = params.get(
                                            'textureSizeMeters', 1.0)

                                asset_data.update(tdict)
                                if r['assetBaseId'] in scene.get(
                                        'assets used', {}).keys():
                                    asset_data['downloaded'] = 100

                                result_field.append(asset_data)

                                # results = rdata['results']
                s[search_name] = result_field
                s['search results'] = result_field
                s[search_name + ' orig'] = rdata
                s['search results orig'] = rdata
                load_previews()
                ui_props = bpy.context.scene.blenderkitUI
                if len(result_field) < ui_props.scrolloffset:
                    ui_props.scrolloffset = 0
                props.is_searching = False
                props.search_error = False
                props.report = 'Found %i results. ' % (
                    s['search results orig']['count'])
                if len(s['search results']) == 0:
                    tasks_queue.add_task(
                        (ui.add_report, ('No matching results found.', )))

            # (rdata['next'])
            # if rdata['next'] != None:
            # search(False, get_next = True)
            else:
                print('error', error)
                props.report = error
                props.search_error = True

            # print('finished search thread')
            mt('preview loading finished')
    return .3
Example #6
0
    def run(self):
        '''Execute one search request and fetch result thumbnails.

        Thread body: builds a query url (or continues paging via
        self.result['next']), stores the parsed result page in self.result,
        and spawns ThumbDownloader threads for small and large thumbnails.
        Checks self.stopped() at several points so the search can be aborted.
        '''
        maxthreads = 50  # cap on concurrently running thumbnail downloads
        query = self.query
        params = self.params
        global reports

        t = time.time()
        mt('search thread started')
        tempdir = paths.get_temp_dir('%s_search' % query['asset_type'])
        # json_filepath = os.path.join(tempdir, '%s_searchresult.json' % query['asset_type'])

        headers = utils.get_headers(params['api_key'])

        rdata = {}
        rdata['results'] = []

        if params['get_next']:
            # continue paging from the previous result's 'next' url
            urlquery = self.result['next']
        if not params['get_next']:
            urlquery = self.query_to_url()

        try:
            utils.p(urlquery)
            r = rerequests.get(urlquery,
                               headers=headers)  # , params = rparameters)
            # print(r.url)
            reports = ''
            # utils.p(r.text)
        except requests.exceptions.RequestException as e:
            print(e)
            reports = e
            # props.report = e
            return
        mt('response is back ')
        try:
            rdata = r.json()
        except Exception as inst:
            # keep the raw body so callers can surface it to the user
            reports = r.text
            print(inst)

        mt('data parsed ')

        # print('number of results: ', len(rdata.get('results', [])))
        if self.stopped():
            utils.p('stopping search : ' + str(query))
            return

        mt('search finished')
        i = 0

        thumb_small_urls = []
        thumb_small_filepaths = []
        thumb_full_urls = []
        thumb_full_filepaths = []
        # END OF PARSING
        for d in rdata.get('results', []):

            get_author(d)

            for f in d['files']:
                # TODO move validation of published assets to server, too manmy checks here.
                if f['fileType'] == 'thumbnail' and f[
                        'fileThumbnail'] != None and f[
                            'fileThumbnailLarge'] != None:
                    # NOTE(review): both branches below are unreachable - the
                    # condition above already requires these to be non-None.
                    if f['fileThumbnail'] == None:
                        f['fileThumbnail'] = 'NONE'
                    if f['fileThumbnailLarge'] == None:
                        f['fileThumbnailLarge'] = 'NONE'

                    thumb_small_urls.append(f['fileThumbnail'])
                    thumb_full_urls.append(f['fileThumbnailLarge'])

                    imgname = paths.extract_filename_from_url(
                        f['fileThumbnail'])
                    imgpath = os.path.join(tempdir, imgname)
                    thumb_small_filepaths.append(imgpath)

                    imgname = paths.extract_filename_from_url(
                        f['fileThumbnailLarge'])
                    imgpath = os.path.join(tempdir, imgname)
                    thumb_full_filepaths.append(imgpath)

        sml_thbs = zip(thumb_small_filepaths, thumb_small_urls)
        full_thbs = zip(thumb_full_filepaths, thumb_full_urls)

        # we save here because a missing thumbnail check is in the previous loop
        # we can also prepend previous results. These have downloaded thumbnails already...
        if params['get_next']:
            rdata['results'][0:0] = self.result['results']
        self.result = rdata
        # with open(json_filepath, 'w') as outfile:
        #     json.dump(rdata, outfile)

        killthreads_sml = []
        for k in thumb_sml_download_threads.keys():
            if k not in thumb_small_filepaths:
                killthreads_sml.append(k)  # do actual killing here?

        killthreads_full = []
        for k in thumb_full_download_threads.keys():
            if k not in thumb_full_filepaths:
                killthreads_full.append(k)  # do actual killing here?
        # TODO do the killing/ stopping here! remember threads might have finished inbetween!

        if self.stopped():
            utils.p('stopping search : ' + str(query))
            return

        # this loop handles downloading of small thumbnails
        for imgpath, url in sml_thbs:
            if imgpath not in thumb_sml_download_threads and not os.path.exists(
                    imgpath):
                thread = ThumbDownloader(url, imgpath)
                # thread = threading.Thread(target=download_thumbnail, args=([url, imgpath]),
                #                           daemon=True)
                thread.start()
                thumb_sml_download_threads[imgpath] = thread
                # threads.append(thread)

                # throttle: busy-wait until the pool drops back under maxthreads
                if len(thumb_sml_download_threads) > maxthreads:
                    while len(thumb_sml_download_threads) > maxthreads:
                        threads_copy = thumb_sml_download_threads.copy(
                        )  # because for loop can erase some of the items.
                        for tk, thread in threads_copy.items():
                            if not thread.is_alive():
                                thread.join()
                                # utils.p(x)
                                del (thumb_sml_download_threads[tk])
                                # utils.p('fetched thumbnail ', i)
                                i += 1
        if self.stopped():
            utils.p('stopping search : ' + str(query))
            return
        idx = 0
        # busy-wait for every remaining small-thumbnail download to finish
        while len(thumb_sml_download_threads) > 0:
            threads_copy = thumb_sml_download_threads.copy(
            )  # because for loop can erase some of the items.
            for tk, thread in threads_copy.items():
                if not thread.is_alive():
                    thread.join()
                    del (thumb_sml_download_threads[tk])
                    i += 1

        if self.stopped():
            utils.p('stopping search : ' + str(query))
            return

        # start downloading full thumbs in the end
        for imgpath, url in full_thbs:
            if imgpath not in thumb_full_download_threads and not os.path.exists(
                    imgpath):
                thread = ThumbDownloader(url, imgpath)
                # thread = threading.Thread(target=download_thumbnail, args=([url, imgpath]),
                #                           daemon=True)
                thread.start()
                thumb_full_download_threads[imgpath] = thread
        mt('thumbnails finished')
Example #7
0
def parse_result(r):
    '''
    Generate extra, Blender-specific data in a search result (by now).

    Parameters
    ----------
    r - search result dict from the server, also called asset_data

    Returns
    -------
    The enriched asset_data dict, or None when the result has no usable
    blend-file + thumbnail pair (falls off the end of the function).
    '''
    scene = bpy.context.scene

    # TODO remove this fix when filesSize is fixed.
    # this is a temporary fix for too big numbers from the server.
    try:
        r['filesSize'] = int(r['filesSize'] / 1024)
    except:
        utils.p('asset with no files-size')
    asset_type = r['assetType']
    if len(r['files']) > 0:

        allthumbs = []
        # initialize all sentinels together; small_tname is only assigned in
        # the thumbnail branch, so give it a defined value up front.
        durl, tname, small_tname = None, None, None
        for f in r['files']:
            if f['fileType'] == 'thumbnail':
                tname = paths.extract_filename_from_url(
                    f['fileThumbnailLarge'])
                small_tname = paths.extract_filename_from_url(
                    f['fileThumbnail'])
                allthumbs.append(tname)  # TODO just first thumb is used now.
            if f['fileType'] == 'blend':
                durl = f['downloadUrl'].split('?')[0]
                # fname = paths.extract_filename_from_url(f['filePath'])

        # BUGFIX: the key must be formatted with the index ('thumbnail_%i' % i);
        # previously every thumbnail overwrote the same literal key.
        # Build the dict once, after all files were scanned.
        tdict = {}
        for i, t in enumerate(allthumbs):
            tdict['thumbnail_%i' % i] = t

        if durl and tname:

            tooltip = generate_tooltip(r)
            # for some reason, the id was still int on some occurances. investigate this.
            r['author']['id'] = str(r['author']['id'])

            # some helper props, but generally shouldn't be renaming/duplifiying original properties,
            # so blender's data is same as on server.
            asset_data = {
                'thumbnail': tname,
                'thumbnail_small': small_tname,
                'download_url': durl,
                'tooltip': tooltip,
            }
            asset_data['downloaded'] = 0

            # parse extra params needed for blender here
            params = utils.params_to_dict(r['parameters'])

            if asset_type == 'model':
                if params.get('boundBoxMinX') != None:
                    bbox = {
                        'bbox_min': (float(params['boundBoxMinX']),
                                     float(params['boundBoxMinY']),
                                     float(params['boundBoxMinZ'])),
                        'bbox_max': (float(params['boundBoxMaxX']),
                                     float(params['boundBoxMaxY']),
                                     float(params['boundBoxMaxZ']))
                    }

                else:
                    # fallback unit-ish bounding box when the server sends none
                    bbox = {'bbox_min': (-.5, -.5, 0), 'bbox_max': (.5, .5, 1)}
                asset_data.update(bbox)
            if asset_type == 'material':
                asset_data['texture_size_meters'] = params.get(
                    'textureSizeMeters', 1.0)

            asset_data.update(tdict)
            if r['assetBaseId'] in scene.get('assets used', {}).keys():
                asset_data['downloaded'] = 100

            # attempt to switch to use original data gradually, since the parsing as itself should become obsolete.
            asset_data.update(r)
            return asset_data
Example #8
0
 def create_asset_data(rdata, asset_type):
     '''Convert raw server search results of the given asset_type into
     Blender-ready asset_data dicts and merge them back into each result.

     Only results that match asset_type and have both a blend download url
     and a thumbnail are updated; others are left untouched.
     '''
     for r in rdata['results']:
         if r['assetType'] == asset_type and len(r['files']) > 0:
             allthumbs = []
             # small_tname is only set in the thumbnail branch; initialize it
             # together with the other sentinels.
             durl, tname, small_tname = None, None, None
             for f in r['files']:
                 if f['fileType'] == 'thumbnail':
                     tname = paths.extract_filename_from_url(
                         f['fileThumbnailLarge'])
                     small_tname = paths.extract_filename_from_url(
                         f['fileThumbnail'])
                     allthumbs.append(
                         tname)  # TODO just first thumb is used now.
                 if f['fileType'] == 'blend':
                     durl = f['downloadUrl'].split('?')[0]
                     # fname = paths.extract_filename_from_url(f['filePath'])
             # BUGFIX: format the key with the index; the literal
             # 'thumbnail_%i' key was overwritten by every thumbnail.
             tdict = {}
             for i, t in enumerate(allthumbs):
                 tdict['thumbnail_%i' % i] = t
             if durl and tname:
                 tooltip = blenderkit.search.generate_tooltip(r)
                 # the id was still int on some occurrences; normalize to str.
                 r['author']['id'] = str(r['author']['id'])
                 asset_data = {
                     'thumbnail': tname,
                     'thumbnail_small': small_tname,
                     # 'thumbnails':allthumbs,
                     'download_url': durl,
                     'id': r['id'],
                     'asset_base_id': r['assetBaseId'],
                     'name': r['name'],
                     'asset_type': r['assetType'],
                     'tooltip': tooltip,
                     'tags': r['tags'],
                     'can_download': r.get('canDownload', True),
                     'verification_status': r['verificationStatus'],
                     'author_id': r['author']['id'],
                     # 'author': r['author']['firstName'] + ' ' + r['author']['lastName']
                     # 'description': r['description'],
                 }
                 asset_data['downloaded'] = 0
                 # parse extra params needed for blender here
                 params = utils.params_to_dict(r['parameters'])
                 if asset_type == 'model':
                     if params.get('boundBoxMinX') != None:
                         bbox = {
                             'bbox_min': (float(params['boundBoxMinX']),
                                          float(params['boundBoxMinY']),
                                          float(params['boundBoxMinZ'])),
                             'bbox_max': (float(params['boundBoxMaxX']),
                                          float(params['boundBoxMaxY']),
                                          float(params['boundBoxMaxZ']))
                         }
                     else:
                         bbox = {
                             'bbox_min': (-.5, -.5, 0),
                             'bbox_max': (.5, .5, 1)
                         }
                     asset_data.update(bbox)
                 if asset_type == 'material':
                     asset_data['texture_size_meters'] = params.get(
                         'textureSizeMeters', 1.0)
                 asset_data.update(tdict)
                 # BUGFIX: update r only when asset_data was built for THIS
                 # result; previously this ran for every result and could
                 # raise NameError or reuse asset_data from a previous
                 # iteration.
                 r.update(asset_data)
Example #9
0
def timer_update2():
    '''Periodic timer callback: shows first-run search/tips, picks up
    'asset_base_id:' links copied to the clipboard, and harvests finished
    search threads into the scene's search-result storage.

    Returns the delay in seconds until the next timer invocation.
    '''
    preferences = bpy.context.preferences.addons['blenderkit'].preferences
    if search.first_time:
        search.first_time = False
        if preferences.show_on_start:
            search()
        if preferences.tips_on_start:
            ui.get_largest_3dview()
            ui.update_ui_size(ui.active_area, ui.active_region)
            ui.add_report(text='BlenderKit Tip: ' +
                          random.choice(search.rtips),
                          timeout=12,
                          color=colors.GREEN)

    if bpy.context.window_manager.clipboard != search.last_clipboard:
        # BUGFIX: remember the clipboard on the module; the original assigned
        # only a local, so this branch re-triggered on every timer tick.
        search.last_clipboard = bpy.context.window_manager.clipboard
        last_clipboard = search.last_clipboard
        instr = 'asset_base_id:'
        if last_clipboard[:len(instr)] == instr:
            atstr = 'asset_type:'
            ati = last_clipboard.find(atstr)
            # paste into search keywords only when an asset_type is present too
            if ati > -1:
                search_props = utils.get_search_props()
                search_props.search_keywords = last_clipboard

    # don't do anything while dragging in the asset bar - asset type could
    # switch during the drag and make the results list length inconsistent.
    if len(search.search_threads
           ) == 0 or bpy.context.scene.blenderkitUI.dragging:
        return 1
    for thread in search.search_threads:
        if not thread[0].is_alive():
            search.search_threads.remove(thread)  #
            icons_dir = thread[1]
            scene = bpy.context.scene
            s = bpy.context.scene
            asset_type = thread[2]
            if asset_type == 'model':
                props = scene.blenderkit_models
                json_filepath = os.path.join(icons_dir,
                                             'model_searchresult.json')
                search_name = 'bkit model search'
            elif asset_type == 'scene':
                props = scene.blenderkit_scene
                json_filepath = os.path.join(icons_dir,
                                             'scene_searchresult.json')
                search_name = 'bkit scene search'
            elif asset_type == 'material':
                props = scene.blenderkit_mat
                json_filepath = os.path.join(icons_dir,
                                             'material_searchresult.json')
                search_name = 'bkit material search'
            elif asset_type == 'brush':
                props = scene.blenderkit_brush
                json_filepath = os.path.join(icons_dir,
                                             'brush_searchresult.json')
                search_name = 'bkit brush search'
            else:
                # BUGFIX: unknown asset type would leave props/json_filepath
                # unbound below; skip such threads.
                continue

            s[search_name] = []

            if search.reports != '':
                props.report = str(search.reports)
                return .2
            with open(json_filepath, 'r') as data_file:
                rdata = json.load(data_file)

            result_field = []
            ok, error = search.check_errors(rdata)
            if ok:
                bpy.ops.object.run_assetbar_fix_context()
                for r in rdata['results']:
                    # TODO remove when filesSize is fixed server-side.
                    try:
                        r['filesSize'] = int(r['filesSize'] / 1024)
                    except:
                        utils.p('asset with no files-size')
                    if r['assetType'] == asset_type:
                        if len(r['files']) > 0:
                            allthumbs = []
                            durl, tname, small_tname = None, None, None
                            for f in r['files']:
                                if f['fileType'] == 'thumbnail':
                                    tname = paths.extract_filename_from_url(
                                        f['fileThumbnailLarge'])
                                    small_tname = paths.extract_filename_from_url(
                                        f['fileThumbnail'])
                                    allthumbs.append(tname)
                                if f['fileType'] == 'blend':
                                    durl = f['downloadUrl'].split('?')[0]
                            # BUGFIX: format the key with the index; the
                            # literal 'thumbnail_%i' key was overwritten by
                            # every thumbnail. Build the dict once after the
                            # file loop instead of on every iteration.
                            tdict = {}
                            for i, t in enumerate(allthumbs):
                                tdict['thumbnail_%i' % i] = t
                            if durl and tname:

                                tooltip = search.generate_tooltip(r)
                                asset_data = {
                                    'thumbnail': tname,
                                    'thumbnail_small': small_tname,
                                    'download_url': durl,
                                    'id': r['id'],
                                    'asset_base_id': r['assetBaseId'],
                                    'name': r['name'],
                                    'asset_type': r['assetType'],
                                    'tooltip': tooltip,
                                    'tags': r['tags'],
                                    'can_download': r.get('canDownload', True),
                                    'verification_status':
                                    r['verificationStatus'],
                                    'author_id': str(r['author']['id'])
                                }
                                asset_data['downloaded'] = 0

                                if 'description' in r:
                                    asset_data['description'] = r[
                                        'description']
                                if 'metadata' in r:
                                    asset_data['metadata'] = r['metadata']
                                if 'sku' in r:
                                    asset_data['sku'] = r['sku']
                                if 'client' in r:
                                    asset_data['client'] = r['client']

                                # parse extra params needed for blender here
                                params = utils.params_to_dict(r['parameters'])

                                if asset_type == 'model':
                                    if params.get('boundBoxMinX') is not None:
                                        bbox = {
                                            'bbox_min':
                                            (float(params['boundBoxMinX']),
                                             float(params['boundBoxMinY']),
                                             float(params['boundBoxMinZ'])),
                                            'bbox_max':
                                            (float(params['boundBoxMaxX']),
                                             float(params['boundBoxMaxY']),
                                             float(params['boundBoxMaxZ']))
                                        }

                                    else:
                                        # fallback box when server sends none
                                        bbox = {
                                            'bbox_min': (-.5, -.5, 0),
                                            'bbox_max': (.5, .5, 1)
                                        }
                                    asset_data.update(bbox)
                                if asset_type == 'material':
                                    asset_data[
                                        'texture_size_meters'] = params.get(
                                            'textureSizeMeters', 1.0)

                                asset_data.update(tdict)
                                if r['assetBaseId'] in scene.get(
                                        'assets used', {}).keys():
                                    asset_data['downloaded'] = 100

                                result_field.append(asset_data)

                s[search_name] = result_field
                s['search results'] = result_field
                s[search_name + ' orig'] = rdata
                s['search results orig'] = rdata
                search.load_previews()
                ui_props = bpy.context.scene.blenderkitUI
                if len(result_field) < ui_props.scrolloffset:
                    ui_props.scrolloffset = 0
                props.is_searching = False
                props.search_error = False
                props.report = 'Found %i results. ' % (
                    s['search results orig']['count'])
                if len(s['search results']) == 0:
                    tasks_queue.add_task(
                        (ui.add_report, ('No matching results found.', )))

            else:
                print('error', error)
                props.report = error
                props.search_error = True

            search.mt('preview loading finished')
    return .3
Example #10
0
def timer_update():  # TODO might get moved to handle all blenderkit stuff.
    '''Periodic timer callback that harvests finished search threads and
    writes their parsed results into the scene's search-result storage.

    Returns the delay in seconds until the next timer invocation.
    '''
    global search_threads
    # don't do anything while dragging - this could switch asset type during drag, and make results list lenght different,
    # causing a lot of throuble literally.
    if len(search_threads) == 0 or bpy.context.scene.blenderkitUI.dragging:
        return 1
    for thread in search_threads:  # TODO this doesn't check all processess when removal... mostly 1 process will be running however.

        if not thread[0].is_alive():
            search_threads.remove(thread)  #
            icons_dir = thread[1]
            scene = bpy.context.scene
            # these 2 lines should update the previews enum and set the first result as active.
            s = bpy.context.scene
            asset_type = thread[2]
            if asset_type == 'model':
                props = scene.blenderkit_models
                json_filepath = os.path.join(icons_dir, 'model_searchresult.json')
                search_name = 'bkit model search'
            elif asset_type == 'scene':
                props = scene.blenderkit_scene
                json_filepath = os.path.join(icons_dir, 'scene_searchresult.json')
                search_name = 'bkit scene search'
            elif asset_type == 'material':
                props = scene.blenderkit_mat
                json_filepath = os.path.join(icons_dir, 'material_searchresult.json')
                search_name = 'bkit material search'
            elif asset_type == 'brush':
                props = scene.blenderkit_brush
                json_filepath = os.path.join(icons_dir, 'brush_searchresult.json')
                search_name = 'bkit brush search'
            else:
                # BUGFIX: unknown asset type would leave props/json_filepath
                # unbound below; skip such threads.
                continue

            s[search_name] = []

            global reports
            if reports != '':
                props.report = str(reports)
                return .2
            with open(json_filepath, 'r') as data_file:
                rdata = json.load(data_file)

            result_field = []
            ok, error = check_errors(rdata)
            if ok:

                for r in rdata['results']:

                    if r['assetType'] == asset_type:
                        if len(r['files']) > 0:
                            allthumbs = []
                            # small_tname only set in the thumbnail branch;
                            # initialize it with the other sentinels.
                            durl, tname, small_tname = None, None, None
                            for f in r['files']:
                                if f['fileType'] == 'thumbnail':
                                    tname = paths.extract_filename_from_url(f['fileThumbnailLarge'])
                                    small_tname = paths.extract_filename_from_url(f['fileThumbnail'])
                                    allthumbs.append(tname)  # TODO just first thumb is used now.
                                if f['fileType'] == 'blend':
                                    durl = f['downloadUrl'].split('?')[0]
                                    # fname = paths.extract_filename_from_url(f['filePath'])
                            # BUGFIX: format the key with the index; the
                            # literal 'thumbnail_%i' key was overwritten by
                            # every thumbnail. Build the dict once.
                            tdict = {}
                            for i, t in enumerate(allthumbs):
                                tdict['thumbnail_%i' % i] = t
                            if durl and tname:

                                tooltip = generate_tooltip(r)
                                asset_data = {'thumbnail': tname,
                                              'thumbnail_small': small_tname,
                                              # 'thumbnails':allthumbs,
                                              'download_url': durl,
                                              'id': r['id'],
                                              'asset_base_id': r['assetBaseId'],
                                              'name': r['name'],
                                              'asset_type': r['assetType'],
                                              'tooltip': tooltip,
                                              'tags': r['tags'],
                                              'can_download': r.get('canDownload', True),
                                              'verification_status': r['verificationStatus']
                                              # 'description': r['description'],
                                              # 'author': r['description'],
                                              }
                                asset_data['downloaded'] = 0

                                # parse extra params needed for blender here
                                params = params_to_dict(r['parameters'])

                                if asset_type == 'model':
                                    if params.get('boundBoxMinX') != None:
                                        bbox = {
                                            'bbox_min': (
                                                float(params['boundBoxMinX']),
                                                float(params['boundBoxMinY']),
                                                float(params['boundBoxMinZ'])),
                                            'bbox_max': (
                                                float(params['boundBoxMaxX']),
                                                float(params['boundBoxMaxY']),
                                                float(params['boundBoxMaxZ']))
                                        }

                                    else:
                                        # fallback box when server sends none
                                        bbox = {
                                            'bbox_min': (-.5, -.5, 0),
                                            'bbox_max': (.5, .5, 1)
                                        }
                                    asset_data.update(bbox)
                                if asset_type == 'material':
                                    asset_data['texture_size_meters'] = params.get('textureSizeMeters', 1.0)

                                asset_data.update(tdict)
                                if r['assetBaseId'] in scene.get('assets used', {}).keys():
                                    asset_data['downloaded'] = 100

                                result_field.append(asset_data)

                s[search_name] = result_field
                s['search results'] = result_field
                s['search results orig'] = rdata
                load_previews()
                ui_props = bpy.context.scene.blenderkitUI
                if len(result_field) < ui_props.scrolloffset:
                    ui_props.scrolloffset = 0
                props.is_searching = False
                props.search_error = False
                props.report = 'Open assetbar to see %i results. ' % len(s['search results'])
                if len(s['search results']) == 0:
                    props.report = 'No matching results found.'

            else:
                print('error', error)
                props.report = error
                props.search_error = True

            mt('preview loading finished')
    return .2
Example #11
0
    def run(self):
        '''Search-thread body: queries the server, saves results to a temp
        JSON file, and downloads small (then large) thumbnails in background
        threads. Communicates errors via the module-level `reports` global;
        checks self.stopped() between phases so the thread can be cancelled.
        '''
        maxthreads = 300  # cap on concurrent small-thumbnail downloads
        maximages = 50
        query = self.query
        params = self.params
        global reports

        t = time.time()
        mt('search thread started')
        tempdir = paths.get_temp_dir('%s_search' % query['asset_type'])
        json_filepath = os.path.join(tempdir, '%s_searchresult.json' % query['asset_type'])

        # authenticated vs anonymous request headers
        if query['token'] != '':
            headers = {
                "accept": "application/json",
                "Authorization": "Bearer %s" % query['token'],
                # "Content-Type": "application/json",
            }
        else:
            headers = {
                "accept": "application/json",
                # "Content-Type": "application/json",
            }
        rdata = {}
        rdata['results'] = []

        # paging: reuse the 'next' url stored with the previous results
        if params['get_next']:
            with open(json_filepath, 'r') as infile:
                try:
                    origdata = json.load(infile)
                    urlquery = origdata['next']
                    if urlquery == None:
                        return;
                except:
                    # in case no search results found on drive we don't do next page loading.
                    params['get_next'] = False
        if not params['get_next']:
            # build a new request
            url = paths.get_bkit_url() + 'search/'

            nquery = {
                # 'tags': query['keywords'],
                'asset_type': query['asset_type'],
            }
            if query.get('category'):
                nquery['category_subtree'] = query['category']

            # build request manually
            # TODO use real queries
            requeststring = '?query=' + query['keywords'].lower() + '+'
            #
            for i, q in enumerate(nquery):
                requeststring += q + ':' + str(nquery[q])
                if i < len(nquery) - 1:
                    requeststring += '+'

            requeststring += '&addon_version=%s' % params['addon_version']
            if params.get('scene_uuid') is not None:
                requeststring += '&scene_uuid=%s' % params['scene_uuid']

            urlquery = url + requeststring

        try:
            # print(urlquery)
            r = requests.get(urlquery, headers=headers)
            reports = ''
            # print(r.text)
        except requests.exceptions.RequestException as e:
            print(e)
            # surface the error to the UI via the module-level reports global
            reports = e
            # props.report = e
            return
        mt('response is back ')
        try:
            rdata = r.json()
        except Exception as inst:
            # non-JSON response (e.g. an HTML error page); report raw text
            reports = r.text
            print(inst)

        mt('data parsed ')

        # filter results here:
        # todo remove this in future
        nresults = []
        for d in rdata.get('results', []):
            # TODO this code is for filtering brush types, should vanish after we implement filter in Elastic
            mode = None
            if query['asset_type'] == 'brush':
                for p in d['parameters']:
                    if p['parameterType'] == 'mode':
                        mode = p['value']
            # NOTE(review): the parenthesization here compares the result of
            # `(… != None and …)` to mode, not `query['brushType'] == mode` —
            # looks like a misplaced paren; also excludes brushes when
            # brushType is unset. Verify intended filter semantics.
            if query['asset_type'] != 'brush' or (
                    query.get('brushType') != None and query['brushType']) == mode:
                nresults.append(d)
        rdata['results'] = nresults

        # print('number of results: ', len(rdata.get('results', [])))
        if self.stopped():
            print('stopping search : ' + query['keywords'])
            return

        mt('search finished')
        i = 0

        thumb_small_urls = []
        thumb_small_filepaths = []
        thumb_full_urls = []
        thumb_full_filepaths = []
        # END OF PARSING
        # collect thumbnail urls and their local target paths
        for d in rdata.get('results', []):
            for f in d['files']:
                # TODO move validation of published assets to server, too manmy checks here.
                if f['fileType'] == 'thumbnail' and f['fileThumbnail'] != None and f['fileThumbnailLarge'] != None:
                    # NOTE(review): these two == None branches are unreachable —
                    # the enclosing condition already requires both != None.
                    if f['fileThumbnail'] == None:
                        f['fileThumbnail'] = 'NONE'
                    if f['fileThumbnailLarge'] == None:
                        f['fileThumbnailLarge'] = 'NONE'

                    thumb_small_urls.append(f['fileThumbnail'])
                    thumb_full_urls.append(f['fileThumbnailLarge'])

                    imgname = paths.extract_filename_from_url(f['fileThumbnail'])
                    imgpath = os.path.join(tempdir, imgname)
                    thumb_small_filepaths.append(imgpath)

                    imgname = paths.extract_filename_from_url(f['fileThumbnailLarge'])
                    imgpath = os.path.join(tempdir, imgname)
                    thumb_full_filepaths.append(imgpath)

        sml_thbs = zip(thumb_small_filepaths, thumb_small_urls)
        full_thbs = zip(thumb_full_filepaths, thumb_full_urls)

        # we save here because a missing thumbnail check is in the previous loop
        # we can also prepend previous results. These have allready thumbnails downloaded...
        if params['get_next']:
            rdata['results'][0:0] = origdata['results']

        with open(json_filepath, 'w') as outfile:
            json.dump(rdata, outfile)

        # collect downloads that are no longer wanted by the current results
        killthreads_sml = []
        for k in thumb_sml_download_threads.keys():
            if k not in thumb_small_filepaths:
                killthreads_sml.append(k)  # do actual killing here?

        killthreads_full = []
        for k in thumb_full_download_threads.keys():
            if k not in thumb_full_filepaths:
                killthreads_full.append(k)  # do actual killing here?
        # TODO do the killing/ stopping here! remember threads might have finished inbetween!

        if self.stopped():
            print('stopping search : ' + query['keywords'])
            return

        # this loop handles downloading of small thumbnails
        for imgpath, url in sml_thbs:
            if imgpath not in thumb_sml_download_threads and not os.path.exists(imgpath):
                thread = ThumbDownloader(url, imgpath)
                # thread = threading.Thread(target=download_thumbnail, args=([url, imgpath]),
                #                           daemon=True)
                thread.start()
                thumb_sml_download_threads[imgpath] = thread
                # threads.append(thread)

                # throttle: wait until the thread pool drops below maxthreads
                if len(thumb_sml_download_threads) > maxthreads:
                    while len(thumb_sml_download_threads) > maxthreads:
                        threads_copy = thumb_sml_download_threads.copy()  # because for loop can erase some of the items.
                        for tk, thread in threads_copy.items():
                            if not thread.is_alive():
                                thread.join()
                                # print(x)
                                del (thumb_sml_download_threads[tk])
                                # print('fetched thumbnail ', i)
                                i += 1
        if self.stopped():
            print('stopping search : ' + query['keywords'])
            return
        idx = 0
        # drain: wait for all remaining small-thumbnail downloads to finish
        while len(thumb_sml_download_threads) > 0:
            threads_copy = thumb_sml_download_threads.copy()  # because for loop can erase some of the items.
            for tk, thread in threads_copy.items():
                if not thread.is_alive():
                    thread.join()
                    del (thumb_sml_download_threads[tk])
                    i += 1

        if self.stopped():
            print('stopping search : ' + query['keywords'])
            return

        # start downloading full thumbs in the end
        for imgpath, url in full_thbs:
            if imgpath not in thumb_full_download_threads and not os.path.exists(imgpath):
                thread = ThumbDownloader(url, imgpath)
                # thread = threading.Thread(target=download_thumbnail, args=([url, imgpath]),
                #                           daemon=True)
                thread.start()
                thumb_full_download_threads[imgpath] = thread
        mt('thumbnails finished')