Example #1
def upload_files(filepath, upload_data, files):
    headers = utils.get_headers(upload_data['token'])

    version_id = upload_data['id']
    for f in files:
        bg_blender.progress('uploading %s' % f['type'])
        upload_info = {
            'assetId': version_id,
            'fileType': f['type'],
            'fileIndex': f['index'],
            'originalFilename': os.path.basename(f['file_path'])
        }
        upload_create_url = paths.get_bkit_url() + 'uploads/'
        upload = requests.post(upload_create_url, json=upload_info, headers=headers, verify=True)
        upload = upload.json()

        # upheaders = {
        #     "accept": "application/json",
        #     "Authorization": "Bearer %s" % upload_data['token'],
        #     "Content-Type": "multipart/form-data",
        #     "Content-Disposition": 'form-data; name="file"; filename=%s' % f['file_path']
        #
        # }
        chunk_size = 1024 * 256

        # file gets uploaded here:
        uploaded = False
        # s3 upload is now the only option
        for a in range(0, 20):
            if not uploaded:
                try:
                    upload_response = requests.put(upload['s3UploadUrl'],
                                                   data=upload_in_chunks(f['file_path'], chunk_size, f['type']),
                                                   stream=True, verify=True)

                    if upload_response.status_code == 200:
                        uploaded = True
                    else:
                        bg_blender.progress('Upload failed, retry.')
                except Exception as e:
                    bg_blender.progress('Upload %s failed, retrying' % f['type'])
                    time.sleep(1)

                # confirm single file upload to bkit server
                upload_done_url = paths.get_bkit_url() + 'uploads_s3/' + upload['id'] + '/upload-file/'
                upload_response = requests.post(upload_done_url, headers=headers, verify=True)

        bg_blender.progress('finished uploading')

    return {'FINISHED'}
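Note: upload_in_chunks is referenced above but not defined in these examples. A minimal sketch of what it might look like, assuming it is an iterable that yields fixed-size chunks while reporting progress (the class name matches the call site; the body is an assumption):

import os

class upload_in_chunks(object):
    def __init__(self, filename, chunksize=1024 * 256, report_name='file'):
        self.filename = filename
        self.chunksize = chunksize
        self.totalsize = os.path.getsize(filename)
        self.readsofar = 0
        self.report_name = report_name

    def __iter__(self):
        # stream the file chunk by chunk so requests.put can upload it
        # without loading the whole file into memory
        with open(self.filename, 'rb') as f:
            while True:
                data = f.read(self.chunksize)
                if not data:
                    break
                self.readsofar += len(data)
                percent = self.readsofar * 100 / self.totalsize
                # assumes bg_blender.progress accepts an optional percentage
                bg_blender.progress('uploading %s' % self.report_name, percent)
                yield data

    def __len__(self):
        # requests uses len() to set the Content-Length header
        return self.totalsize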
Example #2
    def draw(self, context):
        # draw asset properties here
        layout = self.layout
        user_preferences = bpy.context.preferences.addons[
            'blenderkit'].preferences

        if user_preferences.login_attempt:
            draw_login_progress(layout)
            return

        if user_preferences.api_key != '':
            me = bpy.context.window_manager.get('bkit profile')
            if me is not None:
                me = me['user']
                # user name
                layout.label(text='Me: %s %s' %
                             (me['firstName'], me['lastName']))
                # layout.label(text='Email: %s' % (me['email']))

                # plan information

                if me.get('currentPlanName') is not None:
                    pn = me['currentPlanName']
                    pcoll = icons.icon_collections["main"]
                    if pn == 'Free':
                        my_icon = pcoll['free']
                    else:
                        my_icon = pcoll['full']

                    row = layout.row()
                    row.label(text='My plan:')
                    row.label(text='%s plan' % pn, icon_value=my_icon.icon_id)
                    if pn == 'Free':
                        layout.operator(
                            "wm.url_open", text="Change plan", icon='URL'
                        ).url = paths.get_bkit_url() + paths.BLENDERKIT_PLANS

                # storage statistics
                # if me.get('sumAssetFilesSize') is not None:  # TODO remove this when production server has these too.
                #     layout.label(text='My public assets: %i MiB' % (me['sumAssetFilesSize']))
                # if me.get('sumPrivateAssetFilesSize') is not None:
                #     layout.label(text='My private assets: %i MiB' % (me['sumPrivateAssetFilesSize']))
                if me.get('remainingPrivateQuota') is not None:
                    layout.label(text='My free storage: %i MiB' %
                                 (me['remainingPrivateQuota']))

            layout.operator(
                "wm.url_open", text="See my uploads", icon='URL'
            ).url = paths.get_bkit_url() + paths.BLENDERKIT_USER_ASSETS
Example #3
def fetch_categories(API_key):
    BLENDERKIT_API_MAIN = "https://www.blenderkit.com/api/v1/"

    url = paths.get_bkit_url() + 'categories/'

    headers = {
        "accept": "application/json",
        "Authorization": "Bearer %s" % API_key
    }
    tempdir = paths.get_temp_dir()
    categories_filepath = os.path.join(tempdir, 'categories.json')

    try:
        r = requests.get(url, headers=headers)
        rdata = r.json()
        categories = rdata['results']
        fix_category_counts(categories)
        # filter_categories(categories)  # TODO: this should filter categories for search, but not for upload; off for now.
        with open(categories_filepath, 'w') as s:
            json.dump(categories, s, indent=4)
    except Exception:
        print('category fetching failed')
        if not os.path.exists(categories_filepath):
            source_path = paths.get_addon_file(subpath='data' + os.sep +
                                               'categories.json')
            shutil.copy(source_path, categories_filepath)
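Note: fix_category_counts is not shown here. A plausible sketch, assuming each category dict carries an 'assetCount' and a 'children' list, and that counts should roll up from children to parents (the helper name count_to_parent is hypothetical):

def count_to_parent(parent):
    # recursively add each child's asset count to its parent
    for child in parent.get('children', []):
        count_to_parent(child)
        parent['assetCount'] += child['assetCount']

def fix_category_counts(categories):
    for category in categories:
        count_to_parent(category)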
Example #4
def refresh_token(api_key_refresh):
    authenticator = oauth.SimpleOAuthAuthenticator(
        server_url=paths.get_bkit_url(), client_id=CLIENT_ID, ports=PORTS)
    auth_token, refresh_token = authenticator.get_refreshed_token(
        api_key_refresh)
    if auth_token is not None and refresh_token is not None:
        tasks_queue.add_task((write_tokens, (auth_token, refresh_token)))
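Note: tasks_queue.add_task is used throughout these examples with (callable, args) tuples. A minimal sketch of the convention it appears to assume, where queued tasks are drained later on Blender's main thread (e.g. from a timer); this module body is hypothetical:

import queue

task_queue = queue.Queue()

def add_task(task, fake_context=False):
    # task is a (callable, args) tuple; fake_context mirrors the flag seen in these examples
    task_queue.put(task)

def queue_worker():
    # drain pending tasks; meant to be called from the main thread
    while not task_queue.empty():
        func, args = task_queue.get()
        func(*args)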
Example #5
def login_thread(signup=False):
    r_url = paths.get_oauth_landing_url()
    url = paths.get_bkit_url()
    thread = threading.Thread(target=login,
                              args=(signup, url, r_url),
                              daemon=True)
    thread.start()
Example #6
def fetch_categories(API_key):
    BLENDERKIT_API_MAIN = "https://www.blenderkit.com/api/v1/"

    url = paths.get_bkit_url() + 'categories/'

    headers = {
        "accept": "application/json",
        "Authorization": "Bearer %s" % API_key
    }
    tempdir = paths.get_temp_dir()
    categories_filepath = os.path.join(tempdir, 'categories.json')

    try:
        r = requests.get(url, headers=headers)
        rdata = r.json()
        categories = rdata['results']
        fix_category_counts(categories)
        # filter_categories(categories)  # TODO: this should filter categories for search, but not for upload; off for now.
        with open(categories_filepath, 'w') as s:
            json.dump(categories, s, indent=4)
    except Exception:
        # print('category fetching failed')
        if not os.path.exists(categories_filepath):
            source_path = paths.get_addon_file(subpath='data' + os.sep + 'categories.json')
            shutil.copy(source_path, categories_filepath)
Example #7
    def draw(self, context):
        # draw asset properties here
        layout = self.layout
        user_preferences = bpy.context.preferences.addons[
            'blenderkit'].preferences

        if user_preferences.login_attempt:
            draw_login_progress(layout)
            return

        if user_preferences.enable_oauth:
            draw_login_buttons(layout)

        if user_preferences.api_key != '':
            me = bpy.context.window_manager.get('bkit profile')
            if me is not None:
                me = me['user']
                layout.label(text='User: %s %s' %
                             (me['firstName'], me['lastName']))
                layout.label(text='Email: %s' % (me['email']))
                if me.get('sumAssetFilesSize') is not None:  # TODO remove this when production server has these too.
                    layout.label(text='Public assets: %i MiB' %
                                 (me['sumAssetFilesSize']))
                if me.get('sumPrivateAssetFilesSize') is not None:
                    layout.label(text='Private assets: %i MiB' %
                                 (me['sumPrivateAssetFilesSize']))
                if me.get('remainingPrivateQuota') is not None:
                    layout.label(text='Remaining private storage: %i MiB' %
                                 (me['remainingPrivateQuota']))

            layout.operator(
                "wm.url_open", text="See my uploads", icon='URL'
            ).url = paths.get_bkit_url() + paths.BLENDERKIT_USER_ASSETS
Example #8
def upload_files(filepath, upload_data, files):
    headers = {"accept": "application/json", "Authorization": "Bearer %s" % upload_data['token']}
    version_id = upload_data['id']
    for f in files:
        bg_blender.progress('uploading %s' % f['type'])
        upload_info = {
            'assetId': version_id,
            'fileType': f['type'],
            'fileIndex': f['index'],
            'originalFilename': os.path.basename(f['file_path'])
        }
        upload_create_url = paths.get_bkit_url() + 'uploads/'
        upload = requests.post(upload_create_url, json=upload_info, headers=headers, verify=True)
        upload = upload.json()

        upheaders = {
            "accept": "application/json",
            "Authorization": "Bearer %s" % upload_data['token'],
            "Content-Type": "multipart/form-data",
            "Content-Disposition": 'form-data; name="file"; filename=%s' % f['file_path']

        }
        chunk_size = 1024 * 256

        # file gets uploaded here:
        uploaded = False
        # s3 upload is now the only option
        for a in range(0, 20):
            if not uploaded:
                try:
                    upload_response = requests.put(upload['s3UploadUrl'],
                                                   data=upload_in_chunks(f['file_path'], chunk_size, f['type']),
                                                   stream=True, verify=True)
                    # print('upload response')
                    # print(upload_response.text)
                    uploaded = True
                except Exception as e:
                    bg_blender.progress('Upload %s failed, retrying' % f['type'])
                    time.sleep(1)

                # confirm single file upload to bkit server
                upload_done_url = paths.get_bkit_url() + 'uploads_s3/' + upload['id'] + '/upload-file/'
                upload_response = requests.post(upload_done_url, headers=headers, verify=True)

        bg_blender.progress('finished uploading')

    return {'FINISHED'}
Example #9
def refresh_token_thread():
    preferences = bpy.context.preferences.addons['blenderkit'].preferences
    if len(preferences.api_key_refresh) > 0:
        url = paths.get_bkit_url()
        thread = threading.Thread(target=refresh_token,
                                  args=(preferences.api_key_refresh, url),
                                  daemon=True)
        thread.start()
Example #10
def rerequest(method, url, recursion=0, **kwargs):
    # first get any additional args from kwargs
    immediate = False
    if kwargs.get('immediate'):
        immediate = kwargs['immediate']
        kwargs.pop('immediate')
    # first normal attempt
    try:
        response = requests.request(method, url, **kwargs)
    except Exception as e:
        print(e)
        return None

    bk_logger.debug(url + str(kwargs))
    bk_logger.debug(response.status_code)

    if response.status_code == 401:
        try:
            rdata = response.json()
        except Exception:
            rdata = {}

        tasks_queue.add_task((ui.add_report, (method + ' request failed: ' + str(rdata.get('detail')),)))

        if rdata.get('detail') == 'Invalid token.':
            user_preferences = bpy.context.preferences.addons['blenderkit'].preferences
            if user_preferences.api_key != '':
                if user_preferences.enable_oauth and user_preferences.api_key_refresh != '':
                    tasks_queue.add_task((ui.add_report, (
                        'Refreshing token. If this fails, please log in via the BlenderKit Login panel.', 10)))
                    refresh_url = paths.get_bkit_url()
                    auth_token, refresh_token, oauth_response = bkit_oauth.refresh_token(
                        user_preferences.api_key_refresh, refresh_url)

                    # bk_logger.debug(auth_token, refresh_token)
                    if auth_token is not None:
                        if immediate:
                            # this can write tokens occasionally into prefs. used e.g. in upload. Only possible
                            #  in non-threaded tasks
                            bpy.context.preferences.addons['blenderkit'].preferences.api_key = auth_token
                            bpy.context.preferences.addons['blenderkit'].preferences.api_key_refresh = refresh_token
                        else:
                            tasks_queue.add_task((bkit_oauth.write_tokens, (auth_token, refresh_token, oauth_response)))

                        kwargs['headers'] = utils.get_headers(auth_token)
                        response = requests.request(method, url, **kwargs)
                        bk_logger.debug('reresult %s' % response.status_code)
                        if response.status_code >= 400:
                            bk_logger.debug('reresult %s' % response.text)
                            tasks_queue.add_task((ui.add_report, (
                                response.text, 10)))

                    else:
                        tasks_queue.add_task((ui.add_report, (
                            'Refreshing token failed. Please log in manually.', 10)))
                        # tasks_queue.add_task((bkit_oauth.write_tokens, ('', '', '')))
                        tasks_queue.add_task((bpy.ops.wm.blenderkit_login, ('INVOKE_DEFAULT',)), fake_context=True)
    return response
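A hypothetical usage of rerequest, showing how a GET can transparently retry after a token refresh (api_key here is an assumed variable):

headers = utils.get_headers(api_key)
response = rerequest('get', paths.get_bkit_url() + 'search/', headers=headers)
if response is not None and response.status_code == 200:
    rdata = response.json()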
Example #11
def login(signup):
    r_url = paths.get_oauth_landing_url()

    authenticator = oauth.SimpleOAuthAuthenticator(
        server_url=paths.get_bkit_url(), client_id=CLIENT_ID, ports=PORTS)
    auth_token, refresh_token = authenticator.get_new_token(register=signup,
                                                            redirect_url=r_url)
    utils.p('tokens retrieved')
    tasks_queue.add_task((write_tokens, (auth_token, refresh_token)))
Example #12
def login_thread(signup=False):
    global active_authenticator
    r_url = paths.get_oauth_landing_url()
    url = paths.get_bkit_url()
    authenticator = oauth.SimpleOAuthAuthenticator(server_url=url, client_id=CLIENT_ID, ports=PORTS)
    # we store the authenticator globally to be able to ping the server if the connection fails.
    active_authenticator = authenticator
    thread = threading.Thread(target=login, args=(signup, url, r_url, authenticator), daemon=True)
    thread.start()
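Note: write_tokens is queued by the functions above but not defined in these examples. A minimal sketch, assuming it simply stores the tokens in the add-on preferences on the main thread (the body is an assumption):

def write_tokens(auth_token, refresh_token):
    preferences = bpy.context.preferences.addons['blenderkit'].preferences
    preferences.api_key = auth_token
    preferences.api_key_refresh = refresh_token
    preferences.login_attempt = False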
Example #13
def refresh_token_thread():
    preferences = bpy.context.preferences.addons['blenderkit'].preferences
    if len(preferences.api_key_refresh) > 0 and not preferences.refresh_in_progress:
        preferences.refresh_in_progress = True
        url = paths.get_bkit_url()
        thread = threading.Thread(target=refresh_token, args=(preferences.api_key_refresh, url), daemon=True)
        thread.start()
    else:
        ui.add_report('Already refreshing token, it will be ready soon.')
Example #14
def upload_rating(asset):
    user_preferences = bpy.context.preferences.addons['blenderkit'].preferences
    api_key = user_preferences.api_key
    headers = utils.get_headers(api_key)


    asset_data = asset['asset_data']

    bkit_ratings = asset.bkit_ratings
    # print('rating asset', asset_data['name'], asset_data['asset_base_id'])
    url = paths.get_bkit_url() + 'assets/' + asset['asset_data']['id'] + '/rating/'

    ratings = []

    if bkit_ratings.rating_quality > 0.1:
        ratings.append(('quality', bkit_ratings.rating_quality))
    if bkit_ratings.rating_work_hours > 0.1:
        ratings.append(('working_hours', round(bkit_ratings.rating_work_hours, 1)))

    thread = threading.Thread(target=uplaod_rating_thread, args=(url, ratings, headers))
    thread.start()

    url = paths.get_bkit_url() + 'assets/' + asset['asset_data']['id'] + '/review'

    reviews = {
        'reviewText': bkit_ratings.rating_compliments,
        'reviewTextProblems': bkit_ratings.rating_problems,
    }
    if not (bkit_ratings.rating_compliments == '' and bkit_ratings.rating_problems == ''):
        thread = threading.Thread(target=uplaod_review_thread, args=(url, reviews, headers))
        thread.start()

    # the info that the user rated an item is stored in the scene
    s = bpy.context.scene
    s['assets rated'] = s.get('assets rated', {})
    if bkit_ratings.rating_quality > 0.1 and bkit_ratings.rating_work_hours > 0.1:
        s['assets rated'][asset['asset_data']['asset_base_id']] = True
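Note: uplaod_rating_thread and uplaod_review_thread (the misspelling is in the original call sites) are not defined here. A sketch of what the rating thread might do, assuming one PUT per rating; the URL layout is an assumption:

def uplaod_rating_thread(url, ratings, headers):
    for rating_name, score in ratings:
        try:
            # e.g. .../assets/<id>/rating/quality/
            r = requests.put(url + rating_name + '/', data={'score': score},
                             headers=headers, verify=True)
        except requests.exceptions.RequestException as e:
            print(e)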
Example #15
def rerequest(method, url, **kwargs):
    # first get any additional args from kwargs
    immediate = False
    if kwargs.get('immediate'):
        immediate = kwargs['immediate']
        kwargs.pop('immediate')
    # first normal attempt
    response = requests.request(method, url, **kwargs)

    utils.p(url)
    utils.p(response.status_code)

    if response.status_code == 401:
        try:
            rdata = response.json()
        except Exception:
            rdata = {}

        tasks_queue.add_task(
            (ui.add_report,
             (method + ' request failed: ' + str(rdata.get('detail')), )))

        if rdata.get('detail') == 'Invalid token.':
            user_preferences = bpy.context.preferences.addons[
                'blenderkit'].preferences
            if user_preferences.api_key != '':
                if user_preferences.enable_oauth and user_preferences.api_key_refresh != '':
                    tasks_queue.add_task((ui.add_report, (
                        'Refreshing token. If this fails, please log in via the BlenderKit Login panel.',
                        10)))
                    refresh_url = paths.get_bkit_url()
                    auth_token, refresh_token, oauth_response = bkit_oauth.refresh_token(
                        user_preferences.api_key_refresh, refresh_url)

                    # utils.p(auth_token, refresh_token)
                    if auth_token is not None:
                        if immediate:
                            # this can write tokens occasionally into prefs. used e.g. in upload. Only possible
                            #  in non-threaded tasks
                            bpy.context.preferences.addons[
                                'blenderkit'].preferences.api_key = auth_token
                            bpy.context.preferences.addons[
                                'blenderkit'].preferences.api_key_refresh = refresh_token

                        kwargs['headers'] = utils.get_headers(auth_token)
                        response = requests.request(method, url, **kwargs)
                        utils.p('reresult', response.status_code)
                        if response.status_code >= 400:
                            utils.p('reresult', response.text)
    return response
Example #16
def mark_for_validation(self, context, asset_type):
    props = utils.get_upload_props()
    props.upload_state = 'marking for validation'
    user_preferences = bpy.context.preferences.addons['blenderkit'].preferences

    upload_data = {
        "verificationStatus": "ready"
    }

    url = paths.get_bkit_url() + 'assets/'
    headers = {"accept": "application/json", "Authorization": "Bearer %s" % user_preferences.api_key}
    url += props.id + '/'

    try:
        r = requests.patch(url, json=upload_data, headers=headers, verify=True)  # files = files,
        props.upload_state = 'marked for validation'
    except requests.exceptions.RequestException as e:
        props.upload_state = str(e)
        props.uploading = False
        return {'CANCELLED'}
    return {'FINISHED'}
Example #18
            "index": 0,
            "file_path": export_data["thumbnail_path"]
        }, {
            "type": "blend",
            "index": 0,
            "file_path": fpath
        }]

        bg_blender.progress('uploading')

        upload_files(fpath, upload_data, files)

        # mark on server as uploaded
        confirm_data = {"verificationStatus": "uploaded"}

        url = paths.get_bkit_url() + 'assets/'
        headers = {
            "accept": "application/json",
            "Authorization": "Bearer %s" % upload_data['token']
        }
        url += upload_data["id"] + '/'

        r = requests.patch(url,
                           json=confirm_data,
                           headers=headers,
                           verify=True)  # files = files,

        bg_blender.progress('upload finished successfully')

    except Exception as e:
        print(e)
Example #19
def report_usages():
    '''Report the usage of assets to the server.'''
    mt = time.time()
    user_preferences = bpy.context.preferences.addons['blenderkit'].preferences
    api_key = user_preferences.api_key
    sid = get_scene_id()
    headers = {"accept": "application/json", "Authorization": "Bearer %s" % api_key}
    url = paths.get_bkit_url() + paths.BLENDERKIT_REPORT_URL

    assets = {}
    asset_obs = []
    scene = bpy.context.scene
    asset_usages = {}

    for ob in scene.collection.objects:
        if ob.get('asset_data') is not None:
            asset_obs.append(ob)

    for ob in asset_obs:
        asset_data = ob['asset_data']
        abid = asset_data['asset_base_id']

        if assets.get(abid) is None:
            asset_usages[abid] = {'count': 1}
            assets[abid] = asset_data
        else:
            asset_usages[abid]['count'] += 1

    # brushes
    for b in bpy.data.brushes:
        if b.get('asset_data') is not None:
            abid = b['asset_data']['asset_base_id']
            asset_usages[abid] = {'count': 1}
            assets[abid] = b['asset_data']
    # materials
    for ob in scene.collection.objects:
        for ms in ob.material_slots:
            m = ms.material

            if m is not None and m.get('asset_data') is not None:

                abid = m['asset_data']['asset_base_id']
                if assets.get(abid) is None:
                    asset_usages[abid] = {'count': 1}
                    assets[abid] = m['asset_data']
                else:
                    asset_usages[abid]['count'] += 1

    assets_list = []
    assets_reported = scene.get('assets reported', {})

    new_assets_count = 0
    for k in asset_usages.keys():
        if k not in assets_reported.keys():
            data = asset_usages[k]
            list_item = {
                'asset': k,
                'usageCount': data['count'],
                'proximitySet': data.get('proximity', [])
            }
            assets_list.append(list_item)
            new_assets_count += 1
            assets_reported[k] = True

    scene['assets reported'] = assets_reported

    if new_assets_count == 0:
        print('no new assets were added')
        return
    usage_report = {
        'scene': sid,
        'reportType': 'save',
        'assetusageSet': assets_list
    }

    au = scene.get('assets used', {})
    ad = scene.get('assets deleted', {})

    ak = assets.keys()
    for k in au.keys():
        if k not in ak:
            ad[k] = au[k]
        else:
            if k in ad:
                ad.pop(k)

    scene['assets used'] = scene.get('assets used', {})
    for k in ak:  # rewrite assets used.
        scene['assets used'][k] = assets[k]

    # check ratings here too:
    scene['assets rated'] = scene.get('assets rated', {})
    for k in assets.keys():
        scene['assets rated'][k] = scene['assets rated'].get(k, False)
    thread = threading.Thread(target=utils.requests_post_thread, args=(url, usage_report, headers))
    thread.start()
    # r = requests.post(url, headers=headers, json=usage_report)
    mt = time.time() - mt
    print('report generation:                ', mt)
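Note: utils.requests_post_thread is the thread target above but is not shown. A minimal sketch, assuming a fire-and-forget JSON POST (the body is an assumption):

def requests_post_thread(url, json_data, headers):
    try:
        r = requests.post(url, json=json_data, headers=headers, verify=True)
    except requests.exceptions.RequestException as e:
        print(e)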
Example #20
    def run(self):
        maxthreads = 300
        maximages = 50
        query = self.query
        params = self.params
        global reports

        t = time.time()
        mt('search thread started')
        tempdir = paths.get_temp_dir('%s_search' % query['asset_type'])
        json_filepath = os.path.join(tempdir, '%s_searchresult.json' % query['asset_type'])

        if query['token'] != '':
            headers = {
                "accept": "application/json",
                "Authorization": "Bearer %s" % query['token'],
                # "Content-Type": "application/json",
            }
        else:
            headers = {
                "accept": "application/json",
                # "Content-Type": "application/json",
            }
        rdata = {}
        rdata['results'] = []

        if params['get_next']:
            with open(json_filepath, 'r') as infile:
                try:
                    origdata = json.load(infile)
                    urlquery = origdata['next']
                    if urlquery is None:
                        return
                except Exception:
                    # in case no search results found on drive we don't do next page loading.
                    params['get_next'] = False
        if not params['get_next']:
            # build a new request
            url = paths.get_bkit_url() + 'search/'

            nquery = {
                # 'tags': query['keywords'],
                'asset_type': query['asset_type'],
            }
            if query.get('category'):
                nquery['category_subtree'] = query['category']

            # build request manually
            # TODO use real queries
            requeststring = '?query=' + query['keywords'].lower() + '+'
            #
            for i, q in enumerate(nquery):
                requeststring += q + ':' + str(nquery[q])
                if i < len(nquery) - 1:
                    requeststring += '+'

            requeststring += '&addon_version=%s' % params['addon_version']
            if params.get('scene_uuid') is not None:
                requeststring += '&scene_uuid=%s' % params['scene_uuid']

            urlquery = url + requeststring

        try:
            # print(urlquery)
            r = requests.get(urlquery, headers=headers)
            reports = ''
            # print(r.text)
        except requests.exceptions.RequestException as e:
            print(e)
            reports = e
            # props.report = e
            return
        mt('response is back ')
        try:
            rdata = r.json()
        except Exception as inst:
            reports = r.text
            print(inst)

        mt('data parsed ')

        # filter results here:
        # todo remove this in future
        nresults = []
        for d in rdata.get('results', []):
            # TODO this code is for filtering brush types, should vanish after we implement filter in Elastic
            mode = None
            if query['asset_type'] == 'brush':
                for p in d['parameters']:
                    if p['parameterType'] == 'mode':
                        mode = p['value']
            if query['asset_type'] != 'brush' or (
                    query.get('brushType') is not None and query['brushType'] == mode):
                nresults.append(d)
        rdata['results'] = nresults

        print('number of results: ', len(rdata.get('results', [])))
        if self.stopped():
            print('stopping search : ' + query['keywords'])
            return

        mt('search finished')
        i = 0

        thumb_small_urls = []
        thumb_small_filepaths = []
        thumb_full_urls = []
        thumb_full_filepaths = []
        # END OF PARSING
        for d in rdata.get('results', []):
            for f in d['files']:
                # TODO move validation of published assets to server, too many checks here.
                if f['fileType'] == 'thumbnail':
                    if f['fileThumbnail'] is None:
                        f['fileThumbnail'] = 'NONE'
                    if f['fileThumbnailLarge'] is None:
                        f['fileThumbnailLarge'] = 'NONE'

                    thumb_small_urls.append(f['fileThumbnail'])
                    thumb_full_urls.append(f['fileThumbnailLarge'])

                    imgname = paths.extract_filename_from_url(f['fileThumbnail'])
                    imgpath = os.path.join(tempdir, imgname)
                    thumb_small_filepaths.append(imgpath)

                    imgname = paths.extract_filename_from_url(f['fileThumbnailLarge'])
                    imgpath = os.path.join(tempdir, imgname)
                    thumb_full_filepaths.append(imgpath)

        sml_thbs = zip(thumb_small_filepaths, thumb_small_urls)
        full_thbs = zip(thumb_full_filepaths, thumb_full_urls)

        # we save here because a missing thumbnail check is in the previous loop
        # we can also prepend previous results; these already have their thumbnails downloaded.
        if params['get_next']:
            rdata['results'][0:0] = origdata['results']

        with open(json_filepath, 'w') as outfile:
            json.dump(rdata, outfile)

        killthreads_sml = []
        for k in thumb_sml_download_threads.keys():
            if k not in thumb_small_filepaths:
                killthreads_sml.append(k)  # do actual killing here?

        killthreads_full = []
        for k in thumb_full_download_threads.keys():
            if k not in thumb_full_filepaths:
                killthreads_full.append(k)  # do actual killing here?
        # TODO do the killing/stopping here! remember threads might have finished in between!

        if self.stopped():
            print('stopping search : ' + query['keywords'])
            return

        # this loop handles downloading of small thumbnails
        for imgpath, url in sml_thbs:
            if imgpath not in thumb_sml_download_threads and not os.path.exists(imgpath):
                thread = ThumbDownloader(url, imgpath)
                # thread = threading.Thread(target=download_thumbnail, args=([url, imgpath]),
                #                           daemon=True)
                thread.start()
                thumb_sml_download_threads[imgpath] = thread
                # threads.append(thread)

                if len(thumb_sml_download_threads) > maxthreads:
                    while len(thumb_sml_download_threads) > maxthreads:
                        threads_copy = thumb_sml_download_threads.copy()  # because for loop can erase some of the items.
                        for tk, thread in threads_copy.items():
                            if not thread.is_alive():
                                thread.join()
                                # print(x)
                                del (thumb_sml_download_threads[tk])
                                print('fetched thumbnail ', i)
                                i += 1
        if self.stopped():
            print('stopping search : ' + query['keywords'])
            return
        idx = 0
        while len(thumb_sml_download_threads) > 0:
            threads_copy = thumb_sml_download_threads.copy()  # because for loop can erase some of the items.
            for tk, thread in threads_copy.items():
                if not thread.is_alive():
                    thread.join()
                    del (thumb_sml_download_threads[tk])
                    i += 1

        if self.stopped():
            print('stopping search : ' + query['keywords'])
            return

        # start downloading full thumbs in the end
        for imgpath, url in full_thbs:
            if imgpath not in thumb_full_download_threads and not os.path.exists(imgpath):
                thread = ThumbDownloader(url, imgpath)
                # thread = threading.Thread(target=download_thumbnail, args=([url, imgpath]),
                #                           daemon=True)
                thread.start()
                thumb_full_download_threads[imgpath] = thread
        mt('thumbnails finished')
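Note: ThumbDownloader is not defined in these examples. A sketch, assuming a Thread subclass that streams one image to disk (the body is an assumption):

import threading

import requests

class ThumbDownloader(threading.Thread):
    def __init__(self, url, path):
        super().__init__(daemon=True)
        self.url = url
        self.path = path

    def run(self):
        # download one thumbnail and write it to the target path
        r = requests.get(self.url, stream=True)
        if r.status_code == 200:
            with open(self.path, 'wb') as f:
                for chunk in r.iter_content(chunk_size=4096):
                    f.write(chunk)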
Example #21
def start_upload(self, context, asset_type, as_new, metadata_only):
    props = utils.get_upload_props()
    location = get_upload_location(props)
    props.upload_state = 'preparing upload'
    # do this for fixing long tags in some upload cases
    props.tags = props.tags[:]

    props.name = props.name.strip()
    # TODO move this to a separate function
    # check for missing metadata
    if asset_type == 'MODEL':
        get_missing_data_model(props)
    elif asset_type == 'SCENE':
        get_missing_data_scene(props)
    elif asset_type == 'MATERIAL':
        get_missing_data_material(props)
    elif asset_type == 'BRUSH':
        get_missing_data_brush(props)

    if props.report != '':
        self.report({'ERROR_INVALID_INPUT'}, props.report)
        return {'CANCELLED'}
    if as_new:
        props.asset_base_id = ''
        props.id = ''
    export_data, upload_data, eval_path_computing, eval_path_state, eval_path, props = get_upload_data(self, context,
                                                                                                       asset_type)
    # utils.pprint(upload_data)
    upload_data['parameters'] = params_to_dict(
        upload_data['parameters'])  # weird array conversion only for upload, not for tooltips.

    binary_path = bpy.app.binary_path
    script_path = os.path.dirname(os.path.realpath(__file__))
    basename, ext = os.path.splitext(bpy.data.filepath)
    # if not basename:
    #     basename = os.path.join(basename, "temp")
    if not ext:
        ext = ".blend"
    tempdir = tempfile.mkdtemp()
    source_filepath = os.path.join(tempdir, "export_blenderkit" + ext)
    clean_file_path = paths.get_clean_filepath()
    data = {
        'clean_file_path': clean_file_path,
        'source_filepath': source_filepath,
        'temp_dir': tempdir,
        'export_data': export_data,
        'upload_data': upload_data,
        'debug_value': bpy.app.debug_value,
    }
    datafile = os.path.join(tempdir, BLENDERKIT_EXPORT_DATA_FILE)

    # check if thumbnail exists:

    if not os.path.exists(export_data["thumbnail_path"]):
        props.upload_state = 'Thumbnail not found'
        props.uploading = False
        return {'CANCELLED'}

    # first upload metadata to server, so it can be saved inside the current file
    url = paths.get_bkit_url() + 'assets/'
    headers = {"accept": "application/json", "Authorization": "Bearer %s" % upload_data['token']}
    json_metadata = upload_data  # json.dumps(upload_data, ensure_ascii=False).encode('utf8')
    global reports
    if props.asset_base_id == '':
        try:
            r = requests.post(url, json=json_metadata, headers=headers, verify=True)  # files = files,
            props.upload_state = 'uploaded metadata'
        except requests.exceptions.RequestException as e:
            print(e)
            props.upload_state = str(e)
            props.uploading = False
            return {'CANCELLED'}

    else:
        url += props.id + '/'
        try:
            if not metadata_only:
                json_metadata["verificationStatus"] = "uploading"
            r = requests.put(url, json=json_metadata, headers=headers, verify=True)  # files = files,
            props.upload_state = 'uploaded metadata'
            # parse the request
            # print('uploaded metadata')
            # print(r.text)
        except requests.exceptions.RequestException as e:
            print(e)
            props.upload_state = str(e)
            props.uploading = False
            return {'CANCELLED'}

    # props.upload_state = 'step 1'
    if metadata_only:
        props.uploading = False

        return {'FINISHED'}
    try:
        rj = r.json()
        if props.asset_base_id == '':
            props.asset_base_id = rj['assetBaseId']
            props.id = rj['id']

        upload_data['assetBaseId'] = props.asset_base_id
        upload_data['id'] = props.id

        bpy.ops.wm.save_mainfile()
        props.uploading = True
        # save a copy of the actual scene, but don't interfere with the user's models
        bpy.ops.wm.save_as_mainfile(filepath=source_filepath, compress=False, copy=True)

        with open(datafile, 'w') as s:
            json.dump(data, s)

        proc = subprocess.Popen([
            binary_path,
            "--background",
            "-noaudio",
            clean_file_path,
            "--python", os.path.join(script_path, "upload_bg.py"),
            "--", datafile  # ,filepath, tempdir
        ], bufsize=5000, stdout=subprocess.PIPE, stdin=subprocess.PIPE)

        bg_blender.add_bg_process(eval_path_computing=eval_path_computing, eval_path_state=eval_path_state,
                                  eval_path=eval_path, process_type='UPLOAD', process=proc, location=location)

    except Exception as e:
        props.upload_state = str(e)
        props.uploading = False
        return {'CANCELLED'}

    return {'FINISHED'}
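Note: the subprocess above hands its state to upload_bg.py through a JSON file. A hypothetical counterpart showing how that script might read the handoff:

import json
import sys

if __name__ == "__main__":
    datafile = sys.argv[-1]  # the path passed after '--'
    with open(datafile, 'r') as f:
        data = json.load(f)
    export_data = data['export_data']
    upload_data = data['upload_data']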
Example #22
            "index": 0,
            "file_path": export_data["thumbnail_path"]
        }, {
            "type": "blend",
            "index": 0,
            "file_path": fpath
        }]

        bg_blender.progress('uploading')

        upload_files(fpath, upload_data, files)

        # mark on server as uploaded
        confirm_data = {
            "verificationStatus": "uploaded"
        }

        url = paths.get_bkit_url() + 'assets/'
        headers = {"accept": "application/json", "Authorization": "Bearer %s" % upload_data['token']}
        url += upload_data["id"] + '/'

        r = requests.patch(url, json=confirm_data, headers=headers, verify=True)  # files = files,

        bg_blender.progress('upload finished successfully')


    except Exception as e:
        print(e)
        bg_blender.progress(str(e))
        sys.exit(1)
Example #24
    def run(self):
        maxthreads = 300
        maximages = 50
        query = self.query
        params = self.params
        global reports

        t = time.time()
        mt('search thread started')
        tempdir = paths.get_temp_dir('%s_search' % query['asset_type'])
        json_filepath = os.path.join(tempdir, '%s_searchresult.json' % query['asset_type'])

        if query['token'] != '':
            headers = {
                "accept": "application/json",
                "Authorization": "Bearer %s" % query['token'],
                # "Content-Type": "application/json",
            }
        else:
            headers = {
                "accept": "application/json",
                # "Content-Type": "application/json",
            }
        rdata = {}
        rdata['results'] = []

        if params['get_next']:
            with open(json_filepath, 'r') as infile:
                try:
                    origdata = json.load(infile)
                    urlquery = origdata['next']
                    if urlquery is None:
                        return
                except Exception:
                    # in case no search results found on drive we don't do next page loading.
                    params['get_next'] = False
        if not params['get_next']:
            # build a new request
            url = paths.get_bkit_url() + 'search/'

            nquery = {
                # 'tags': query['keywords'],
                'asset_type': query['asset_type'],
            }
            if query.get('category'):
                nquery['category_subtree'] = query['category']

            # build request manually
            # TODO use real queries
            requeststring = '?query=' + query['keywords'].lower() + '+'
            #
            for i, q in enumerate(nquery):
                requeststring += q + ':' + str(nquery[q])
                if i < len(nquery) - 1:
                    requeststring += '+'

            requeststring += '&addon_version=%s' % params['addon_version']
            if params.get('scene_uuid') is not None:
                requeststring += '&scene_uuid=%s' % params['scene_uuid']

            urlquery = url + requeststring

        try:
            # print(urlquery)
            r = requests.get(urlquery, headers=headers)
            reports = ''
            # print(r.text)
        except requests.exceptions.RequestException as e:
            print(e)
            reports = e
            # props.report = e
            return
        mt('response is back ')
        try:
            rdata = r.json()
        except Exception as inst:
            reports = r.text
            print(inst)

        mt('data parsed ')

        # filter results here:
        # todo remove this in future
        nresults = []
        for d in rdata.get('results', []):
            # TODO this code is for filtering brush types, should vanish after we implement filter in Elastic
            mode = None
            if query['asset_type'] == 'brush':
                for p in d['parameters']:
                    if p['parameterType'] == 'mode':
                        mode = p['value']
            if query['asset_type'] != 'brush' or (
                    query.get('brushType') is not None and query['brushType'] == mode):
                nresults.append(d)
        rdata['results'] = nresults

        # print('number of results: ', len(rdata.get('results', [])))
        if self.stopped():
            print('stopping search : ' + query['keywords'])
            return

        mt('search finished')
        i = 0

        thumb_small_urls = []
        thumb_small_filepaths = []
        thumb_full_urls = []
        thumb_full_filepaths = []
        # END OF PARSING
        for d in rdata.get('results', []):
            for f in d['files']:
                # TODO move validation of published assets to server, too many checks here.
                if f['fileType'] == 'thumbnail':
                    if f['fileThumbnail'] is None:
                        f['fileThumbnail'] = 'NONE'
                    if f['fileThumbnailLarge'] is None:
                        f['fileThumbnailLarge'] = 'NONE'

                    thumb_small_urls.append(f['fileThumbnail'])
                    thumb_full_urls.append(f['fileThumbnailLarge'])

                    imgname = paths.extract_filename_from_url(f['fileThumbnail'])
                    imgpath = os.path.join(tempdir, imgname)
                    thumb_small_filepaths.append(imgpath)

                    imgname = paths.extract_filename_from_url(f['fileThumbnailLarge'])
                    imgpath = os.path.join(tempdir, imgname)
                    thumb_full_filepaths.append(imgpath)

        sml_thbs = zip(thumb_small_filepaths, thumb_small_urls)
        full_thbs = zip(thumb_full_filepaths, thumb_full_urls)

        # we save here because a missing thumbnail check is in the previous loop
        # we can also prepend previous results; these already have their thumbnails downloaded.
        if params['get_next']:
            rdata['results'][0:0] = origdata['results']

        with open(json_filepath, 'w') as outfile:
            json.dump(rdata, outfile)

        killthreads_sml = []
        for k in thumb_sml_download_threads.keys():
            if k not in thumb_small_filepaths:
                killthreads_sml.append(k)  # do actual killing here?

        killthreads_full = []
        for k in thumb_full_download_threads.keys():
            if k not in thumb_full_filepaths:
                killthreads_full.append(k)  # do actual killing here?
        # TODO do the killing/stopping here! remember threads might have finished in between!

        if self.stopped():
            print('stopping search : ' + query['keywords'])
            return

        # this loop handles downloading of small thumbnails
        for imgpath, url in sml_thbs:
            if imgpath not in thumb_sml_download_threads and not os.path.exists(imgpath):
                thread = ThumbDownloader(url, imgpath)
                # thread = threading.Thread(target=download_thumbnail, args=([url, imgpath]),
                #                           daemon=True)
                thread.start()
                thumb_sml_download_threads[imgpath] = thread
                # threads.append(thread)

                if len(thumb_sml_download_threads) > maxthreads:
                    while len(thumb_sml_download_threads) > maxthreads:
                        threads_copy = thumb_sml_download_threads.copy()  # because for loop can erase some of the items.
                        for tk, thread in threads_copy.items():
                            if not thread.is_alive():
                                thread.join()
                                # print(x)
                                del (thumb_sml_download_threads[tk])
                                # print('fetched thumbnail ', i)
                                i += 1
        if self.stopped():
            print('stopping search : ' + query['keywords'])
            return
        idx = 0
        while len(thumb_sml_download_threads) > 0:
            threads_copy = thumb_sml_download_threads.copy()  # because for loop can erase some of the items.
            for tk, thread in threads_copy.items():
                if not thread.is_alive():
                    thread.join()
                    del (thumb_sml_download_threads[tk])
                    i += 1

        if self.stopped():
            print('stopping search : ' + query['keywords'])
            return

        # start downloading full thumbs in the end
        for imgpath, url in full_thbs:
            if imgpath not in thumb_full_download_threads and not os.path.exists(imgpath):
                thread = ThumbDownloader(url, imgpath)
                # thread = threading.Thread(target=download_thumbnail, args=([url, imgpath]),
                #                           daemon=True)
                thread.start()
                thumb_full_download_threads[imgpath] = thread
        mt('thumbnails finished')