Example #1
def upload_file(upload_data, f):
    headers = utils.get_headers(upload_data['token'])
    version_id = upload_data['id']
    bg_blender.progress(
        f"uploading {f['type']} {os.path.basename(f['file_path'])}")
    upload_info = {
        'assetId': version_id,
        'fileType': f['type'],
        'fileIndex': f['index'],
        'originalFilename': os.path.basename(f['file_path'])
    }
    upload_create_url = paths.get_api_url() + 'uploads/'
    upload = rerequests.post(upload_create_url,
                             json=upload_info,
                             headers=headers,
                             verify=True)
    upload = upload.json()
    #
    chunk_size = 1024 * 1024 * 2
    # utils.pprint(upload)
    # file gets uploaded here:
    uploaded = False
    # s3 upload is now the only option
    for a in range(0, 5):
        if not uploaded:
            try:
                upload_response = requests.put(upload['s3UploadUrl'],
                                               data=upload_in_chunks(
                                                   f['file_path'], chunk_size,
                                                   f['type']),
                                               stream=True,
                                               verify=True)

                if 200 <= upload_response.status_code < 250:
                    uploaded = True
                else:
                    print(upload_response.text)
                    bg_blender.progress(f'Upload failed, retry. {a}')
            except Exception as e:
                print(e)
                bg_blender.progress('Upload %s failed, retrying' % f['type'])
                time.sleep(1)

            if uploaded:
                # confirm single file upload to bkit server
                upload_done_url = paths.get_api_url(
                ) + 'uploads_s3/' + upload['id'] + '/upload-file/'
                upload_response = rerequests.post(upload_done_url,
                                                  headers=headers,
                                                  verify=True)

    bg_blender.progress('finished uploading')

    return uploaded
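
A note on the helper used above: `upload_in_chunks` is passed as the request body but is not defined in these snippets. Below is a minimal sketch of such a helper, assuming only the interface visible at the call sites (`file_path, chunk_size, report_name`); the progress reporting is illustrative, not the add-on's actual implementation.

import os


class upload_in_chunks:
    '''Iterable file wrapper: requests streams any iterable body,
    and __len__ lets it set the Content-Length header.'''

    def __init__(self, filename, chunksize=1024 * 1024 * 2, report_name='file'):
        self.filename = filename
        self.chunksize = chunksize
        self.totalsize = os.path.getsize(filename)
        self.report_name = report_name
        self.readsofar = 0

    def __iter__(self):
        with open(self.filename, 'rb') as f:
            while True:
                data = f.read(self.chunksize)
                if not data:
                    break
                self.readsofar += len(data)
                # hypothetical progress output; the real helper may report elsewhere
                print(f'uploading {self.report_name}: '
                      f'{self.readsofar * 100 / self.totalsize:.0f}%')
                yield data

    def __len__(self):
        return self.totalsize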
Example #2
def upload_rating(asset):
    user_preferences = bpy.context.preferences.addons['blenderkit'].preferences
    api_key = user_preferences.api_key
    headers = utils.get_headers(api_key)

    bkit_ratings = asset.bkit_ratings
    # print('rating asset', asset_data['name'], asset_data['assetBaseId'])
    url = paths.get_api_url(
    ) + 'assets/' + asset['asset_data']['id'] + '/rating/'

    ratings = []

    if bkit_ratings.rating_quality > 0.1:
        ratings = (('quality', bkit_ratings.rating_quality), )
        tasks_queue.add_task(
            (send_rating_to_thread_quality, (url, ratings, headers)),
            wait=2.5,
            only_last=True)
    if bkit_ratings.rating_work_hours > 0.1:
        ratings = (('working_hours', round(bkit_ratings.rating_work_hours,
                                           1)), )
        tasks_queue.add_task(
            (send_rating_to_thread_work_hours, (url, ratings, headers)),
            wait=2.5,
            only_last=True)

    thread = threading.Thread(target=upload_rating_thread,
                              args=(url, ratings, headers))
    thread.start()

    url = paths.get_api_url(
    ) + 'assets/' + asset['asset_data']['id'] + '/review'

    reviews = {
        'reviewText': bkit_ratings.rating_compliments,
        'reviewTextProblems': bkit_ratings.rating_problems,
    }
    if not (bkit_ratings.rating_compliments == ''
            and bkit_ratings.rating_problems == ''):
        thread = threading.Thread(target=upload_review_thread,
                                  args=(url, reviews, headers))
        thread.start()

    # the info that the user rated an item is stored in the scene
    s = bpy.context.scene
    s['assets rated'] = s.get('assets rated', {})
    if bkit_ratings.rating_quality > 0.1 and bkit_ratings.rating_work_hours > 0.1:
        s['assets rated'][asset['asset_data']['assetBaseId']] = True
Example #3
def get_rating(asset_id, headers):
    '''
    Retrieve ratings from BlenderKit server. Can be run from a thread
    Parameters
    ----------
    asset_id
    headers

    Returns
    -------
    ratings - dict of type:value ratings
    '''
    url = paths.get_api_url() + 'assets/' + asset_id + '/rating/'
    params = {}
    r = rerequests.get(url, params=params, verify=True, headers=headers)
    if r is None:
        return
    if r.status_code == 200:
        rj = r.json()
        ratings = {}
        # store ratings - send them to the task queue
        for result in rj['results']:
            ratings[result['ratingType']] = result['score']
            tasks_queue.add_task(
                (store_rating_local, (asset_id, result['ratingType'],
                                      result['score'])))

        if len(rj['results']) == 0:
            # store empty ratings too, so that the server isn't checked repeatedly
            tasks_queue.add_task((store_rating_local_empty, (asset_id, )))
        return ratings
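
Since the docstring says get_rating can run from a thread, a typical caller (hypothetical; asset_id and headers already in scope) might look like this:

import threading

# fetch ratings in the background so the UI doesn't block on the request
thread = threading.Thread(target=get_rating, args=(asset_id, headers), daemon=True)
thread.start()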
Example #4
    def execute(self, context):
        user_preferences = bpy.context.preferences.addons[
            'blenderkit'].preferences
        api_key = user_preferences.api_key
        headers = utils.get_headers(api_key)

        url = paths.get_api_url() + f'assets/{self.asset_id}/rating/'

        rtgs = []

        if self.rating_quality_ui == '':
            self.rating_quality = 0
        else:
            self.rating_quality = int(self.rating_quality_ui)

        if self.rating_quality > 0.1:
            rtgs = (('quality', self.rating_quality), )
            tasks_queue.add_task(
                (send_rating_to_thread_quality, (url, rtgs, headers)),
                wait=2.5,
                only_last=True)

        if self.rating_work_hours > 0.1:
            rtgs = (('working_hours', round(self.rating_work_hours, 1)), )
            tasks_queue.add_task(
                (send_rating_to_thread_work_hours, (url, rtgs, headers)),
                wait=2.5,
                only_last=True)
        return {'FINISHED'}
Example #5
def update_ratings_work_hours(self, context):
    user_preferences = bpy.context.preferences.addons['blenderkit'].preferences
    api_key = user_preferences.api_key
    headers = utils.get_headers(api_key)
    asset = self.id_data
    if asset:
        bkit_ratings = asset.bkit_ratings
        url = paths.get_api_url() + 'assets/' + asset['asset_data']['id'] + '/rating/'
    else:
        # this part is for operator rating:
        bkit_ratings = self
        url = paths.get_api_url() + f'assets/{self.asset_id}/rating/'

    if bkit_ratings.rating_work_hours > 0.45:
        ratings = [('working_hours', round(bkit_ratings.rating_work_hours, 1))]
        tasks_queue.add_task((send_rating_to_thread_work_hours, (url, ratings, headers)), wait=2.5, only_last=True)
Example #6
def update_ratings_work_hours(self, context):
    user_preferences = bpy.context.preferences.addons['blenderkit'].preferences
    api_key = user_preferences.api_key
    headers = utils.get_headers(api_key)
    if not hasattr(self, 'rating_work_hours'):
        # first option is for rating of assets that are from scene
        asset = self.id_data
        bkit_ratings = asset.bkit_ratings
        asset_id = asset['asset_data']['id']
    else:
        # this part is for operator rating:
        bkit_ratings = self
        asset_id = self.asset_id

    if bkit_ratings.rating_work_hours > 0.45:
        url = paths.get_api_url() + f'assets/{asset_id}/rating/'

        store_rating_local(asset_id,
                           type='working_hours',
                           value=bkit_ratings.rating_work_hours)

        ratings = [('working_hours', round(bkit_ratings.rating_work_hours, 1))]
        tasks_queue.add_task(
            (send_rating_to_thread_work_hours, (url, ratings, headers)),
            wait=2.5,
            only_last=True)
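
Example #6 calls store_rating_local(asset_id, type=..., value=...) without showing it. A plausible minimal version, assuming a module-level cache keyed by asset id (the real add-on may persist ratings differently), is:

# hypothetical module-level cache; names and storage are assumptions
rating_cache = {}


def store_rating_local(asset_id, type='quality', value=0):
    rating_cache.setdefault(asset_id, {})[type] = value


def store_rating_local_empty(asset_id):
    # remember that the server had no ratings, so it isn't queried again
    rating_cache[asset_id] = {}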
Example #7
def get_assets_search():
    bpy.app.debug_value = 2

    results = []
    preferences = bpy.context.preferences.addons['blenderkit'].preferences
    url = paths.get_api_url() + 'search/all'
    i = 0
    while url is not None:
        headers = utils.get_headers(preferences.api_key)
        print('fetching assets from assets endpoint')
        print(url)
        retries = 0
        while retries < 3:
            r = rerequests.get(url, headers=headers)
            adata = {}
            try:
                adata = r.json()
                url = adata.get('next')
                print(i)
                i += 1
            except Exception as e:
                print(e)
                print('failed to get next')
                if retries == 2:
                    url = None
            if adata.get('results') is not None:
                results.extend(adata['results'])
                print(f'fetched page {i}')
                break
            retries += 1

    fpath = assets_db_path()
    with open(fpath, 'w') as s:
        json.dump(results, s)
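
Most snippets here go through rerequests instead of calling requests directly, and Example #3 treats a None return as a failed request. The wrapper itself isn't shown; a minimal retrying sketch with the same call shape is below. The real module likely does more (e.g. API-token refresh); that is an assumption and omitted here.

import time

import requests


def get(url, retries=3, **kwargs):
    # retry transient network errors, returning None when all attempts fail
    for attempt in range(retries):
        try:
            return requests.get(url, **kwargs)
        except requests.exceptions.RequestException as e:
            print(e)
            time.sleep(1)
    return None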
Example #8
def fetch_categories(API_key, force=False):
    url = paths.get_api_url() + 'categories/'

    headers = utils.get_headers(API_key)

    tempdir = paths.get_temp_dir()
    categories_filepath = os.path.join(tempdir, 'categories.json')
    if os.path.exists(categories_filepath):
        catfile_age = time.time() - os.path.getmtime(categories_filepath)
    else:
        catfile_age = 10000000

    # global catfetch_counter
    # catfetch_counter += 1
    # bk_logger.debug('fetching categories: ', catfetch_counter)
    # bk_logger.debug('age of cat file', catfile_age)
    try:
        # read categories only once per day maximum, or when forced to do so.
        if catfile_age > 86400 or force:
            bk_logger.debug('requesting categories from server')
            r = rerequests.get(url, headers=headers)
            rdata = r.json()
            categories = rdata['results']
            fix_category_counts(categories)
            # filter_categories(categories)  # TODO: this should filter categories for search, but not for upload; for now it is off.
            with open(categories_filepath, 'w', encoding='utf-8') as s:
                json.dump(categories, s, ensure_ascii=False, indent=4)
        tasks_queue.add_task((load_categories, ()))
    except Exception as e:
        bk_logger.debug('category fetching failed')
        bk_logger.exception(e)
        if not os.path.exists(categories_filepath):
            source_path = paths.get_addon_file(subpath='data' + os.sep +
                                               'categories.json')
            shutil.copy(source_path, categories_filepath)
Example #9
def fetch_author(a_id, api_key):
    utils.p('fetch author')
    try:
        a_url = paths.get_api_url() + 'accounts/' + a_id + '/'
        headers = utils.get_headers(api_key)
        r = requests.get(a_url, headers=headers)
        if r.status_code == 200:
            adata = r.json()
            if adata.get('id') is None:  # r.json() returns a dict, so check the key
                utils.p(adata)
            # utils.p(adata)
            tasks_queue.add_task((write_author, (a_id, adata)))
            if adata.get('gravatarHash') is not None:
                gravatar_path = paths.get_temp_dir(
                    subdir=None) + adata['gravatarHash'] + '.jpg'
                url = "https://www.gravatar.com/avatar/" + adata[
                    'gravatarHash'] + '?d=404'
                r = requests.get(url, stream=False)
                if r.status_code == 200:
                    with open(gravatar_path, 'wb') as f:
                        f.write(r.content)
                    adata['gravatarImg'] = gravatar_path
    except Exception as e:
        utils.p(e)
    utils.p('finish fetch')
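
All of these snippets build request headers via utils.get_headers(api_key). Its body isn't included here; given the bearer-token usage against the BlenderKit API, a sketch consistent with the call sites (an assumption, not the verified implementation) would be:

def get_headers(api_key=''):
    # JSON API with optional bearer-token authorization
    headers = {'accept': 'application/json'}
    if api_key != '':
        headers['Authorization'] = 'Bearer %s' % api_key
    return headers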
Example #10
def request_profile(api_key):
    a_url = paths.get_api_url() + 'me/'
    headers = utils.get_headers(api_key)
    r = requests.get(a_url, headers=headers)
    adata = r.json()
    if adata.get('user') is None:
        utils.p(adata)
        utils.p('getting profile failed')
        return None
    return adata
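
A hypothetical caller, showing only the 'user' key that the check above guarantees (any deeper field names would be assumptions):

profile = request_profile(api_key)
if profile is not None:
    print(profile['user'])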
Example #11
def patch_individual_metadata(asset_id, metadata_dict, api_key):
    upload_data = metadata_dict
    url = paths.get_api_url() + 'assets/' + str(asset_id) + '/'
    headers = utils.get_headers(api_key)
    try:
        r = rerequests.patch(url, json=upload_data, headers=headers, verify=True)  # files = files,
    except requests.exceptions.RequestException as e:
        print(e)
        return {'CANCELLED'}
    return {'FINISHED'}
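
For instance, renaming an asset and replacing its tags might look like the call below; the 'name' and 'tags' field names follow the conventions visible in other snippets but are assumptions here.

# hypothetical usage; metadata field names are assumptions
result = patch_individual_metadata(
    asset_id='<asset-id>',
    metadata_dict={'name': 'Old Oak', 'tags': ['tree', 'nature']},
    api_key=api_key)
print(result)  # {'FINISHED'} or {'CANCELLED'}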
Example #12
def get_rating(asset_id):
    user_preferences = bpy.context.preferences.addons['blenderkit'].preferences
    api_key = user_preferences.api_key
    headers = utils.get_headers(api_key)
    url = paths.get_api_url() + 'assets/' + asset_id + '/rating/'
    rtypes = ['quality', 'working_hours']
    for rt in rtypes:
        params = {'rating_type': rt}
        r = rerequests.get(url, params=params, verify=True, headers=headers)
        print(r.text)
Example #13
def verification_status_change_thread(asset_id, state, api_key):
    upload_data = {
        "verificationStatus": state
    }
    url = paths.get_api_url() + 'assets/' + str(asset_id) + '/'
    headers = utils.get_headers(api_key)
    try:
        r = rerequests.patch(url, json=upload_data, headers=headers, verify=True)  # files = files,
    except requests.exceptions.RequestException as e:
        print(e)
        return {'CANCELLED'}
    return {'FINISHED'}
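
The _thread suffix suggests this is meant to run off Blender's main thread so the UI doesn't block on the PATCH; a sketch of such a call ('validated' is just an example state):

import threading

thread = threading.Thread(target=verification_status_change_thread,
                          args=(asset_id, 'validated', api_key))
thread.start()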
Example #14
def verification_status_change(self, context, asset_id, state):
    user_preferences = bpy.context.preferences.addons['blenderkit'].preferences
    upload_data = {"verificationStatus": state}
    url = paths.get_api_url() + 'assets/' + str(asset_id) + '/'
    headers = utils.get_headers(user_preferences.api_key)
    try:
        r = requests.patch(url, json=upload_data, headers=headers,
                           verify=True)  # files = files,
        #print('changed status ')
        #print(r.text)
    except requests.exceptions.RequestException as e:
        print(e)
        return {'CANCELLED'}
    return {'FINISHED'}
Example #15
def fetch_author(a_id, api_key):
    utils.p('fetch author')
    try:
        a_url = paths.get_api_url() + 'accounts/' + a_id + '/'
        headers = utils.get_headers(api_key)
        r = requests.get(a_url, headers=headers)
        adata = r.json()
        if adata.get('id') is None:  # r.json() returns a dict, so check the key
            utils.p(adata)
        # utils.p(adata)
        tasks_queue.add_task((write_author, (a_id, adata)))
    except Exception as e:
        utils.p(e)
    utils.p('finish fetch')
Example #16
def update_ratings_work_hours(self, context):
    user_preferences = bpy.context.preferences.addons['blenderkit'].preferences
    api_key = user_preferences.api_key
    headers = utils.get_headers(api_key)
    asset = self.id_data
    bkit_ratings = asset.bkit_ratings
    url = paths.get_api_url(
    ) + 'assets/' + asset['asset_data']['id'] + '/rating/'

    if bkit_ratings.rating_work_hours > 0.1:
        ratings = [('working_hours', round(bkit_ratings.rating_work_hours, 1))]
        tasks_queue.add_task(
            (send_rating_to_thread_work_hours, (url, ratings, headers)),
            wait=1,
            only_last=True)
Example #17
def patch_asset_empty(asset_id, api_key):
    '''
        Patch the asset with an empty payload so the server reindexes it.
        Should be removed once this is fixed on the server and
        the server is able to reindex after uploads of resolutions.
        Returns
        -------
        {'FINISHED'} on success, {'CANCELLED'} on a request error.
    '''
    upload_data = {
    }
    url = paths.get_api_url() + 'assets/' + str(asset_id) + '/'
    headers = utils.get_headers(api_key)
    try:
        r = rerequests.patch(url, json=upload_data, headers=headers, verify=True)  # files = files,
    except requests.exceptions.RequestException as e:
        print(e)
        return {'CANCELLED'}
    return {'FINISHED'}
Example #18
    def query_to_url(self):
        query = self.query
        params = self.params
        # build a new request
        url = paths.get_api_url() + 'search/'

        # build request manually
        # TODO use real queries
        requeststring = '?query='
        #
        if query.get('query') not in ('', None):
            requeststring += query['query'].lower()
        for i, q in enumerate(query):
            if q != 'query':
                requeststring += '+'
                requeststring += q + ':' + str(query[q]).lower()

        # result ordering: _score - relevance, score - BlenderKit score

        if query.get('query') is None and query.get(
                'category_subtree') is None:
            # assumes no keywords and no category, thus an empty search that is triggered on start.
            # orders by last core file upload
            if query.get('verification_status') == 'uploaded':
                # for validators, sort uploaded from oldest
                requeststring += '+order:created'
            else:
                requeststring += '+order:-last_upload'
        elif query.get(
                'author_id') is not None and utils.profile_is_validator():

            requeststring += '+order:-created'
        else:
            if query.get('category_subtree') is not None:
                requeststring += '+order:-score,_score'
            else:
                requeststring += '+order:_score'

        requeststring += '&addon_version=%s' % params['addon_version']
        if params.get('scene_uuid') is not None:
            requeststring += '&scene_uuid=%s' % params['scene_uuid']
        # print('params', params)
        urlquery = url + requeststring
        return urlquery
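
To make the string assembly concrete, tracing query_to_url with made-up inputs:

# Illustration only; inputs are hypothetical.
#   query  = {'query': 'chair', 'category_subtree': 'furniture'}
#   params = {'addon_version': '3.0.0'}
# The method then returns roughly:
#   <api_url>search/?query=chair+category_subtree:furniture+order:-score,_score&addon_version=3.0.0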
Example #19
    def execute(self, context):
        user_preferences = bpy.context.preferences.addons[
            'blenderkit'].preferences
        api_key = user_preferences.api_key
        headers = utils.get_headers(api_key)

        url = paths.get_api_url() + f'assets/{self.asset_id}/rating/'

        rtgs = []

        if self.rating_quality_ui == '':
            self.rating_quality = 0
        else:
            self.rating_quality = int(self.rating_quality_ui)

        if self.rating_quality > 0.1:
            rtgs.append(('quality', self.rating_quality))
        if self.rating_work_hours > 0.1:
            rtgs.append(('working_hours', round(self.rating_work_hours, 1)))

        thread = threading.Thread(target=upload_rating_thread,
                                  args=(url, rtgs, headers))
        thread.start()
        return {'FINISHED'}
Example #20
def mark_for_validation(self, context, asset_type):
    props = utils.get_upload_props()
    props.upload_state = 'marking for validation'
    user_preferences = bpy.context.preferences.addons['blenderkit'].preferences

    upload_data = {"verificationStatus": "ready"}

    url = paths.get_api_url() + 'assets/'

    headers = utils.get_headers(user_preferences.api_key)

    url += props.id + '/'

    try:
        r = requests.patch(url, json=upload_data, headers=headers,
                           verify=True)  # files = files,
        props.upload_state = 'marked for validation'
    except requests.exceptions.RequestException as e:
        props.upload_state = str(e)
        props.uploading = False
        return {'CANCELLED'}
    return {'FINISHED'}
Example #21
def fetch_categories(API_key):
    url = paths.get_api_url() + 'categories/'

    headers = utils.get_headers(API_key)

    tempdir = paths.get_temp_dir()
    categories_filepath = os.path.join(tempdir, 'categories.json')

    try:
        r = rerequests.get(url, headers=headers)
        rdata = r.json()
        categories = rdata['results']
        fix_category_counts(categories)
        # filter_categories(categories)  # TODO: this should filter categories for search, but not for upload; for now it is off.
        with open(categories_filepath, 'w') as s:
            json.dump(categories, s, indent=4)
        tasks_queue.add_task((load_categories, ()))
    except Exception as e:
        utils.p('category fetching failed')
        utils.p(e)
        if not os.path.exists(categories_filepath):
            source_path = paths.get_addon_file(subpath='data' + os.sep + 'categories.json')
            shutil.copy(source_path, categories_filepath)
Example #22
def start_upload(self, context, asset_type, as_new, metadata_only):
    '''start upload process, by processing data'''
    props = utils.get_upload_props()
    storage_quota_ok = check_storage_quota(props)
    if not storage_quota_ok:
        self.report({'ERROR_INVALID_INPUT'}, props.report)
        return {'CANCELLED'}

    location = get_upload_location(props)
    props.upload_state = 'preparing upload'

    auto_fix(asset_type=asset_type)

    # do this for fixing long tags in some upload cases
    props.tags = props.tags[:]


    props.name = props.name.strip()
    # TODO  move this to separate function
    # check for missing metadata
    if asset_type == 'MODEL':
        get_missing_data_model(props)
    elif asset_type == 'SCENE':
        get_missing_data_scene(props)
    elif asset_type == 'MATERIAL':
        get_missing_data_material(props)
    elif asset_type == 'BRUSH':
        get_missing_data_brush(props)

    if props.report != '':
        self.report({'ERROR_INVALID_INPUT'}, props.report)
        return {'CANCELLED'}

    if as_new:
        props.asset_base_id = ''
        props.id = ''
    export_data, upload_data, eval_path_computing, eval_path_state, eval_path, props = get_upload_data(self, context,
                                                                                                       asset_type)
    # utils.pprint(upload_data)
    upload_data['parameters'] = params_to_dict(
        upload_data['parameters'])  # weird array conversion only for upload, not for tooltips.

    binary_path = bpy.app.binary_path
    script_path = os.path.dirname(os.path.realpath(__file__))
    basename, ext = os.path.splitext(bpy.data.filepath)
    # if not basename:
    #     basename = os.path.join(basename, "temp")
    if not ext:
        ext = ".blend"
    tempdir = tempfile.mkdtemp()
    source_filepath = os.path.join(tempdir, "export_blenderkit" + ext)
    clean_file_path = paths.get_clean_filepath()
    data = {
        'clean_file_path': clean_file_path,
        'source_filepath': source_filepath,
        'temp_dir': tempdir,
        'export_data': export_data,
        'upload_data': upload_data,
        'debug_value': bpy.app.debug_value,
    }
    datafile = os.path.join(tempdir, BLENDERKIT_EXPORT_DATA_FILE)

    # check if thumbnail exists:

    if not os.path.exists(export_data["thumbnail_path"]):
        props.upload_state = 'Thumbnail not found'
        props.uploading = False
        return {'CANCELLED'}

    # first upload metadata to server, so it can be saved inside the current file
    url = paths.get_api_url() + 'assets/'

    headers = utils.get_headers(upload_data['token'])

    # upload_data['license'] = 'ovejajojo'
    json_metadata = upload_data  # json.dumps(upload_data, ensure_ascii=False).encode('utf8')
    global reports
    if props.asset_base_id == '':
        try:
            r = requests.post(url, json=json_metadata, headers=headers, verify=True)  # files = files,
            props.upload_state = 'uploaded metadata'
            utils.p(r.text)
        except requests.exceptions.RequestException as e:
            print(e)
            props.upload_state = str(e)
            props.uploading = False
            return {'CANCELLED'}

    else:
        url += props.id + '/'
        try:
            if not metadata_only:
                json_metadata["verificationStatus"] = "uploading"
            r = requests.put(url, json=json_metadata, headers=headers, verify=True)  # files = files,
            props.upload_state = 'uploaded metadata'
            # parse the request
            # print('uploaded metadata')
            # print(r.text)
        except requests.exceptions.RequestException as e:
            print(e)
            props.upload_state = str(e)
            props.uploading = False
            return {'CANCELLED'}

    # props.upload_state = 'step 1'
    if metadata_only:
        props.uploading = False

        return {'FINISHED'}
    try:
        rj = r.json()
        if props.asset_base_id == '':
            props.asset_base_id = rj['assetBaseId']
            props.id = rj['id']

        upload_data['assetBaseId'] = props.asset_base_id
        upload_data['id'] = props.id

        bpy.ops.wm.save_mainfile()
        props.uploading = True
        # save a copy of actual scene but don't interfere with the users models
        bpy.ops.wm.save_as_mainfile(filepath=source_filepath, compress=False, copy=True)

        with open(datafile, 'w') as s:
            json.dump(data, s)

        proc = subprocess.Popen([
            binary_path,
            "--background",
            "-noaudio",
            clean_file_path,
            "--python", os.path.join(script_path, "upload_bg.py"),
            "--", datafile  # ,filepath, tempdir
        ], bufsize=5000, stdout=subprocess.PIPE, stdin=subprocess.PIPE)

        bg_blender.add_bg_process(eval_path_computing=eval_path_computing, eval_path_state=eval_path_state,
                                  eval_path=eval_path, process_type='UPLOAD', process=proc, location=location)

    except Exception as e:
        props.upload_state = str(e)
        props.uploading = False
        return {'CANCELLED'}

    return {'FINISHED'}
Example #23
                "index": 0,
                "file_path": export_data["thumbnail_path"]
            })
        if 'MAINFILE' in upload_set:
            files.append({"type": "blend", "index": 0, "file_path": fpath})

        bg_blender.progress('uploading')

        uploaded = upload_files(upload_data, files)

        if uploaded:
            # mark on server as uploaded
            if 'MAINFILE' in upload_set:
                confirm_data = {"verificationStatus": "uploaded"}

                url = paths.get_api_url() + 'assets/'

                headers = utils.get_headers(upload_data['token'])

                url += upload_data["id"] + '/'

                r = rerequests.patch(url,
                                     json=confirm_data,
                                     headers=headers,
                                     verify=True)  # files = files,

            bg_blender.progress('upload finished successfully')
        else:
            bg_blender.progress('upload failed.')

    except Exception as e:
Example #24
    def run(self):
        maxthreads = 50
        query = self.query
        params = self.params
        global reports

        t = time.time()
        mt('search thread started')
        tempdir = paths.get_temp_dir('%s_search' % query['asset_type'])
        json_filepath = os.path.join(
            tempdir, '%s_searchresult.json' % query['asset_type'])

        headers = utils.get_headers(params['api_key'])

        rdata = {}
        rdata['results'] = []

        if params['get_next']:
            with open(json_filepath, 'r') as infile:
                try:
                    origdata = json.load(infile)
                    urlquery = origdata['next']
                    # rparameters = {}
                    if urlquery is None:
                        return
                except:
                    # in case no search results found on drive we don't do next page loading.
                    params['get_next'] = False
        if not params['get_next']:
            url = paths.get_api_url() + 'search/'

            urlquery = url

            # rparameters = query
            urlquery = self.query_to_url()
        try:
            utils.p(urlquery)
            r = rerequests.get(urlquery,
                               headers=headers)  # , params = rparameters)
            # print(r.url)
            reports = ''
            # utils.p(r.text)
        except requests.exceptions.RequestException as e:
            print(e)
            reports = e
            # props.report = e
            return
        mt('response is back ')
        try:
            rdata = r.json()
        except Exception as inst:
            reports = r.text
            print(inst)

        mt('data parsed ')

        # print('number of results: ', len(rdata.get('results', [])))
        if self.stopped():
            utils.p('stopping search : ' + str(query))
            return

        mt('search finished')
        i = 0

        thumb_small_urls = []
        thumb_small_filepaths = []
        thumb_full_urls = []
        thumb_full_filepaths = []
        # END OF PARSING
        for d in rdata.get('results', []):

            get_author(d)

            for f in d['files']:
                # TODO move validation of published assets to server, too many checks here.
                if f['fileType'] == 'thumbnail' and f[
                        'fileThumbnail'] is not None and f[
                            'fileThumbnailLarge'] is not None:

                    thumb_small_urls.append(f['fileThumbnail'])
                    thumb_full_urls.append(f['fileThumbnailLarge'])

                    imgname = paths.extract_filename_from_url(
                        f['fileThumbnail'])
                    imgpath = os.path.join(tempdir, imgname)
                    thumb_small_filepaths.append(imgpath)

                    imgname = paths.extract_filename_from_url(
                        f['fileThumbnailLarge'])
                    imgpath = os.path.join(tempdir, imgname)
                    thumb_full_filepaths.append(imgpath)

        sml_thbs = zip(thumb_small_filepaths, thumb_small_urls)
        full_thbs = zip(thumb_full_filepaths, thumb_full_urls)

        # we save here because a missing thumbnail check is in the previous loop
        # we can also prepend previous results. These have downloaded thumbnails already...
        if params['get_next']:
            rdata['results'][0:0] = origdata['results']

        with open(json_filepath, 'w') as outfile:
            json.dump(rdata, outfile)

        killthreads_sml = []
        for k in thumb_sml_download_threads.keys():
            if k not in thumb_small_filepaths:
                killthreads_sml.append(k)  # do actual killing here?

        killthreads_full = []
        for k in thumb_full_download_threads.keys():
            if k not in thumb_full_filepaths:
                killthreads_full.append(k)  # do actual killing here?
        # TODO do the killing/ stopping here! remember threads might have finished inbetween!

        if self.stopped():
            utils.p('stopping search : ' + str(query))
            return

        # this loop handles downloading of small thumbnails
        for imgpath, url in sml_thbs:
            if imgpath not in thumb_sml_download_threads and not os.path.exists(
                    imgpath):
                thread = ThumbDownloader(url, imgpath)
                # thread = threading.Thread(target=download_thumbnail, args=([url, imgpath]),
                #                           daemon=True)
                thread.start()
                thumb_sml_download_threads[imgpath] = thread
                # threads.append(thread)

                if len(thumb_sml_download_threads) > maxthreads:
                    while len(thumb_sml_download_threads) > maxthreads:
                        threads_copy = thumb_sml_download_threads.copy(
                        )  # because for loop can erase some of the items.
                        for tk, thread in threads_copy.items():
                            if not thread.is_alive():
                                thread.join()
                                # utils.p(x)
                                del (thumb_sml_download_threads[tk])
                                # utils.p('fetched thumbnail ', i)
                                i += 1
        if self.stopped():
            utils.p('stopping search : ' + str(query))
            return
        idx = 0
        while len(thumb_sml_download_threads) > 0:
            threads_copy = thumb_sml_download_threads.copy(
            )  # because for loop can erase some of the items.
            for tk, thread in threads_copy.items():
                if not thread.is_alive():
                    thread.join()
                    del (thumb_sml_download_threads[tk])
                    i += 1

        if self.stopped():
            utils.p('stopping search : ' + str(query))
            return

        # start downloading full thumbs in the end
        for imgpath, url in full_thbs:
            if imgpath not in thumb_full_download_threads and not os.path.exists(
                    imgpath):
                thread = ThumbDownloader(url, imgpath)
                # thread = threading.Thread(target=download_thumbnail, args=([url, imgpath]),
                #                           daemon=True)
                thread.start()
                thumb_full_download_threads[imgpath] = thread
        mt('thumbnails finished')
Example #25
def download_blenderkit_material(asset_ref):
    from blenderkit import paths, append_link, utils, version_checker, rerequests
    import blenderkit.download
    import blenderkit.search
    import bpy
    import os
    import requests

    def create_asset_data(rdata, asset_type):
        for r in rdata['results']:
            if r['assetType'] == asset_type and len(r['files']) > 0:
                furl = None
                tname = None
                allthumbs = []
                durl, tname = None, None
                for f in r['files']:
                    if f['fileType'] == 'thumbnail':
                        tname = paths.extract_filename_from_url(
                            f['fileThumbnailLarge'])
                        small_tname = paths.extract_filename_from_url(
                            f['fileThumbnail'])
                        allthumbs.append(
                            tname)  # TODO just first thumb is used now.
                    tdict = {}
                    for i, t in enumerate(allthumbs):
                        tdict['thumbnail_%i' % i] = t
                    if f['fileType'] == 'blend':
                        durl = f['downloadUrl'].split('?')[0]
                        # fname = paths.extract_filename_from_url(f['filePath'])
                if durl and tname:
                    tooltip = blenderkit.search.generate_tooltip(r)
                    r['author']['id'] = str(r['author']['id'])
                    asset_data = {
                        'thumbnail': tname,
                        'thumbnail_small': small_tname,
                        # 'thumbnails':allthumbs,
                        'download_url': durl,
                        'id': r['id'],
                        'asset_base_id': r['assetBaseId'],
                        'name': r['name'],
                        'asset_type': r['assetType'],
                        'tooltip': tooltip,
                        'tags': r['tags'],
                        'can_download': r.get('canDownload', True),
                        'verification_status': r['verificationStatus'],
                        'author_id': r['author']['id'],
                        # 'author': r['author']['firstName'] + ' ' + r['author']['lastName']
                        # 'description': r['description'],
                    }
                    asset_data['downloaded'] = 0
                    # parse extra params needed for blender here
                    params = utils.params_to_dict(r['parameters'])
                    if asset_type == 'model':
                        if params.get('boundBoxMinX') != None:
                            bbox = {
                                'bbox_min': (float(params['boundBoxMinX']),
                                             float(params['boundBoxMinY']),
                                             float(params['boundBoxMinZ'])),
                                'bbox_max': (float(params['boundBoxMaxX']),
                                             float(params['boundBoxMaxY']),
                                             float(params['boundBoxMaxZ']))
                            }
                        else:
                            bbox = {
                                'bbox_min': (-.5, -.5, 0),
                                'bbox_max': (.5, .5, 1)
                            }
                        asset_data.update(bbox)
                    if asset_type == 'material':
                        asset_data['texture_size_meters'] = params.get(
                            'textureSizeMeters', 1.0)
                    asset_data.update(tdict)
                    r.update(asset_data)

    # main
    asset_base_id_str, asset_type_str = asset_ref.split()
    asset_type = asset_type_str.split(':')[1]
    scene_id = blenderkit.download.get_scene_id()
    reqstr = '?query=%s+%s+order:_score' % (asset_base_id_str, asset_type_str)
    reqstr += '&addon_version=%s' % version_checker.get_addon_version()
    reqstr += '&scene_uuid=%s' % scene_id
    url = paths.get_api_url() + 'search/' + reqstr
    api_key = bpy.context.preferences.addons['blenderkit'].preferences.api_key
    headers = utils.get_headers(api_key)
    r = rerequests.get(url, headers=headers)
    rdata = r.json()
    create_asset_data(rdata, asset_type)
    asset_data = rdata['results'][0]
    has_url = blenderkit.download.get_download_url(asset_data, scene_id,
                                                   api_key)
    file_names = paths.get_download_filenames(asset_data)
    file_name = file_names[0]
    if not os.path.exists(file_name):
        with open(file_name, "wb") as f:
            print("Downloading %s" % file_name)
            response = requests.get(asset_data['url'], stream=True)
            total_length = response.headers.get('Content-Length')
            if total_length is None:  # no content length header
                f.write(response.content)
            else:
                dl = 0
                for data in response.iter_content(chunk_size=4096):
                    dl += len(data)
                    f.write(data)
    material = append_link.append_material(file_names[-1])
    return material
Example #26
    def run(self):
        # utils.pprint(upload_data)
        self.upload_data['parameters'] = utils.dict_to_params(
            self.upload_data['parameters'])  # weird array conversion only for upload, not for tooltips.

        script_path = os.path.dirname(os.path.realpath(__file__))

        # first upload metadata to server, so it can be saved inside the current file
        url = paths.get_api_url() + 'assets/'

        headers = utils.get_headers(self.upload_data['token'])

        # self.upload_data['license'] = 'ovejajojo'
        json_metadata = self.upload_data  # json.dumps(self.upload_data, ensure_ascii=False).encode('utf8')

        # tasks_queue.add_task((ui.add_report, ('Posting metadata',)))
        self.send_message('Posting metadata')
        if self.export_data['assetBaseId'] == '':
            try:
                r = rerequests.post(url, json=json_metadata, headers=headers, verify=True,
                                    immediate=True)  # files = files,

                # tasks_queue.add_task((ui.add_report, ('uploaded metadata',)))
                utils.p(r.text)
                self.send_message('uploaded metadata')

            except requests.exceptions.RequestException as e:
                print(e)
                self.end_upload(e)
                return {'CANCELLED'}

        else:
            url += self.export_data['id'] + '/'
            try:
                if 'MAINFILE' in self.upload_set:
                    json_metadata["verificationStatus"] = "uploading"
                r = rerequests.patch(url, json=json_metadata, headers=headers, verify=True,
                                     immediate=True)  # files = files,
                self.send_message('uploaded metadata')

                # tasks_queue.add_task((ui.add_report, ('uploaded metadata',)))
                # parse the request
                # print('uploaded metadata')
                print(r.text)
            except requests.exceptions.RequestException as e:
                print(e)
                self.end_upload(e)
                return {'CANCELLED'}

        if self.stopped():
            self.end_upload('Upload cancelled by user')
            return
        # props.upload_state = 'step 1'
        if self.upload_set == ['METADATA']:
            self.end_upload('Metadata posted successfully')
            return {'FINISHED'}
        try:
            rj = r.json()
            utils.pprint(rj)
            # if r.status_code not in (200, 201):
            #     if r.status_code == 401:
            #         ###ui.add_report(r.detail, 5, colors.RED)
            #     return {'CANCELLED'}
            # if props.asset_base_id == '':
            #     props.asset_base_id = rj['assetBaseId']
            #     props.id = rj['id']
            if self.export_data['assetBaseId'] == '':
                self.export_data['assetBaseId'] = rj['assetBaseId']
                self.export_data['id'] = rj['id']
                # here we need to send asset ID's back into UI to be written in asset data.
                estring = f"{self.export_data['eval_path']}.blenderkit.asset_base_id = '{rj['assetBaseId']}'"
                tasks_queue.add_task((exec, (estring,)))
                estring = f"{self.export_data['eval_path']}.blenderkit.id = '{rj['id']}'"
                tasks_queue.add_task((exec, (estring,)))
                # after that, the user's file needs to be saved to keep the new IDs

            self.upload_data['assetBaseId'] = self.export_data['assetBaseId']
            self.upload_data['id'] = self.export_data['id']

            # props.uploading = True

            if 'MAINFILE' in self.upload_set:
                if self.upload_data['assetType'] == 'hdr':
                    fpath = self.export_data['hdr_filepath']
                else:
                    fpath = os.path.join(self.export_data['temp_dir'], self.upload_data['assetBaseId'] + '.blend')

                    clean_file_path = paths.get_clean_filepath()

                    data = {
                        'export_data': self.export_data,
                        'upload_data': self.upload_data,
                        'debug_value': self.export_data['debug_value'],
                        'upload_set': self.upload_set,
                    }
                    datafile = os.path.join(self.export_data['temp_dir'], BLENDERKIT_EXPORT_DATA_FILE)

                    with open(datafile, 'w') as s:
                        json.dump(data, s)

                    # non waiting method - not useful here..
                    # proc = subprocess.Popen([
                    #     binary_path,
                    #     "--background",
                    #     "-noaudio",
                    #     clean_file_path,
                    #     "--python", os.path.join(script_path, "upload_bg.py"),
                    #     "--", datafile  # ,filepath, tempdir
                    # ], bufsize=5000, stdout=subprocess.PIPE, stdin=subprocess.PIPE)
                    # tasks_queue.add_task((ui.add_report, ('preparing scene - running blender instance',)))
                    self.send_message('preparing scene - running blender instance')

                    proc = subprocess.run([
                        self.export_data['binary_path'],
                        "--background",
                        "-noaudio",
                        clean_file_path,
                        "--python", os.path.join(script_path, "upload_bg.py"),
                        "--", datafile
                    ], bufsize=1, stdout=sys.stdout, stdin=subprocess.PIPE, creationflags=utils.get_process_flags())

            if self.stopped():
                self.end_upload('Upload stopped by user')
                return

            files = []
            if 'THUMBNAIL' in self.upload_set:
                files.append({
                    "type": "thumbnail",
                    "index": 0,
                    "file_path": self.export_data["thumbnail_path"]
                })
            if 'MAINFILE' in self.upload_set:
                files.append({
                    "type": "blend",
                    "index": 0,
                    "file_path": fpath
                })

            self.send_message('Uploading files')

            uploaded = upload_bg.upload_files(self.upload_data, files)

            if uploaded:
                # mark on server as uploaded
                if 'MAINFILE' in self.upload_set:
                    confirm_data = {
                        "verificationStatus": "uploaded"
                    }

                    url = paths.get_api_url() + 'assets/'

                    headers = utils.get_headers(self.upload_data['token'])

                    url += self.upload_data["id"] + '/'

                    r = rerequests.patch(url, json=confirm_data, headers=headers, verify=True)  # files = files,

                self.end_upload('Upload finished successfully')
            else:
                self.end_upload('Upload failed')
        except Exception as e:
            self.end_upload(e)
            print(e)
            return {'CANCELLED'}
Example #27
    def run(self):
        maxthreads = 300
        maximages = 50
        query = self.query
        params = self.params
        global reports

        t = time.time()
        mt('search thread started')
        tempdir = paths.get_temp_dir('%s_search' % query['asset_type'])
        json_filepath = os.path.join(
            tempdir, '%s_searchresult.json' % query['asset_type'])

        headers = utils.get_headers(params['api_key'])

        rdata = {}
        rdata['results'] = []

        if params['get_next']:
            with open(json_filepath, 'r') as infile:
                try:
                    origdata = json.load(infile)
                    urlquery = origdata['next']
                    if urlquery is None:
                        return
                except:
                    # in case no search results found on drive we don't do next page loading.
                    params['get_next'] = False
        if not params['get_next']:
            # build a new request
            url = paths.get_api_url() + 'search/'

            # build request manually
            # TODO use real queries
            requeststring = '?query=' + query['keywords'].lower() + '+'
            #
            for i, q in enumerate(query):
                requeststring += q + ':' + str(query[q]).lower()
                if i < len(query) - 1:
                    requeststring += '+'

            requeststring += '&addon_version=%s' % params['addon_version']
            if params.get('scene_uuid') is not None:
                requeststring += '&scene_uuid=%s' % params['scene_uuid']

            urlquery = url + requeststring

        try:
            utils.p(urlquery)
            r = requests.get(urlquery, headers=headers)
            reports = ''
            # utils.p(r.text)
        except requests.exceptions.RequestException as e:
            print(e)
            reports = e
            # props.report = e
            return
        mt('response is back ')
        try:
            rdata = r.json()
        except Exception as inst:
            reports = r.text
            print(inst)

        mt('data parsed ')

        # filter results here:
        # todo remove this in future
        nresults = []
        for d in rdata.get('results', []):
            # TODO this code is for filtering brush types, should vanish after we implement filter in Elastic
            mode = None
            if query['asset_type'] == 'brush':
                for p in d['parameters']:
                    if p['parameterType'] == 'mode':
                        mode = p['value']
            if query['asset_type'] != 'brush' or (
                    query.get('brushType') is not None
                    and query['brushType'] == mode):
                nresults.append(d)
        rdata['results'] = nresults

        # print('number of results: ', len(rdata.get('results', [])))
        if self.stopped():
            utils.p('stopping search : ' + query['keywords'])
            return

        mt('search finished')
        i = 0

        thumb_small_urls = []
        thumb_small_filepaths = []
        thumb_full_urls = []
        thumb_full_filepaths = []
        # END OF PARSING
        getting_authors = {}
        for d in rdata.get('results', []):
            if getting_authors.get(d['author']['id']) is None:
                get_author(d)
                getting_authors[d['author']['id']] = True

            for f in d['files']:
                # TODO move validation of published assets to server, too many checks here.
                if f['fileType'] == 'thumbnail' and f[
                        'fileThumbnail'] is not None and f[
                            'fileThumbnailLarge'] is not None:

                    thumb_small_urls.append(f['fileThumbnail'])
                    thumb_full_urls.append(f['fileThumbnailLarge'])

                    imgname = paths.extract_filename_from_url(
                        f['fileThumbnail'])
                    imgpath = os.path.join(tempdir, imgname)
                    thumb_small_filepaths.append(imgpath)

                    imgname = paths.extract_filename_from_url(
                        f['fileThumbnailLarge'])
                    imgpath = os.path.join(tempdir, imgname)
                    thumb_full_filepaths.append(imgpath)

        sml_thbs = zip(thumb_small_filepaths, thumb_small_urls)
        full_thbs = zip(thumb_full_filepaths, thumb_full_urls)

        # we save here because a missing thumbnail check is in the previous loop
        # we can also prepend previous results. These already have their thumbnails downloaded...
        if params['get_next']:
            rdata['results'][0:0] = origdata['results']

        with open(json_filepath, 'w') as outfile:
            json.dump(rdata, outfile)

        killthreads_sml = []
        for k in thumb_sml_download_threads.keys():
            if k not in thumb_small_filepaths:
                killthreads_sml.append(k)  # do actual killing here?

        killthreads_full = []
        for k in thumb_full_download_threads.keys():
            if k not in thumb_full_filepaths:
                killthreads_full.append(k)  # do actual killing here?
        # TODO do the killing/ stopping here! remember threads might have finished inbetween!

        if self.stopped():
            utils.p('stopping search : ' + query['keywords'])
            return

        # this loop handles downloading of small thumbnails
        for imgpath, url in sml_thbs:
            if imgpath not in thumb_sml_download_threads and not os.path.exists(
                    imgpath):
                thread = ThumbDownloader(url, imgpath)
                # thread = threading.Thread(target=download_thumbnail, args=([url, imgpath]),
                #                           daemon=True)
                thread.start()
                thumb_sml_download_threads[imgpath] = thread
                # threads.append(thread)

                if len(thumb_sml_download_threads) > maxthreads:
                    while len(thumb_sml_download_threads) > maxthreads:
                        threads_copy = thumb_sml_download_threads.copy(
                        )  # because for loop can erase some of the items.
                        for tk, thread in threads_copy.items():
                            if not thread.is_alive():
                                thread.join()
                                # utils.p(x)
                                del (thumb_sml_download_threads[tk])
                                # utils.p('fetched thumbnail ', i)
                                i += 1
        if self.stopped():
            utils.p('stopping search : ' + query['keywords'])
            return
        idx = 0
        while len(thumb_sml_download_threads) > 0:
            threads_copy = thumb_sml_download_threads.copy(
            )  # because for loop can erase some of the items.
            for tk, thread in threads_copy.items():
                if not thread.is_alive():
                    thread.join()
                    del (thumb_sml_download_threads[tk])
                    i += 1

        if self.stopped():
            utils.p('stopping search : ' + query['keywords'])
            return

        # start downloading full thumbs in the end
        for imgpath, url in full_thbs:
            if imgpath not in thumb_full_download_threads and not os.path.exists(
                    imgpath):
                thread = ThumbDownloader(url, imgpath)
                # thread = threading.Thread(target=download_thumbnail, args=([url, imgpath]),
                #                           daemon=True)
                thread.start()
                thumb_full_download_threads[imgpath] = thread
        mt('thumbnails finished')
Example #28
def upload_file(upload_data, f):
    headers = utils.get_headers(upload_data['token'])
    version_id = upload_data['id']

    message = f"uploading {f['type']} {os.path.basename(f['file_path'])}"
    tasks_queue.add_task((ui.add_report, (message, )))

    upload_info = {
        'assetId': version_id,
        'fileType': f['type'],
        'fileIndex': f['index'],
        'originalFilename': os.path.basename(f['file_path'])
    }
    upload_create_url = paths.get_api_url() + 'uploads/'
    upload = rerequests.post(upload_create_url,
                             json=upload_info,
                             headers=headers,
                             verify=True)
    upload = upload.json()
    #
    chunk_size = 1024 * 1024 * 2
    # utils.pprint(upload)
    # file gets uploaded here:
    uploaded = False
    # s3 upload is now the only option
    for a in range(0, 5):
        if not uploaded:
            try:
                upload_response = requests.put(upload['s3UploadUrl'],
                                               data=upload_in_chunks(
                                                   f['file_path'], chunk_size,
                                                   f['type']),
                                               stream=True,
                                               verify=True)

                if 250 > upload_response.status_code > 199:
                    uploaded = True
                    upload_done_url = paths.get_api_url(
                    ) + 'uploads_s3/' + upload['id'] + '/upload-file/'
                    upload_response = rerequests.post(upload_done_url,
                                                      headers=headers,
                                                      verify=True)
                    # print(upload_response)
                    # print(upload_response.text)
                    tasks_queue.add_task((ui.add_report, (
                        f"Finished file upload: {os.path.basename(f['file_path'])}",
                    )))
                    return True
                else:
                    print(upload_response.text)
                    message = f"Upload failed, retry. File : {f['type']} {os.path.basename(f['file_path'])}"
                    tasks_queue.add_task((ui.add_report, (message, )))

            except Exception as e:
                print(e)
                message = f"Upload failed, retry. File : {f['type']} {os.path.basename(f['file_path'])}"
                tasks_queue.add_task((ui.add_report, (message, )))
                time.sleep(1)

    return False
Example #29
def report_usages():
    '''report the usage of assets to the server.'''
    mt = time.time()
    user_preferences = bpy.context.preferences.addons['blenderkit'].preferences
    api_key = user_preferences.api_key
    sid = get_scene_id()
    headers = utils.get_headers(api_key)
    url = paths.get_api_url() + paths.BLENDERKIT_REPORT_URL

    assets = {}
    asset_obs = []
    scene = bpy.context.scene
    asset_usages = {}

    for ob in scene.collection.objects:
        if ob.get('asset_data') != None:
            asset_obs.append(ob)

    for ob in asset_obs:
        asset_data = ob['asset_data']
        abid = asset_data['asset_base_id']

        if assets.get(abid) is None:
            asset_usages[abid] = {'count': 1}
            assets[abid] = asset_data
        else:
            asset_usages[abid]['count'] += 1

    # brushes
    for b in bpy.data.brushes:
        if b.get('asset_data') != None:
            abid = b['asset_data']['asset_base_id']
            asset_usages[abid] = {'count': 1}
            assets[abid] = b['asset_data']
    # materials
    for ob in scene.collection.objects:
        for ms in ob.material_slots:
            m = ms.material

            if m is not None and m.get('asset_data') is not None:

                abid = m['asset_data']['asset_base_id']
                if assets.get(abid) is None:
                    asset_usages[abid] = {'count': 1}
                    assets[abid] = m['asset_data']
                else:
                    asset_usages[abid]['count'] += 1

    assets_list = []
    assets_reported = scene.get('assets reported', {})

    new_assets_count = 0
    for k in asset_usages.keys():
        if k not in assets_reported.keys():
            data = asset_usages[k]
            list_item = {
                'asset': k,
                'usageCount': data['count'],
                'proximitySet': data.get('proximity', [])
            }
            assets_list.append(list_item)
            new_assets_count += 1
            assets_reported[k] = True

    scene['assets reported'] = assets_reported

    if new_assets_count == 0:
        utils.p('no new assets were added')
        return
    usage_report = {
        'scene': sid,
        'reportType': 'save',
        'assetusageSet': assets_list
    }

    au = scene.get('assets used', {})
    ad = scene.get('assets deleted', {})

    ak = assets.keys()
    for k in au.keys():
        if k not in ak:
            ad[k] = au[k]
        else:
            if k in ad:
                ad.pop(k)

    scene['assets used'] = scene.get('assets used', {})
    for k in ak:  # rewrite assets used.
        scene['assets used'][k] = assets[k]

    # check ratings here too:
    scene['assets rated'] = scene.get('assets rated', {})
    for k in assets.keys():
        scene['assets rated'][k] = scene['assets rated'].get(k, False)
    thread = threading.Thread(target=utils.requests_post_thread,
                              args=(url, usage_report, headers))
    thread.start()
    mt = time.time() - mt
    print('report generation:                ', mt)
Example #30
def upload_files(filepath, upload_data, files):
    headers = utils.get_headers(upload_data['token'])

    version_id = upload_data['id']
    uploaded_all = True
    for f in files:
        bg_blender.progress('uploading %s' % f['type'])
        upload_info = {
            'assetId': version_id,
            'fileType': f['type'],
            'fileIndex': f['index'],
            'originalFilename': os.path.basename(f['file_path'])
        }
        upload_create_url = paths.get_api_url() + 'uploads/'
        upload = requests.post(upload_create_url,
                               json=upload_info,
                               headers=headers,
                               verify=True)
        upload = upload.json()

        # upheaders = {
        #     "accept": "application/json",
        #     "Authorization": "Bearer %s" % upload_data['token'],
        #     "Content-Type": "multipart/form-data",
        #     "Content-Disposition": 'form-data; name="file"; filename=%s' % f['file_path']
        #
        # }
        chunk_size = 1024 * 256

        # file gets uploaded here:
        uploaded = False
        # s3 upload is now the only option
        for a in range(0, 5):
            if not uploaded:
                try:
                    upload_response = requests.put(upload['s3UploadUrl'],
                                                   data=upload_in_chunks(
                                                       f['file_path'],
                                                       chunk_size, f['type']),
                                                   stream=True,
                                                   verify=True)

                    if upload_response.status_code == 200:
                        uploaded = True
                    else:
                        bg_blender.progress(f'Upload failed, retry. {a}')
                except Exception as e:
                    bg_blender.progress('Upload %s failed, retrying' %
                                        f['type'])
                    time.sleep(1)

                if uploaded:
                    # confirm single file upload to bkit server
                    upload_done_url = paths.get_api_url(
                    ) + 'uploads_s3/' + upload['id'] + '/upload-file/'
                    upload_response = requests.post(upload_done_url,
                                                    headers=headers,
                                                    verify=True)
        if not uploaded:
            uploaded_all = False
        bg_blender.progress('finished uploading')

    return uploaded_all