Example no. 1
    def getSubjectExternalRecords(self, subject):
        """Return all external records for a given subject on this Protocol."""

        er_rh = ServiceClient.get_rh_for(
            record_type=ServiceClient.EXTERNAL_RECORD)

        erl_rh = ServiceClient.get_rh_for(
            record_type=ServiceClient.EXTERNAL_RECORD_LABEL)

        labels = cache.get('ehb_labels')

        if not labels:

            labels = erl_rh.query()
            cache.set('ehb_labels', labels)

            if hasattr(cache, 'persist'):
                cache.persist('ehb_labels')

        try:
            pds_records = er_rh.get(external_system_url=self.data_source.url,
                                    path=self.path,
                                    subject_id=subject['id'])

        except PageNotFound:
            return []

        return RecordUtils.serialize_external_records(self, pds_records,
                                                      labels)
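
Note on the pattern above: persist() is a django-redis extension, not part of Django's base cache API, which is why the call is guarded with hasattr. A minimal sketch of that guard as a standalone helper (the helper name is mine, not from the example):

    from django.core.cache import cache

    def persist_if_supported(key):
        """Remove the key's expiration when the backend supports it.

        persist() is provided by django-redis; plain Django backends
        such as LocMemCache do not expose it.
        """
        if hasattr(cache, 'persist'):
            cache.persist(key)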
Example no. 2
    def test_persist(self):
        """Test the persist cache operation"""
        cache.set("foo", "bar", timeout=20)
        cache.persist("foo")

        ttl = cache.ttl("foo")
        self.assertIsNone(ttl)
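
The assertion relies on django-redis TTL semantics: ttl() reports the seconds remaining for a key with an expiration, None for a key that exists without one, and 0 for a missing key. A small sketch of that contract (key names are illustrative):

    cache.set("k", "v", timeout=20)
    assert 0 < cache.ttl("k") <= 20   # expiring key: seconds remaining
    cache.persist("k")
    assert cache.ttl("k") is None     # persisted key: no expiration
    cache.delete("k")
    assert cache.ttl("k") == 0        # missing key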
Example no. 3
 def post(self, req, app_uuid):
     form = RegionForm(req.POST)
     if form.is_valid():
         key = utils.REDIS_REGION % (req.user.uuid, app_uuid, utils.UUID())
         cache.set(key, json.dumps(form.cleaned_data))
         cache.persist(key)
         return JsonResponse({"success": True, "msg": u"区域创建成功成功!"})
     return JsonResponse({"success": False, "msg": form.errors})
Example no. 4
    def handle(self, *args, **options):
        erl_rh = ServiceClient.get_rh_for(
            record_type=ServiceClient.EXTERNAL_RECORD_LABEL)
        labels = erl_rh.query()
        cache.set('ehb_labels', labels)
        if hasattr(cache, 'persist'):
            cache.persist('ehb_labels')

        print("caching of ehb_labels complete")
Example no. 5
        def get_and_cache_completion_codes(self,
                                           cache_data={},
                                           subject_records={}):
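            # NB: the mutable default arguments above are shared across calls
            # that omit them; callers are expected to pass fresh dicts when
            # per-call state matters.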
            completion_codes = {}
            if driver.form_names:  # non-longitudinal studies
                completion_fields = construct_field_names(driver.form_names)
                record_set = driver.get(
                    _format=driver.FORMAT_JSON,
                    records=[record_name],
                    rawResponse=True,
                    fields=completion_fields).read().strip()
                record_set = driver.raw_to_json(record_set)
                # iterate through the record set to find the completion field
                for r in record_set:
                    completion_codes = assign_form_statuses(
                        driver, r, completion_fields)

            else:  # longitudinal studies
                # must specify field study_id for redcap api to return study_id and event name
                field_names = [driver.record_id_field_name]
                completion_fields = construct_field_names(
                    list(driver.form_data.keys()))
                field_names += completion_fields
                temp = driver.get(_format=driver.FORMAT_JSON,
                                  rawResponse=True,
                                  records=[record_name],
                                  fields=field_names).read().strip()
                record_set = driver.raw_to_json(temp)
                record_set = json.loads(
                    json.dumps(record_set)
                )  # have to reload json in order for the field redcap_event_name to be read

                first_event = True
                for r in record_set:  # iterate through the record set to find the completion field
                    redcap_eventname = r['redcap_event_name']
                    if redcap_eventname in driver.unique_event_names:
                        event_index = driver.unique_event_names.index(
                            redcap_eventname)
                        if first_event:
                            completion_codes = assign_form_statuses(
                                driver, r, completion_fields, event_index)
                            first_event = False
                        else:
                            # merge the 2 dictionaries
                            completion_codes = {
                                **completion_codes,
                                **assign_form_statuses(driver, r, completion_fields, event_index)
                            }

            subject_records[
                record_id] = completion_codes  # {recordid: {form_spec: completion_code, form_spec: completion_code}}
            cache_data[
                subject_id] = subject_records  # {subjectid: {recordid: {form_spec: completion_code, form_spec: completion_code}}}
            cache.set(cache_key, cache_data,
                      timeout=172800)  # cache key expires after 2 days
            cache.persist(cache_key)
            return completion_codes
Example no. 6
 def handle(self, *args, **options):
     m_mode = cache.get('maintenance_mode')
     if m_mode:
         cache.delete('maintenance_mode')
         print('Maintenance Mode Off')
     else:
         cache.set('maintenance_mode', 1)
         cache.persist('maintenance_mode')
         print('Maintenance Mode On')
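
Since this command wants the flag to live until it is explicitly deleted, the set-then-persist pair can also be collapsed into one call under django-redis, where a timeout of None stores the key without an expiration:

    cache.set('maintenance_mode', 1, timeout=None)  # equivalent: never expires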
Example no. 7
    def write(self, key, value):
        try:
            cache.set(key, value)
            cache.persist(key)
            print("key is:", key)
            print("value is:", value)

        except Exception as e:
            print(e)
Example no. 8
def fitting(request):
    x = demjson.decode(request.body)
    tt_mod, obs_mod = [float(s) for s in request.session['tt']
                       ], [float(l) for l in request.session['obs']]
    fits, instrument, my_phases, state = x['myReducer5'][0], x['myReducer3'][
        0], x['myReducer4']['Phases'], str(x['myReducer4'])
    my_cells, real_sgs, atomLists = makeCellModel(my_phases)
    phases = makePhases(fits)
    if len(fits) != 0:
        backg = H.LinSpline(None)
        u, v, w, wavelength = float(instrument['u']), float(
            instrument['v']), float(instrument['w']), float(
                instrument['wavelength'])
        try:
            steps = 'steps = ' + str(fits['steps'])
            burn = 'burn = ' + str(fits['burn'])
            num_burn = int(fits['burn'])
            num_steps = int(fits['steps'])
        except:
            burn = 'burn = 0'
            num_burn = 0
            steps = 'steps = 5'
            num_steps = 5
        u_pm, v_pm, w_pm, zero_pm, eta_pm, scale_pm = tryParams(phases)
        cell_hkl = makeModelCell(my_cells[0], real_sgs, phases)
        m = makeModel(tt_mod, obs_mod, backg, u, v, w, wavelength, real_sgs,
                      cell_hkl, atomLists, instrument, u_pm, v_pm, w_pm,
                      zero_pm, eta_pm, scale_pm, phases)
        key = hashlib.sha1(str(datetime.datetime.now()).encode()).hexdigest()[:5]
        if not os.path.exists('/tmp/bland/store_' + key):
            os.makedirs('/tmp/bland/store_' + key)
        with open('/tmp/bland/store_' + key + '/out.txt', 'w') as fp:
            fp.write('Starting...')
        thread = threading.Thread(target=fitter,
                                  args=(
                                      m,
                                      steps,
                                      num_steps,
                                      burn,
                                      num_burn,
                                      key,
                                  ))
        thread.start()
        _key, _value = 'bland.views.fitting', (state +
                                               str(datetime.datetime.now()))
        token = salted_hmac(_key, _value).hexdigest()[:10]
        cache.set(token, key)
        cache.persist(token)
        while (1):
            with open('/tmp/bland/store_' + key + '/out.txt', 'r') as f:
                first_line = f.readline()
            if (first_line != 'Starting...'):
                break
            else:
                time.sleep(5)
        return HttpResponse(token)
Example no. 9
def getUserTwittsCount(user):
    key = str(user.id) + "_twitts_count"
    if cache.has_key(key):
        print("returing cache")
        return cache.get(key)
    else:
        print("creating cache")
        counts = TwitterPost.objects.all().filter(user=user).count()
        cache.set(key, counts, timeout=22)
        cache.persist(key)
        return counts
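
Worth noting: the timeout=22 passed to set() is cancelled immediately by the persist() call that follows, so if the count really is meant to live forever the intent reads more directly as a single call (django-redis semantics):

    cache.set(key, counts, timeout=None)  # no expiration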
Example no. 10
def search_sysuser(user_id=None):
    user = cache.get('sysuser:' + user_id)
    logger.info('Fetched backend user {} from the cache; got: {}'.format(user_id, user))

    if not user:
        logger.info('No cached copy found; fetching from the database and caching it permanently')
        user = SysUser.objects.filter(user_id=user_id).first()
        cache.set('sysuser:' + user_id, user)
        cache.persist('sysuser:' + user_id)

    return user
Example no. 11
def resume_isread(uid, resume_id):
    use_isread = cache.get('isread:' + uid + ':' + resume_id)
    if not use_isread:
        list_obj = HrUserResumeRead.objects.filter(
            uid=uid, resume_id=resume_id).count()
        if list_obj > 0:
            cache.set('isread:' + uid + ':' + resume_id, True)
            cache.persist('isread:' + uid + ':' + resume_id)
            return True
        else:
            return False
    return use_isread
Example no. 12
def safe_set_cache(key, value):
    ''' Just kind '''
    logger = logging.getLogger('django_info')
    try:
        cache.set(key, value)
        cache.persist(key)
        logger.info('[SUCCESS] Saving in redis key:{}, value:{}'.format(
            key, value))
    except Exception as e:
        logger.error('[ERROR SET variables on Redis]: {}, type:{}'.format(
            e, type(e)))
        raise e
Example no. 13
def CacheCode():
    # Fetch the code object from Redis
    code_obj = cache.get('code_obj')
    logger.debug('code_obj fetched from Redis: {}'.format(code_obj))
    if not code_obj:
        logger.info('No code object found in Redis; fetching a fresh copy from the database')
        try:
            code_obj = SysCodes.objects.filter(status='1')
            cache.set('code_obj', code_obj)
            cache.persist('code_obj')
        except Exception as e:
            logger.error('Error while caching the codes: {}'.format(e))
    return code_obj
Example no. 14
 def update_cache(self, cache_key, subject_id, record_id):
     cache_data = cache.get(cache_key)
     try:
         if cache_data:
             if subject_id in cache_data:
                 subject_data = cache_data[subject_id]
                 if record_id in subject_data:
                     del subject_data[record_id]
                 cache_data[subject_id] = subject_data
             cache.set(cache_key, cache_data,
                       timeout=172800)  # cache key expires after 2 days
             cache.persist(cache_key)
     except:
         raise Exception('Record form was not updated')
Example no. 15
def unpack(request):
    username = request.POST['name']
    conn = get_redis_connection()
    treasure = int(conn.hget(username, 'treasure'))
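    # Note: django-redis ttl() returns None for keys without an expiration and
    # 0 for missing keys, so the truthy check below skips both of those cases
    # and the time == 0 branch can never be reached from inside it.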
    if cache.ttl(username):
        time = int(cache.ttl(username))
        if 7200 <= time < 9900:
            treasure += 100
        elif 0 < time:
            treasure += 200
        elif time == 0:
            treasure += 300
            cache.persist(username)
    conn.hmset(username, {'treasure': treasure})
    return HttpResponse('Claimed successfully!')
Example no. 16
def search_criteria(request):
    if request.method == 'GET':
        codes = CacheCode().filter(status='1').all()
        logger.info('Fetching the codes mapping')
        redis_codes = cache.get('codes')

        if not redis_codes:
            logger.info('Failed to read codes from Redis; the cache needs to be populated')
        else:
            logger.info('Read codes from Redis successfully; returning the data directly')
            # Prepare the response data for the frontend
            return JsonResponse({
                'status': 200,
                'msg': 'Dictionary data found',
                'data': redis_codes
            })

        # Full list of data to return
        all_list = []
        # List of distinct dtype keys (renamed from `list` to avoid shadowing the builtin)
        dtypes = []

        logger.info('Reading the codes table from SQL and organizing it')
        # Collect the distinct dtype keys
        for k, i in enumerate(codes):
            if i.dtype not in dtypes:
                dtypes.append(i.dtype)
                all_list.append({'dtype': i.dtype, 'sublist': []})

        for k, i in enumerate(codes):
            for num, det in enumerate(all_list):
                if i.dtype == det['dtype']:
                    if not i.name == '未知':  # skip entries whose name is '未知' ("unknown")
                        det['sublist'].append({'name': i.name, 'code': i.code})

        logger.info('Finished organizing the codes table')

        if not redis_codes:
            logger.info('Codes are missing from Redis; loading the organized codes from SQL into Redis')
            cache.set('codes', all_list)

            logger.info('Marking the codes key as never expiring')
            cache.persist('codes')

        # Prepare the response data for the frontend
        response_data = {'status': 200, 'msg': 'Dictionary data found', 'data': all_list}
        logger.info('Returning the dictionary data')
        return JsonResponse(response_data)
Example no. 17
def get_permission_list(user):
    '''
    Get the permission list; it can be stored in and read back from Redis.
    '''
    if user.is_superuser:
        perms_list = ['admin']
    else:
        perms = Permission.objects.none()
        roles = user.roles.all()
        if roles:
            for i in roles:
                perms = perms | i.perms.all()
        perms_list = perms.values_list('method', flat=True)
        perms_list = list(set(perms_list))
    cache.set(user.username, perms_list)
    cache.persist(user.username)
    return perms_list
Example no. 18
def do_user_connect_document(user_id: int, document_id: int):
    key_doc = key_duser(document_id)

    document = fetch_document_with_lock(user_id, document_id)
    title = document.title
    contributors = fetch_contributors(document)

    with cache.lock('doclock' + str(document_id)):
        connected_users = cache.get_or_set(key_doc, '[]')
        cache.persist("key_doc")
        connected_users = json.loads(connected_users)
        if not str(user_id) in connected_users:
            connected_users.append(str(user_id))
        connected = json.dumps(connected_users)

        cache.set(key_doc, connected, timeout=None)

    return (get_latest_version_rid(document_id), title, contributors,
            connected_users)
Example no. 19
 def update_cache(self):
     subs = json.loads(self.cached_data)
     for sub in subs:
         if sub['id'] == int(self.subject.id):
             er = self.get_external_record(record_id=self.record_id)
             label = self.get_label(er)
             exRec = json.loads(er.json_from_identity(er))
             exRec['pds'] = self.pds.id
             exRec['label_id'] = label['id']
             if label['label'] == '':
                 label['label'] = 'Record'
             exRec['label_desc'] = label['label']
             if exRec['external_system'] == 3:
                 sub['external_ids'].append(exRec)
             sub['external_records'].append(exRec)
     cache_key = 'protocol{0}_sub_data'.format(self.pds.protocol.id)
     cache.set(cache_key, json.dumps(subs))
     cache.persist(cache_key)
     self.check_cache()
Example no. 20
 def update_cache(self):
     subs = json.loads(self.cached_data)
     for sub in subs:
         if sub['id'] == int(self.subject.id):
             er = self.get_external_record(record_id=self.record_id)
             context = {"record": er}
             label = self.get_label(context)
             exRec = json.loads(er.json_from_identity(er))
             exRec['pds'] = self.pds.id
             exRec['label_id'] = label['id']
             if label['label'] == '':
                 label['label'] = 'Record'
             exRec['label_desc'] = label['label']
             if exRec['external_system'] == 3:
                 sub['external_ids'].append(exRec)
             sub['external_records'].append(exRec)
     cache_key = 'protocol{0}_sub_data'.format(self.pds.protocol.id)
     cache.set(cache_key, json.dumps(subs))
     cache.persist(cache_key)
     self.check_cache()
Example no. 21
def userPlan(request):  
  if request.method == 'POST':                
    cache.set("userGoal", request.body)    
    cache.persist("userGoal")
    serializer = UserPlanSerializer(cache.get("userGoal"))
    
    ##save to DB from redis at a later time
    serializerSave = UserPlanSerializer(data = request.data)
    if serializerSave.is_valid():
      serializerSave.save()
      return Response(serializer.data, status=status.HTTP_201_CREATED)    
    else:
      return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

  elif request.method == 'GET':
    if cache.get("userGoal"):      
      gotUserGoal = cache.get("userGoal")            
      print(gotUserGoal, "this is not the goal we had in mind")
      serializer = UserPlanSerializer(gotUserGoal)
      return Response(gotUserGoal)
  return Response("no cache bro")      
Example no. 22
def test_DJredis(req):
    '''
    use Django-redis
    :param req:
    :return:
    '''
    if req.method == "GET":
        new_keyvalue = cache.set("name1", '1111', timeout=20)
        cache.set("astring", '1111', timeout=20)  # a timeout of None would mean the key never expires
        # time_out = cache.ttl("astring")  # read the remaining TTL
        cache.persist("astring")  # remove the expiration so the key never expires
        # time_out = cache.ttl("astring")
        cache.expire("astring", timeout=5)  # set a new expiration
        time_out = cache.ttl("astring")
        # django-redis supports glob patterns for looking up or deleting keys.
        keys = cache.keys("astring")  # returns a list of all matching keys
        delete_number = cache.delete_pattern(
            "name1")  # delete matching keys and return the count; a pattern may delete more than one
        change_key = cache.set(
            "astring", "value1",
            nx=True)  # set the key only if it does not already exist; returns whether it was created, None means the key already existed

        r = get_redis_connection(
            "default"
        )  # Use the name you have defined for Redis in settings.CACHES
        connection_pool = r.connection_pool
        print("Created connections so far: %d" %
              connection_pool._created_connections)  # number of connections created so far

        delete_number = cache.delete_pattern("*")

        # That covers all the commonly used django-redis features.
        return render_to_response('Redis.html', {
            'time_out': time_out,
            'keys': keys,
            'delete': delete_number
        })
Example no. 23
    def cache_records(self, protocol_id):
        protocol_id = protocol_id[0]
        if protocol_id == 'all':
            protocols = Protocol.objects.all()
        else:
            protocols = Protocol.objects.filter(id=int(protocol_id)).all()
        er_label_rh = ServiceClient.get_rh_for(record_type=ServiceClient.EXTERNAL_RECORD_LABEL)
        lbls = er_label_rh.query()
        print('Caching {0} protocol(s)...'.format(len(protocols)))
        for protocol in protocols:
            print('Caching {}'.format(protocol))
            subjects = protocol.getSubjects()
            organizations = protocol.organizations.all()
            if subjects:
                subs = [eHBSubjectSerializer(sub).data for sub in subjects]
            else:
                continue
            ehb_orgs = []
            # We can't rely on Ids being consistent across apps so we must
            # append the name here for display downstream.
            for o in organizations:
                ehb_orgs.append(o.getEhbServiceInstance())
            # Check if the protocol has external IDs configured. If so retrieve them
            manageExternalIDs = False

            protocoldatasources = protocol.getProtocolDataSources()

            for pds in protocoldatasources:
                if pds.driver == 3:
                    ExIdSource = pds
                    manageExternalIDs = True

            if manageExternalIDs:
                try:
                    config = json.loads(ExIdSource.driver_configuration)
                    if 'sort_on' in list(config.keys()):
                        # er_label_rh = ServiceClient.get_rh_for(record_type=ServiceClient.EXTERNAL_RECORD_LABEL)
                        # lbl = er_label_rh.get(id=config['sort_on'])
                        lbl = ''
                        addl_id_column = lbl
                except:
                    raise

            for sub in subs:
                sub['external_records'] = []
                sub['external_ids'] = []
                sub['organization'] = sub['organization_id']
                sub.pop('organization_id')
                for pds in protocoldatasources:
                    sub['external_records'].extend(self.getExternalRecords(pds, sub, lbls))
                if manageExternalIDs:
                    # Break out external ids into a separate object for ease of use
                    for record in sub['external_records']:
                        if record['external_system'] == 3:
                            sub['external_ids'].append(record)
                for ehb_org in ehb_orgs:
                    if sub['organization'] == ehb_org.id:
                        sub['organization_name'] = ehb_org.name
            cache_key = 'protocol{0}_sub_data'.format(protocol.id)
            cache.set(cache_key, json.dumps(subs))
            cache.persist(cache_key)
Example no. 24
    def cache_records(self, protocol_id):
        """Cache subject records from a given protocol locally."""

        # TODO: consider passing in a list if we ever need to cache a small
        # number of protocols at one time. look at why only using first item in
        # list.
        protocol_id = protocol_id[0]

        if protocol_id == 'all':
            # Special "all" protocol gets all protocols.
            protocols = Protocol.objects.all()
        else:
            protocols = Protocol.objects.filter(id=int(protocol_id)).all()

        # Get external record label request handler.
        er_label_rh = ServiceClient.get_rh_for(
            record_type=ServiceClient.EXTERNAL_RECORD_LABEL)

        # Retrieve the actual external record labels.
        lbls = er_label_rh.query()

        # Tell user how many protocols are being cached.
        print('Caching {0} protocol(s)...'.format(len(protocols)))

        for protocol in protocols:

            # Tell user which protocol is being cached.
            print('Caching {}'.format(protocol))

            # Get list of subjects and organizations in the protocol.
            subjects = protocol.getSubjects()
            organizations = protocol.organizations.all()

            # Serialize retrieved subjects or continue if there are none.
            if subjects:
                subs = [eHBSubjectSerializer(sub).data for sub in subjects]
            else:
                continue

            ehb_orgs = []

            # We can't rely on Ids being consistent across apps so we must
            # append the name here for display downstream.
            for o in organizations:
                ehb_orgs.append(o.getEhbServiceInstance())

            # TODO: Explain this block, down to the `for sub in subs` loop.
            # Check if the protocol has external IDs configured.
            # If so, retrieve them.
            manageExternalIDs = False
            protocoldatasources = protocol.getProtocolDataSources()

            for pds in protocoldatasources:
                if pds.driver == 3:
                    ExIdSource = pds
                    manageExternalIDs = True

            if manageExternalIDs:
                try:
                    config = json.loads(ExIdSource.driver_configuration)
                    if 'sort_on' in list(config.keys()):
                        # er_label_rh = ServiceClient.get_rh_for(
                        #     record_type=ServiceClient.EXTERNAL_RECORD_LABEL)
                        # lbl = er_label_rh.get(id=config['sort_on'])
                        lbl = ''
                        addl_id_column = lbl  # noqa
                except:
                    raise

            # Transform subjects for ease of use.
            for sub in subs:

                # Initialize new fields.
                sub['external_records'] = []
                sub['external_ids'] = []
                sub['organization'] = sub['organization_id']
                sub.pop('organization_id')

                # Add external records from all data sources.
                for pds in protocoldatasources:
                    try:
                        sub['external_records'].extend(
                            self.getExternalRecords(pds, sub, lbls))
                    except:
                        print("there was an error processing external records")
                        print("subject DB id:")
                        print(sub['id'])
                        print("protocol data source:")
                        print(pds)
                        pass

                # TODO: Explain this block.
                if manageExternalIDs:
                    # Break out external ids into a separate object for ease of
                    # use.
                    for record in sub['external_records']:
                        if record['external_system'] == 3:
                            try:
                                sub['external_ids'].append(record)
                            except:
                                print(
                                    "an error occured getting external records"
                                )
                                print(sub['external_ids'])

                # Add organization name to subject record for display, since
                # organization IDs can vary across apps. (?)
                for ehb_org in ehb_orgs:
                    if sub['organization'] == ehb_org.id:
                        sub['organization_name'] = ehb_org.name

            # Cache the array of subjects.
            cache_key = 'protocol{0}_sub_data'.format(protocol.id)
            cache.set(cache_key, json.dumps(subs))
            cache.persist(cache_key)
Example no. 25
    def test_persist(self):
        cache.set("foo", "bar", timeout=20)
        cache.persist("foo")

        ttl = cache.ttl("foo")
        assert ttl is None
Example no. 26
 def persist(cls, key):
     return cache.persist(cls._getkey(key))
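
A sketch of the class this method would sit on, assuming _getkey namespaces keys with a prefix (the surrounding class here is illustrative, not from the source):

    from django.core.cache import cache

    class NamespacedCache:
        prefix = 'myapp'

        @classmethod
        def _getkey(cls, key):
            return '{0}:{1}'.format(cls.prefix, key)

        @classmethod
        def persist(cls, key):
            return cache.persist(cls._getkey(key))

    NamespacedCache.persist('session')  # persists the Redis key 'myapp:session'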
Example no. 27
    def cache_records(self, protocol_id):
        protocol_id = protocol_id[0]
        if protocol_id == 'all':
            protocols = Protocol.objects.all()
        else:
            protocols = Protocol.objects.filter(id=int(protocol_id)).all()
        er_label_rh = ServiceClient.get_rh_for(
            record_type=ServiceClient.EXTERNAL_RECORD_LABEL)
        lbls = er_label_rh.query()
        print('Caching {0} protocol(s)...'.format(len(protocols)))
        for protocol in protocols:
            print('Caching {}'.format(protocol))
            subjects = protocol.getSubjects()
            organizations = protocol.organizations.all()
            if subjects:
                subs = [eHBSubjectSerializer(sub).data for sub in subjects]
            else:
                continue
            ehb_orgs = []
            # We can't rely on Ids being consistent across apps so we must
            # append the name here for display downstream.
            for o in organizations:
                ehb_orgs.append(o.getEhbServiceInstance())
            # Check if the protocol has external IDs configured. If so retrieve them
            manageExternalIDs = False

            protocoldatasources = protocol.getProtocolDataSources()

            for pds in protocoldatasources:
                if pds.driver == 3:
                    ExIdSource = pds
                    manageExternalIDs = True

            if manageExternalIDs:
                try:
                    config = json.loads(ExIdSource.driver_configuration)
                    if 'sort_on' in list(config.keys()):
                        # er_label_rh = ServiceClient.get_rh_for(record_type=ServiceClient.EXTERNAL_RECORD_LABEL)
                        # lbl = er_label_rh.get(id=config['sort_on'])
                        lbl = ''
                        addl_id_column = lbl
                except:
                    raise

            for sub in subs:
                sub['external_records'] = []
                sub['external_ids'] = []
                sub['organization'] = sub['organization_id']
                sub.pop('organization_id')
                for pds in protocoldatasources:
                    try:
                        sub['external_records'].extend(
                            self.getExternalRecords(pds, sub, lbls))
                    except:
                        print("there was an error processing external records")
                        print("subject DB id:")
                        print(sub['id'])
                        print("protocol data source:")
                        print(pds)
                        pass

                if manageExternalIDs:
                    # Break out external ids into a separate object for ease of use
                    for record in sub['external_records']:
                        if record['external_system'] == 3:
                            try:
                                sub['external_ids'].append(record)
                            except:
                                print(
                                    "an error occured getting external records"
                                )
                                print(sub['external_ids'])
                for ehb_org in ehb_orgs:
                    if sub['organization'] == ehb_org.id:
                        sub['organization_name'] = ehb_org.name
            cache_key = 'protocol{0}_sub_data'.format(protocol.id)
            cache.set(cache_key, json.dumps(subs))
            cache.persist(cache_key)