def updateVideo(cls, focus=True):
    """Queue crawl URLs for every video matching the ``focus`` flag.

    Walks the ``video`` collection and pushes aids to Redis in
    comma-separated batches of 50, reporting progress via ProgressTask.

    :param focus: True -> daily focused videos, False -> passively
        observed videos (changes only the task label and the filter).
    """
    if focus:
        task_name = "生成每日视频待爬链接"
    else:
        task_name = "生成保守观测视频待爬链接"
    logger.info(task_name)
    doc_filter = {'focus': focus}
    videoCollection = MongoDbDao.getCollection('video')
    total = videoCollection.count_documents(doc_filter)
    if total == 0:
        return
    cursor = videoCollection.find(doc_filter, {"aid": 1}).batch_size(200)
    countNum = 0
    aid_list = ''
    progressTask = ProgressTask(task_name, total,
                                collection=cls.__tracerCollection)
    for each_doc in cursor:
        aid_list += str(each_doc['aid']) + ','
        countNum += 1
        logger.info(each_doc['aid'])
        if countNum == 50:
            # Flush a full batch (strip the trailing comma).
            progressTask.current_value += countNum
            cls.pushVideoRedisUrlToRedis(aid_list[:-1])
            aid_list = ''
            countNum = 0
    # BUG FIX: the final flush previously ran unconditionally, pushing an
    # empty string whenever total was an exact multiple of 50.
    if aid_list:
        progressTask.current_value += countNum
        cls.pushVideoRedisUrlToRedis(aid_list[:-1])
def insert_data(data, url):
    """Bulk-insert air-quality readings into the ``airquality`` table.

    :param data: iterable of dicts; each dict's values (in insertion
        order) must match the 15 columns of the INSERT statement.
    :param url: MySQL host passed to ``pymysql.connect``.

    Exits the process on any database error (original behavior kept);
    the connection is now always closed.
    """
    if not data:
        # Nothing to insert; the previous version built an invalid
        # "VALUES" clause with no tuples and died in the except branch.
        return
    conn = None
    try:
        conn = pymysql.connect(url,
                               user=rds_user_name,
                               passwd=rds_password,
                               db=db_name,
                               connect_timeout=5)
        # One tuple of column values per entry, preserving dict order.
        rows = [tuple(entry.values()) for entry in data]
        cursor = conn.cursor()
        query = "INSERT INTO airquality (date, time,co, tin_oxide, metanic_hydro, benzene_conc, titania, nox," \
                "tungsten_oxide_nox,average_no2, tungsten_oxide_no2, indium_oxide, temp, relative_humidity," \
                "absolute_humidity) VALUES " + ",".join(
            "(%s, %s, %s,%s, %s, %s,%s, %s, %s,%s, %s, %s,%s, %s, %s)" for _ in rows)
        flattened_values = [item for sublist in rows for item in sublist]
        cursor.execute(query, flattened_values)
        conn.commit()
    except Exception as e:
        logger.error(
            "ERROR: Unexpected error: Could not connect to MySql instance.")
        logger.error(e)
        sys.exit()
    finally:
        # BUG FIX: the connection was never closed (leaked on every call).
        if conn is not None:
            conn.close()
    logger.info("SUCCESS: Connection to RDS mysql instance succeeded")
def send_mail_verification(*args, **kwargs):
    """Send a feedback email built from the ``subject``/``message`` kwargs."""
    logger.info("Sent feedback email")
    return send_mail(
        kwargs['subject'],
        kwargs['message'],
        from_email='*****@*****.**',
        recipient_list=['*****@*****.**'],
    )
def __autoCrawlTask(cls):
    """Run the crawl scheduler forever, polling pending jobs once a minute."""
    service_name = "自动爬虫计划调度服务"
    logger.info(service_name)
    # Register a liveness marker for this long-running service.
    ExistsTask(service_name,
               update_frequency=60,
               collection=MongoDbDao.getCollection('tracer'))
    while True:
        schedule.run_pending()
        sleep(60)
def create_patient_and_diagnosis():
    """Seed a demo patient user plus two diagnoses with the previously
    created doctor (user_id - 1).

    :return: the created ``User``.
    """
    user = User.objects.create()
    user.username = '******'
    user.email = '*****@*****.**'
    user.password = make_password('1qazxsw2')
    user.is_patient = True
    user.is_doctor = False
    user.save()
    patient = Patient.objects.create(user=user)
    patient.name = 'patient'
    patient.country = 'Germany'
    patient.address = 'Lohmühlenstraße 65, 12435 Berlin, Germany'
    patient.zipcode = '12435'
    patient.phone_number = '+49 30 12085961'
    patient.image = 'users/no-img.svg'
    patient.request = "IT"
    patient.save()
    # BUG FIX: the log previously said "doctor created." for a patient.
    logger.info("{} patient created.".format(patient))
    # First diagnosis: a registered preventive appointment.
    diagnosis_first = Diagnosis.objects.create(
        doctor=Doctor.objects.get(user_id=user.id - 1),
        patient=Patient.objects.get(user_id=user.id))
    diagnosis_first.title = 'Preventive Medicine'
    diagnosis_first.description = 'Free to talk to me'
    diagnosis_first.video_link = 'https://zoom.us/codeuniversity/1234567890'
    diagnosis_first.video_password = '******'
    diagnosis_first.type = DiagnosisType.PREVENTIVE
    diagnosis_first.image = 'diagnoses/no-img.jpg'
    diagnosis_first.status = RegisterStatus.REGISTERED
    diagnosis_first.date = '2020-12-23'
    diagnosis_first.save()
    # Second diagnosis: a completed mental-health appointment.
    diagnosis_second = Diagnosis.objects.create(
        doctor=Doctor.objects.get(user_id=user.id - 1),
        patient=Patient.objects.get(user_id=user.id))
    diagnosis_second.title = 'Mental Illness Baster SS'
    diagnosis_second.description = 'Free to talk to me'
    diagnosis_second.video_link = 'https://zoom.us/codeuniversity/1234567890'
    diagnosis_second.video_password = '******'
    diagnosis_second.type = DiagnosisType.MENTAL
    diagnosis_second.image = 'diagnoses/mental.jpg'
    diagnosis_second.status = RegisterStatus.COMPLETED
    diagnosis_second.date = '2020-12-23'
    diagnosis_second.save()
    # Completed diagnoses get a summary.
    summary = Summary.objects.create(diagnosis=diagnosis_second)
    summary.comment = 'Take care yourself'
    summary.save()
    return user
def updateAutoAddVideo(cls):
    """Queue a 'latest uploads' crawl URL for every focused author."""
    task_name = "生成作者最新发布的视频的待爬链接"
    logger.info(task_name)
    authors = MongoDbDao.getCollection('author')
    query = {'$or': [{'focus': True}, {'forceFocus': True}]}
    total = authors.count_documents(query)
    if total == 0:
        return
    tracker = ProgressTask(task_name, total,
                           collection=cls.__tracerCollection)
    for author_doc in authors.find(query, {'mid': 1}):
        tracker.current_value += 1
        page_url = 'https://space.bilibili.com/ajax/member/getSubmitVideos?mid={}&pagesize=10&page=1&order=pubdate'.format(
            author_doc['mid'])
        cls.__redisConnection.rpush("videoAutoAdd:start_urls", page_url)
def retry(self, request, reason, spider):
    """Penalize the proxy that failed this request and re-issue the request
    on a freshly selected proxy.

    :param reason: numeric rating delta applied to the failed proxy.
    :return: a copy of ``request`` bound to a new proxy.
    """
    try:
        # BUG FIX: a missing 'proxy_object' key raises KeyError, not
        # AttributeError — the original except never caught the common case.
        # AttributeError is kept for a stored object missing its fields.
        p = request.meta['proxy_object']
        p.available = True
        p.rating = p.rating + reason
        p.get_proxy_state()
    except (KeyError, AttributeError):
        logger.error('There is no proxy_object in request')
    retryreq = request.copy()
    proxy_object = self.proxy_storage.get_proxy()
    retryreq.meta['proxy_object'] = proxy_object
    retryreq.meta['proxy'] = proxy_object.address
    logger.info('Request fails, retrying')
    return retryreq
def autoCrawlBangumi(cls):
    """Queue the bangumi / donghua ranking pages for crawling."""
    task_name = "生成番剧国创待爬链接"
    logger.info(task_name)
    # Single-step task: total is 1, completed once both pages are queued.
    tracker = ProgressTask(task_name, 1, collection=cls.__tracerCollection)
    ranking_pages = (
        "https://www.bilibili.com/ranking/bangumi/167/0/7",
        "https://www.bilibili.com/ranking/bangumi/13/0/7",
    )
    for page_url in ranking_pages:
        cls.__redisConnection.rpush(cls.__bangumiAndDonghua_key, page_url)
    tracker.current_value += 1
def addTagTask(cls):
    """Queue a tag-crawl URL for every video document still missing ``tag``."""
    task_name = "生成待爬标签视频链接"
    doc_filter = {'tag': {'$exists': False}}
    # BUG FIX: Cursor.count() was deprecated in PyMongo 3.7 and removed in
    # PyMongo 4; count_documents matches the rest of this module.
    total = cls.__videoCollection.count_documents(doc_filter)
    cursor = cls.__videoCollection.find(doc_filter, {
        "aid": 1
    }).batch_size(100)
    progressTask = ProgressTask(task_name, total,
                                collection=cls.__tracerCollection)
    url = 'https://www.bilibili.com/video/av{}'
    for each_video in cursor:
        progressTask.current_value += 1
        aid = each_video['aid']
        logger.info("待爬AV号{}".format(aid))
        cls.__redisConnection.rpush("tagAdder:start_urls", url.format(aid))
def handle_bar(context, bar_dict):
    """On the first bar only, buy every selected stock at the precomputed
    equal weight; later bars are no-ops."""
    if context.fired:
        return
    for stock in context.stocks:
        order_target_percent(stock, context.average_percent)
        logger.info("Bought: " + str(context.average_percent) +
                    " % for stock: " + str(stock))
    context.fired = True
def updateAuthor(cls):
    """Queue the daily crawl URL for every focused or force-focused author."""
    task_name = "生成每日作者待爬链接"
    logger.info(task_name)
    authors = MongoDbDao.getCollection('author')
    query = {'$or': [{'focus': True}, {'forceFocus': True}]}
    total = authors.count_documents(query)
    if total == 0:
        return
    tracker = ProgressTask(task_name, total,
                           collection=MongoDbDao.getCollection('tracer'))
    for author_doc in authors.find(query, {"mid": 1}).batch_size(200):
        tracker.current_value += 1
        cls.pushAuthorRedisUrlToRedis(author_doc['mid'])
def crawlOnlineTopListData(cls):
    """Queue author and video crawl URLs for everything on the online
    top list embedded in the page's ``__INITIAL_STATE__`` script tag."""
    task_name = "生成强力追踪待爬链接"
    logger.info(task_name)
    response = requests.get(cls.__online_url)
    data_text = etree.HTML(
        response.content.decode('utf8')).xpath('//script/text()')[-2]
    # BUG FIX: str.lstrip('window.__INITIAL_STATE__=') strips *characters*
    # from that set, not the prefix, and can eat leading JSON characters.
    # Split on the literal prefix instead; the [:-122] tail-trim is kept.
    state_json = data_text.split('window.__INITIAL_STATE__=', 1)[-1]
    j = json.loads(state_json[:-122])
    total = len(j['onlineList'])
    progressTask = ProgressTask(task_name, total,
                                collection=cls.__tracerCollection)
    for each_video in j['onlineList']:
        mid = each_video['owner']['mid']
        # Two uploader ids are blacklisted from author crawling.
        # NOTE(review): indentation was lost in the source; video push and
        # progress are treated as per-iteration (progress must reach total).
        if mid not in [7584632, 928123]:
            AuthorService.pushAuthorRedisUrlToRedis(mid)
        VideoService.pushVideoRedisUrlToRedis(each_video['aid'])
        progressTask.current_value += 1
def process_item(self, item, spider):
    """Create or update a Certificate row for a scraped 'TM' item.

    Non-TM items are dropped. An existing certificate (matched by full
    number and type) is updated in place; otherwise a new row is staged.
    The item is always returned so downstream pipelines keep running.
    """
    full_num = item['number']
    item_type = item['type']
    # BUG FIX: `is not 'TM'` compares identity, not equality — it only
    # happened to work via literal interning and is a SyntaxWarning on
    # modern CPython. Use `!=`.
    if item_type != 'TM':
        logger.warning('Doctype of {} is not "TM"'.format(full_num))
        raise DropItem
    # check if cert already exists
    try:
        cert = self.session.query(Certificate).filter_by(
            full_num=full_num, type=item_type)[0]
        logger.info('Found certificate with {} number. Trying to update fields.'.format(full_num))
        self.check_fields(cert, item)
    except IndexError:
        logger.info('No certificate with {} number. Trying to create new.'.format(full_num))
        num = int(full_num.split('/')[0])
        cert = Certificate(num=num, full_num=full_num,
                           os_id=next(self.num_gen), type='TM',
                           b900=num * 100)
        cert = self.check_fields(cert, item)
        cert.create_date = cert.update_date
        self.session.add(cert)
    finally:
        # NOTE(review): `return` in finally swallows any exception raised
        # inside the try (e.g. from check_fields) — kept as-is, but worth
        # confirming this is intentional.
        return item
def get(self, request, mobile):
    """Validate the image captcha, then generate, store and text a 6-digit
    SMS verification code.

    :param request: HTTP request carrying image_code / image_code_id.
    :param mobile: phone number from the URL.
    :return: JSON success payload, or an HTTP 400 on validation failure.
    """
    image_code = request.GET.get('image_code')
    image_code_id = request.GET.get('image_code_id')
    if not all([image_code, image_code_id]):
        return HttpResponseBadRequest('参数不全')
    redis_conn = get_redis_connection('code')
    redis_text = redis_conn.get('img_%s' % image_code_id)
    if redis_text is None:
        # Captcha expired or never existed.
        return HttpResponseBadRequest('图形验证码失效')
    # Case-insensitive comparison against the stored captcha (bytes).
    if image_code.lower() != redis_text.decode().lower():
        return HttpResponseBadRequest('输入的图形验证码有误')
    sms_code = '%06d' % random.randint(0, 999999)
    logger.info(sms_code)
    # Store the code for 5 minutes, then send it via the SMS template.
    redis_conn.setex('sms_%s' % mobile, 300, sms_code)
    CCP().send_template_sms(mobile, [sms_code, 5], 1)
    return http.JsonResponse({'code': '0', 'errmsg': '短信发送成功'})
def init(context):
    """Select the ten highest-revenue stocks with PE strictly between 55
    and 60, subscribe to them, and precompute the equal-cash weight used
    later by handle_bar."""
    revenue_query = query(
        fundamentals.income_statement.revenue,
        fundamentals.eod_derivative_indicator.pe_ratio).filter(
            fundamentals.eod_derivative_indicator.pe_ratio > 55).filter(
                fundamentals.eod_derivative_indicator.pe_ratio < 60).order_by(
                    fundamentals.income_statement.revenue.desc()).limit(10)
    # Keep the result on the context for later bars.
    context.fundamental_df = get_fundamentals(revenue_query)
    logger.info(context.fundamental_df)
    update_universe(context.fundamental_df.columns.values)
    # Equal-weight allocation across the selection, keeping 1% cash buffer.
    context.stocks = context.fundamental_df.columns.values
    stocks_number = len(context.stocks)
    context.average_percent = 0.99 / stocks_number
    logger.info("Calculated average percent for each stock is: %f" %
                context.average_percent)
    context.fired = False
def spider_idle(self, spider):
    """Track consecutive idle signals and close the spider once the
    configured idle threshold (``idle_number``) is exceeded.

    A gap of more than ~5s between triggers means Redis produced work in
    between, so the idle streak restarts from the latest trigger.
    """
    self.idle_count += 1
    # Timestamp of this idle trigger.
    self.idle_list.append(time.time())
    idle_list_len = len(self.idle_list)
    if idle_list_len > 2 and self.idle_list[-1] - self.idle_list[-2] > 6:
        # Streak broken: keep only the latest trigger.
        self.idle_list = [self.idle_list[-1]]
    elif idle_list_len > self.idle_number:
        idle_start_time = time.strftime("%Y-%m-%d %H:%M:%S",
                                        time.localtime(self.idle_list[0]))
        # BUG FIX: the close time previously reused idle_list[0] (the start
        # time); the last trigger is the actual close moment.
        idle_close_time = time.strftime("%Y-%m-%d %H:%M:%S",
                                        time.localtime(self.idle_list[-1]))
        logger.info(
            '\n continued idle number exceed {} Times'
            '\n meet the idle shutdown conditions, will close the reptile operation'
            '\n idle start time: {}, close spider time: {}'.format(
                self.idle_number, idle_start_time, idle_close_time))
        self.crawler.engine.close_spider(spider, 'closespider_pagecount')
def get(self, request, mobile):
    """Throttled SMS verification-code endpoint.

    Refuses a resend within 60s of the last one, validates the image
    captcha (deleting it to prevent replay), then stores a 6-digit code
    plus a send flag in Redis via a pipeline.

    :param mobile: 手机号码 (target phone number).
    :return: JSON status payload.
    """
    redis_conn = get_redis_connection('verify_code')
    # Throttle: a live send flag means a code was sent less than 60s ago.
    send_flag = redis_conn.get('send_flag_%s' % mobile)
    if send_flag:
        return http.JsonResponse({'code': RETCODE.THROTTLINGERR, 'errmsg': '发送短信验证码过快'})
    image_code_client = request.GET.get('image_code')
    uuid = request.GET.get('image_code_id')
    if not all([image_code_client, uuid]):
        return http.JsonResponse({'code': RETCODE.NECESSARYPARAMERR, 'errmsg': '缺少必传参数'})
    image_code_server = redis_conn.get('img_%s' % uuid)
    if image_code_server is None:
        # Captcha expired or never existed.
        return http.JsonResponse({'code': RETCODE.IMAGECODEERR, 'errmsg': '图形验证码失效'})
    # Delete the captcha so it cannot be replayed; failure here is non-fatal.
    try:
        redis_conn.delete('img_%s' % uuid)
    except Exception as e:
        logger.error(e)
    image_code_server = image_code_server.decode()  # bytes -> str
    if image_code_client.lower() != image_code_server.lower():
        return http.JsonResponse({'code': RETCODE.IMAGECODEERR, 'errmsg': '输入图形验证码错误'})
    sms_code = '%06d' % random.randint(0, 999999)
    logger.info(sms_code)
    print(sms_code)
    pl = redis_conn.pipeline()
    # BUG FIX: the code's TTL was const.SEND_SMS_TEPLATE_ID — an SMS
    # template id, not a duration. Use the 300s the surrounding comments
    # document as SMS_CODE_REDIS_EXPIRES.
    pl.setex('sms_code_%s' % mobile, 300, sms_code)
    pl.setex('send_flag_%s' % mobile, 60, 1)
    pl.execute()
    return http.JsonResponse({'code': RETCODE.OK, 'errmsg': '发送短信成功'})
def calculateAuthorRank(cls):
    """Recompute ranking fields for every author document.

    For each of three counters (followers, archive views, article views)
    authors are sorted descending and written a ``rank`` sub-document with:
      * ``<key>Rank``  — 1-based position in the sort order,
      * ``d<Key>Rank`` — previous rank minus new rank (0 on first ranking),
      * ``p<Key>Rank`` — value from ``cls.__format_p_rank(iTh, allCount)``.
    Authors whose counter is 0 get sentinel rank -1. Progress is tracked in
    a ProgressTask sized authors * 3.
    """
    task_name = "计算作者排名数据"
    authorCollection = MongoDbDao.getCollection('author')
    # The three counters a ranking is derived from.
    keys = ['cFans', 'cArchive_view', 'cArticle_view']
    # NOTE(review): total is counted with keys[0] only — assumes authors
    # with cFans also carry the other two counters; confirm in schema.
    allCount = authorCollection.count_documents({keys[0]: {'$exists': 1}})
    progressTask = ProgressTask(
        task_name,
        allCount * len(keys),
        collection=MongoDbDao.getCollection('tracer'))
    for each_key in keys:
        logger.info("开始计算作者{}排名".format(each_key))
        # Authors holding this counter, sorted highest first.
        authorCollectionResult = authorCollection.find(
            {
                each_key: {
                    '$exists': 1
                }
            }, {
                'mid': 1,
                'rank': 1,
                each_key: 1
            }).batch_size(300).sort(each_key, DESCENDING)
        # Map the counter to its three output field names.
        if each_key == 'cFans':
            each_rank = 'fansRank'
            each_d_rank = 'dFansRank'
            each_p_rank = 'pFansRank'
        elif each_key == 'cArchive_view':
            each_rank = 'archiveViewRank'
            each_d_rank = 'dArchiveViewRank'
            each_p_rank = 'pArchiveViewRank'
        elif each_key == 'cArticle_view':
            each_rank = 'articleViewRank'
            each_d_rank = 'dArticleViewRank'
            each_p_rank = 'pArticleViewRank'
        iTh = 1  # 1-based rank position within this sort order
        for each_author in authorCollectionResult:
            progressTask.current_value += 1
            logger.info("计算{}排名".format(each_author['mid']))
            if each_key in each_author:
                if 'rank' in each_author:
                    # Update the existing rank sub-document in place.
                    rank = each_author['rank']
                    if each_rank in each_author['rank']:
                        # Delta = old rank minus new rank (positive = climbed).
                        rank[each_d_rank] = each_author['rank'][
                            each_rank] - iTh
                    else:
                        rank[each_d_rank] = 0
                    rank[each_rank] = iTh
                    rank[each_p_rank] = cls.__format_p_rank(iTh, allCount)
                else:
                    # Initialize: first time this author is ranked.
                    rank = {
                        each_rank: iTh,
                        each_d_rank: 0,
                        each_p_rank: cls.__format_p_rank(iTh, allCount)
                    }
            # A zero counter means "unranked": overwrite with sentinel -1.
            if each_author[each_key] == 0:
                if 'rank' in each_author:
                    rank = each_author['rank']
                    rank[each_d_rank] = 0
                    rank[each_rank] = -1
                    rank[each_p_rank] = -1
                else:
                    rank = {each_rank: -1, each_d_rank: 0, each_p_rank: -1}
            # Stamp the update time once per full pass, on the last key.
            if each_key == 'cArticle_view':
                rank['updateTime'] = datetime.datetime.now()
            authorCollection.update_one({'mid': each_author['mid']},
                                        {'$set': {
                                            'rank': rank,
                                        }})
            iTh += 1
    progressTask.current_value = progressTask.total_value
    logger.info("计算作者排名结束")
def __init__(self, conf):
    """Initialize the pool from the given configuration mapping."""
    super().__init__(**conf)
    logger.info('YaMongoPool gets ready')
def spider_opened(self, spider):
    """Log spider start together with its configured consecutive-idle limit."""
    logger.info(
        "opened spider %s redis spider Idle, Continuous idle limit: %d",
        spider.name,
        self.idle_number)
''' from venv import logger from vehicle.vehicle import vehicle from vehicle.vehicleArrange import arrange import firebase_admin import google from firebase_admin import credentials from firebase_admin import firestore from random import randint from firebase import credentialsFileLocation try: firebase_admin.get_app() logger.info('firebase already intialized.') except ValueError as e: logger.info('firebase not initialized. initialize.') cred = credentials.Certificate(credentialsFileLocation) firebase_admin.initialize_app(cred) db = firestore.client() def extract(carID): doc_ref = db.collection(u'cars').document(carID) try: doc = doc_ref.get()
def product_page_scraper(row_values):
    """Enrich ``row_values`` (requires prod_id, prod_recommended_total,
    prod_recommended_yes) in place with price, brand, gallery, description
    and variety data fetched from the drop.com product API.
    """
    prod_varieties = []
    prod_gallery = []
    prod_id = row_values['prod_id']
    xhr_url = 'https://drop.com/api/drops;dropUrl={};isPreview=false;noCache=false;withPrices=true?lang=en-US&returnMeta=true'.format(
        prod_id)
    prod_raw = requests.get(xhr_url).text  # download the raw json
    prod_dict = json.loads(prod_raw)  # parse it into a dict
    prod_msrp_price = prod_dict['data']['msrpPrice']
    # NOTE(review): splits on '+' and on every literal 'x', which also
    # splits brand names containing 'x' — behavior kept as-is.
    prod_brand = [
        x.strip() for x in re.split(r'\+|x', prod_dict['data']['brand'])
    ]
    prod_massdrop_price = prod_dict.get('data', {}).get('currentPrice')
    prod_category_name = prod_dict['data']['primaryCategoryName']
    prod_is_promo = prod_dict['data']['isPromo']
    content_dict = prod_dict.get('data', {}).get('description',
                                                 {}).get('content')
    for dic in content_dict:
        if 'Specs' in dic.values():
            # Strip layout whitespace before parsing the HTML copy.
            dic['copy'] = dic['copy'].replace('\n', '').replace('\t', '')
            soup = BeautifulSoup(dic['copy'], features="html.parser")
            text = soup.get_text(',')
            # BUG FIX: the original removed blanks while iterating the same
            # list, which skips consecutive empty entries; filter instead.
            specs_list = [
                s.strip() for s in text.split(',') if s.strip() != ''
            ]
    if prod_msrp_price and prod_massdrop_price is not None:
        prod_discount = prod_msrp_price - prod_massdrop_price
    else:
        prod_discount = 0
    # Collect every gallery image across all content sections.
    for list_item in content_dict:
        if 'images' in list_item:
            for image in list_item['images']:
                prod_gallery.append(image['src'])
    soup = BeautifulSoup(content_dict[0]['copy'], features="html.parser")
    prod_description = soup.get_text()
    prod_recommended_total = row_values['prod_recommended_total']
    prod_recommended_yes = row_values['prod_recommended_yes']
    # Guard against divide-by-zero when nobody has voted yet.
    if prod_recommended_total != 0:
        prod_recommended_pc = prod_recommended_yes / prod_recommended_total
    else:
        prod_recommended_pc = 0
    # Variety titles live in the hoverGallery section when present.
    try:
        for i in content_dict:
            if i['layout'] == 'hoverGallery':
                for j in i['images']:
                    prod_varieties.append(j['title'])
    except KeyError as error:
        logger.info(error)
    row_values.update({
        'prod_msrp_price': prod_msrp_price,
        'prod_brand': prod_brand,
        'prod_massdrop_price': prod_massdrop_price,
        'prod_category_name': prod_category_name,
        'prod_is_promo': prod_is_promo,
        'prod_discount': prod_discount,
        'prod_gallery': prod_gallery,
        'prod_description': prod_description,
        'prod_recommended_pc': prod_recommended_pc,
        'prod_varieties': prod_varieties
    })
def clean_redis(self):
    """Flush every key in the current Redis database; returns 0."""
    self.__redis.flushdb()
    logger.info('clean redis success!')
    return 0
def spider_closed(self, spider):
    """Log spider shutdown with the total and consecutive idle counts."""
    logger.info(
        "closed spider %s, idle count %d , Continuous idle count %d",
        spider.name,
        self.idle_count,
        len(self.idle_list))
def check_fields(self, cert, item):
    """Copy changed fields from a scraped *item* onto certificate *cert*,
    logging every update, and refresh ICGS classes and images when present.

    :param cert: Certificate ORM row being synchronized.
    :param item: scraped item (dict-like) with optional fields.
    :return: *cert* with ``update_date`` stamped to now (UTC).
    """
    if 'country' in item and cert.country != item['country']:
        cert.country = item['country']
        logger.info('"Country" was updated')
    if 'application_number' in item and cert.application_number != item['application_number']:
        cert.application_number = item['application_number']
        logger.info('"Application" number was updated')
    # Dates arrive as 'DD.MM.YYYY' strings; compare as date objects.
    if 'valid_until' in item and cert.valid_until != datetime.strptime(item['valid_until'], "%d.%m.%Y").date():
        cert.valid_until = datetime.strptime(item['valid_until'], "%d.%m.%Y").date()
        logger.info('"Valid until" was updated')
    if 'application_date' in item and \
            cert.application_date != datetime.strptime(item['application_date'], "%d.%m.%Y").date():
        cert.application_date = datetime.strptime(item['application_date'], "%d.%m.%Y").date()
        logger.info('"Application date" was updated')
    if 'registration_date' in item and \
            cert.registration_date != datetime.strptime(item['registration_date'], "%d.%m.%Y").date():
        cert.registration_date = datetime.strptime(item['registration_date'], "%d.%m.%Y").date()
        logger.info('"Registration date" was updated to {}'.format(cert.registration_date))
    if "status_str" in item:
        # status_str looks like 'Статус: <status> (последнее изменение: <date>)'
        # ("Status: ... (last change: ...)"); split on ':', '(' and ')'.
        parts = re.split('[:()]', item["status_str"])
        if parts[0].strip() == "Статус":
            status = parts[1].strip()
            # Capitalize the first letter only, preserving the rest.
            status = status[0].upper() + status[1:]
            if cert.status != status:
                cert.status = status
                logger.info('"Status" was updated')
            if len(parts) > 2 and "последнее изменение" in parts[2]:
                status_date = datetime.strptime(parts[3].strip(), "%d.%m.%Y")
                if cert.status_date != status_date:
                    cert.status_date = status_date
                    logger.info('"Status date" was updated')
    if 'owner' in item and cert.owner != item['owner']:
        cert.owner = item['owner']
        logger.info('"Owner" was updated')
    if 'owner_address' in item and cert.owner_address != item['owner_address']:
        cert.owner_address = item['owner_address']
        # NOTE(review): message says "Owner" — likely meant "Owner address".
        logger.info('"Owner" was updated')
    if 'colors' in item and cert.colors != item['colors']:
        cert.colors = item['colors']
        logger.info('"Colours" was updated')
    if "priority" in item and cert.priority != datetime.strptime(item['priority'], "%d.%m.%Y").date():
        cert.priority = datetime.strptime(item['priority'], "%d.%m.%Y").date()
        logger.info('"Priority" was updated')
    if "icgs" in item:
        # Replace any existing ICGS rows for this certificate wholesale.
        icgs = self.session.query(ICGS).filter_by(cert=cert.os_id)
        if icgs.count() > 0:
            logger.info('ICGS for certificate {} already exists'.format(cert.full_num))
            self.session.query(ICGS).filter_by(cert=cert.os_id).delete()
            self.session.commit()
            logger.info('Delete old ICGS and create new.')
        # create new icgs
        icgs_list = item["icgs"] if isinstance(item["icgs"], list) else [item["icgs"]]
        for icgs_str in icgs_list:
            # Split on '<number> -' markers; [1:] cuts the part before the first number.
            icgs_parts = re.split('(\d+\s+-)', icgs_str)[1:]
            # Pairs of (code marker, description) alternate in the split result.
            for icgs_code, icgs_desc in zip(icgs_parts[::2], icgs_parts[1::2]):
                icgs_code = int(icgs_code.strip('\t\n- '))
                icgs_desc = icgs_desc.strip('\t\n; ')
                # Best-effort English translation; falls back to the Russian text.
                try:
                    icgs_desc_en = self.translator.translate(icgs_desc, dest='en').text
                except (IOError, ValueError):
                    icgs_desc_en = icgs_desc
                icgs = ICGS(cert=cert.os_id, cls=icgs_code, descr_ru=icgs_desc, descr_en=icgs_desc_en)
                self.session.add(icgs)
        logger.info('ICGS was updated')
    if "image_url" in item:
        pics = self.session.query(Pics).filter_by(cert=cert.os_id)
        if pics.count() > 0:
            logger.error('Picture for certificate {} already exists'.format(cert.full_num))
            # Only replace an existing image when the flag allows it.
            if UPDATE_IMAGES:
                self.session.query(Pics).filter_by(cert=cert.os_id).delete()
                self.session.commit()
                logger.info('Old image was deleted')
                self.create_image(cert, item)
                logger.info('Image was uploaded')
        else:
            self.create_image(cert, item)
            logger.info('Image was uploaded')
    cert.update_date = datetime.utcnow()
    return cert
def set(self, key, value, ex=None):
    """Set ``key`` to ``value`` in Redis with an optional TTL.

    :param ex: expiry in seconds, or None for no expiry.
    :return: the Redis reply on success, False when the call raised.
    """
    try:
        return self.__redis.set(key, value, ex)
    except Exception as e:
        # BUG FIX: failures were logged at info level, hiding faults from
        # error-level monitoring; the swallow-and-return-False contract
        # for callers is preserved.
        logger.error("redis set:%s fail!" % e)
        return False
def _create_demo_diagnosis(doctor, title, diagnosis_type, image, date):
    """Create and save one unregistered demo diagnosis for *doctor*."""
    diagnosis = Diagnosis.objects.create(doctor=doctor)
    diagnosis.title = title
    diagnosis.description = 'Free to talk to me'
    diagnosis.video_link = 'https://zoom.us/codeuniversity/1234567890'
    diagnosis.video_password = '******'
    diagnosis.type = diagnosis_type
    diagnosis.image = image
    diagnosis.status = RegisterStatus.UNREGISTERED
    diagnosis.date = date
    diagnosis.save()


def create_doctor_and_diagnosis():
    """Seed a demo doctor user with a license and twelve demo diagnoses
    (three titles, four each).

    :return: the created ``User``.
    """
    user = User.objects.create()
    user.username = '******'
    user.email = '*****@*****.**'
    user.password = make_password('1qazxsw2')
    user.is_doctor = True
    user.is_patient = False
    user.save()
    doctor = Doctor.objects.create(user=user)
    doctor.name = 'doctor'
    doctor.country = 'Germany'
    doctor.address = 'Lohmühlenstraße 65, 12435 Berlin, Germany'
    doctor.zipcode = '12435'
    doctor.phone_number = '+49 30 12085961'
    doctor.image = 'users/no-img.svg'
    doctor.validity = Validity.VALID
    doctor.speciality = "IT"
    doctor.save()
    # Renamed from `license`, which shadowed the builtin.
    doctor_license = License.objects.create(doctor=doctor)
    doctor_license.image = 'licenses/sample.jpg'
    doctor_license.save()
    logger.info("{} doctor created.".format(doctor))
    saved_doctor = Doctor.objects.get(user_id=user.id)
    # The three duplicated 4x loops are collapsed into a helper; titles,
    # types, images and dates are preserved exactly from the original.
    for _ in range(4):
        _create_demo_diagnosis(saved_doctor, 'Mental Illness Baster SS',
                               DiagnosisType.PREVENTIVE,
                               'diagnoses/no-img.jpg', '2020-12-23')
    for _ in range(4):
        _create_demo_diagnosis(saved_doctor, 'Mental Illness',
                               DiagnosisType.MENTAL,
                               'diagnoses/mental.jpg', '2020-12-21')
    for _ in range(4):
        _create_demo_diagnosis(saved_doctor, 'Preventive Medicine Trial',
                               DiagnosisType.PREVENTIVE,
                               'diagnoses/no-img.jpg', '2020-12-21')
    return user
def get(self, request, mobile):
    """Verify the image captcha, then generate, store and send an SMS code.

    Parameter was previously misspelled ``reqeust``; Django passes the
    request positionally, so the rename is backward-compatible.

    :param request: 请求对象 (HTTP request).
    :param mobile: 手机号 (phone number).
    :return: JSON status payload.
    """
    image_code_client = request.GET.get('image_code')
    uuid = request.GET.get('uuid')
    if not all([image_code_client, uuid]):
        return http.JsonResponse({
            'code': RETCODE.NECESSARYPARAMERR,
            'errmsg': '缺少必传参数'
        })
    redis_conn = get_redis_connection('verify_code')
    image_code_server = redis_conn.get('img_%s' % uuid)
    if image_code_server is None:
        # Captcha expired or never existed.
        return http.JsonResponse({
            'code': RETCODE.IMAGECODEERR,
            'errmsg': '图形验证码失效'
        })
    # Delete the captcha so it cannot be replayed; failure is non-fatal.
    try:
        redis_conn.delete('img_%s' % uuid)
    except Exception as e:
        logger.error(e)
    image_code_server = image_code_server.decode()  # bytes -> str
    if image_code_client.lower() != image_code_server.lower():
        return http.JsonResponse({
            'code': RETCODE.IMAGECODEERR,
            'errmsg': '输入图形验证码有误'
        })
    sms_code = '%06d' % random.randint(0, 999999)
    logger.info(sms_code)
    # Store the code and a throttle flag atomically via a pipeline.
    pipe = redis_conn.pipeline()
    pipe.setex('sms_%s' % mobile, constants.SMS_CODE_REDIS_EXPIRES, sms_code)
    pipe.setex('send_flag_%s' % mobile, constants.SMS_CODE_REDIS_EXPIRES, 1)
    pipe.execute()
    send_sms_code(mobile, sms_code)
    return http.JsonResponse({'code': RETCODE.OK, 'errmsg': '发送短信成功'})
def check_proxy_response(self, response):
    """Return True when the response body lacks the known bad-proxy marker."""
    body_text = response.body.decode('cp1251')
    if self.wrong_response in body_text:
        logger.info('Verification failed with response: ' + body_text)
        return False
    return True
passwd=rds_password, db=db_name, connect_timeout=5) cursor = conn.cursor() cursor.execute( "create table airquality (id int auto_increment primary key, date varchar(255) not null, time varchar(255) not null, co varchar(255) not null, tin_oxide varchar(255) not null, metanic_hydro varchar(255) not null, benzene_conc varchar(255) not null, titania varchar(255) not null, nox varchar(255) not null, tungsten_oxide_nox varchar(255) not null, average_no2 varchar(255) not null, tungsten_oxide_no2 varchar(255) not null, indium_oxide varchar(255) not null, temp varchar(255) not null, relative_humidity varchar(255) not null, absolute_humidity varchar(255) not null);" ) conn.commit() except Exception as e: logger.error( "ERROR: Unexpected error: Could not connect to MySql instance.") logger.error(e) sys.exit() logger.info("SUCCESS: Connection to RDS mysql instance succeeded") # Method to insert the data into the database def insert_data(data, url): try: conn = pymysql.connect(url, user=rds_user_name, passwd=rds_password, db=db_name, connect_timeout=5) rows = [] for entry in data: row = () values = [] for attribute, value in entry.items():