def _(browser=browser):
    """Every source attached to a content is rendered on its show page."""
    first = Source(name="Apostila de flask", link="https://negros.dev/apostila-flask.pdf")
    second = Source(name="Apostila de flask", link="https://negros.dev/apostila-flask.pdf")
    content = ContentFactory.create(name="Introdução ao Flask", sources=[first, second])

    browser.visit(url_for("contents.show", content_slug=content.slug))

    assert browser.is_text_present(first.name)
    assert browser.is_text_present(second.name)
def test_000_source_creation(self):
    """ Testing Catalogue Source Creation """
    print("Testing Catalogue Source Creation")
    for item in prods_test_item:
        # The 'retailer' key takes precedence over the generic 'source' key.
        key = item['retailer'] if 'retailer' in item else item['source']
        model = Source({
            'key': key,
            'name': key.capitalize(),
        })
        # Saving must echo back the key that was stored.
        self.assertEqual(key, model.save())
def source():
    """Paginated source listing, serialized through the SourceCollection view model.

    Query params:
        page     -- page number to fetch
        pagerows -- rows per page

    Returns a JSON document built from the collection's ``__dict__``.
    (Original note, translated: view models convert records to plain objects
    before the ``__dict__`` dump; a cache layer was added 2019-09-10.)
    """
    print(request.remote_addr)
    page = request.args.get('page')
    rows = request.args.get('pagerows')
    source = Source.get_source(page, rows)
    source_count = Source.get_source_count()
    sources = SourceCollection()
    sources.fill(source_count, source)
    # Serialize nested view-model objects by falling back to their __dict__.
    return json.dumps(sources, default=lambda o: o.__dict__)
def get_all():
    """ Fetch all Sources in DB """
    logger.info("Fetch all sources...")
    params = request.args.to_dict()
    logger.debug(params)
    # Default the optional `cols` column selector to "all columns".
    if 'cols' not in params:
        params['cols'] = ''
    rets = Source.get_all(params['cols'])
    if not rets:
        raise errors.ApiError(70003, "Could not fetch Sources data!")
    return jsonify(rets)
def add_source():
    """Create a Source from the POSTed JSON body and register every image
    file found under its ``file_url`` directory.

    Expects a JSON body containing at least ``file_url`` plus whatever
    attributes ``Source.set_attrs`` consumes. Returns a JSON status document
    on POST; implicitly returns None for other methods (original behavior).
    """
    try:
        # Resolve this host's LAN-facing IP; file paths are selected
        # relative to it.
        addr = socket.gethostbyname(socket.getfqdn(socket.gethostname()))
    except OSError:
        # Name resolution can fail on mis-configured hosts; fall back to
        # loopback instead of aborting the request. (Was a bare `except:`,
        # which also swallowed KeyboardInterrupt/SystemExit.)
        addr = '127.0.0.1'
    form = json.loads(request.data)
    if request.method == 'POST':
        source_image_path = Source_image_path()
        files = source_image_path.select_files_path(form['file_url'], addr)
        count = len(files)
        print(count)
        # First transaction: persist the Source itself so it gets an id.
        with db.auto_commit():
            source = Source()
            form['count'] = count
            source.set_attrs(form)
            db.session.add(source)
        # Second transaction: one path row per discovered file.
        with db.auto_commit():
            for file in files:
                source_image_path = Source_image_path()
                form['source_id'] = source.id
                form['image_url'] = file
                source_image_path.set_attrs(form)
                db.session.add(source_image_path)
        return json.dumps({'status' : 'success'})
def create(self, payloads):
    """Persist a new Source and return a response dict.

    ``payloads`` must provide ``account_number``, ``bank`` and ``alias``.
    On a failed commit the session is rolled back and the underlying DB
    error args are returned with the error flag set.
    """
    response = ResponseBuilder()
    source = Source()
    source.account_number = payloads['account_number']
    source.bank = payloads['bank']
    source.alias = payloads['alias']
    db.session.add(source)
    try:
        db.session.commit()
        data = source.as_dict()
        return response.set_data(data).build()
    except SQLAlchemyError as e:
        # Fix: without a rollback the session stays in a failed state and
        # poisons every subsequent operation on it.
        db.session.rollback()
        data = e.orig.args
        return response.set_data(data).set_error(True).build()
def get_intersection(source):
    """ Endpoint to fetch `Product`s by attr's.
        /intersect?<field1>=<values>&<field2>=<values>
        translates to:
        where <field1> in (<vals>) and <field2> in (<vals>)
        @Request:
        - <field>=<values> : n number of fields and values to make the qry
        - cols : columns
        - p : page
        - ipp : items per page
        @Response:
        - products
    """
    logger.info("Query source catalogue...")
    params = request.args.to_dict()
    # Fix: the emptiness check must run BEFORE the source key is injected,
    # otherwise `params` is never empty and the branch is unreachable.
    # (The original also passed the error code as the log format string.)
    if not params:
        logger.error("70001 - No params to query with")
    params['source'] = source
    logger.debug(params)
    # Pagination defaults
    if 'p' not in params:
        params['p'] = 1
    if 'ipp' not in params:
        params['ipp'] = 100
    # Query items
    _prods = Source.get_products(**params)
    return jsonify({'status': 'OK', 'products': _prods})
def create_source():
    """Load the test and train annotation files into the DB.

    Each file is tab-separated: ``tokens \\t slot-labels``, where the last
    slot token is the intent name. New Labels and Intents are deduplicated
    through the module-level ``slots_set`` / ``intents_set`` caches; a
    Source/Target pair is staged per line, then everything is committed once.
    """
    _load_annotation_file(test_fn, training_set=False)
    _load_annotation_file(train_fn, training_set=True)
    db.session.commit()


def _load_annotation_file(path, training_set):
    """Parse one annotation file and stage its Label/Intent/Source/Target rows.

    ``training_set`` marks the rows as training data (train file) or not
    (test file); the staging logic is otherwise identical for both files.
    """
    with open(path, 'r') as fin:
        for line in fin:
            tokens, slots = line.split("\t")
            slots = slots.split(' ')
            intent_name = slots[-1].strip()
            slots[-1] = "O"
            # Register any slot label not seen before.
            for s in slots:
                if s and s not in slots_set:
                    slots_set.add(s)
                    db.session.add(Label(label=s))
            slots = ' '.join(slots).strip()
            if intent_name not in intents_set:
                intents_set.add(intent_name)
                intent = Intent(intent=intent_name)
                db.session.add(intent)
            else:
                intent = Intent.query.filter_by(intent=intent_name).first()
            # NOTE(review): intent.id is None for a freshly created Intent
            # until the session flushes — confirm autoflush/relationships
            # make these *_id assignments consistent after commit.
            target = Target(intent=intent, intent_id=intent.id)
            if training_set:
                source = Source(tokens=tokens.strip(), labels=slots,
                                intent=intent, intent_id=intent.id,
                                target=target, target_id=target.id,
                                training_set=True)
            else:
                source = Source(tokens=tokens.strip(), labels=slots,
                                intent=intent, intent_id=intent.id,
                                target=target, target_id=target.id)
            target.source = source
            target.source_id = source.id
            if training_set:
                target.is_training_set = True
            db.session.add(source)
            db.session.add(target)