def exportDivSchedulesRefFormat(self, startgameday, prefix=""):
    headers = ['Game#', 'Game#', 'Tourn Match#', 'Date', 'Day', 'Time', 'Division', 'Round',
               'Home', 'Visitor', 'Field', 'cr_trust', 'ar_trust', 'm_trust']
    datasheet = Dataset(title=prefix)
    datasheet.headers = list(headers)
    schedule_list = self.dbinterface.findDivisionSchedulePHMSARefFormat(startgameday)
    if prefix:
        tabformat_list = [(_offset + x[match_id_CONST], x[match_id_CONST],
                           tournMapGamedayIdToCalendar(x[gameday_id_CONST]),
                           tournMapGamedayIdToDate(x[gameday_id_CONST]),
                           datetime.strptime(x[start_time_CONST], "%H:%M").strftime("%I:%M %p"),
                           x[age_CONST] + x[gen_CONST], x[round_CONST],
                           x[home_CONST], x[away_CONST],
                           self.fieldinfo[self.findexerGet(x[venue_CONST])]['name'],
                           _reftrust_level[_rindexerGet(getTournDivID(x[age_CONST], x[gen_CONST]))]['cr'],
                           _reftrust_level[_rindexerGet(getTournDivID(x[age_CONST], x[gen_CONST]))]['ar'],
                           _reftrust_level[_rindexerGet(getTournDivID(x[age_CONST], x[gen_CONST]))]['ment'])
                          for x in schedule_list]
    else:
        tabformat_list = [(mapGamedayIdToCalendar(x[gameday_id_CONST], format=1), 'Saturday',
                           datetime.strptime(x[start_time_CONST], "%H:%M").strftime("%I:%M %p"),
                           x[age_CONST] + x[gen_CONST], x[home_CONST], x[away_CONST],
                           self.fieldinfo[self.findexerGet(x[venue_CONST])]['name'])
                          for x in schedule_list]
    if prefix:
        atabformat_list = [(_offset + i, j[0], j[1], j[2], j[3], j[4], j[5], j[6],
                            j[7], j[8], j[9], j[10], j[11], j[12])
                           for i, j in enumerate(tabformat_list)]
    else:
        atabformat_list = tabformat_list
    for tabformat in atabformat_list:
        datasheet.append(tabformat)
    sheet_xls_relpath = prefix + '_RefFormat.xls'
    sheet_xls_abspath = os.path.join('/home/henry/workspace/datagraph/bottle_baseball/download/xls',
                                     sheet_xls_relpath)
    with open(sheet_xls_abspath, 'wb') as f:
        f.write(datasheet.xls)

def render_to_response(self, context, **response_kwargs):
    """If exporting, generate a csv."""
    if 'export' in self.request.GET:
        data = Dataset()
        data.headers = (
            'Name', 'Messages', 'Threads', 'Replies', 'Posters', 'Category',
            'Tags', 'State', 'Members', 'Admins', 'Private', 'Published',
            'Moderated', 'Featured', 'Member list published', 'Created', 'Created By'
        )
        for group in self.get_queryset():
            data.append((
                group.group.name, group.message_count, group.thread_count,
                group.reply_count, group.posters, group.category.name,
                groups_tags_string([group]), group.state, group.member_count,
                group.owner_count, group.private, group.published,
                group.moderated, group.featured, group.member_list_published,
                group.created_at, group.created_by
            ))
        response = HttpResponse(data.csv, content_type='text/csv')
        response['Content-Disposition'] = 'attachment; filename=groups.csv'
        return response
    else:
        return super(GroupReportListView, self).render_to_response(
            context, **response_kwargs)

def exportDivTeamSchedules(self, div_id, age, gen, numteams, prefix=""):
    headers = ['Gameday#', 'Game Date', 'Day', 'Start Time', 'Venue', 'Home Team', 'Away Team']
    datasheet_list = []
    for team_id in range(1, numteams + 1):
        team_str = age + gen + str(team_id)
        datasheet = Dataset(title=team_str)
        datasheet.headers = list(headers)
        teamdata_list = self.dbinterface.findTeamSchedule(age, gen, team_id)
        tabformat_list = [(x[gameday_id_CONST],
                           tournMapGamedayIdToCalendar(x[gameday_id_CONST]),
                           tournMapGamedayIdToDate(x[gameday_id_CONST]),
                           datetime.strptime(x[start_time_CONST], "%H:%M").strftime("%I:%M %p"),
                           self.fieldinfo[self.findexerGet(x[venue_CONST])]['name'],
                           x[home_CONST], x[away_CONST]) for x in teamdata_list]
        for tabformat in tabformat_list:
            datasheet.append(tabformat)
        datasheet_list.append(datasheet)
    book = Databook(datasheet_list)
    cdir = os.path.dirname(__file__)
    bookname_xls = prefix + age + gen + '_schedule.xls'
    bookname_html = prefix + age + gen + '_schedule.html'
    booknamefull_xls = os.path.join('/home/henry/workspace/datagraph/bottle_baseball/download/xls', bookname_xls)
    booknamefull_html = os.path.join('~/workspace/datagraph/bottle_baseball/download/html', bookname_html)
    with open(booknamefull_xls, 'wb') as f:
        f.write(book.xls)

def generate_elimdivxls(self, genxls_id):
    headers = ['Match ID', 'Game Date', 'Day', 'Time', 'Division', 'Home', 'Visitor', 'Venue', 'Round', 'Comment']
    datasheet_list = list()
    for divinfo in self.divinfo_list:
        div_id = divinfo[genxls_id]
        div_age = divinfo['div_age']
        div_gen = divinfo['div_gen']
        div_str = div_age + div_gen
        datasheet = Dataset(title=div_str)
        datasheet.headers = list(headers)
        match_list = self.sdbinterface.get_schedule(genxls_id, div_age=div_age,
                                                    div_gen=div_gen, elim_flag=True)
        # note conversions for time from 24-hour to am/pm format
        tabformat_list = [(y['match_id'], x['game_date'],
                           parser.parse(x['game_date']).strftime("%a"),
                           datetime.strptime(x['start_time'], "%H:%M").strftime("%I:%M%p"),
                           div_str, self.team_map(div_id, y['home']), self.team_map(div_id, y['away']),
                           self.fieldinfo_list[self.findexerGet(y['venue'])]['field_name'],
                           y['around'], y['comment'])
                          for x in match_list for y in x['gameday_data']]
        for tabformat in tabformat_list:
            datasheet.append(tabformat)
        datasheet_list.append(datasheet)
    book = Databook(datasheet_list)
    bookname_xls_relpath = self.schedcol_name + "_byDivision.xls"
    bookname_xls_fullpath = os.path.join(self.dir_path, bookname_xls_relpath)
    with open(bookname_xls_fullpath, 'wb') as f:
        f.write(book.xls)
    return [{'path': bookname_xls_relpath}]

def export(self, queryset=None, task_meta=None):
    if queryset is None:
        queryset = self.get_queryset()
    headers = self.get_export_headers()
    data = Dataset(headers=headers)
    if isinstance(queryset, QuerySet):
        # Iterate without the queryset cache, to avoid wasting memory when
        # exporting large datasets.
        iterable = queryset.iterator()
    else:
        iterable = queryset
    if task_meta is not None:
        # initialize the total amount across multiple resources
        self.num_done = task_meta['done']
    for obj in iterable:
        data.append(self.export_resource(obj))
        if task_meta is not None:
            self._update_task_state(task_meta)
            logger.debug('Num done: %d' % self.num_done)
    return data

def member_query(db):
    node_id = request.params.get('node_id')
    realname = request.params.get('realname')
    idcard = request.params.get('idcard')
    mobile = request.params.get('mobile')
    _query = db.query(
        models.SlcMember,
        models.SlcNode.node_name
    ).filter(
        models.SlcNode.id == models.SlcMember.node_id
    )
    if idcard:
        _query = _query.filter(models.SlcMember.idcard == idcard)
    if mobile:
        _query = _query.filter(models.SlcMember.mobile == mobile)
    if node_id:
        _query = _query.filter(models.SlcMember.node_id == node_id)
    if realname:
        _query = _query.filter(models.SlcMember.realname.like('%' + realname + '%'))
    if request.path == '/member':
        return render("bus_member_list",
                      page_data=get_page_data(_query),
                      node_list=db.query(models.SlcNode),
                      **request.params)
    elif request.path == "/member/export":
        data = Dataset()
        data.append((u'区域', u'姓名', u'用户名', u'证件号', u'邮箱', u'联系电话', u'地址', u'创建时间'))
        for i, _node_name in _query:
            data.append((
                _node_name, i.realname, i.member_name, i.idcard,
                i.email, i.mobile, i.address, i.create_time
            ))
        name = u"RADIUS-MEMBER-" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S") + ".xls"
        with open(u'./static/xls/%s' % name, 'wb') as f:
            f.write(data.xls)
        return static_file(name, root='./static/xls', download=True)

def csv_output_computers():
    """Utility function to generate a CSV output of computers information
    from Incredibus data, for audit/cleansing purposes.
    """
    computers = Computer.objects.all()
    d = Dataset()
    d.headers = [
        'ID', 'HOSTNAME', 'CHASSIS', 'PROBABLE OWNER EMAIL', 'PROBABLE OWNER CC',
        'MANAGED BY EMAIL', 'ASSET NO.', 'SERIAL NO.'
    ]
    for i in computers:
        row = [i.pk, i.sam_account_name[:-1], i.chassis]
        if i.probable_owner:
            row += [i.probable_owner.email.lower(), i.probable_owner.cost_centre]
        else:
            row += ['', '']
        if i.managed_by:
            row += [i.managed_by.email.lower()]
        else:
            row += ['']
        row += ['', i.serial_number]
        d.append(row)
    f = open('/tmp/computers.csv', 'w')
    f.write(d.csv)
    f.close()

def dump_program_reports():
    print('Running program report dump...')
    dataset = Dataset()
    dataset.headers = ['Site ID', 'Mobile', 'Timestamp', 'Group', 'Program',
                       'Period code', 'Period number', 'Atot', 'Arel', 'Tin',
                       'Tout', 'Dead', 'DefT', 'Dcur', 'Dmed']
    for report in ProgramReport.objects.select_related('group', 'program').order_by('created'):
        if not report.reporter.mobile.startswith('+'):
            continue
        dataset.append([
            report.site.hcid, report.reporter.mobile,
            timegm(report.created.utctimetuple()),
            report.group.code, report.program.code,
            report.period_code, report.period_number,
            report.new_marasmic_patients, report.readmitted_patients,
            report.patients_transferred_in, report.patients_transferred_out,
            report.patient_deaths, report.unconfirmed_patient_defaults,
            report.patients_cured, report.unresponsive_patients
        ])
    with open('program_reports.csv', 'w') as f:
        f.write(dataset.csv)
    print('Done')

def generate_fieldxls(self):
    headers = ['Game Date', 'Day', 'Time', 'Division', 'Home', 'Visitor', 'Venue']
    datasheet_list = list()
    for fieldinfo in self.fieldinfo_list:
        field_name = fieldinfo['field_name']
        field_id = fieldinfo['field_id']
        datasheet = Dataset(title=field_name)
        datasheet.headers = list(headers)
        match_list = self.sdbinterface.get_schedule('field_id', field_id=field_id)
        tabformat_list = [(x['game_date'],
                           parser.parse(x['game_date']).strftime("%a"),
                           datetime.strptime(x['start_time'], "%H:%M").strftime("%I:%M%p"),
                           x['div_age'] + x['div_gen'], x['home'], x['away'], field_name)
                          for x in match_list]
        for tabformat in tabformat_list:
            datasheet.append(tabformat)
        datasheet_list.append(datasheet)
    book = Databook(datasheet_list)
    bookname_xls_relpath = self.schedcol_name + "_byField.xls"
    bookname_xls_fullpath = os.path.join(self.dir_path, bookname_xls_relpath)
    with open(bookname_xls_fullpath, 'wb') as f:
        f.write(book.xls)
    return [{'path': bookname_xls_relpath}]

def render_to_response(self, context, **response_kwargs):
    """If exporting, generate a csv."""
    if 'export' in self.request.GET:
        data = Dataset()
        data.headers = (
            u'Name', u'Email', u'Phone', u'Zip', u'State', u'Joined',
            u'Last login', u'Total Groups Joined', u'Flags received',
            u'Messages sent', u'Staff?', u'Superuser?', u'Banned?', u'Visits'
        )
        for user in self.get_queryset():
            data.append((
                user, user.email, user.phone, user.zip_code, user.state,
                user.date_joined, user.last_login, user.total_groups_joined,
                user.flags_received, user.messages_sent, user.is_staff,
                user.is_superuser, user.is_banned, user.visit_count
            ))
        response = HttpResponse(data.csv, content_type='text/csv')
        response['Content-Disposition'] = 'attachment; filename=users.csv'
        return response
    else:
        return super(UserReportListView, self).render_to_response(
            context, **response_kwargs)

def acceptlog_query(db, render):
    node_id = request.params.get('node_id')
    accept_type = request.params.get('accept_type')
    account_number = request.params.get('account_number')
    operator_name = request.params.get('operator_name')
    query_begin_time = request.params.get('query_begin_time')
    query_end_time = request.params.get('query_end_time')
    opr_nodes = get_opr_nodes(db)
    _query = db.query(
        models.SlcRadAcceptLog.id,
        models.SlcRadAcceptLog.accept_type,
        models.SlcRadAcceptLog.accept_time,
        models.SlcRadAcceptLog.accept_desc,
        models.SlcRadAcceptLog.operator_name,
        models.SlcRadAcceptLog.accept_source,
        models.SlcRadAcceptLog.account_number,
        models.SlcMember.node_id,
        models.SlcNode.node_name
    ).filter(
        models.SlcRadAcceptLog.account_number == models.SlcRadAccount.account_number,
        models.SlcMember.member_id == models.SlcRadAccount.member_id,
        models.SlcNode.id == models.SlcMember.node_id
    )
    if operator_name:
        _query = _query.filter(models.SlcRadAcceptLog.operator_name == operator_name)
    if node_id:
        _query = _query.filter(models.SlcMember.node_id == node_id)
    else:
        _query = _query.filter(models.SlcMember.node_id.in_([i.id for i in opr_nodes]))
    if account_number:
        _query = _query.filter(models.SlcRadAcceptLog.account_number.like('%' + account_number + '%'))
    if accept_type:
        _query = _query.filter(models.SlcRadAcceptLog.accept_type == accept_type)
    if query_begin_time:
        _query = _query.filter(models.SlcRadAcceptLog.accept_time >= query_begin_time + ' 00:00:00')
    if query_end_time:
        _query = _query.filter(models.SlcRadAcceptLog.accept_time <= query_end_time + ' 23:59:59')
    _query = _query.order_by(models.SlcRadAcceptLog.accept_time.desc())
    type_map = ACCEPT_TYPES
    if request.path == '/':
        return render(
            "bus_acceptlog_list",
            page_data=get_page_data(_query),
            node_list=opr_nodes,
            type_map=type_map,
            get_orderid=lambda aid: db.query(models.SlcMemberOrder.order_id).filter_by(accept_id=aid).scalar(),
            **request.params
        )
    elif request.path == '/export':
        data = Dataset()
        data.append((u'区域', u'上网账号', u'受理类型', u'受理时间', u'受理渠道', u'操作员', u'受理描述'))
        for i in _query:
            data.append((
                i.node_name, i.account_number, type_map.get(i.accept_type),
                i.accept_time, i.accept_source, i.operator_name, i.accept_desc
            ))
        name = u"RADIUS-ACCEPTLOG-" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S") + ".xls"
        return export_file(name, data)

def post(self):
    node_id = self.get_argument('node_id', None)
    accept_type = self.get_argument('accept_type', None)
    account_number = self.get_argument('account_number', None)
    operator_name = self.get_argument('operator_name', None)
    query_begin_time = self.get_argument('query_begin_time', None)
    query_end_time = self.get_argument('query_end_time', None)
    opr_nodes = self.get_opr_nodes()
    _query = self.db.query(
        models.TrAcceptLog.id,
        models.TrAcceptLog.accept_type,
        models.TrAcceptLog.accept_time,
        models.TrAcceptLog.accept_desc,
        models.TrAcceptLog.operator_name,
        models.TrAcceptLog.accept_source,
        models.TrAcceptLog.account_number,
        models.TrCustomer.node_id,
        models.TrNode.node_name
    ).filter(
        models.TrAcceptLog.account_number == models.TrAccount.account_number,
        models.TrCustomer.customer_id == models.TrAccount.customer_id,
        models.TrNode.id == models.TrCustomer.node_id
    )
    if operator_name:
        _query = _query.filter(models.TrAcceptLog.operator_name == operator_name)
    if node_id:
        _query = _query.filter(models.TrCustomer.node_id == node_id)
    else:
        _query = _query.filter(models.TrCustomer.node_id.in_([i.id for i in opr_nodes]))
    if account_number:
        _query = _query.filter(models.TrAcceptLog.account_number.like('%' + account_number + '%'))
    if accept_type:
        _query = _query.filter(models.TrAcceptLog.accept_type == accept_type)
    if query_begin_time:
        _query = _query.filter(models.TrAcceptLog.accept_time >= query_begin_time + ' 00:00:00')
    if query_end_time:
        _query = _query.filter(models.TrAcceptLog.accept_time <= query_end_time + ' 23:59:59')
    _query = _query.order_by(models.TrAcceptLog.accept_time.desc())
    type_map = ACCEPT_TYPES
    if self.request.path == '/admin/customer/acceptlog':
        return self.render(
            "acceptlog_list.html",
            page_data=self.get_page_data(_query),
            node_list=opr_nodes,
            type_map=type_map,
            get_orderid=lambda aid: self.db.query(models.TrCustomerOrder.order_id).filter_by(accept_id=aid).scalar(),
            **self.get_params()
        )
    elif self.request.path == '/admin/customer/acceptlog/export':
        data = Dataset()
        data.append((u'区域', u'上网账号', u'受理类型', u'受理时间', u'受理渠道', u'操作员', u'受理描述'))
        for i in _query:
            data.append((
                i.node_name, i.account_number, type_map.get(i.accept_type),
                i.accept_time, i.accept_source, i.operator_name, i.accept_desc
            ))
        name = u"RADIUS-ACCEPTLOG-" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S") + ".xls"
        return self.export_file(name, data)

def dataset(data, headers=None):
    """
    `data` is a list of dicts.
    """
    dataset = Dataset()
    dataset.dict = data
    if headers:
        dataset.headers = headers
    return dataset

def read_database():
    with open(COMO_BATTERY_FILE, 'r') as como:
        data = Dataset(headers=['time', 'capacity', 'cycles'])
        # http://stackoverflow.com/questions/10206905/
        # how-to-convert-json-string-to-dictionary-and-save-order-in-keys
        data.dict = json.loads(
            zlib.decompress(como.read()),
            object_pairs_hook=collections.OrderedDict)
        return data

def acceptlog_query(db):
    node_id = request.params.get('node_id')
    accept_type = request.params.get('accept_type')
    account_number = request.params.get('account_number')
    operator_name = request.params.get('operator_name')
    query_begin_time = request.params.get('query_begin_time')
    query_end_time = request.params.get('query_end_time')
    _query = db.query(
        models.SlcRadAcceptLog.id,
        models.SlcRadAcceptLog.accept_type,
        models.SlcRadAcceptLog.accept_time,
        models.SlcRadAcceptLog.accept_desc,
        models.SlcRadAcceptLog.operator_name,
        models.SlcRadAcceptLog.accept_source,
        models.SlcRadAcceptLog.account_number,
        models.SlcMember.node_id,
        models.SlcNode.node_name
    ).filter(
        models.SlcRadAcceptLog.account_number == models.SlcRadAccount.account_number,
        models.SlcMember.member_id == models.SlcRadAccount.member_id,
        models.SlcNode.id == models.SlcMember.node_id
    )
    if operator_name:
        _query = _query.filter(models.SlcRadAcceptLog.operator_name == operator_name)
    if node_id:
        _query = _query.filter(models.SlcMember.node_id == node_id)
    if account_number:
        _query = _query.filter(models.SlcRadAcceptLog.account_number.like('%' + account_number + '%'))
    if accept_type:
        _query = _query.filter(models.SlcRadAcceptLog.accept_type == accept_type)
    if query_begin_time:
        _query = _query.filter(models.SlcRadAcceptLog.accept_time >= query_begin_time + ' 00:00:00')
    if query_end_time:
        _query = _query.filter(models.SlcRadAcceptLog.accept_time <= query_end_time + ' 23:59:59')
    _query = _query.order_by(models.SlcRadAcceptLog.accept_time.desc())
    type_map = {'open': u'开户', 'pause': u'停机', 'resume': u'复机',
                'cancel': u'销户', 'next': u'续费', 'charge': u'充值'}
    if request.path == '/acceptlog':
        return render(
            "bus_acceptlog_list",
            page_data=get_page_data(_query),
            node_list=db.query(models.SlcNode),
            type_map=type_map,
            get_orderid=lambda aid: db.query(models.SlcMemberOrder.order_id).filter_by(accept_id=aid).scalar(),
            **request.params
        )
    elif request.path == '/acceptlog/export':
        data = Dataset()
        data.append((u'区域', u'上网账号', u'受理类型', u'受理时间', u'受理渠道', u'操作员', u'受理描述'))
        for i in _query:
            data.append((
                i.node_name, i.account_number, type_map.get(i.accept_type),
                i.accept_time, i.accept_source, i.operator_name, i.accept_desc
            ))
        name = u"RADIUS-ACCEPTLOG-" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S") + ".xls"
        with open(u'./static/xls/%s' % name, 'wb') as f:
            f.write(data.xls)
        return static_file(name, root='./static/xls', download=True)

def order_query(db, render):
    node_id = request.params.get('node_id')
    product_id = request.params.get('product_id')
    pay_status = request.params.get('pay_status')
    account_number = request.params.get('account_number')
    query_begin_time = request.params.get('query_begin_time')
    query_end_time = request.params.get('query_end_time')
    opr_nodes = get_opr_nodes(db)
    _query = db.query(
        models.SlcMemberOrder,
        models.SlcMember.node_id,
        models.SlcMember.realname,
        models.SlcRadProduct.product_name,
        models.SlcNode.node_name
    ).filter(
        models.SlcMemberOrder.product_id == models.SlcRadProduct.id,
        models.SlcMemberOrder.member_id == models.SlcMember.member_id,
        models.SlcNode.id == models.SlcMember.node_id
    )
    if node_id:
        _query = _query.filter(models.SlcMember.node_id == node_id)
    else:
        _query = _query.filter(models.SlcMember.node_id.in_([i.id for i in opr_nodes]))
    if account_number:
        _query = _query.filter(models.SlcMemberOrder.account_number.like('%' + account_number + '%'))
    if product_id:
        _query = _query.filter(models.SlcMemberOrder.product_id == product_id)
    if pay_status:
        _query = _query.filter(models.SlcMemberOrder.pay_status == pay_status)
    if query_begin_time:
        _query = _query.filter(models.SlcMemberOrder.create_time >= query_begin_time + ' 00:00:00')
    if query_end_time:
        _query = _query.filter(models.SlcMemberOrder.create_time <= query_end_time + ' 23:59:59')
    _query = _query.order_by(models.SlcMemberOrder.create_time.desc())
    if request.path == '/':
        return render("bus_order_list",
                      node_list=opr_nodes,
                      products=db.query(models.SlcRadProduct).filter_by(product_status=0),
                      page_data=get_page_data(_query),
                      **request.params)
    elif request.path == '/export':
        data = Dataset()
        data.append((
            u'区域', u"用户姓名", u'上网账号', u'资费', u"订购时间",
            u'订单费用', u'实缴费用', u'支付状态', u'订购渠道', u'订单描述'
        ))
        _f2y = utils.fen2yuan
        _fms = utils.fmt_second
        _pst = {0: u'未支付', 1: u'已支付', 2: u'已取消'}
        for i, _, _realname, _product_name, _node_name in _query:
            data.append((
                _node_name, _realname, i.account_number, _product_name,
                i.create_time, _f2y(i.order_fee), _f2y(i.actual_fee),
                _pst.get(i.pay_status), i.order_source, i.order_desc
            ))
        name = u"RADIUS-ORDERS-" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S") + ".xls"
        return export_file(name, data)

def post(self):
    node_id = self.get_argument('node_id', None)
    product_id = self.get_argument('product_id', None)
    pay_status = self.get_argument('pay_status', None)
    account_number = self.get_argument('account_number', None)
    query_begin_time = self.get_argument('query_begin_time', None)
    query_end_time = self.get_argument('query_end_time', None)
    opr_nodes = self.get_opr_nodes()
    _query = self.db.query(
        models.TrCustomerOrder,
        models.TrCustomer.node_id,
        models.TrCustomer.realname,
        models.TrProduct.product_name,
        models.TrNode.node_name
    ).filter(
        models.TrCustomerOrder.product_id == models.TrProduct.id,
        models.TrCustomerOrder.customer_id == models.TrCustomer.customer_id,
        models.TrNode.id == models.TrCustomer.node_id
    )
    if node_id:
        _query = _query.filter(models.TrCustomer.node_id == node_id)
    else:
        _query = _query.filter(models.TrCustomer.node_id.in_([i.id for i in opr_nodes]))
    if account_number:
        _query = _query.filter(models.TrCustomerOrder.account_number.like('%' + account_number + '%'))
    if product_id:
        _query = _query.filter(models.TrCustomerOrder.product_id == product_id)
    if pay_status:
        _query = _query.filter(models.TrCustomerOrder.pay_status == pay_status)
    if query_begin_time:
        _query = _query.filter(models.TrCustomerOrder.create_time >= query_begin_time + ' 00:00:00')
    if query_end_time:
        _query = _query.filter(models.TrCustomerOrder.create_time <= query_end_time + ' 23:59:59')
    _query = _query.order_by(models.TrCustomerOrder.create_time.desc())
    if self.request.path == '/admin/customer/order':
        return self.render("order_list.html",
                           node_list=opr_nodes,
                           products=self.db.query(models.TrProduct).filter_by(product_status=0),
                           page_data=self.get_page_data(_query),
                           **self.get_params())
    elif self.request.path == '/admin/customer/order/export':
        data = Dataset()
        data.append((
            u'区域', u"用户姓名", u'上网账号', u'资费', u"订购时间",
            u'订单费用', u'实缴费用', u'支付状态', u'订购渠道', u'订单描述'
        ))
        _f2y = utils.fen2yuan
        _fms = utils.fmt_second
        _pst = {0: u'未支付', 1: u'已支付', 2: u'已取消'}
        for i, _, _realname, _product_name, _node_name in _query:
            data.append((
                _node_name, _realname, i.account_number, _product_name,
                i.create_time, _f2y(i.order_fee), _f2y(i.actual_fee),
                _pst.get(i.pay_status), i.order_source, i.order_desc
            ))
        name = u"RADIUS-ORDERS-" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S") + ".xls"
        return self.export_file(name, data)

def card_list(db):
    product_id = request.params.get('product_id')
    card_type = request.params.get('card_type')
    card_status = request.params.get('card_status')
    batch_no = request.params.get('batch_no')
    query_begin_time = request.params.get('query_begin_time')
    query_end_time = request.params.get('query_end_time')
    _query = db.query(models.SlcRechargerCard)
    if product_id and card_type == '0':
        _query = _query.filter(models.SlcRechargerCard.product_id == product_id)
    if card_type:
        _query = _query.filter(models.SlcRechargerCard.card_type == card_type)
    if batch_no:
        _query = _query.filter(models.SlcRechargerCard.batch_no == batch_no)
    if card_status:
        _query = _query.filter(models.SlcRechargerCard.card_status == card_status)
    if query_begin_time:
        _query = _query.filter(models.SlcRechargerCard.create_time >= query_begin_time + ' 00:00:00')
    if query_end_time:
        _query = _query.filter(models.SlcRechargerCard.create_time <= query_end_time + ' 23:59:59')
    products = db.query(models.SlcRadProduct).filter(
        models.SlcRadProduct.product_status == 0,
        models.SlcRadProduct.product_policy.in_([0, 2])
    )
    if request.path == '/list':
        print "total:", _query.count()
        return render("card_list",
                      page_data=get_page_data(_query),
                      card_types=forms.card_types,
                      card_states=forms.card_states,
                      products=products,
                      colors={0: '', 1: 'class="success"', 2: 'class="warning"', 3: 'class="danger"'},
                      **request.params)
    elif request.path == '/export':
        data = Dataset()
        data.append((
            u'批次号', u'充值卡号', u'充值卡密码', u'充值卡类型', u'状态',
            u'资费id', u'面值/售价', u"授权月数", u"过期时间", u'创建时间'
        ))
        print "total:", _query.count()
        for i in _query:
            data.append((
                i.batch_no, i.card_number, utils.decrypt(i.card_passwd),
                forms.card_types[i.card_type], forms.card_states[i.card_status],
                get_product_name(db, i.product_id), utils.fen2yuan(i.fee_value),
                i.months, i.expire_date, i.create_time
            ))
        name = u"RADIUS-CARD-" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S") + ".xls"
        with open(u'./static/xls/%s' % name, 'wb') as f:
            f.write(data.xls)
        return static_file(name, root='./static/xls', download=True)

def post(self):
    node_id = self.get_argument('node_id', None)
    account_number = self.get_argument('account_number', None)
    query_begin_time = self.get_argument('query_begin_time', None)
    query_end_time = self.get_argument('query_end_time', None)
    opr_nodes = self.get_opr_nodes()
    _query = self.db.query(
        models.TrBilling,
        models.TrCustomer.node_id,
        models.TrNode.node_name
    ).filter(
        models.TrBilling.account_number == models.TrAccount.account_number,
        models.TrCustomer.customer_id == models.TrAccount.customer_id,
        models.TrNode.id == models.TrCustomer.node_id
    )
    if node_id:
        _query = _query.filter(models.TrCustomer.node_id == node_id)
    else:
        _query = _query.filter(models.TrCustomer.node_id.in_(i.id for i in opr_nodes))
    if account_number:
        _query = _query.filter(models.TrBilling.account_number.like('%' + account_number + '%'))
    if query_begin_time:
        _query = _query.filter(models.TrBilling.create_time >= query_begin_time + ' 00:00:00')
    if query_end_time:
        _query = _query.filter(models.TrBilling.create_time <= query_end_time + ' 23:59:59')
    _query = _query.order_by(models.TrBilling.create_time.desc())
    if self.request.path == '/admin/customer/billing':
        return self.render("billing_list.html",
                           node_list=opr_nodes,
                           page_data=self.get_page_data(_query),
                           **self.get_params())
    elif self.request.path == '/admin/customer/billing/export':
        data = Dataset()
        data.append((
            u'区域', u'上网账号', u'BAS地址', u'会话编号', u'记账开始时间',
            u'会话时长', u'已扣时长', u"已扣流量", u'应扣费用', u'实扣费用',
            u'剩余余额', u'剩余时长', u'剩余流量', u'是否扣费', u'扣费时间'
        ))
        _f2y = utils.fen2yuan
        _fms = utils.fmt_second
        _k2m = utils.kb2mb
        _s2h = utils.sec2hour
        for i, _, _node_name in _query:
            data.append((
                _node_name, i.account_number, i.nas_addr, i.acct_session_id,
                i.acct_start_time, _fms(i.acct_session_time), _fms(i.acct_times),
                _k2m(i.acct_flows), _f2y(i.acct_fee), _f2y(i.actual_fee),
                _f2y(i.balance), _s2h(i.time_length), _k2m(i.flow_length),
                (i.is_deduct == 0 and u'否' or u'是'), i.create_time
            ))
        name = u"RADIUS-BILLING-" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S") + ".xls"
        return self.export_file(name, data)

def fetch_timetable(browser, link):
    # Utility method to return a nice Dataset from a timetable url
    if debug:
        puts('Fetching timetable from %s' % link)
    response = browser.follow_link(link)
    soup = BeautifulSoup(response.read())
    table = soup.find('table')
    timetable = []
    for row in table.findAll('tr'):
        title = None
        title_test = row.find('td')
        if title_test.find('span'):
            title = title_test.getText()
        values = []
        for col in row.findAll('td')[1:]:
            value = col.getText()
            if value == ' ':
                value = None
            if isinstance(value, basestring) and ':' in value:
                try:
                    time = value.strip().split(':')
                    time = datetime.time(*[int(s) for s in time])
                except:
                    pass
                else:
                    value = time
            values.append(value)
        timetable.append((title, values))
    while len(timetable):
        if 'TRAIN NO.' not in timetable[0][0]:
            del timetable[0]
        else:
            break
    train_nums = timetable[0]
    data = Dataset()
    data.headers = train_nums[1]
    if debug:
        puts(repr(data.headers))
    for place, times in timetable[1:]:
        if debug:
            puts(repr((place, times)))
        data.rpush(times, tags=[place.title().replace('`S', "'s")])
    # Strip out TRAIN NO. columns
    while 1:
        try:
            del data['TRAIN NO.']
        except:
            break
    return data

def test_in_the_news(self):
    from tablib import Dataset, Databook
    data = Dataset()
    mention = Mention("http://www.lvbusinesspress.com/articles/2011/11/28/news/iq_49033368.txt")
    mention.append(data)
    mention = Mention("http://www.lasvegasgleaner.com/las_vegas_gleaner/2011/12/unwitting-local-tools-of-corporate-overlords-hire-a-lawyer.html - NPRI in the News")
    mention.append(data)
    self.assertEqual(len(data), 2)
    self.assertEqual(len(data.filter(['in-the-news'])), 1)

def export(self, empty=False):
    serializer = self.empty_serializer
    dataset = Dataset(headers=self.get_export_header(serializer))
    if not empty:
        for instance in self.get_export_queryset():
            dataset.append(self.get_export_row(serializer, instance))
    return ExportResult(
        filename=self.get_export_filename(),
        dataset=dataset,
        file_formats=self.file_formats
    )

def seven_largest_single_data_sheet(self):
    work_sheet = Dataset(title="7 Largest Disbursements")
    work_sheet.headers = ['iso', 'country', 'position', 'shown amount', 'shown donor']
    for country in models.Recipient.objects.all():
        table = LargestDisbursementTable(country=country).as_dictionary()["table"]
        for position, disbursement in enumerate(table):
            formatted = disbursement["disbursement"]
            donor = disbursement["donor"]
            work_sheet.append([country.iso3, country.name, position + 1, formatted, donor])
    return work_sheet

def five_largest_graph_data_sheet(self):
    work_sheet = Dataset(title="Five Largest Graph")
    work_sheet.headers = ['iso', 'country', 'position', 'shown percentage', 'real percentage', 'donor']
    for country in models.Recipient.objects.all():
        table = FiveLargestGraph(country=country).as_list()
        for position, disbursement in enumerate(table):
            real = disbursement["percentage"]["real"]
            formatted = disbursement["percentage"]["formatted"]
            donor = disbursement["name"]
            work_sheet.append([country.iso3, country.name, position + 1, formatted, real, donor])
    return work_sheet

def test_logentry_creation_with_import_obj_exception(self):
    # from https://mail.python.org/pipermail/python-dev/2008-January/076194.html
    def monkeypatch_method(cls):
        def decorator(func):
            setattr(cls, func.__name__, func)
            return func
        return decorator

    # Cause an exception in import_row, but only after import is confirmed,
    # so a failure only occurs when ImportMixin.process_import is called.
    class R(BookResource):
        def import_obj(self, obj, data, dry_run):
            if dry_run:
                super().import_obj(obj, data, dry_run)
            else:
                raise Exception

    @monkeypatch_method(BookAdmin)
    def get_resource_class(self):
        return R

    # Verify that when an exception occurs in import_row, when raise_errors is False,
    # the returned row result has a correct import_type value,
    # so generating log entries does not fail.
    @monkeypatch_method(BookAdmin)
    def process_dataset(self, dataset, confirm_form, request, *args, **kwargs):
        resource = self.get_import_resource_class()(**self.get_import_resource_kwargs(request, *args, **kwargs))
        return resource.import_data(dataset,
                                    dry_run=False,
                                    raise_errors=False,
                                    file_name=confirm_form.cleaned_data['original_file_name'],
                                    user=request.user,
                                    **kwargs)

    dataset = Dataset(headers=["id", "name", "author_email"])
    dataset.append([1, "Test 1", "*****@*****.**"])
    input_format = '0'
    content = dataset.csv
    f = SimpleUploadedFile("data.csv", content.encode(), content_type="text/csv")
    data = {
        "input_format": input_format,
        "import_file": f,
    }
    response = self.client.post('/admin/core/book/import/', data)
    self.assertEqual(response.status_code, 200)
    confirm_form = response.context['confirm_form']
    data = confirm_form.initial
    response = self.client.post('/admin/core/book/process_import/', data, follow=True)
    self.assertEqual(response.status_code, 200)

def multilateral_and_foundation_table_data_sheet(self):
    work_sheet = Dataset(title="Multilateral-Foundation")
    work_sheet.headers = ['iso', 'country', 'number of disbursements',
                          'total shown disbursement', 'total real disbursement']
    for country in models.Recipient.objects.all():
        table = MultilateralAndFoundationDisbursementSourcesTable(country=country).as_dictionary()
        if len(table):
            number = table["total"]["number_of_disbursements"]
            amount_formatted = table["total"]["amount"]["formatted"]
            amount_real = table["total"]["amount"].get("real", '')
            work_sheet.append([country.iso3, country.name, number, amount_formatted, amount_real])
    return work_sheet

def other_disbursements_data_sheet(self):
    work_sheet = Dataset(title="Other disbursements")
    work_sheet.headers = ['iso', 'country', 'amount of other disbursements vs 7 largest']
    for country in models.Recipient.objects.all():
        re_disb = re.compile(r"Other (\d+) Disb\s*")
        disbursements = models.Disbursement.objects.filter(country=country)
        other_disbursements = disbursements.get(donor__contains="Other ")
        ndisb = int(re_disb.match(other_disbursements.donor).groups()[0])
        total_disbursements_count = disbursements.count() - 1 + ndisb
        work_sheet.append([country.iso3, country.name, total_disbursements_count])
    return work_sheet

def dump_connections():
    print('Running connection dump...')
    dataset = Dataset()
    dataset.headers = ['Identity', 'Backend']
    for connection in Connection.objects.filter(identity__startswith='+'):
        dataset.append([
            connection.identity,
            connection.backend.name
        ])
    with open('connections.csv', 'w') as f:
        f.write(dataset.csv)
    print('Done')

def _import(self):
    """Execute the import."""
    import_dataset = Dataset()
    fieldtypes = self._prepare_dataset_to_import()
    import_dataset.headers = self.read_dataset.headers
    inProj = Proj(init='epsg:25831')
    outProj = Proj(init='epsg:4326')
    for row in self.read_dataset.dict:
        # Ignore rows with empty lat or lon
        if row['lon'] is not None and row['lat'] is not None:
            row['lon'], row['lat'] = transform(
                inProj, outProj, row['lon'], row['lat']
            )
            row = self._check_fieldtypes(row, fieldtypes)
            new = []
            for key in row:
                new.append(row[key])
            import_dataset.append(new)
    db = connection.cursor()
    import_dataset.headers = None
    with tempfile.NamedTemporaryFile() as f:
        f.write(import_dataset.csv)
        f.seek(0)
        try:
            db.copy_from(f, 'storm_drain',
                         columns=(self.read_dataset.headers),
                         sep=",", null='null')
            self._add_version(
                self.request,
                self.read_dataset.dict[0]['version']
            )
            self.response = {
                'success': True,
                'headers': self.read_dataset.headers
            }
        except Exception as e:
            error = str(e).replace('\n', ' ').replace('\r', '')
            self.response = {'success': False, 'err': error}

def put(self, *args, **kwargs):
    """Import a file."""
    if self.request.user.is_manager():
        if self.request.method == 'POST':
            # Prepare the input Dataset
            self.read_dataset = Dataset()
            # Get file name and extension
            file = self.request.FILES['stormdrain-file']
            name, extension = os.path.splitext(
                self.request.FILES['stormdrain-file'].name
            )
            # Load data into the input Dataset
            self.read_dataset.load(file.read(), extension[1:])
            # headers to lowercase
            file_headers = [x.lower() for x in self.read_dataset.headers]
            # Detect missing headers
            missing_headers = self._get_missing_fields(file_headers)
            # Get optional headers
            self._get_optional_fields(file_headers)
            # If there is no missing header
            if len(missing_headers) == 0:
                self._import()
            else:
                txtMissing = ', '.join(missing_headers)
                self.response['err'] = (
                    'Missing one or more required fields'
                    '(' + txtMissing + ')'
                )
        else:
            self.response['err'] = 'No file uploaded'
    else:
        self.response['err'] = 'Unauthorized'
    return self._end()

def main(argv):
    dataset = Dataset(argv.data)
    data = tablib.Dataset(headers=['Question', 'Answer'])
    for q, a in dataset:
        data.append((q, a))
    if argv.format == 'json':
        with open(argv.output, 'w') as f:
            json.dump(data.export('json'), f, indent=4)
    else:
        with open(argv.output, 'wb') as f:
            f.write(data.export('xls'))

def _import_file(fpath, resource_class, do_raise=True):
    try:
        log.info(_("Importing file {}.").format(fpath))
        with open(fpath, "r") as json_file:
            data = Dataset().load(json_file.read(), format="json")
            resource = resource_class()
            log.info(
                _("...Importing resource {}.").format(resource.__class__.__name__))
            return resource.import_data(data, raise_errors=do_raise)
    except AttributeError:
        log.error(_("FAILURE importing file {}!").format(fpath))
        raise

def clean_csv_file(self, *args, **kwargs):
    csv_file = self.cleaned_data['csv_file']
    csv_file.seek(0)
    dataset = Dataset().load(csv_file.read().decode('utf-8'), format='csv')
    for idx, row in enumerate(dataset, start=2):
        try:
            self.importer.validate_row(row)
        except ValidationError as e:
            raise forms.ValidationError('Line {}: {}'.format(
                idx, '\n'.join(e.messages)))
    return csv_file

def get_dataset(self, fields):
    headers = [field.rpartition('-')[0] for field in fields]
    dataset = Dataset(headers=headers)
    for submission in self.queryset.only('data').iterator():
        row_data = []
        form_fields = [
            field for field in submission.get_form_data()
            if field.field_id in fields
        ]
        for header in fields:
            for field in form_fields:
                if field.field_id == header:
                    row_data.append(field.value)
                    break
            else:
                row_data.append('')
        if row_data:
            dataset.append(row_data)
    return dataset

def __init__(self, in_fname, format_fname, out_sheet='Sheet1', out_fname=''):
    self.in_fname = in_fname
    self.in_fname_prefix, self.in_type = os.path.splitext(in_fname)
    self.in_type = self.in_type.lstrip('.')
    self.out_fname_prefix, self.out_type = os.path.splitext(out_fname)
    self.out_type = self.out_type.lstrip('.')
    if not self.out_fname_prefix or not self.out_type:
        self.out_fname_prefix = self.in_fname_prefix + '_formatted'
        self.out_type = 'xlsx'
    self.out_sheet = out_sheet
    with open(format_fname, 'r') as f:
        self.file_format = f.read()
    sys.path.append(os.path.dirname(os.path.abspath(format_fname)))
    self.format_name = os.path.splitext(os.path.basename(format_fname))[0]
    try:
        self.format_module = importlib.import_module(self.format_name)
    except:
        self.format_module = ''
    if self.in_type in ['xls', 'xlsx']:
        self.file_read_mode = 'rb'
    else:
        self.file_read_mode = 'r'
    with open(in_fname, self.file_read_mode) as f:
        self.data = Dataset().load(f.read())
    if self.in_type in ['xls', 'xlsx']:
        for i, row in enumerate(self.data):
            self.data[i] = [int(n) if type(n) == float and n == int(n) else n for n in row]

def dump_stock_reports():
    print('Running stock report dump...')
    dataset = Dataset()
    dataset.headers = ['Site ID', 'Mobile', 'Timestamp', 'Items']
    for stock_report in StockReport.objects.order_by('created'):
        if not stock_report.reporter.mobile.startswith('+'):
            continue
        summary = '; '.join(['{} {} {}'.format(log.item.code, log.last_quantity_received, log.current_holding)
                             for log in stock_report.logs.all()])
        dataset.append([
            stock_report.site.hcid,
            stock_report.reporter.mobile,
            timegm(stock_report.created.utctimetuple()),
            summary
        ])
    with open('stock_reports.csv', 'w') as f:
        f.write(dataset.csv)
    print('Done')

def product_mst_upload(request):
    if request.method == 'POST':
        prodmst_resource = prodMstResource()
        dataset = Dataset()
        uploaded_file = request.FILES['myfile']
        decoded_data = uploaded_file.read().decode('UTF-8')
        io_string = io.StringIO(decoded_data)
        next(io_string)  # skip the header row
        for column in csv.reader(io_string, delimiter=',', quotechar="|"):
            created = prod_mst.objects.create(
                Product_Code=column[0]
            )
    return render(request, 'core/simple_upload.html')

def analyze():
    name = request.args.get("myselect")
    option_var = request.args.get("myoption")
    if name not in header:
        flash("please select the header and option")
        return redirect("/")
    elif option_var not in option:
        flash("option not found")
        return redirect("/")
    df = pandas.read_csv(file_name)
    try:
        if option_var == "mean":
            set_data = df[name].mean()
        elif option_var == "sum":
            set_data = df[name].sum()
        elif option_var == "max":
            set_data = df[name].max()
        elif option_var == "count":
            set_data = df[name].count()
        elif option_var == "std":
            set_data = df[name].std()
        elif option_var == "var":
            set_data = df[name].var()
        elif option_var == "min":
            set_data = df[name].min()
    except:
        flash("please make sure you use the option with a valid header; some options can't be used with string values!")
        return redirect("/")
    imported_data = Dataset().load(open(file_name).read())
    data = imported_data[name]
    new_list = []
    for d in data:
        try:
            if d.isdigit():
                new_list.append(int(d))
            elif type(d) == str:
                new_list.append(float(d))
        except:
            flash("this option is only for numbers")
            return redirect("/")
    graph = pygal.Line()
    graph.title = "Full customization option For " + str(name)
    graph.x_labels = []
    graph.add(name, new_list)
    graph_data = graph.render_data_uri()
    return render_template("analyze.html", set_data=set_data, option_var=option_var,
                           name=name, graph_data=graph_data)

def anemicwoman_bulk(request):
    if request.method == "POST":
        bulk_resource = bulkResource()
        dataset = Dataset()
        bulk = request.FILES['myFile']
        print(bulk)
        if not bulk.name.endswith('xlsx'):
            messages.info(request, 'wrong format')
            return render(request, "bulk_reg_pregnant.html")
        imported_data = dataset.load(bulk.read(), format='xlsx')
        print(imported_data)
        user = User.objects.all()
        for data in imported_data:
            if User.objects.filter(username=data[3]).exists():
                messages.info(request, "Username already entered")
            else:
                value = User.objects.create_user(id=data[0], first_name=data[1], last_name=data[2],
                                                 username=data[3], email=data[4], password=data[5])
                value.save()
                my_group = Group.objects.get(name='anemic_pregnant_woman')
                my_group.user_set.add(value)
                contact = AnemicPregnantWoman(uid=data[6], birthdate=data[7], age=data[8],
                                              personalcontact=data[9], ICDSname=data[10],
                                              ICDScenteraddress=data[11], ICDScentercontact=data[12],
                                              occupation=data[13], education=data[14],
                                              annualincome=data[15], weight=data[16],
                                              weightunit=data[17], height=data[18],
                                              heightunit=data[17], bmi=data[18], waist=data[19],
                                              waistunit=data[20], hip=data[21], hipunit=data[22],
                                              whratio=data[23], whratioderived=data[24],
                                              foodhabbits=data[25], uploaded_photo=data[26],
                                              feedback=data[27], user=value)
                contact.save()
                messages.info(request, "User created")
                print('user created')
                stuff_in_string = "Hello {} Your username for Community Diet Diversity(dietdiversity.communitygis.net) site is {} and Password is {}.Thanks!!".format(data[1], data[3], data[5])
                print(stuff_in_string)
                # email=i.email }}
                send_mail('Community Diet Diversity', stuff_in_string, '*****@*****.**',
                          [data[4]], fail_silently=False)
                # messages.info(request,"data entered")
        return render(request, "bulk_reg_pregnant.html")
    else:
        return render(request, "bulk_reg_pregnant.html")

def mentor_bulk(request):
    if request.method == "POST":
        bulk_resource = bulkResource()
        dataset = Dataset()
        bulk = request.FILES['myFile']
        print(bulk)
        if not bulk.name.endswith('xlsx'):
            messages.info(request, 'wrong format')
            return render(request, "bulk_reg_hm.html")
        imported_data = dataset.load(bulk.read(), format='xlsx')
        print(imported_data)
        user = User.objects.all()
        for data in imported_data:
            if User.objects.filter(username=data[3]).exists():
                messages.info(request, "Username already entered")
            else:
                value = User.objects.create_user(id=data[0], first_name=data[1], last_name=data[2],
                                                 username=data[3], email=data[4], password=data[5])
                value.save()
                my_group = Group.objects.get(name='mentor')
                my_group.user_set.add(value)
                contact = Mentor(uid='MT' + ''.join(random.choice(string.ascii_uppercase + string.digits) for x in range(5)),
                                 contact=data[6], user=value)
                contact.save()
                messages.info(request, "User created")
                print('user created')
                stuff_in_string = "Hello {} Your username for Community Diet Diversity(dietdiversity.communitygis.net) site is {} and Password is {}.Thanks!!".format(data[1], data[3], data[5])
                print(stuff_in_string)
                # email=i.email }}
                send_mail('Community Diet Diversity', stuff_in_string, '*****@*****.**',
                          [data[4]], fail_silently=False)
                # messages.info(request,"data entered")
        return render(request, "bulk_reg_hm.html")
    else:
        return render(request, "bulk_reg_hm.html")

def simple_upload(request):
    if request.method == 'POST':
        person_resource = DeliveryResource()
        dataset = Dataset()
        new_persons = request.FILES['myfile']
        imported_data = dataset.load(new_persons.read().decode(), format='csv')
        # print(imported_data)
        i = 1
        for data in imported_data:
            print(data[2])
            value = Delivery(
                i,
                data[0], data[1], data[2], data[3], data[4], data[5], data[6],
                data[7], data[8], data[9], data[10], data[11], data[12], data[13],
                data[14], data[15], data[16], data[17], data[18], data[19], data[20],
            )
            value.save()
            i += 1
    return render(request, 'input.html')

def simple_upload(request):
    if request.method == 'POST':
        contacts_resource = ContactInfoResource()
        dataset = Dataset()
        new_contacts = request.FILES['myfile']
        dataset.load(new_contacts.read().decode('utf-8'))
        # convert Dataset to List
        my_list = []
        for i in dataset:
            my_list.append(list(i))
        # Replace blank to the ID of the user
        count = 0
        for x in my_list:
            my_list[count][1] = request.user.id
            if ContactInfo.objects.filter(
                    first_name=my_list[count][2],
                    last_name=my_list[count][3]).exists():
                my_list[count][0] = ContactInfo.objects.get(
                    first_name=my_list[count][2],
                    last_name=my_list[count][3]).id
            count = count + 1
        # creating new dataset then add Headers
        my_data = Dataset()
        my_data.headers = ([
            'id', 'created_by', 'first_name', 'last_name', 'contact_number', 'address'
        ])
        # Append list to new dataset
        for x in my_list:
            my_data.append(x)
        contacts_resource.import_data(my_data, dry_run=False)  # Actually import now
    return render(request, 'contacts/import.html')

def wacc(request):
    id = request.GET['id']
    data = WACC.objects.filter(project=id)
    error = ''
    user = request.user
    data_1 = Input.objects.filter(user=user)
    for i in data_1:
        pid = i.project
    if request.method == 'POST':
        input_resource = WACCResource()
        dataset = Dataset()
        new_input = request.FILES['myFile'] if 'myFile' in request.FILES else None
        if new_input is None:
            error = 'Please choose file!'
        else:
            imported_data = dataset.load(new_input.read())
            for i in imported_data['project']:
                vari = i
                break
            any_data = WACC.objects.filter(project=vari)
            if not any_data:
                result = input_resource.import_data(dataset, dry_run=True)  # Test the data import
                if result.has_errors():
                    error = "Invalid Input Data!"
                if not result.has_errors():
                    input_resource.import_data(dataset, dry_run=False)  # Actually import now
                    return redirect(user_details)
            else:
                error = 'Invalid Input Data!'
    return render(request, 'wacc.html', {
        'data': data,
        'pid': pid,
        'error': error
    })

def simple_upload(request):
    if request.method == 'POST':
        report_set = ReportResource()
        dataset = Dataset()
        new_set = request.FILES['myfile']
        imported_data = dataset.load(new_set.read())
        print(imported_data)
        for data in imported_data:
            print(data)
            # value = Report.objects.create(published_date=data[0], report_title=data[1])
            value = Report.objects.create(published_date=data[0], report_title=data[1],
                                          main_category=data[2], pages=data[3], summary=data[4],
                                          table_of_contents=data[5], list_of_table=data[6],
                                          list_of_figures=data[7], companies_mentioned=data[8],
                                          single_user_licence=data[9], multi_user_licence=data[10],
                                          corporate_user_licence=data[11])
            print(value)
            # value.save()
        # print("Imported Data:")
        # print(imported_data)
        # result = report_set.import_data(dataset, dry_run=True)  # Test the data import
        # print("Result:")
        # print(result)
        # if not result.has_errors():
        #     a = report_set.import_data(dataset, dry_run=False)  # Actually import now
        #     print("Done")
    return render(request, 'base.html')

def simple_upload(request):
    if request.method == 'POST':
        university_resource = UniversityResource()
        dataset = Dataset()
        new_universities = request.FILES['myfile']
        imported_data = dataset.load(new_universities.read(), format='xlsx')
        print(imported_data)
        for data in imported_data:
            # print(data[1])
            value = University(
                data[0],
                data[1],
                data[2]
            )
            value.save()
        # result = person_resource.import_data(dataset, dry_run=True)  # Test the data import
        # if not result.has_errors():
        #     person_resource.import_data(dataset, dry_run=False)  # Actually import now
    return render(request, 'core/uni_upload.html')

def upload_file(request):
    if request.method == "POST":
        customer_resource = CustomerResources
        dataset = Dataset()
        new_dataset = request.FILES["file"]
        if not new_dataset.name.endswith(".csv"):
            messages.info(request, "wrong file format. Back to the Main page and try again")
            return render(request, "upload_csv.html")
        imported_data = dataset.load(new_dataset.read().decode("UTF-8"), format="csv")
        for column in imported_data:
            value = Customer(customer=column[0],
                             item=column[1],
                             total=column[2],
                             quantity=column[3],
                             date=column[4])
            value.save()
    return render(request, "upload_csv.html")

def simple_upload(request):
    if request.method == 'POST':
        person_resource = PersonResource()
        dataset = Dataset()
        new_persons = request.FILES['myfile']
        imported_data = dataset.load(new_persons.read(), format='xlsx')
        # print(imported_data)
        for data in imported_data:
            print(data[1])
            value = Person(
                data[0],
                data[1],
                data[2],
                data[3],
                data[4],
                data[5],
                data[6],
            )
            value.save()
    return render(request, 'input.html')

def import_posts(request):
    if request.method == 'POST':
        # data = {
        #     'key': request.user
        # }
        try:
            person_resource = UpdateResource(request=request)
            dataset = Dataset()
            new_persons = request.FILES['myfile']
            imported_data = dataset.load(new_persons.read().decode('utf-8'), format='csv')
            result = person_resource.import_data(dataset, dry_run=True)  # Test the data import
        except Exception as e:
            print(type(e))
        if not result.has_errors():
            person_resource.import_data(dataset, dry_run=False)  # Actually import now
            return redirect('updates')
        else:
            return render(request, 'updates/import.html')
    else:
        return render(request, 'updates/import.html')

def import_controls(request):
    if request.method == 'POST':
        file_format = request.POST['file-format']
        control_resource = ControlResource()
        dataset = Dataset()
        new_controls = request.FILES['importData']
        # Allowed to import data as CSV or JSON
        if file_format == 'CSV':
            # required to load the data on
            imported_data = dataset.load(new_controls.read().decode('utf-8'), format='csv')
            result = control_resource.import_data(dataset, dry_run=True)
        elif file_format == 'JSON':
            imported_data = dataset.load(new_controls.read().decode('utf-8'), format='json')
            # Testing data import
            result = control_resource.import_data(dataset, dry_run=True)
        if not result.has_errors():
            # Import now
            control_resource.import_data(dataset, dry_run=False)
    return render(request, 'import.html')

def edu_org_upload(request):
    if request.method == 'POST':
        dataset = Dataset()
        new_orgs = request.FILES['myfile']
        imported_data = dataset.load(new_orgs.read(), format='xlsx')
        org_type = None
        for data in imported_data:
            if data[1] is not None:
                if data[4] is not None and data[4] == 1:
                    org_type = 'school'
                elif data[4] is not None and data[4] == 2:
                    org_type = 'college'
                elif data[4] is not None and data[4] == 3:
                    org_type = 'lyceum'
                elif data[4] is not None and data[4] == 2:
                    org_type = 'university'
                value = EduOrganisation(data[0], data[1], data[2], data[3], data[4])
                value.save()
        return redirect('organisations-list')
    return render(request, 'edu_organisation/eduorganisations_upload.html')

def import_guest_save(request, event_id):
    if request.method == 'POST':
        excel_first_column = request.POST.get('select-headers')
        person_resource = GuestResource()
        dataset = Dataset()
        add_fk_to_imported_data = Dataset()
        new_guests = request.FILES['myfile']
        imported_data = dataset.load(new_guests.read())
        add_fk_to_imported_data.headers = ('event_id', 'guest_name')
        for c in imported_data[excel_first_column]:
            add_fk_to_imported_data.append((event_id, c))
        person_resource.import_data(add_fk_to_imported_data, dry_run=False)  # Actually import now
    return redirect(reverse('guests', args=(event_id, )))

def send_report():
    datestamp = datetime.datetime.now().strftime('%d%b%Y')
    report_file = DATA_DIR + 'GA360-%s.csv' % datestamp
    table = Dataset().load(open(report_file, 'rt').read()).export('df').to_html()
    send_email(to=models.Variable.get('QUARTERLY_EMAIL_RECIPIENT', '*****@*****.**'),
               cc=models.Variable.get('ANALYTICS_TEAM_EMAILS', []),
               subject='%s Automated Quarterly GA360 report [DO NOT RESPOND]' % datestamp,
               html_content=table,
               files=[report_file])

def uploadData(request):
    if request.method == 'POST':
        # data_resource = AccidentResource()
        data_resource = resources.modelresource_factory(model=models.Accident)()
        dataset = Dataset()
        new_data = request.FILES['importData']
        imported_data = dataset.load(new_data.read().decode('utf-8'), format='csv')
        result = data_resource.import_data(dataset, dry_run=True)  # Testing data import
        if not result.has_errors():
            data_resource.import_data(dataset, dry_run=False)  # Actually import now
    wilayaform = wilaya()
    data = Accident.objects.all().values()
    total = len(data)
    context = {
        'data': data,
        'wilayaform': wilayaform,
        'total': total,
    }
    return render(request, 'home/bdd.html', context)

def upload_file(request):
    if request.method == 'POST':
        brand_resource = BrandResource()
        new_brands = request.FILES['myfile']
        print(f"file: {new_brands}")
        imported_data = Dataset().load(new_brands.read().decode('UTF-8'), format='csv')
        print(f"data: {imported_data}")
        result = brand_resource.import_data(imported_data, dry_run=True)
        if not result.has_errors():
            brand_resource.import_data(imported_data, dry_run=False)
    return render(request, 'pages/upload_file.html')

def users_import(request):
    context = {}
    if request.method == 'POST':
        user_resource = UserResource()
        data_set = Dataset()
        new_users = request.FILES['myfile']
        import_data = data_set.load(new_users.read(), format='csv')
        result = user_resource.import_data(data_set, dry_run=True)
        if not result.has_errors():
            user_resource.import_data(data_set, dry_run=False)
        else:
            errors = dict(result.row_errors())
            context.update({'errors': errors})
    return render(request, 'control/import-user.html', context=context)

def simple_upload(request):
    if request.method == 'POST':
        person_resource = PersonResource()
        dataset = Dataset()
        new_persons = request.FILES['myfile']
        imported_data = dataset.load(new_persons.read())
        for i in imported_data:
            if len(str(i[2])) == 10 and "@" in i[1]:
                name = i[0]
                email_id = i[1]
                phone_number = i[2]
                start_date = radar.random_datetime(
                    start=datetime.date(day=1, month=1, year=1975),
                    stop=datetime.date(day=1, month=3, year=2015))
                age = datetime.datetime.today().year - start_date.year
                data = Person(name=name, email_id=email_id,
                              phone_number=phone_number, age=age)
                data.save()
        return render(request, 'simple_upload.html', {})
    else:
        return render(request, 'simple_upload.html', {})

def vader_analyse(file_input):
    """Labels the dataset with vader sentiment tool"""
    sentences = getdata_from_db(1000)
    print("Working on %d tweets" % (len(sentences)))
    headers = ('text', 'label', 'score')
    analyzed_data = []
    sid = SentimentIntensityAnalyzer()
    for line in sentences:
        text = pre.clean(line)
        scores = sid.polarity_scores(text)
        analyzed_data.append((text, getlabel(scores), scores['compound']))
    save_data_to_db(analyzed_data)
    analyzed = Dataset(*analyzed_data, headers=headers)
    return analyzed

def insert_stf_sheet(request):
    context = {}
    if request.method == 'POST':
        stf_resource = FacultyResources()
        dataset = Dataset()
        new_sheet = request.FILES['mysheet']
        if not new_sheet.name.endswith('xlsx'):
            context['message'] = "File must be in excel format only..."
            return render(request, 'add_stf_multi.html', context)
        import_data = dataset.load(new_sheet.read(), format='xlsx')
        for data in import_data:
            value = FacultyData(
                data[0],
                data[1],
                data[2],
                data[3],
                data[4],
                data[5],
            )
            value.save()
        return redirect('/manage_faculty/')