Example no. 1
def dump_program_reports():
    print('Running program report dump...')
    dataset = Dataset()
    dataset.headers = ['Site ID', 'Mobile', 'Timestamp', 'Group', 'Program', 'Period code', 'Period number', 'Atot', 'Arel', 'Tin', 'Tout', 'Dead', 'DefT', 'Dcur', 'Dmed']

    for report in ProgramReport.objects.select_related('group', 'program').order_by('created'):
        if not report.reporter.mobile.startswith('+'):
            continue

        dataset.append([
            report.site.hcid,
            report.reporter.mobile,
            timegm(report.created.utctimetuple()),
            report.group.code,
            report.program.code,
            report.period_code,
            report.period_number,
            report.new_marasmic_patients,
            report.readmitted_patients,
            report.patients_transferred_in,
            report.patients_transferred_out,
            report.patient_deaths,
            report.unconfirmed_patient_defaults,
            report.patients_cured,
            report.unresponsive_patients
        ])

    with open('program_reports.csv', 'w') as f:
        f.write(dataset.csv)

    print('Done')
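
Every example on this page follows the same tablib pattern: create a Dataset, assign headers, append one row per record, then read an export property such as .csv or .xls. A minimal, self-contained sketch of that pattern (the file and column names here are illustrative, not taken from any example):

from tablib import Dataset

data = Dataset()
data.headers = ['id', 'name']
data.append([1, 'alice'])
data.append([2, 'bob'])

# the .csv property renders headers plus rows as CSV text
with open('out.csv', 'w') as f:
    f.write(data.csv)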
Example no. 2
def export(request):
    contacts_resource = ExportResource()
    dataset = contacts_resource.export()

    # Convert the Dataset to a list of mutable rows
    my_list = [list(row) for row in dataset]

    # Blank the id column and stamp each row with the requesting user
    for row in my_list:
        row[0] = ''
        row[1] = request.user

    # Create a new Dataset and set the headers
    my_data = Dataset()
    my_data.headers = [
        'id', 'created_by', 'first_name', 'last_name', 'contact_number',
        'address'
    ]

    for x in my_list:
        my_data.append(x)

    response = HttpResponse(my_data.csv, content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename="contacts.csv"'
    return response
Example no. 3
def member_query(db):
    node_id = request.params.get('node_id')
    realname = request.params.get('realname')
    idcard = request.params.get('idcard')
    mobile = request.params.get('mobile')
    _query = db.query(
        models.SlcMember,
        models.SlcNode.node_name
    ).filter(
        models.SlcNode.id == models.SlcMember.node_id
    )
    if idcard:
        _query = _query.filter(models.SlcMember.idcard==idcard)
    if mobile:
        _query = _query.filter(models.SlcMember.mobile==mobile)
    if node_id:
        _query = _query.filter(models.SlcMember.node_id == node_id)
    if realname:
        _query = _query.filter(models.SlcMember.realname.like('%'+realname+'%'))

    if request.path == '/member':
        return render("bus_member_list", page_data = get_page_data(_query),
                       node_list=db.query(models.SlcNode),**request.params)
    elif request.path == "/member/export":
        data = Dataset()
        data.append((u'区域',u'姓名',u'用户名',u'证件号',u'邮箱', u'联系电话', u'地址', u'创建时间'))
        for i,_node_name in _query:
            data.append((
                _node_name, i.realname, i.member_name,i.idcard,
                i.email,i.mobile, i.address,i.create_time
            ))
        name = u"RADIUS-MEMBER-" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S") + ".xls"
        with open(u'./static/xls/%s' % name, 'wb') as f:
            f.write(data.xls)
        return static_file(name, root='./static/xls',download=True)
Example no. 4
    def exportDivSchedulesRefFormat(self, startgameday, prefix=""):
        headers = ['Game#', 'Game#', 'Tourn Match#','Date', 'Day', 'Time', 'Division', 'Round', 'Home', 'Visitor', 'Field', 'cr_trust', 'ar_trust', 'm_trust']
        datasheet = Dataset(title=prefix)
        datasheet.headers = list(headers)

        schedule_list = self.dbinterface.findDivisionSchedulePHMSARefFormat(startgameday)
        tabformat_list = [(_offset+x[match_id_CONST], x[match_id_CONST], tournMapGamedayIdToCalendar(x[gameday_id_CONST]), tournMapGamedayIdToDate(x[gameday_id_CONST]),
            datetime.strptime(x[start_time_CONST],"%H:%M").strftime("%I:%M %p"),
            x[age_CONST]+x[gen_CONST], x[round_CONST],
            x[home_CONST], x[away_CONST],
            self.fieldinfo[self.findexerGet(x[venue_CONST])]['name'],
            _reftrust_level[_rindexerGet(getTournDivID(x[age_CONST], x[gen_CONST]))]['cr'],
            _reftrust_level[_rindexerGet(getTournDivID(x[age_CONST], x[gen_CONST]))]['ar'],
            _reftrust_level[_rindexerGet(getTournDivID(x[age_CONST], x[gen_CONST]))]['ment'])
            for x in schedule_list] if prefix else [(mapGamedayIdToCalendar(x[gameday_id_CONST],format=1), 'Saturday',
                datetime.strptime(x[start_time_CONST],"%H:%M").strftime("%I:%M %p"),
                x[age_CONST]+x[gen_CONST],
                x[home_CONST], x[away_CONST],
                self.fieldinfo[self.findexerGet(x[venue_CONST])]['name'])
                for x in schedule_list]
        if prefix:
            atabformat_list = [(_offset+i, j[0], j[1], j[2], j[3], j[4], j[5], j[6], j[7], j[8], j[9], j[10], j[11], j[12]) for i,j in enumerate(tabformat_list)]
        else:
            atabformat_list = tabformat_list
        for tabformat in atabformat_list:
            datasheet.append(tabformat)
        sheet_xls_relpath = prefix+'_RefFormat.xls'
        sheet_xls_abspath = os.path.join('/home/henry/workspace/datagraph/bottle_baseball/download/xls',
                                         sheet_xls_relpath)
        with open(sheet_xls_abspath, 'wb') as f:
            f.write(datasheet.xls)
Example no. 5
 def generate_divxls(self, genxls_id):
     headers = [
         'Game Date', 'Day', 'Time', 'Division', 'Home', 'Visitor', 'Venue'
     ]
     datasheet_list = list()
     for divinfo in self.divinfo_list:
         div_id = divinfo[genxls_id]
         div_age = divinfo['div_age']
         div_gen = divinfo['div_gen']
         div_str = div_age + div_gen
         datasheet = Dataset(title=div_str)
         datasheet.headers = list(headers)
         match_list = self.sdbinterface.get_schedule(genxls_id,
                                                     div_age=div_age,
                                                     div_gen=div_gen)
         # note conversions for time from 24-hour to am/pm format
         tabformat_list = [
             (x['game_date'], parser.parse(x['game_date']).strftime("%a"),
              datetime.strptime(x['start_time'],
                                "%H:%M").strftime("%I:%M%p"), div_str,
              y['home'], y['away'], self.fieldinfo_list[self.findexerGet(
                  y['venue'])]['field_name']) for x in match_list
             for y in x['gameday_data']
         ]
         for tabformat in tabformat_list:
             datasheet.append(tabformat)
         datasheet_list.append(datasheet)
     book = Databook(datasheet_list)
     bookname_xls_relpath = self.schedcol_name + "_byDiv.xls"
     bookname_xls_fullpath = os.path.join(self.dir_path,
                                          bookname_xls_relpath)
     with open(bookname_xls_fullpath, 'wb') as f:
         f.write(book.xls)
     return [{'path': bookname_xls_relpath}]
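
Examples 5, 6, 12 and several below bundle per-division Datasets into a Databook, which tablib renders as a multi-sheet Excel workbook, one sheet per Dataset, named after the Dataset title. A minimal sketch, assuming the xls format plugin (xlwt) is installed; sheet and file names are illustrative:

from tablib import Databook, Dataset

sheet = Dataset(title='DivisionA')  # title becomes the worksheet name
sheet.headers = ['Home', 'Away']
sheet.append(['Team 1', 'Team 2'])

book = Databook([sheet])
with open('schedule.xls', 'wb') as f:
    f.write(book.xls)  # binary workbook payload, hence 'wb'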
Example no. 6
 def generate_fieldxls(self):
     headers = [
         'Game Date', 'Day', 'Time', 'Division', 'Home', 'Visitor', 'Venue'
     ]
     datasheet_list = list()
     for fieldinfo in self.fieldinfo_list:
         field_name = fieldinfo['field_name']
         field_id = fieldinfo['field_id']
         datasheet = Dataset(title=field_name)
         datasheet.headers = list(headers)
         match_list = self.sdbinterface.get_schedule('field_id',
                                                     field_id=field_id)
         tabformat_list = [
             (x['game_date'], parser.parse(x['game_date']).strftime("%a"),
              datetime.strptime(x['start_time'],
                                "%H:%M").strftime("%I:%M%p"),
              x['div_age'] + x['div_gen'], x['home'], x['away'], field_name)
             for x in match_list
         ]
         for tabformat in tabformat_list:
             datasheet.append(tabformat)
         datasheet_list.append(datasheet)
     book = Databook(datasheet_list)
     bookname_xls_relpath = self.schedcol_name + "_byField.xls"
     bookname_xls_fullpath = os.path.join(self.dir_path,
                                          bookname_xls_relpath)
     with open(bookname_xls_fullpath, 'wb') as f:
         f.write(book.xls)
     return [{'path': bookname_xls_relpath}]
Example no. 7
def emergency(current_user):
    # fetch the user once, dump their info, and flag them as critical
    user = User.query.filter_by(id=current_user.id).first()
    user_data = user_schema.dump(user)
    user.set_critical_state()
    data = Dataset()
    data.headers = [
        'First Name', 'Last Name', 'Email', 'Address', 'State', 'Age',
        'Travel History', 'Telephone'
    ]
    data.append((user_data['first_name'], user_data['last_name'],
                 user_data['email'], user_data['address'], user_data['state'],
                 user_data['age'], user_data['travel_history'],
                 user_data['tel']))
    with open(f'{os.getcwd()}/user_dat.xlsx', 'wb') as file:
        print(file.name)
        file.write(data.export('xlsx'))
        # actually send the message
        try:
            result = EmergencyMail("Emergency Report!",
                                   render_template('Emergency.html'),
                                   file.name)
            if result:
                return jsonify({'Sent Email': True}), 200
            else:
                return jsonify({'Email not sent': True}), 500
        except Exception:
            return jsonify({'Sent Email': False}), 500
Example no. 8
    def build_csv_from_data(self, form_data, response_data):
        id_to_label = self.get_header_maps(form_data)
        headers = ["id"]
        for c in id_to_label.values():
            # strip commas and newlines so labels stay single CSV cells
            headers.append(re.sub(r"[,\n\r]+", "", c))

        data = Dataset(headers=headers)
        for response_info in response_data:
            response = response_info.get("responses", {})
            row_id = response_info["id"]
            row = [row_id]
            for pk in response.keys():
                value = response.get(pk)
                if isinstance(value, str) or not value:
                    row.append(value)
                # attachment
                elif isinstance(value, list) and value[0].get("filename"):
                    links = " ".join(
                        [self.attachment_link(rec) for rec in value])
                    row.append(links)
                elif value.get("checked"):
                    row.append(", ".join(value.get("checked")))
                elif value.get("other_text"):
                    row.append(value.get("other_text"))
                else:
                    logger.error("Unhandled value type: %s (%s)." %
                                 (value, type(value)))
                    logger.error("Response data structure: %s" % (response))
                    row.append(None)
            data.append(row)
        return data.export("csv")
Example no. 9
def get_dataset(ufile):
    # decode as Latin-1 so arbitrary bytes cannot raise UnicodeDecodeError
    stream = ufile.read().decode('ISO-8859-1')
    lines = stream.split('\r\n')
    logger.info('%d lines found in imported file %s', len(lines), ufile)
    hdr = lines[0].split(',') if lines else None
    val = [line.split(',') for line in lines[1:]]

    dataset = Dataset(headers=hdr)
    for i, row in enumerate(val):
        try:
            dataset.append(row)
        except Exception:
            logger.warning('Ignoring row %d due to errors', i + 1)

    logger.debug('dataset.csv : %s', dataset.csv)
    return dataset
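
Example 9 splits the raw text by hand; tablib can also parse delimited text itself, as Example 26 below does with Dataset().load(..., format='csv'). A hedged equivalent of get_dataset under that approach, assuming well-formed CSV input:

from tablib import Dataset

def get_dataset_via_load(ufile):
    # tablib's csv importer handles quoting and row splitting,
    # and the first row becomes the headers automatically
    stream = ufile.read().decode('ISO-8859-1')
    return Dataset().load(stream, format='csv')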
Example no. 10
    def render_to_response(self, context, **response_kwargs):
        """If exporting, generate a csv."""
        if 'export' in self.request.GET:
            data = Dataset()
            data.headers = (
                u'Name', u'Email', u'Phone', u'Zip', u'State', u'Joined',
                u'Last login', u'Total Groups Joined',
                u'Flags received', u'Messages sent', u'Staff?', u'Superuser?',
                u'Banned?', u'Visits'
            )

            for user in self.get_queryset():
                data.append((
                    user.get_real_name(), user.email, user.phone, user.zip_code,
                    user.state, user.date_joined, user.last_login,
                    user.total_groups_joined, user.flags_received,
                    user.messages_sent, user.is_staff, user.is_superuser,
                    user.is_banned, user.visit_count
                ))

            response = HttpResponse(
                data.csv,
                content_type='text/csv'
            )
            response['Content-Disposition'] = 'attachment; filename=users.csv'
            return response
        else:
            return super(UserReportListView, self).render_to_response(
                context, **response_kwargs)
Example no. 11
 def test_file(self):
     # Simulate pyreports.io.File object
     file = MagicMock()
     file.raw_data = ['Matteo\n', 'Guadrini\n', '35']
     data = Dataset()
     for line in file.raw_data:
         data.append([line])
     # Read file data
     file.read = MagicMock(return_value=data)
     lines = file.read()
     self.assertIsInstance(lines, Dataset)
     # Write file data
     read_data = ''.join(file.raw_data)
     with open(f'{tmp_folder}/test_file.txt', 'w') as wf:
         wf.write(read_data)
     with patch('__main__.open', mock_open(read_data=read_data)):
         with open(f'{tmp_folder}/test_file.txt') as rf:
             result = rf.read()
     self.assertEqual(read_data, result)
     # Real pyreports.io.File object
     file_real = pyreports.io.TextFile(f'{tmp_folder}/test_file.txt')
     real_data = file_real.read()
     self.assertIsInstance(real_data, Dataset)
     file_real.write(real_data)
     self.assertEqual(file_real.read()[0][0], 'Matteo')
Example no. 12
 def exportDivSchedules(self, startgameday, prefix=""):
     headers = ['Match ID', 'Gameday#', 'Game Date', 'Day', 'Time', 'Division', 'Home', 'Away', 'Field', '', 'Comment']
     datasheet_list = []
     for division in self.leaguedivinfo:
         div_id = division['div_id']
         div_age = division['div_age']
         div_gen = division['div_gen']
         div_str =  div_age + div_gen
         datasheet = Dataset(title=div_str)
         datasheet.headers = list(headers)
         divdata_list = self.dbinterface.findElimTournDivisionSchedule(div_age, div_gen, min_game_id=startgameday)
          tabformat_list = [(y[match_id_CONST], x[gameday_id_CONST],
              tournMapGamedayIdToCalendar(x[gameday_id_CONST]),
              tournMapGamedayIdToDate(x[gameday_id_CONST]),
              datetime.strptime(x[start_time_CONST], "%H:%M").strftime("%I:%M %p"),
              div_str, y[home_CONST], y[away_CONST],
              self.fieldinfo[self.findexerGet(y[venue_CONST])]['name'],
              '', y[comment_CONST])
              for x in divdata_list for y in x[gameday_data_CONST]]
         for tabformat in tabformat_list:
             datasheet.append(tabformat)
         datasheet.append_separator("Prefix Legend: 'S'-Seeded Team#, 'W'-Winning Team (See Match ID), 'L'-Losing Team)")
         datasheet_list.append(datasheet)
     book = Databook(datasheet_list)
     cdir = os.path.dirname(__file__)
     bookname_xls = prefix+'.xls'
     bookname_html = prefix+'.html'
     booknamefull_xls = os.path.join('/home/henry/workspace/datagraph/bottle_baseball/download/xls', bookname_xls)
     booknamefull_html = os.path.join('~/workspace/datagraph/bottle_baseball/download/html', bookname_html)
     with open(booknamefull_xls,'wb') as f:
         f.write(book.xls)
Example no. 13
    def import_data(self,
                    dataset,
                    dry_run=False,
                    raise_errors=False,
                    use_transactions=None,
                    collect_failed_rows=False,
                    **kwargs):
        headers = []
        for header in dataset.headers:
            if header in self.__header_map:
                headers.append(self.__header_map[header])
            else:
                headers.append(header)

        _dataset = Dataset(headers=headers)
        for row in dataset:
            _dataset.append(row)
        dataset = _dataset
        return super(self.__class__,
                     self).import_data(dataset=dataset,
                                       dry_run=dry_run,
                                       raise_errors=raise_errors,
                                       use_transactions=use_transactions,
                                       collect_failed_rows=collect_failed_rows,
                                       **kwargs)
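
The header-remapping loop at the top of Example 13 is the long form of a dictionary lookup with a fallback. A compact equivalent, with header_map standing in for the class-private self.__header_map (the mapping shown is hypothetical):

header_map = {'First Name': 'first_name'}  # hypothetical mapping
headers = [header_map.get(h, h) for h in dataset.headers]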
Example no. 14
 def exportDivTeamSchedules(self, div_id, age, gen, numteams, prefix=""):
     headers = ['Gameday#', 'Game Date', 'Day', 'Start Time', 'Venue', 'Home Team', 'Away Team']
     datasheet_list = []
     for team_id in range(1, numteams+1):
         team_str = age+gen+str(team_id)
         datasheet = Dataset(title=team_str)
         datasheet.headers = list(headers)
         teamdata_list = self.dbinterface.findTeamSchedule(age, gen, team_id)
         tabformat_list = [(x[gameday_id_CONST], tournMapGamedayIdToCalendar(x[gameday_id_CONST]), tournMapGamedayIdToDate(x[gameday_id_CONST]),
                            datetime.strptime(x[start_time_CONST],"%H:%M").strftime("%I:%M %p"),
                            self.fieldinfo[self.findexerGet(x[venue_CONST])]['name'],
                            x[home_CONST], x[away_CONST]) for x in teamdata_list]
         for tabformat in tabformat_list:
             datasheet.append(tabformat)
         datasheet_list.append(datasheet)
     book = Databook(datasheet_list)
     cdir = os.path.dirname(__file__)
     bookname_xls = prefix+age + gen +'_schedule.xls'
     bookname_html = prefix+age + gen +'_schedule.html'
     booknamefull_xls = os.path.join('/home/henry/workspace/datagraph/bottle_baseball/download/xls', bookname_xls)
     booknamefull_html = os.path.join('~/workspace/datagraph/bottle_baseball/download/html', bookname_html)
     with open(booknamefull_xls,'wb') as f:
         f.write(book.xls)
Example no. 16
    def render_to_response(self, context, **response_kwargs):
        """If exporting, generate a csv."""
        if 'export' in self.request.GET:
            data = Dataset()
            data.headers = ('Name', 'Messages', 'Threads', 'Replies',
                            'Posters', 'Flagged messages', 'Category', 'Tags',
                            'State', 'Members', 'Admins', 'Private',
                            'Published', 'Moderated', 'Featured',
                            'Member list published', 'Created', 'Created By',
                            'Photos', 'Photo clicks', 'Links', 'Link clicks')

            for group in self.get_queryset():
                data.append(
                    (group.group.name, group.message_count, group.thread_count,
                     group.reply_count, group.posters,
                     group.flagged, group.category.name,
                     groups_tags_string([group]), group.state,
                     group.member_count, group.owner_count, group.private,
                     group.published, group.moderated, group.featured,
                     group.member_list_published, group.created_at,
                     group.created_by, group.image_count, group.image_clicks
                     or 0, group.link_count, group.link_clicks or 0))

            response = HttpResponse(data.csv, content_type='text/csv')
            response['Content-Disposition'] = 'attachment; filename=groups.csv'
            return response
        else:
            return super(GroupReportListView,
                         self).render_to_response(context, **response_kwargs)
Example no. 17
    def export(self, queryset=None, task_meta=None):
        if queryset is None:
            queryset = self.get_queryset()
        headers = self.get_export_headers()
        data = Dataset(headers=headers)

        if isinstance(queryset, QuerySet):
            # Iterate without the queryset cache, to avoid wasting memory when
            # exporting large datasets.
            iterable = queryset.iterator()
        else:
            iterable = queryset

        if task_meta is not None:  # initialize the total amount across multiple resources
            self.num_done = task_meta['done']

        for obj in iterable:
            data.append(self.export_resource(obj))

            if task_meta is not None:
                self._update_task_state(task_meta)

            logger.debug('Num done: %d' % self.num_done)

        return data
Example no. 18
    def render_to_response(self, context, **response_kwargs):
        """If exporting, generate a csv."""
        if 'export' in self.request.GET:
            data = Dataset()
            data.headers = (
                u'Name', u'Email', u'Phone', u'Zip', u'State', u'Joined',
                u'Last login', u'Total Groups Joined',
                u'Flags received', u'Messages sent', u'Staff?', u'Superuser?',
                u'Banned?', u'Visits'
            )

            for user in self.get_queryset():
                data.append((
                    user, user.email, user.phone, user.zip_code, user.state,
                    user.date_joined, user.last_login,
                    user.total_groups_joined,
                    user.flags_received, user.messages_sent,
                    user.is_staff, user.is_superuser, user.is_banned,
                    user.visit_count
                ))

            response = HttpResponse(
                data.csv,
                content_type='text/csv'
            )
            response['Content-Disposition'] = 'attachment; filename=users.csv'
            return response
        else:
            return super(UserReportListView, self).render_to_response(
                context, **response_kwargs)
Example no. 19
    def render_to_response(self, context, **response_kwargs):
        """If exporting, generate a csv."""
        if 'export' in self.request.GET:
            data = Dataset()
            data.headers = (
                'Name', 'Messages', 'Threads', 'Replies', 'Posters',
                'Category', 'Tags', 'State', 'Members', 'Admins', 'Private',
                'Published', 'Moderated', 'Featured', 'Member list published',
                'Created', 'Created By'
            )

            for group in self.get_queryset():
                data.append((
                    group.group.name, group.message_count, group.thread_count,
                    group.reply_count, group.posters,
                    group.category.name, groups_tags_string([group]),
                    group.state, group.member_count, group.owner_count,
                    group.private, group.published, group.moderated,
                    group.featured, group.member_list_published,
                    group.created_at, group.created_by
                ))

            response = HttpResponse(
                data.csv,
                content_type='text/csv'
            )
            response['Content-Disposition'] = 'attachment; filename=groups.csv'
            return response
        else:
            return super(GroupReportListView, self).render_to_response(
                context, **response_kwargs)
Example no. 20
 def generate_elimdivxls(self, genxls_id):
     headers = ['Match ID', 'Game Date', 'Day', 'Time', 'Division', 'Home',
         'Visitor', 'Venue', 'Round', 'Comment']
     datasheet_list = list()
     for divinfo in self.divinfo_list:
         div_id = divinfo[genxls_id]
         div_age = divinfo['div_age']
         div_gen = divinfo['div_gen']
         div_str = div_age + div_gen
         datasheet = Dataset(title=div_str)
         datasheet.headers = list(headers)
         match_list = self.sdbinterface.get_schedule(genxls_id, div_age=div_age,
             div_gen=div_gen, elim_flag=True)
         # note conversions for time from 24-hour to am/pm format
         tabformat_list = [(y['match_id'], x['game_date'],
             parser.parse(x['game_date']).strftime("%a"),
             datetime.strptime(x['start_time'], "%H:%M").strftime("%I:%M%p"),
             div_str, self.team_map(div_id, y['home']), self.team_map(div_id, y['away']),
             self.fieldinfo_list[self.findexerGet(y['venue'])]['field_name'],
             y['around'], y['comment']) for x in match_list for y in x['gameday_data']]
         for tabformat in tabformat_list:
             datasheet.append(tabformat)
         datasheet_list.append(datasheet)
     book = Databook(datasheet_list)
     bookname_xls_relpath = self.schedcol_name + "_byDivision.xls"
     bookname_xls_fullpath = os.path.join(self.dir_path, bookname_xls_relpath)
     with open(bookname_xls_fullpath, 'wb') as f:
         f.write(book.xls)
     return [{'path': bookname_xls_relpath}]
Example no. 21
def display_table(
    records: Sequence[Any],
    headers: Sequence[str],
    attrs: Sequence[str],
    tablefmt: str = "fancy_grid",
) -> None:
    """

    Args:
      records: Sequence[Any]:
      headers: Sequence[str]:
      attrs: Sequence[str]:
      tablefmt: str:  (Default value = "fancy_grid")

    Returns:

    """
    if len(records) == 0:
        display_error("No results found")
    else:
        data = Dataset()
        data.headers = headers
        for record in records:
            data.append([getattr(record, a) for a in attrs])

        click.echo(data.export("cli", tablefmt=tablefmt))
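
A hypothetical call to display_table, assuming tablib's 'cli' export format (backed by tabulate) is available; the record type here is made up for illustration:

from collections import namedtuple

User = namedtuple('User', ['name', 'email'])
records = [User('alice', 'alice@example.com')]
display_table(records, headers=['Name', 'Email'], attrs=['name', 'email'])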
Example no. 22
def csv_output_computers():
    """Utility function to generate a CSV output of computers information from
    Incredibus data, for audit/cleansing purposes.
    """
    computers = Computer.objects.all()
    d = Dataset()
    d.headers = [
        'ID', 'HOSTNAME', 'CHASSIS', 'PROBABLE OWNER EMAIL',
        'PROBABLE OWNER CC', 'MANAGED BY EMAIL', 'ASSET NO.', 'SERIAL NO.'
    ]

    for i in computers:
        # drop the trailing character of sam_account_name (AD machine accounts end in '$')
        row = [i.pk, i.sam_account_name[:-1], i.chassis]
        if i.probable_owner:
            row += [
                i.probable_owner.email.lower(), i.probable_owner.cost_centre
            ]
        else:
            row += ['', '']
        if i.managed_by:
            row += [i.managed_by.email.lower()]
        else:
            row += ['']
        row += ['', i.serial_number]
        d.append(row)

    with open('/tmp/computers.csv', 'w') as f:
        f.write(d.csv)
Example no. 24
 def exportTeamSchedules(self, div_id, age, gen, numteams, prefix=""):
     headers = [
         'Gameday#', 'Game Date', 'Start Time', 'Venue', 'Home Team',
         'Away Team'
     ]
     cdir = os.path.dirname(__file__)
     for team_id in range(1, numteams + 1):
         team_str = age + gen + str(team_id)
         datasheet = Dataset(title=team_str)
         datasheet.headers = list(headers)
         teamdata_list = self.dbinterface.findTeamSchedule(
             age, gen, team_id)
         tabformat_list = [
             (x[gameday_id_CONST],
              mapGamedayIdToCalendar(x[gameday_id_CONST]),
              datetime.strptime(x[start_time_CONST],
                                "%H:%M").strftime("%I:%M %p"),
              self.fieldinfo[self.findexerGet(x[venue_CONST])]['name'],
              x[home_CONST], x[away_CONST]) for x in teamdata_list
         ]
         for tabformat in tabformat_list:
             datasheet.append(tabformat)
          team_id_str = str(team_id).zfill(2)  # zero-pad to two digits
         sheet_xls_relpath = prefix + age + gen + team_id_str + '_schedule.xls'
         sheet_xls_abspath = os.path.join(
             '/home/henry/workspace/datagraph/bottle_baseball/download/xls',
             sheet_xls_relpath)
         with open(sheet_xls_abspath, 'wb') as f:
             f.write(datasheet.xls)
Example no. 26
    def create(self, request):
        """
            To import CSV file to DB
            URL Structure: /assignment/api/
            Required Fields: myfile
        """

        adult_resource = AdultResource()
        dataset = Dataset()
        myfile = request.FILES['myfile']

        file_data = dataset.load(myfile.read().decode('utf-8'), format='csv')
        data = Dataset()
        data.headers = ('id', 'age', 'work', 'fnlwgt', 'education',
                        'education_num', 'marital_status', 'occupation',
                        'relationship', 'race', 'sex', 'capital_gain',
                        'capital_loss', 'hours_per_week', 'native_country',
                        'salary')

        # note: the last row is skipped, presumably a trailing blank line
        for i in range(0, len(file_data) - 1):
            row = list(file_data[i])
            row.insert(0, i + 1)  # prepend a 1-based id column
            data.append(tuple(row))

        result = adult_resource.import_data(
            data, dry_run=True)  # Test the data import
        if not result.has_errors():
            adult_resource.import_data(data,
                                       dry_run=False)  # Actually import now

        return Response('Success')
Example no. 28
    def post(self):
        node_id = self.get_argument('node_id',None)
        accept_type = self.get_argument('accept_type',None)
        account_number = self.get_argument('account_number',None)
        operator_name = self.get_argument('operator_name',None)
        query_begin_time = self.get_argument('query_begin_time',None)
        query_end_time = self.get_argument('query_end_time',None)
        opr_nodes = self.get_opr_nodes()
        _query = self.db.query(
            models.TrAcceptLog.id,
            models.TrAcceptLog.accept_type,
            models.TrAcceptLog.accept_time,
            models.TrAcceptLog.accept_desc,
            models.TrAcceptLog.operator_name,
            models.TrAcceptLog.accept_source,
            models.TrAcceptLog.account_number,
            models.TrCustomer.node_id,
            models.TrNode.node_name
        ).filter(
            models.TrAcceptLog.account_number == models.TrAccount.account_number,
            models.TrCustomer.customer_id == models.TrAccount.customer_id,
            models.TrNode.id == models.TrCustomer.node_id
        )
        if operator_name:
            _query = _query.filter(models.TrAcceptLog.operator_name == operator_name)
        if node_id:
            _query = _query.filter(models.TrCustomer.node_id == node_id)
        else:
            _query = _query.filter(models.TrCustomer.node_id.in_([i.id for i in opr_nodes]))
        if account_number:
            _query = _query.filter(models.TrAcceptLog.account_number.like('%' + account_number + '%'))
        if accept_type:
            _query = _query.filter(models.TrAcceptLog.accept_type == accept_type)
        if query_begin_time:
            _query = _query.filter(models.TrAcceptLog.accept_time >= query_begin_time + ' 00:00:00')
        if query_end_time:
            _query = _query.filter(models.TrAcceptLog.accept_time <= query_end_time + ' 23:59:59')
        _query = _query.order_by(models.TrAcceptLog.accept_time.desc())
        type_map = ACCEPT_TYPES

        if self.request.path == '/admin/customer/acceptlog':
            return self.render(
                "acceptlog_list.html",
                page_data=self.get_page_data(_query),
                node_list=opr_nodes,
                type_map=type_map,
                get_orderid=lambda aid: self.db.query(models.TrCustomerOrder.order_id).filter_by(accept_id=aid).scalar(),
                **self.get_params()
            )
        elif self.request.path == '/admin/customer/acceptlog/export':
            data = Dataset()
            data.append((u'区域', u'上网账号', u'受理类型', u'受理时间', u'受理渠道', u'操作员', u'受理描述'))
            for i in _query:
                data.append((
                    i.node_name, i.account_number, type_map.get(i.accept_type),
                    i.accept_time, i.accept_source, i.operator_name, i.accept_desc
                ))
            name = u"RADIUS-ACCEPTLOG-" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S") + ".xls"
            return self.export_file(name, data)
Example no. 29
def acceptlog_query(db,render):
    node_id = request.params.get('node_id')
    accept_type = request.params.get('accept_type')
    account_number = request.params.get('account_number')
    operator_name = request.params.get('operator_name')
    query_begin_time = request.params.get('query_begin_time')
    query_end_time = request.params.get('query_end_time')
    opr_nodes = get_opr_nodes(db)
    _query = db.query(
        models.SlcRadAcceptLog.id,
        models.SlcRadAcceptLog.accept_type,
        models.SlcRadAcceptLog.accept_time,
        models.SlcRadAcceptLog.accept_desc,
        models.SlcRadAcceptLog.operator_name,
        models.SlcRadAcceptLog.accept_source,
        models.SlcRadAcceptLog.account_number,
        models.SlcMember.node_id,
        models.SlcNode.node_name
    ).filter(
        models.SlcRadAcceptLog.account_number == models.SlcRadAccount.account_number,
        models.SlcMember.member_id == models.SlcRadAccount.member_id,
        models.SlcNode.id == models.SlcMember.node_id
    )
    if operator_name:
        _query = _query.filter(models.SlcRadAcceptLog.operator_name == operator_name)
    if node_id:
        _query = _query.filter(models.SlcMember.node_id == node_id)
    else:
        _query = _query.filter(models.SlcMember.node_id.in_([i.id for i in opr_nodes]))
    if account_number:
        _query = _query.filter(models.SlcRadAcceptLog.account_number.like('%' + account_number + '%'))
    if accept_type:
        _query = _query.filter(models.SlcRadAcceptLog.accept_type == accept_type)
    if query_begin_time:
        _query = _query.filter(models.SlcRadAcceptLog.accept_time >= query_begin_time + ' 00:00:00')
    if query_end_time:
        _query = _query.filter(models.SlcRadAcceptLog.accept_time <= query_end_time + ' 23:59:59')
    _query = _query.order_by(models.SlcRadAcceptLog.accept_time.desc())
    type_map = ACCEPT_TYPES

    if request.path == '/':
        return render(
            "bus_acceptlog_list",
            page_data=get_page_data(_query),
            node_list=opr_nodes,
            type_map=type_map,
            get_orderid=lambda aid: db.query(models.SlcMemberOrder.order_id).filter_by(accept_id=aid).scalar(),
            **request.params
        )
    elif request.path == '/export':
        data = Dataset()
        data.append((u'区域', u'上网账号', u'受理类型', u'受理时间', u'受理渠道', u'操作员', u'受理描述'))
        for i in _query:
            data.append((
                i.node_name, i.account_number, type_map.get(i.accept_type),
                i.accept_time, i.accept_source, i.operator_name, i.accept_desc
            ))
        name = u"RADIUS-ACCEPTLOG-" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S") + ".xls"
        return export_file(name, data)
Example no. 30
def order_query(db, render):
    node_id = request.params.get('node_id')
    product_id = request.params.get('product_id')
    pay_status = request.params.get('pay_status')
    account_number = request.params.get('account_number')
    query_begin_time = request.params.get('query_begin_time')
    query_end_time = request.params.get('query_end_time')
    opr_nodes = get_opr_nodes(db)
    _query = db.query(
        models.SlcMemberOrder, models.SlcMember.node_id,
        models.SlcMember.realname, models.SlcRadProduct.product_name,
        models.SlcNode.node_name).filter(
            models.SlcMemberOrder.product_id == models.SlcRadProduct.id,
            models.SlcMemberOrder.member_id == models.SlcMember.member_id,
            models.SlcNode.id == models.SlcMember.node_id)
    if node_id:
        _query = _query.filter(models.SlcMember.node_id == node_id)
    else:
        _query = _query.filter(
            models.SlcMember.node_id.in_([i.id for i in opr_nodes]))
    if account_number:
        _query = _query.filter(
            models.SlcMemberOrder.account_number.like('%' + account_number +
                                                      '%'))
    if product_id:
        _query = _query.filter(models.SlcMemberOrder.product_id == product_id)
    if pay_status:
        _query = _query.filter(models.SlcMemberOrder.pay_status == pay_status)
    if query_begin_time:
        _query = _query.filter(
            models.SlcMemberOrder.create_time >= query_begin_time +
            ' 00:00:00')
    if query_end_time:
        _query = _query.filter(
            models.SlcMemberOrder.create_time <= query_end_time + ' 23:59:59')
    _query = _query.order_by(models.SlcMemberOrder.create_time.desc())

    if request.path == '/':
        return render("bus_order_list",
                      node_list=opr_nodes,
                      products=db.query(
                          models.SlcRadProduct).filter_by(product_status=0),
                      page_data=get_page_data(_query),
                      **request.params)
    elif request.path == '/export':
        data = Dataset()
        data.append((u'区域', u"用户姓名", u'上网账号', u'资费', u"订购时间", u'订单费用', u'实缴费用',
                     u'支付状态', u'订购渠道', u'订单描述'))
        _f2y = utils.fen2yuan
        _fms = utils.fmt_second
        _pst = {0: u'未支付', 1: u'已支付', 2: u'已取消'}
        for i, _, _realname, _product_name, _node_name in _query:
            data.append(
                (_node_name, _realname, i.account_number, _product_name,
                 i.create_time, _f2y(i.order_fee), _f2y(i.actual_fee),
                 _pst.get(i.pay_status), i.order_source, i.order_desc))
        name = u"RADIUS-ORDERS-" + datetime.datetime.now().strftime(
            "%Y%m%d-%H%M%S") + ".xls"
        return export_file(name, data)
Example no. 31
def query_dataset(cursor, stmt, *args, **kwargs) -> Dataset:
    cursor.execute(stmt, args)
    table_name = kwargs.get("table_name", "")
    dataset = Dataset(title=table_name)
    dataset.headers = (desc[0] for desc in cursor.description)
    for row in cursor:
        dataset.append(row)
    return dataset
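
query_dataset only relies on the DB-API cursor interface (execute(), description, and row iteration), so it should work with any conforming driver. A hypothetical usage with the standard-library sqlite3 module:

import sqlite3

conn = sqlite3.connect(':memory:')
cur = conn.cursor()
cur.execute('CREATE TABLE t (id INTEGER, name TEXT)')
cur.execute("INSERT INTO t VALUES (1, 'alice')")
print(query_dataset(cur, 'SELECT * FROM t', table_name='t').csv)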
Example no. 32
def compare_webcrawl_ga_with_accounts():
    from tablib import Dataset
    domain_names = Dataset().load(open(DATA_DIR + 'domain_names.csv').read())
    ga_domains = Dataset().load(open(DATA_DIR + 'ga_domains.csv').read())
    ga_billing_subscribers = Dataset().load(
        open(DATA_DIR + 'analytics_usage_201905.csv').read())
    ga_billing_subscriber_codes = [
        subscriber['ID'] for subscriber in ga_billing_subscribers.dict
    ]
    ga_data_subscribers = Dataset().load(
        open(DATA_DIR + 'ga_accounts_views_index.csv').read())
    ga_data_subscribers_codes = [
        subscriber['property_id'] for subscriber in ga_data_subscribers.dict
    ]
    ga_subscriber_codes = list(set().union(ga_billing_subscriber_codes,
                                           ga_data_subscribers_codes))
    agency_hostnames = {domain: agency for domain, agency in domain_names}

    subscriber_agencies = set()
    for domain in ga_domains.dict:
        agency = agency_hostnames.get(
            domain['hostname'],
            agency_hostnames.get(domain['domain_name'], "Unknown Agency"))
        if "GTM" not in domain['ga_code']:
            if domain['ga_code'] in ga_subscriber_codes:
                subscriber_agencies.add(agency)
    non_subscriber_websites = Dataset()
    non_subscriber_websites.headers = ["agency", "hostname", "ga_code"]
    subscriber_websites_not_subscribed = Dataset()
    subscriber_websites_not_subscribed.headers = [
        "agency", "hostname", "ga_code"
    ]
    for domain in ga_domains.dict:
        agency = agency_hostnames.get(
            domain['hostname'],
            agency_hostnames.get(domain['domain_name'], "Unknown Agency"))
        if "GTM" not in domain['ga_code']:
            if domain[
                    'ga_code'] not in ga_subscriber_codes and agency not in subscriber_agencies:
                print("{}: {} has non-subscriber UA code: {}".format(
                    agency, domain['hostname'], domain['ga_code']))
                non_subscriber_websites.append(
                    [agency, domain['hostname'], domain['ga_code']])
            elif domain[
                    'ga_code'] not in ga_subscriber_codes and agency in subscriber_agencies:
                print(
                    "{}: {} has non-subscriber UA code but is a subscribing agency: {}"
                    .format(agency, domain['hostname'], domain['ga_code']))
                subscriber_websites_not_subscribed.append(
                    [agency, domain['hostname'], domain['ga_code']])
    with open(DATA_DIR + '/non_subscriber_websites.csv', 'wt',
              newline='') as f:
        f.write(non_subscriber_websites.csv)
    with open(DATA_DIR + '/subscriber_websites_not_subscribed.csv',
              'wt',
              newline='') as f:
        f.write(subscriber_websites_not_subscribed.csv)
Example no. 34
    def post(self):
        node_id = self.get_argument('node_id',None)
        product_id = self.get_argument('product_id',None)
        pay_status = self.get_argument('pay_status',None)
        account_number = self.get_argument('account_number',None)
        query_begin_time = self.get_argument('query_begin_time',None)
        query_end_time = self.get_argument('query_end_time',None)
        opr_nodes = self.get_opr_nodes()
        _query = self.db.query(
            models.TrCustomerOrder,
            models.TrCustomer.node_id,
            models.TrCustomer.realname,
            models.TrProduct.product_name,
            models.TrNode.node_name
        ).filter(
            models.TrCustomerOrder.product_id == models.TrProduct.id,
            models.TrCustomerOrder.customer_id == models.TrCustomer.customer_id,
            models.TrNode.id == models.TrCustomer.node_id
        )
        if node_id:
            _query = _query.filter(models.TrCustomer.node_id == node_id)
        else:
            _query = _query.filter(models.TrCustomer.node_id.in_([i.id for i in opr_nodes]))
        if account_number:
            _query = _query.filter(models.TrCustomerOrder.account_number.like('%' + account_number + '%'))
        if product_id:
            _query = _query.filter(models.TrCustomerOrder.product_id == product_id)
        if pay_status:
            _query = _query.filter(models.TrCustomerOrder.pay_status == pay_status)
        if query_begin_time:
            _query = _query.filter(models.TrCustomerOrder.create_time >= query_begin_time + ' 00:00:00')
        if query_end_time:
            _query = _query.filter(models.TrCustomerOrder.create_time <= query_end_time + ' 23:59:59')
        _query = _query.order_by(models.TrCustomerOrder.create_time.desc())

        if self.request.path == '/admin/customer/order':
            return self.render("order_list.html",
                          node_list=opr_nodes,
                          products=self.db.query(models.TrProduct).filter_by(product_status=0),
                          page_data=self.get_page_data(_query), **self.get_params())
        elif self.request.path == '/admin/customer/order/export':
            data = Dataset()
            data.append((
                u'区域', u"用户姓名", u'上网账号', u'资费', u"订购时间",
                u'订单费用', u'实缴费用', u'支付状态', u'订购渠道', u'订单描述'
            ))
            _f2y = utils.fen2yuan
            _fms = utils.fmt_second
            _pst = {0: u'未支付', 1: u'已支付', 2: u'已取消'}
            for i, _, _realname, _product_name, _node_name in _query:
                data.append((
                    _node_name, _realname, i.account_number, _product_name,
                    i.create_time, _f2y(i.order_fee), _f2y(i.actual_fee),
                    _pst.get(i.pay_status), i.order_source, i.order_desc
                ))
            name = u"RADIUS-ORDERS-" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S") + ".xls"
            return self.export_file(name, data)
Example no. 35
    def test_logentry_creation_with_import_obj_exception(self):
        # from https://mail.python.org/pipermail/python-dev/2008-January/076194.html
        def monkeypatch_method(cls):
            def decorator(func):
                setattr(cls, func.__name__, func)
                return func

            return decorator

        # Cause an exception in import_row, but only after import is confirmed,
        # so a failure only occurs when ImportMixin.process_import is called.
        class R(BookResource):
            def import_obj(self, obj, data, dry_run):
                if dry_run:
                    super().import_obj(obj, data, dry_run)
                else:
                    raise Exception

        @monkeypatch_method(BookAdmin)
        def get_resource_class(self):
            return R

        # Verify that when an exception occurs in import_row, when raise_errors is False,
        # the returned row result has a correct import_type value,
        # so generating log entries does not fail.
        @monkeypatch_method(BookAdmin)
        def process_dataset(self, dataset, confirm_form, request, *args,
                            **kwargs):
            resource = self.get_import_resource_class()(
                **self.get_import_resource_kwargs(request, *args, **kwargs))
            return resource.import_data(
                dataset,
                dry_run=False,
                raise_errors=False,
                file_name=confirm_form.cleaned_data['original_file_name'],
                user=request.user,
                **kwargs)

        dataset = Dataset(headers=["id", "name", "author_email"])
        dataset.append([1, "Test 1", "*****@*****.**"])
        input_format = '0'
        content = dataset.csv
        f = SimpleUploadedFile("data.csv",
                               content.encode(),
                               content_type="text/csv")
        data = {
            "input_format": input_format,
            "import_file": f,
        }
        response = self.client.post('/admin/core/book/import/', data)
        self.assertEqual(response.status_code, 200)
        confirm_form = response.context['confirm_form']
        data = confirm_form.initial
        response = self.client.post('/admin/core/book/process_import/',
                                    data,
                                    follow=True)
        self.assertEqual(response.status_code, 200)
Example no. 36
def acceptlog_query(db):
    node_id = request.params.get('node_id')
    accept_type = request.params.get('accept_type')
    account_number = request.params.get('account_number')
    operator_name = request.params.get('operator_name')
    query_begin_time = request.params.get('query_begin_time')
    query_end_time = request.params.get('query_end_time')
    _query = db.query(
        models.SlcRadAcceptLog.id,
        models.SlcRadAcceptLog.accept_type,
        models.SlcRadAcceptLog.accept_time,
        models.SlcRadAcceptLog.accept_desc,
        models.SlcRadAcceptLog.operator_name,
        models.SlcRadAcceptLog.accept_source,
        models.SlcRadAcceptLog.account_number,
        models.SlcMember.node_id,
        models.SlcNode.node_name
    ).filter(
            models.SlcRadAcceptLog.account_number == models.SlcRadAccount.account_number,
            models.SlcMember.member_id == models.SlcRadAccount.member_id,
            models.SlcNode.id == models.SlcMember.node_id
    )
    if operator_name:
        _query = _query.filter(models.SlcRadAcceptLog.operator_name == operator_name)
    if node_id:
        _query = _query.filter(models.SlcMember.node_id == node_id)
    if account_number:
        _query = _query.filter(models.SlcRadAcceptLog.account_number.like('%'+account_number+'%'))
    if accept_type:
        _query = _query.filter(models.SlcRadAcceptLog.accept_type == accept_type)
    if query_begin_time:
        _query = _query.filter(models.SlcRadAcceptLog.accept_time >= query_begin_time+' 00:00:00')
    if query_end_time:
        _query = _query.filter(models.SlcRadAcceptLog.accept_time <= query_end_time+' 23:59:59')
    _query = _query.order_by(models.SlcRadAcceptLog.accept_time.desc())
    type_map = {'open':u'开户','pause':u'停机','resume':u'复机','cancel':u'销户','next':u'续费','charge':u'充值'}
    if request.path == '/acceptlog':
        return render(
            "bus_acceptlog_list",
            page_data = get_page_data(_query),
            node_list=db.query(models.SlcNode),
            type_map = type_map,
            get_orderid = lambda aid:db.query(models.SlcMemberOrder.order_id).filter_by(accept_id=aid).scalar(),
            **request.params
        )
    elif request.path == '/acceptlog/export':
        data = Dataset()
        data.append((u'区域',u'上网账号',u'受理类型',u'受理时间',u'受理渠道',u'操作员',u'受理描述'))
        for i in _query:
            data.append((
                i.node_name, i.account_number, type_map.get(i.accept_type),
                i.accept_time,i.accept_source,i.operator_name,i.accept_desc
            ))
        name = u"RADIUS-ACCEPTLOG-" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S") + ".xls"
        with open(u'./static/xls/%s' % name, 'wb') as f:
            f.write(data.xls)
        return static_file(name, root='./static/xls',download=True)
Example no. 37
    def post(self):
        node_id = self.get_argument('node_id', None)
        account_number = self.get_argument('account_number', None)
        query_begin_time = self.get_argument('query_begin_time', None)
        query_end_time = self.get_argument('query_end_time', None)
        opr_nodes = self.get_opr_nodes()
        _query = self.db.query(
            models.TrBilling, models.TrCustomer.node_id,
            models.TrNode.node_name).filter(
                models.TrBilling.account_number ==
                models.TrAccount.account_number,
                models.TrCustomer.customer_id == models.TrAccount.customer_id,
                models.TrNode.id == models.TrCustomer.node_id)
        if node_id:
            _query = _query.filter(models.TrCustomer.node_id == node_id)
        else:
            _query = _query.filter(
                models.TrCustomer.node_id.in_(i.id for i in opr_nodes))
        if account_number:
            _query = _query.filter(
                models.TrBilling.account_number.like('%' + account_number +
                                                     '%'))
        if query_begin_time:
            _query = _query.filter(
                models.TrBilling.create_time >= query_begin_time + ' 00:00:00')
        if query_end_time:
            _query = _query.filter(
                models.TrBilling.create_time <= query_end_time + ' 23:59:59')
        _query = _query.order_by(models.TrBilling.create_time.desc())

        if self.request.path == '/admin/customer/billing':
            return self.render("billing_list.html",
                               node_list=opr_nodes,
                               page_data=self.get_page_data(_query),
                               **self.get_params())

        elif self.request.path == '/admin/customer/billing/export':
            data = Dataset()
            data.append((u'区域', u'上网账号', u'BAS地址', u'会话编号', u'记账开始时间', u'会话时长',
                         u'已扣时长', u"已扣流量", u'应扣费用', u'实扣费用', u'剩余余额', u'剩余时长',
                         u'剩余流量', u'是否扣费', u'扣费时间'))
            _f2y = utils.fen2yuan
            _fms = utils.fmt_second
            _k2m = utils.kb2mb
            _s2h = utils.sec2hour
            for i, _, _node_name in _query:
                data.append(
                    (_node_name, i.account_number, i.nas_addr,
                     i.acct_session_id, i.acct_start_time,
                     _fms(i.acct_session_time), _fms(i.acct_times),
                     _k2m(i.acct_flows), _f2y(i.acct_fee), _f2y(i.actual_fee),
                     _f2y(i.balance), _s2h(i.time_length), _k2m(i.flow_length),
                     (i.is_deduct == 0 and u'否' or u'是'), i.create_time))
            name = u"RADIUS-BILLING-" + datetime.datetime.now().strftime(
                "%Y%m%d-%H%M%S") + ".xls"
            return self.export_file(name, data)
Esempio n. 38
0
    def table_to_dataset(self, table, exclude_columns):
        """Transform a table to a tablib dataset."""
        dataset = Dataset()
        for i, row in enumerate(
                table.as_values(exclude_columns=exclude_columns)):
            if i == 0:
                dataset.headers = row
            else:
                dataset.append(row)
        return dataset
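The first-iteration-sets-headers pattern above is not specific to the table object (presumably django-tables2's as_values()); it works for any iterable whose first row is the header. A self-contained sketch with a hypothetical rows_to_dataset helper:

from tablib import Dataset

def rows_to_dataset(rows):
    # The first row becomes the headers; every later row becomes data.
    dataset = Dataset()
    for i, row in enumerate(rows):
        if i == 0:
            dataset.headers = row
        else:
            dataset.append(row)
    return dataset

print(rows_to_dataset([("id", "name"), (1, "Ada"), (2, "Grace")]).csv)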
Esempio n. 39
0
    def delete_row(self, n):
        # n is 1-based and counts the header as row 1, hence the offset of 2.
        n = n - 2
        if n == -1:
            # Deleting the header row: rebuild the dataset with the first
            # data row promoted to headers.
            new_data = Dataset()
            new_data.headers = self.data[0]
            for row in self.data[1:]:
                new_data.append(row)
            self.data = new_data
        else:
            del self.data[n]
Esempio n. 40
0
    def drop(self):
        # Delete the last row; an empty dataset has nothing to drop.
        n = len(self.data) - 1
        if n == -1:
            return
        del self.data[n]
Esempio n. 41
0
def billing_query(db, render):
    node_id = request.params.get('node_id')
    account_number = request.params.get('account_number')
    query_begin_time = request.params.get('query_begin_time')
    query_end_time = request.params.get('query_end_time')
    opr_nodes = get_opr_nodes(db)
    _query = db.query(
        models.SlcRadBilling, models.SlcMember.node_id,
        models.SlcNode.node_name).filter(
            models.SlcRadBilling.account_number ==
            models.SlcRadAccount.account_number,
            models.SlcMember.member_id == models.SlcRadAccount.member_id,
            models.SlcNode.id == models.SlcMember.node_id)
    if node_id:
        _query = _query.filter(models.SlcMember.node_id == node_id)
    else:
        _query = _query.filter(
            models.SlcMember.node_id.in_(i.id for i in opr_nodes))
    if account_number:
        _query = _query.filter(
            models.SlcRadBilling.account_number.like('%' + account_number +
                                                     '%'))
    if query_begin_time:
        _query = _query.filter(
            models.SlcRadBilling.create_time >= query_begin_time + ' 00:00:00')
    if query_end_time:
        _query = _query.filter(
            models.SlcRadBilling.create_time <= query_end_time + ' 23:59:59')
    _query = _query.order_by(models.SlcRadBilling.create_time.desc())
    if request.path == '/':
        return render("bus_billing_list",
                      node_list=opr_nodes,
                      page_data=get_page_data(_query),
                      **request.params)
    elif request.path == '/export':
        data = Dataset()
        data.append((u'区域', u'上网账号', u'BAS地址', u'会话编号', u'记账开始时间', u'会话时长',
                     u'已扣时长', u"已扣流量", u'应扣费用', u'实扣费用', u'剩余余额', u'剩余时长',
                     u'剩余流量', u'是否扣费', u'扣费时间'))
        _f2y = utils.fen2yuan
        _fms = utils.fmt_second
        _k2m = utils.kb2mb
        _s2h = utils.sec2hour
        for i, _, _node_name in _query:
            data.append(
                (_node_name, i.account_number, i.nas_addr, i.acct_session_id,
                 i.acct_start_time, _fms(i.acct_session_time),
                 _fms(i.acct_times), _k2m(i.acct_flows), _f2y(i.acct_fee),
                 _f2y(i.actual_fee), _f2y(i.balance), _s2h(i.time_length),
                 _k2m(i.flow_length), (i.is_deduct == 0 and u'否'
                                       or u'是'), i.create_time))
        name = u"RADIUS-BILLING-" + datetime.datetime.now().strftime(
            "%Y%m%d-%H%M%S") + ".xls"
        return export_file(name, data)
Esempio n. 42
0
def combine_tally():
    from tablib import Dataset
    data = Dataset()
    for f in glob.glob(DATA_DIR + 'tally_69211100_20190425.csv-*'):
        with open(f, 'rt') as part:
            d = Dataset().load(part.read())
        for row in d:
            data.append(row)

    with open(DATA_DIR + 'tally_69211100_20190425.csv', 'wt', newline='')  as f:
        f.write('path,hits\n')
        f.write(data.csv)
Esempio n. 43
0
    def post(self):
        node_id = self.get_argument('node_id',None)
        account_number = self.get_argument('account_number',None)
        query_begin_time = self.get_argument('query_begin_time',None)
        query_end_time = self.get_argument('query_end_time',None)
        opr_nodes = self.get_opr_nodes()
        _query = self.db.query(
            models.TrBilling,
            models.TrCustomer.node_id,
            models.TrNode.node_name
        ).filter(
            models.TrBilling.account_number == models.TrAccount.account_number,
            models.TrCustomer.customer_id == models.TrAccount.customer_id,
            models.TrNode.id == models.TrCustomer.node_id
        )
        if node_id:
            _query = _query.filter(models.TrCustomer.node_id == node_id)
        else:
            _query = _query.filter(models.TrCustomer.node_id.in_(i.id for i in opr_nodes))
        if account_number:
            _query = _query.filter(models.TrBilling.account_number.like('%' + account_number + '%'))
        if query_begin_time:
            _query = _query.filter(models.TrBilling.create_time >= query_begin_time + ' 00:00:00')
        if query_end_time:
            _query = _query.filter(models.TrBilling.create_time <= query_end_time + ' 23:59:59')
        _query = _query.order_by(models.TrBilling.create_time.desc())

        if self.request.path == '/admin/customer/billing':
            return self.render("billing_list.html",
                          node_list=opr_nodes,
                          page_data=self.get_page_data(_query), **self.get_params())

        elif request.path == '/admin/customer/billing/export':
            data = Dataset()
            data.append((
                u'区域', u'上网账号', u'BAS地址', u'会话编号', u'记账开始时间', u'会话时长',
                u'已扣时长', u"已扣流量", u'应扣费用', u'实扣费用', u'剩余余额',
                u'剩余时长', u'剩余流量', u'是否扣费', u'扣费时间'
            ))
            _f2y = utils.fen2yuan
            _fms = utils.fmt_second
            _k2m = utils.kb2mb
            _s2h = utils.sec2hour
            for i, _, _node_name in _query:
                data.append((
                    _node_name, i.account_number, i.nas_addr, i.acct_session_id,
                    i.acct_start_time, _fms(i.acct_session_time), _fms(i.acct_times), _k2m(i.acct_flows),
                    _f2y(i.acct_fee), _f2y(i.actual_fee), _f2y(i.balance),
                    _s2h(i.time_length), _k2m(i.flow_length),
                    (i.is_deduct == 0 and u'否' or u'是'), i.create_time
                ))
            name = u"RADIUS-BILLING-" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S") + ".xls"
            return self.export_file(name, data)
Esempio n. 44
0
def skip_rows(dataset: Dataset, num_rows: int = 0, col_skip: int = 1) -> None:
    if num_rows <= 0:
        return
    dataset_headers = dataset[num_rows - 1]
    dataset_data = dataset[num_rows:]
    dataset.wipe()
    dataset.headers = dataset_headers
    for r in dataset_data:
        vals = set(("" if c is None else c) for c in r[col_skip:])
        if len(vals) == 1 and "" in vals:
            continue
        dataset.append(tuple(str_normalize(c) if isinstance(c, str) else ("" if c is None else c) for c in r))
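skip_rows depends on an external str_normalize helper that the snippet does not define; with a no-op stand-in assumed in its place, a quick check of the behaviour — discard one banner row, promote the next row to headers, and drop rows that are blank past col_skip:

from tablib import Dataset

def str_normalize(s: str) -> str:
    return s.strip()  # assumed stand-in for the project's real helper

ds = Dataset()
ds.append(("report generated 2019-04-25", ""))  # banner row to discard
ds.append(("id", "name"))                       # will become the header row
ds.append((1, "Ada"))
ds.append((2, ""))                              # blank past col_skip, so dropped

skip_rows(ds, num_rows=2)
print(ds.headers)  # ['id', 'name']
print(ds.height)   # 1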
Esempio n. 45
0
def card_list(db):   
    product_id = request.params.get('product_id')
    card_type = request.params.get('card_type') 
    card_status = request.params.get('card_status')
    batch_no = request.params.get('batch_no')
    query_begin_time = request.params.get('query_begin_time')
    query_end_time = request.params.get('query_end_time')
    
    _query = db.query(models.SlcRechargerCard)
    if product_id and card_type == '0':
        _query = _query.filter(models.SlcRechargerCard.product_id==product_id)
    if card_type:
        _query = _query.filter(models.SlcRechargerCard.card_type==card_type)
    if batch_no:
        _query = _query.filter(models.SlcRechargerCard.batch_no==batch_no)
    if card_status:
        _query = _query.filter(models.SlcRechargerCard.card_status==card_status)
    if query_begin_time:
        _query = _query.filter(models.SlcRechargerCard.create_time >= query_begin_time+' 00:00:00')
    if query_end_time:
        _query = _query.filter(models.SlcRechargerCard.create_time <= query_end_time+' 23:59:59')
    
    products = db.query(models.SlcRadProduct).filter(
        models.SlcRadProduct.product_status == 0,
        models.SlcRadProduct.product_policy.in_([0,2])
    )
    if request.path == '/list':
        print "total:",_query.count()
        return render("card_list", 
            page_data = get_page_data(_query),
            card_types = forms.card_types,
            card_states = forms.card_states,
            products = products,
            colors = {0:'',1:'class="success"',2:'class="warning"',3:'class="danger"'},
            **request.params
        )
    elif request.path == '/export':
        data = Dataset()
        data.append((
            u'批次号',u'充值卡号',u'充值卡密码',u'充值卡类型',u'状态',
            u'资费id', u'面值/售价',u"授权月数",u"过期时间",u'创建时间'
         ))
        print "total:",_query.count()
        for i in _query:
            data.append((
                i.batch_no, i.card_number, utils.decrypt(i.card_passwd),forms.card_types[i.card_type],
                forms.card_states[i.card_status],get_product_name(db,i.product_id),utils.fen2yuan(i.fee_value),
                i.months,i.expire_date,i.create_time
            ))
        name = u"RADIUS-CARD-" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S") + ".xls"
        with open(u'./static/xls/%s' % name, 'wb') as f:
            f.write(data.xls)
        return static_file(name, root='./static/xls',download=True)
Esempio n. 46
0
    def seven_largest_single_data_sheet(self):
        work_sheet = Dataset(title="7 Largest Disbursements")
        work_sheet.headers = ['iso', 'country', 'position', 'shown amount', 'shown donor']

        for country in models.Recipient.objects.all():
            table = LargestDisbursementTable(country=country).as_dictionary()["table"]

            for position, disbursement in enumerate(table):
                formatted = disbursement["disbursement"]
                donor = disbursement["donor"]
                work_sheet.append([country.iso3, country.name, position + 1, formatted,  donor])

        return work_sheet
Esempio n. 47
0
    def five_largest_graph_data_sheet(self):
        work_sheet = Dataset(title="Five Largest Graph")
        work_sheet.headers = ['iso', 'country', 'position', 'shown percentage', 'real percentage', 'donor']

        for country in models.Recipient.objects.all():
            table = FiveLargestGraph(country=country).as_list()
            for position, disbursement in enumerate(table):
                real = disbursement["percentage"]["real"]
                formatted = disbursement["percentage"]["formatted"]
                donor = disbursement["name"]
                work_sheet.append([country.iso3, country.name, position + 1, formatted, real, donor])

        return work_sheet
Esempio n. 48
0
def clean_csv_headers(csv):
    """
    Remove commas, line breaks, etc, anything that will screw
    up the translation from CSV -> database table. CSVKit, in particular,
    doesn't like header columns with these chars in it.
    """
    data = Dataset().load(csv, format="csv")
    headers = [re.sub("[,\"'\n]", "", h) for h in data.headers]

    new_data = Dataset(headers=headers)
    for row in data:
        new_data.append(row)
    return new_data.export("csv")
Esempio n. 49
0
    def export(self, empty=False):
        serializer = self.empty_serializer
        dataset = Dataset(headers=self.get_export_header(serializer))

        if not empty:
            for instance in self.get_export_queryset():
                dataset.append(self.get_export_row(serializer, instance))

        return ExportResult(
            filename=self.get_export_filename(),
            dataset=dataset,
            file_formats=self.file_formats
        )
Esempio n. 50
0
    def other_disbursements_data_sheet(self):
        work_sheet = Dataset(title="Other disbursements")
        work_sheet.headers = ['iso', 'country', 'amount of other disbursements vs 7 largest']

        for country in models.Recipient.objects.all():
            re_disb = re.compile("Other (\d+) Disb\s*")
            disbursements = models.Disbursement.objects.filter(country=country)
            other_disbursements = disbursements.get(donor__contains="Other ")
            ndisb = int(re_disb.match(other_disbursements.donor).groups()[0])

            total_disbursements_count = disbursements.count() - 1 + ndisb
            work_sheet.append([country.iso3, country.name, total_disbursements_count])

        return work_sheet
Esempio n. 51
0
    def multilateral_and_foundation_table_data_sheet(self):
        work_sheet = Dataset(title="Multilateral-Foundation")
        work_sheet.headers = ['iso', 'country', 'number of disbursements', 'total shown disbursement', 'total real disbursement']

        for country in models.Recipient.objects.all():
            table = MultilateralAndFoundationDisbursementSourcesTable(country=country).as_dictionary()

            if len(table):
                number = table["total"]["number_of_disbursements"]
                amount_formatted = table["total"]["amount"]["formatted"]
                amount_real = table["total"]["amount"].get("real", '')
                work_sheet.append([country.iso3, country.name, number, amount_formatted, amount_real])

        return work_sheet
Esempio n. 52
0
    def test_logentry_creation_with_import_obj_exception(self):
        # from https://mail.python.org/pipermail/python-dev/2008-January/076194.html
        def monkeypatch_method(cls):
            def decorator(func):
                setattr(cls, func.__name__, func)
                return func
            return decorator

        # Cause an exception in import_row, but only after import is confirmed,
        # so a failure only occurs when ImportMixin.process_import is called.
        class R(BookResource):
            def import_obj(self, obj, data, dry_run):
                if dry_run:
                    super().import_obj(obj, data, dry_run)
                else:
                    raise Exception

        @monkeypatch_method(BookAdmin)
        def get_resource_class(self):
            return R

        # Verify that when an exception occurs in import_row, when raise_errors is False,
        # the returned row result has a correct import_type value,
        # so generating log entries does not fail.
        @monkeypatch_method(BookAdmin)
        def process_dataset(self, dataset, confirm_form, request, *args, **kwargs):
            resource = self.get_import_resource_class()(**self.get_import_resource_kwargs(request, *args, **kwargs))
            return resource.import_data(dataset,
                                        dry_run=False,
                                        raise_errors=False,
                                        file_name=confirm_form.cleaned_data['original_file_name'],
                                        user=request.user,
                                        **kwargs)

        dataset = Dataset(headers=["id","name","author_email"])
        dataset.append([1, "Test 1", "*****@*****.**"])
        input_format = '0'
        content = dataset.csv
        f = SimpleUploadedFile("data.csv", content.encode(), content_type="text/csv")
        data = {
            "input_format": input_format,
            "import_file": f,
        }
        response = self.client.post('/admin/core/book/import/', data)
        self.assertEqual(response.status_code, 200)
        confirm_form = response.context['confirm_form']
        data = confirm_form.initial
        response = self.client.post('/admin/core/book/process_import/', data,
                                    follow=True)
        self.assertEqual(response.status_code, 200)
Esempio n. 53
0
def dump_connections():
    print('Running connection dump...')
    dataset = Dataset()
    dataset.headers = ['Identity', 'Backend']

    for connection in Connection.objects.filter(identity__startswith='+'):
        dataset.append([
            connection.identity,
            connection.backend.name
        ])

    with open('connections.csv', 'w') as f:
        f.write(dataset.csv)

    print('Done')
Esempio n. 54
0
    def _import(self):
        """Execute the import."""
        import_dataset = Dataset()
        fieldtypes = self._prepare_dataset_to_import()
        import_dataset.headers = self.read_dataset.headers

        inProj = Proj(init='epsg:25831')
        outProj = Proj(init='epsg:4326')

        for row in self.read_dataset.dict:
            # Ignore rows with empty lat or lon
            if row['lon'] is not None and row['lat'] is not None:
                row['lon'], row['lat'] = transform(
                    inProj,
                    outProj,
                    row['lon'],
                    row['lat']
                )
                row = self._check_fieldtypes(row, fieldtypes)
                new = []
                for key in row:
                    new.append(row[key])

                import_dataset.append(new)

        db = connection.cursor()
        import_dataset.headers = None

        with tempfile.NamedTemporaryFile() as f:
            f.write(import_dataset.csv)
            f.seek(0)

            try:
                db.copy_from(f, 'storm_drain',
                             columns=(self.read_dataset.headers),
                             sep=",",
                             null='null')
                self._add_version(
                    self.request,
                    self.read_dataset.dict[0]['version']
                )
                self.response = {
                    'success': True,
                    'headers': self.read_dataset.headers
                }
            except Exception as e:
                error = str(e).replace('\n', ' ').replace('\r', '')
                self.response = {'success': False, 'err': error}
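Proj(init=...) together with the module-level transform() is the legacy pyproj 1.x interface and is deprecated on current releases; the rough modern equivalent (pyproj >= 2) is the Transformer API. A sketch with made-up sample coordinates:

from pyproj import Transformer

# EPSG:25831 (UTM zone 31N) -> EPSG:4326 (WGS84); always_xy=True keeps the
# (x, y) / (lon, lat) axis order that the legacy transform() call used.
transformer = Transformer.from_crs("EPSG:25831", "EPSG:4326", always_xy=True)
lon, lat = transformer.transform(430000.0, 4580000.0)  # sample easting/northing
print(lon, lat)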
Esempio n. 55
0
def billing_query(db):
    node_id = request.params.get('node_id')
    account_number = request.params.get('account_number')
    query_begin_time = request.params.get('query_begin_time')
    query_end_time = request.params.get('query_end_time')
    _query = db.query(
        models.SlcRadBilling,
        models.SlcMember.node_id,
        models.SlcNode.node_name
    ).filter(
        models.SlcRadBilling.account_number == models.SlcRadAccount.account_number,
        models.SlcMember.member_id == models.SlcRadAccount.member_id,
        models.SlcNode.id == models.SlcMember.node_id
    )
    if node_id:
        _query = _query.filter(models.SlcMember.node_id == node_id)
    if account_number:
        _query = _query.filter(models.SlcRadBilling.account_number.like('%'+account_number+'%'))
    if query_begin_time:
        _query = _query.filter(models.SlcRadBilling.create_time >= query_begin_time+' 00:00:00')
    if query_end_time:
        _query = _query.filter(models.SlcRadBilling.create_time <= query_end_time+' 23:59:59')
    _query = _query.order_by(models.SlcRadBilling.create_time.desc())
    if request.path == '/billing':
        return render("bus_billing_list",
            node_list=db.query(models.SlcNode),
            page_data=get_page_data(_query),**request.params)
    elif request.path == '/billing/export':
        data = Dataset()
        data.append((
            u'区域',u'上网账号',u'BAS地址',u'会话编号',u'记账开始时间',u'会话时长',
            u'扣费时长',u'应扣费用',u'实扣费用',u'当前余额',u'是否扣费',u'扣费时间'
        ))
        _f2y = utils.fen2yuan
        _fms = utils.fmt_second
        for i,_,_node_name in _query:
            data.append((
                _node_name, i.account_number, i.nas_addr,i.acct_session_id,
                i.acct_start_time,_fms(i.acct_session_time),_fms(i.acct_length),
                _f2y(i.acct_fee),_f2y(i.actual_fee),_f2y(i.balance),
                (i.is_deduct==0 and u'否' or u'是'),i.create_time
            ))
        name = u"RADIUS-BILLING-" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S") + ".xls"
        with open(u'./static/xls/%s' % name, 'wb') as f:
            f.write(data.xls)
        return static_file(name, root='./static/xls',download=True)
Esempio n. 56
0
    def get(self, request):
        work_sheet = Dataset()
        work_sheet.headers = ['iso', 'recipient', 'year', 'mdgpurpose', 'real value', 'shown value']

        for country in models.Recipient.objects.all():
            data = DisbursementPurposeTable(country).as_dictionary()
            for year in data['years']:
                for purpose in data['names']:
                    try:
                        real = data['data'][year][purpose]['real']
                    except KeyError:
                        real = ''
                    shown = data['data'][year][purpose]['formatted']
                    work_sheet.append([country.iso3, country.name, year, purpose, real, shown])

        response = HttpResponse(work_sheet.xls, content_type='application/ms-excel')
        response['Content-Disposition'] = 'attachment; filename=%s.xls' % u"purpose_disbursements"

        return response
Esempio n. 57
0
def main():
    Cache = dict()
    incomplete = set()
    Data = Dataset(title="packages", headers=["name", "dict"])

    for package in by_key:
        try:
            print("trying {}".format(package))
            _package = PyPiPackage(package)
            Cache[package] = _package
            Data.append((package, _package.__reduce__()))
            # Rewrite the JSON dump after every package so a crash loses
            # at most the current one.
            with open("/Users/Kristen/PycharmProjects/proj/Other/utils/pypi.json", "w") as writer:
                writer.write(Data.json)
            print("{} added to file ".format(package))
        except AttributeError:
            print("XXXX for {}".format(package))
            incomplete.add(package)
Esempio n. 58
0
    def get_dataset(self, fields):
        headers = [field.rpartition('-')[0] for field in fields]
        dataset = Dataset(headers=headers)

        for submission in self.queryset.only('data').iterator():
            row_data = []
            form_fields = [field for field in submission.get_form_data()
                           if field.field_id in fields]

            for header in fields:
                for field in form_fields:
                    if field.field_id == header:
                        row_data.append(field.value)
                        break
                else:
                    # for/else: no field matched this header, so pad with an
                    # empty cell to keep the columns aligned.
                    row_data.append('')

            if row_data:
                dataset.append(row_data)
        return dataset