Example #1
def compare_webcrawl_ga_with_accounts():
    from tablib import Dataset
    domain_names = Dataset().load(open(DATA_DIR + 'domain_names.csv').read())
    ga_domains = Dataset().load(open(DATA_DIR + 'ga_domains.csv').read())
    ga_billing_subscribers = Dataset().load(
        open(DATA_DIR + 'analytics_usage_201905.csv').read())
    ga_billing_subscriber_codes = [
        subscriber['ID'] for subscriber in ga_billing_subscribers.dict
    ]
    ga_data_subscribers = Dataset().load(
        open(DATA_DIR + 'ga_accounts_views_index.csv').read())
    ga_data_subscribers_codes = [
        subscriber['property_id'] for subscriber in ga_data_subscribers.dict
    ]
    ga_subscriber_codes = list(set().union(ga_billing_subscriber_codes,
                                           ga_data_subscribers_codes))
    agency_hostnames = {domain: agency for domain, agency in domain_names}

    subscriber_agencies = set()
    for domain in ga_domains.dict:
        agency = agency_hostnames.get(
            domain['hostname'],
            agency_hostnames.get(domain['domain_name'], "Unknown Agency"))
        if "GTM" not in domain['ga_code']:
            if domain['ga_code'] in ga_subscriber_codes:
                subscriber_agencies.add(agency)
    non_subscriber_websites = Dataset()
    non_subscriber_websites.headers = ["agency", "hostname", "ga_code"]
    subscriber_websites_not_subscribed = Dataset()
    subscriber_websites_not_subscribed.headers = [
        "agency", "hostname", "ga_code"
    ]
    for domain in ga_domains.dict:
        agency = agency_hostnames.get(
            domain['hostname'],
            agency_hostnames.get(domain['domain_name'], "Unknown Agency"))
        if "GTM" not in domain['ga_code']:
            if domain[
                    'ga_code'] not in ga_subscriber_codes and agency not in subscriber_agencies:
                print("{}: {} has non-subscriber UA code: {}".format(
                    agency, domain['hostname'], domain['ga_code']))
                non_subscriber_websites.append(
                    [agency, domain['hostname'], domain['ga_code']])
            elif domain[
                    'ga_code'] not in ga_subscriber_codes and agency in subscriber_agencies:
                print(
                    "{}: {} has non-subscriber UA code but is a subscribing agency: {}"
                    .format(agency, domain['hostname'], domain['ga_code']))
                subscriber_websites_not_subscribed.append(
                    [agency, domain['hostname'], domain['ga_code']])
    with open(DATA_DIR + '/non_subscriber_websites.csv', 'wt',
              newline='') as f:
        f.write(non_subscriber_websites.csv)
    with open(DATA_DIR + '/subscriber_websites_not_subscribed.csv',
              'wt',
              newline='') as f:
        f.write(subscriber_websites_not_subscribed.csv)
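
A minimal sketch of the Dataset().load(...) / .dict pattern the function above relies on (the CSV text is made up for illustration):

from tablib import Dataset

csv_text = 'hostname,agency\nexample.org,Example Agency\n'
rows = Dataset().load(csv_text, format='csv')
print(rows.dict[0]['hostname'])  # -> 'example.org'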
Example #2
    def _import(self):
        """Execute the import."""
        import_dataset = Dataset()
        fieldtypes = self._prepare_dataset_to_import()
        import_dataset.headers = self.read_dataset.headers

        inProj = Proj(init='epsg:25831')
        outProj = Proj(init='epsg:4326')

        for row in self.read_dataset.dict:
            # Ignore rows with empty lat or lon
            if row['lon'] is not None and row['lat'] is not None:
                row['lon'], row['lat'] = transform(
                    inProj,
                    outProj,
                    row['lon'],
                    row['lat']
                )
                row = self._check_fieldtypes(row, fieldtypes)
                new = []
                for key in row:
                    new.append(row[key])

                import_dataset.append(new)

        db = connection.cursor()
        import_dataset.headers = None

        # the csv export is text, so open the temp file in text mode
        with tempfile.NamedTemporaryFile(mode='w+') as f:
            f.write(import_dataset.csv)
            f.seek(0)

            try:
                db.copy_from(f, 'storm_drain',
                             columns=(self.read_dataset.headers),
                             sep=",",
                             null='null')
                self._add_version(
                    self.request,
                    self.read_dataset.dict[0]['version']
                )
                self.response = {
                    'success': True,
                    'headers': self.read_dataset.headers
                }
            except Exception as e:
                error = str(e).replace('\n', ' ').replace('\r', '')
                self.response = {'success': False, 'err': error}
Example #3
 def generate_elimdivxls(self, genxls_id):
     headers = ['Match ID', 'Game Date', 'Day', 'Time', 'Division', 'Home',
         'Visitor', 'Venue', 'Round', 'Comment']
     datasheet_list = list()
     for divinfo in self.divinfo_list:
         div_id = divinfo[genxls_id]
         div_age = divinfo['div_age']
         div_gen = divinfo['div_gen']
         div_str = div_age + div_gen
         datasheet = Dataset(title=div_str)
         datasheet.headers = list(headers)
         match_list = self.sdbinterface.get_schedule(genxls_id, div_age=div_age,
             div_gen=div_gen, elim_flag=True)
         # note conversions for time from 24-hour to am/pm format
         tabformat_list = [(y['match_id'], x['game_date'],
             parser.parse(x['game_date']).strftime("%a"),
             datetime.strptime(x['start_time'], "%H:%M").strftime("%I:%M%p"),
             div_str, self.team_map(div_id, y['home']), self.team_map(div_id, y['away']),
             self.fieldinfo_list[self.findexerGet(y['venue'])]['field_name'],
             y['around'], y['comment']) for x in match_list for y in x['gameday_data']]
         for tabformat in tabformat_list:
             datasheet.append(tabformat)
         datasheet_list.append(datasheet)
     book = Databook(datasheet_list)
     bookname_xls_relpath = self.schedcol_name + "_byDivision.xls"
     bookname_xls_fullpath = os.path.join(self.dir_path, bookname_xls_relpath)
     with open(bookname_xls_fullpath, 'wb') as f:
         f.write(book.xls)
     return [{'path':bookname_xls_relpath}]
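
The Databook pattern above, reduced to a standalone sketch (assumes tablib's xls support is installed, e.g. the tablib[xls] extra; the sheet contents and file name are made up):

from tablib import Dataset, Databook

sheet = Dataset(title='U10B')
sheet.headers = ['Home', 'Visitor']
sheet.append(('Team 1', 'Team 2'))
book = Databook([sheet])
with open('schedule.xls', 'wb') as f:
    f.write(book.xls)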
Example #4
File: views.py  Project: rubper/ANF-2020
def agregar_cuenta_Xls(request, empresa):
    if request.method == 'POST':
        if len(request.FILES) != 0:
            cuenta_Resoucer = CuentaResouce()
            archivo = request.FILES['subircuenta']
            if not archivo.name.endswith('xlsx'):
                messages.error(request, 'Error: the file must be in .xlsx format')
                return redirect('Empresa:cuentas', empresa)
            dato = Dataset()
            dato.headers = ('codigo', 'nombre', 'tipo', 'naturaleza', 'Razon')
            importado = dato.load(archivo.read(), format='xlsx')
            for cuen in importado:
                e = Empresa.objects.get(idEmpresa=empresa)
                cuenta = Cuenta(
                    idEmpresa=e,
                    codigo_cuenta=cuen[0],
                    nombre_cuenta=cuen[1],
                    tipo_cuenta=cuen[2],
                    naturaleza_cuenta=cuen[3],
                    idSobreNombre=cuen[4]
                )
                # save rows from the uploaded file until an empty one is found
                if cuenta.codigo_cuenta is not None:
                    cuenta.save()
                e = None
            messages.info(request, 'The accounts were imported successfully')
        else:
            messages.error(request, 'No file was chosen')
            return redirect('Empresa:cuentas', empresa)
        return redirect('Empresa:cuentas', empresa)
    else:
        return render(request, 'cuenta/importar.html', {'empresa': empresa})
Example #5
 def exportDivSchedules(self, startgameday, prefix=""):
     headers = ['Match ID', 'Gameday#', 'Game Date', 'Day', 'Time', 'Division', 'Home', 'Away', 'Field', '', 'Comment']
     datasheet_list = []
     for division in self.leaguedivinfo:
         div_id = division['div_id']
         div_age = division['div_age']
         div_gen = division['div_gen']
         div_str =  div_age + div_gen
         datasheet = Dataset(title=div_str)
         datasheet.headers = list(headers)
         divdata_list = self.dbinterface.findElimTournDivisionSchedule(div_age, div_gen, min_game_id=startgameday)
         tabformat_list = [(y[match_id_CONST], x[gameday_id_CONST], tournMapGamedayIdToCalendar(x[gameday_id_CONST]), tournMapGamedayIdToDate(x[gameday_id_CONST]), datetime.strptime(x[start_time_CONST],"%H:%M").strftime("%I:%M %p"), div_str, y[home_CONST], y[away_CONST], self.fieldinfo[self.findexerGet(y[venue_CONST])]['name'], '', y[comment_CONST]) for x in divdata_list for y in x[gameday_data_CONST]]
         for tabformat in tabformat_list:
             datasheet.append(tabformat)
         datasheet.append_separator("Prefix Legend: 'S'-Seeded Team#, 'W'-Winning Team (See Match ID), 'L'-Losing Team)")
         datasheet_list.append(datasheet)
     book = Databook(datasheet_list)
     cdir = os.path.dirname(__file__)
     bookname_xls = prefix+'.xls'
     bookname_html = prefix+'.html'
     booknamefull_xls = os.path.join('/home/henry/workspace/datagraph/bottle_baseball/download/xls', bookname_xls)
     booknamefull_html = os.path.join('~/workspace/datagraph/bottle_baseball/download/html', bookname_html)
     with open(booknamefull_xls, 'wb') as f:
         f.write(book.xls)
Example #6
def dump_program_reports():
    print('Running program report dump...')
    dataset = Dataset()
    dataset.headers = ['Site ID', 'Mobile', 'Timestamp', 'Group', 'Program', 'Period code', 'Period number', 'Atot', 'Arel', 'Tin', 'Tout', 'Dead', 'DefT', 'Dcur', 'Dmed']

    for report in ProgramReport.objects.select_related('group', 'program').order_by('created'):
        if not report.reporter.mobile.startswith('+'):
            continue

        dataset.append([
            report.site.hcid,
            report.reporter.mobile,
            timegm(report.created.utctimetuple()),
            report.group.code,
            report.program.code,
            report.period_code,
            report.period_number,
            report.new_marasmic_patients,
            report.readmitted_patients,
            report.patients_transferred_in,
            report.patients_transferred_out,
            report.patient_deaths,
            report.unconfirmed_patient_defaults,
            report.patients_cured,
            report.unresponsive_patients
        ])

    with open('program_reports.csv', 'w') as f:
        f.write(dataset.csv)

    print('Done')
Example #7
    def read(self, query: str, limit: int = None) -> Dataset:
        """Run a read(select) query to the database.

        It should return a list of tuples,
        the first tuple is metadata, each element is a tuple of column name and data type,
        the rest are the records.

        :param query: the sql query
        :param limit: the maximum number of records to return
        :return: a list of tuples
        """
        data = Dataset()
        cursor = self.connection.cursor()
        query = self.add_row_limit_in_query(query, limit)
        cursor.execute(query)
        result = cursor.fetchall()

        if cursor.description:
            data.headers = [
                '{0}({1})'.format(d[0], self.get_data_type(d[1]))
                for d in cursor.description
            ]
        data.extend(result)
        cursor.close()

        return data
Example #8
def display_table(
    records: Sequence[Any],
    headers: Sequence[str],
    attrs: Sequence[str],
    tablefmt: str = "fancy_grid",
) -> None:
    """

    Args:
      records: Sequence[Any]:
      headers: Sequence[str]:
      attrs: Sequence[str]:
      tablefmt: str:  (Default value = "fancy_grid")

    Returns:

    """
    if len(records) == 0:
        display_error("No results found")
    else:
        data = Dataset()
        data.headers = headers
        for record in records:
            data.append([getattr(record, a) for a in attrs])

        click.echo(data.export("cli", tablefmt=tablefmt))
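
A possible call site for display_table (assumes the function and its display_error helper are importable; User and the rows are made-up sample data):

from collections import namedtuple

User = namedtuple('User', ['name', 'email'])
rows = [User('Ada', 'ada@example.com'), User('Alan', 'alan@example.com')]
display_table(rows, headers=['Name', 'Email'], attrs=['name', 'email'])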
Example #9
 def generate_divxls(self, genxls_id):
     headers = [
         'Game Date', 'Day', 'Time', 'Division', 'Home', 'Visitor', 'Venue'
     ]
     datasheet_list = list()
     for divinfo in self.divinfo_list:
         div_id = divinfo[genxls_id]
         div_age = divinfo['div_age']
         div_gen = divinfo['div_gen']
         div_str = div_age + div_gen
         datasheet = Dataset(title=div_str)
         datasheet.headers = list(headers)
         match_list = self.sdbinterface.get_schedule(genxls_id,
                                                     div_age=div_age,
                                                     div_gen=div_gen)
         # note conversions for time from 24-hour to am/pm format
         tabformat_list = [
             (x['game_date'], parser.parse(x['game_date']).strftime("%a"),
              datetime.strptime(x['start_time'],
                                "%H:%M").strftime("%I:%M%p"), div_str,
              y['home'], y['away'], self.fieldinfo_list[self.findexerGet(
                  y['venue'])]['field_name']) for x in match_list
             for y in x['gameday_data']
         ]
         for tabformat in tabformat_list:
             datasheet.append(tabformat)
         datasheet_list.append(datasheet)
     book = Databook(datasheet_list)
     bookname_xls_relpath = self.schedcol_name + "_byDiv.xls"
     bookname_xls_fullpath = os.path.join(self.dir_path,
                                          bookname_xls_relpath)
     with open(bookname_xls_fullpath, 'wb') as f:
         f.write(book.xls)
     return [{'path': bookname_xls_relpath}]
Example #10
def export(request):
    contacts_resource = ExportResource()
    dataset = contacts_resource.export()

    # convert the Dataset to a list of lists
    my_list = []
    for i in dataset:
        my_list.append(list(i))

    # blank out the id column and set created_by to the requesting user
    for count, x in enumerate(my_list):
        my_list[count][0] = ''
        my_list[count][1] = request.user

    # create a new dataset and add the headers
    my_data = Dataset()
    my_data.headers = [
        'id', 'created_by', 'first_name', 'last_name', 'contact_number',
        'address'
    ]

    for x in my_list:
        my_data.append(x)

    response = HttpResponse(my_data.csv, content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename="contacts.csv"'
    return response
Example #11
 def exportTeamSchedules(self, div_id, age, gen, numteams, prefix=""):
     headers = [
         'Gameday#', 'Game Date', 'Start Time', 'Venue', 'Home Team',
         'Away Team'
     ]
     cdir = os.path.dirname(__file__)
     for team_id in range(1, numteams + 1):
         team_str = age + gen + str(team_id)
         datasheet = Dataset(title=team_str)
         datasheet.headers = list(headers)
         teamdata_list = self.dbinterface.findTeamSchedule(
             age, gen, team_id)
         tabformat_list = [
             (x[gameday_id_CONST],
              mapGamedayIdToCalendar(x[gameday_id_CONST]),
              datetime.strptime(x[start_time_CONST],
                                "%H:%M").strftime("%I:%M %p"),
              self.fieldinfo[self.findexerGet(x[venue_CONST])]['name'],
              x[home_CONST], x[away_CONST]) for x in teamdata_list
         ]
         for tabformat in tabformat_list:
             datasheet.append(tabformat)
         team_id_str = str(team_id).zfill(2)
         sheet_xls_relpath = prefix + age + gen + team_id_str + '_schedule.xls'
         sheet_xls_abspath = os.path.join(
             '/home/henry/workspace/datagraph/bottle_baseball/download/xls',
             sheet_xls_relpath)
         with open(sheet_xls_abspath, 'wb') as f:
             f.write(datasheet.xls)
Example #12
 def generate_fieldxls(self):
     headers = [
         'Game Date', 'Day', 'Time', 'Division', 'Home', 'Visitor', 'Venue'
     ]
     datasheet_list = list()
     for fieldinfo in self.fieldinfo_list:
         field_name = fieldinfo['field_name']
         field_id = fieldinfo['field_id']
         datasheet = Dataset(title=field_name)
         datasheet.headers = list(headers)
         match_list = self.sdbinterface.get_schedule('field_id',
                                                     field_id=field_id)
         tabformat_list = [
             (x['game_date'], parser.parse(x['game_date']).strftime("%a"),
              datetime.strptime(x['start_time'],
                                "%H:%M").strftime("%I:%M%p"),
              x['div_age'] + x['div_gen'], x['home'], x['away'], field_name)
             for x in match_list
         ]
         for tabformat in tabformat_list:
             datasheet.append(tabformat)
         datasheet_list.append(datasheet)
     book = Databook(datasheet_list)
     bookname_xls_relpath = self.schedcol_name + "_byField.xls"
     bookname_xls_fullpath = os.path.join(self.dir_path,
                                          bookname_xls_relpath)
     with open(bookname_xls_fullpath, 'wb') as f:
         f.write(book.xls)
     return [{'path': bookname_xls_relpath}]
Example #13
def insert_att_sheet(request):
    context = {}
    if request.method == 'POST':
        att_resource = AttendanceResources()
        dataset = Dataset()
        dataset.headers = ['Roll_no', 'Shift', 'Name', 'Roll_in', 'Roll_out', 'Attendace_time']
        new_sheet = request.FILES['mysheet']

        if not new_sheet.name.endswith('xlsx'):
            context['message'] = "The file must be in Excel format..."
            return render(request, 'Engineer/add_att.html', context)
        import_data = dataset.load(new_sheet.read(), format='xlsx')

        for data in import_data:
            # value = Attendance_Master(
            #     data[0],
            #     data[1],
            #     data[2],
            #     data[3],
            #     data[4],
            #     data[5],
            # )
            # value.save()
            # print(data[0], data[1], data[2], data[3], data[4], data[5])
            Attendance_Master.objects.update_or_create(
                Roll_no=data[0], Shift=Shift_table(shift_id=3), Name=data[2],
                Roll_in=data[3], Roll_out=data[4], Attendace_time=data[5])

        return redirect('/manage_att/')
Example #14
File: utils.py  Project: adonm/oim-cms
def csv_output_computers():
    """Utility function to generate a CSV output of computers information from
    Incredibus data, for audit/cleansing purposes.
    """
    computers = Computer.objects.all()
    d = Dataset()
    d.headers = [
        'ID',
        'HOSTNAME',
        'CHASSIS',
        'PROBABLE OWNER EMAIL',
        'PROBABLE OWNER CC',
        'MANAGED BY EMAIL',
        'ASSET NO.',
        'SERIAL NO.'
    ]

    for i in computers:
        row = [i.pk, i.sam_account_name[:-1], i.chassis]
        if i.probable_owner:
            row += [i.probable_owner.email.lower(), i.probable_owner.cost_centre]
        else:
            row += ['', '']
        if i.managed_by:
            row += [i.managed_by.email.lower()]
        else:
            row += ['']
        row += ['', i.serial_number]
        d.append(row)

    with open('/tmp/computers.csv', 'w') as f:
        f.write(d.csv)
Example #15
File: views.py  Project: mklaber/connect
    def render_to_response(self, context, **response_kwargs):
        """If exporting, generate a csv."""
        if 'export' in self.request.GET:
            data = Dataset()
            data.headers = ('Name', 'Messages', 'Threads', 'Replies',
                            'Posters', 'Flagged messages', 'Category', 'Tags',
                            'State', 'Members', 'Admins', 'Private',
                            'Published', 'Moderated', 'Featured',
                            'Member list published', 'Created', 'Created By',
                            'Photos', 'Photo clicks', 'Links', 'Link clicks')

            for group in self.get_queryset():
                data.append(
                    (group.group.name, group.message_count, group.thread_count,
                     group.reply_count, group.posters,
                     group.flagged, group.category.name,
                     groups_tags_string([group]), group.state,
                     group.member_count, group.owner_count, group.private,
                     group.published, group.moderated, group.featured,
                     group.member_list_published, group.created_at,
                     group.created_by, group.image_count, group.image_clicks
                     or 0, group.link_count, group.link_clicks or 0))

            response = HttpResponse(data.csv, content_type='text/csv')
            response['Content-Disposition'] = 'attachment; filename=groups.csv'
            return response
        else:
            return super(GroupReportListView,
                         self).render_to_response(context, **response_kwargs)
Example #17
    def create(self, request):
        """
            To import CSV file to DB
            URL Structure: /assignment/api/
            Required Fields: myfile
        """

        adult_resource = AdultResource()
        dataset = Dataset()
        myfile = request.FILES['myfile']

        file_data = dataset.load(myfile.read().decode('utf-8'), format='csv')
        data = Dataset()
        data.headers = ('id', 'age', 'work', 'fnlwgt', 'education',
                        'education_num', 'marital_status', 'occupation',
                        'relationship', 'race', 'sex', 'capital_gain',
                        'capital_loss', 'hours_per_week', 'native_country',
                        'salary')

        for i in range(0, len(file_data) - 1):  # note: the final row is skipped
            row = list(file_data[i])
            row.insert(0, i + 1)
            data.append(tuple(row))

        result = adult_resource.import_data(
            data, dry_run=True)  # Test the data import
        if not result.has_errors():
            adult_resource.import_data(data,
                                       dry_run=False)  # Actually import now

        return Response('Success')
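
The id column above is added by rebuilding every row by hand; tablib's insert_col can do the same in one call. A sketch of that alternative (sample rows are made up):

from tablib import Dataset

data = Dataset()
data.headers = ('age', 'work')
data.append((39, 'State-gov'))
data.append((50, 'Self-emp'))
# prepend a 1-based id column without rebuilding the dataset row by row
data.insert_col(0, col=[i + 1 for i in range(len(data))], header='id')
print(data.csv)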
Example #18
 def exportDivTeamSchedules(self, div_id, age, gen, numteams, prefix=""):
     headers = ['Gameday#', 'Game Date', 'Day', 'Start Time', 'Venue', 'Home Team', 'Away Team']
     datasheet_list = []
     for team_id in range(1, numteams+1):
         team_str = age+gen+str(team_id)
         datasheet = Dataset(title=team_str)
         datasheet.headers = list(headers)
         teamdata_list = self.dbinterface.findTeamSchedule(age, gen, team_id)
         tabformat_list = [(x[gameday_id_CONST], tournMapGamedayIdToCalendar(x[gameday_id_CONST]), tournMapGamedayIdToDate(x[gameday_id_CONST]),
                            datetime.strptime(x[start_time_CONST],"%H:%M").strftime("%I:%M %p"),
                            self.fieldinfo[self.findexerGet(x[venue_CONST])]['name'],
                            x[home_CONST], x[away_CONST]) for x in teamdata_list]
         for tabformat in tabformat_list:
             datasheet.append(tabformat)
         datasheet_list.append(datasheet)
     book = Databook(datasheet_list)
     cdir = os.path.dirname(__file__)
     bookname_xls = prefix+age + gen +'_schedule.xls'
     bookname_html = prefix+age + gen +'_schedule.html'
     booknamefull_xls = os.path.join('/home/henry/workspace/datagraph/bottle_baseball/download/xls', bookname_xls)
     booknamefull_html = os.path.join('~/workspace/datagraph/bottle_baseball/download/html', bookname_html)
     with open(booknamefull_xls, 'wb') as f:
         f.write(book.xls)
Example #20
def emergency(current_user):
    # prepare user info
    user_data = user_schema.dump(
        User.query.filter_by(id=current_user.id).first())
    User.query.filter_by(id=current_user.id).first().set_critical_state()
    data = Dataset()
    data.headers = [
        'First Name', 'Last Name', 'Email', 'Address', 'State', 'Age',
        'Travel History', 'Telephone'
    ]
    for i in [(user_data['first_name'], user_data['last_name'],
               user_data['email'], user_data['address'], user_data['state'],
               user_data['age'], user_data['travel_history'], user_data['tel'])
              ]:
        data.append(i)
    with open(f'{os.getcwd()}/user_dat.xlsx', 'wb') as file:
        print(file.name)
        file.write(data.export('xlsx'))
        # actually send the message
        try:
            result = EmergencyMail("Emergency Report!",
                                   render_template('Emergency.html'),
                                   file.name)
            if result:
                return jsonify({'Sent Email': True}), 200
            else:
                return jsonify({'Email not sent': True}), 500
        except Exception:
            raise
Example #21
    def exportDivSchedulesRefFormat(self, startgameday, prefix=""):
        headers = ['Game#', 'Game#', 'Tourn Match#','Date', 'Day', 'Time', 'Division', 'Round', 'Home', 'Visitor', 'Field', 'cr_trust', 'ar_trust', 'm_trust']
        datasheet = Dataset(title=prefix)
        datasheet.headers = list(headers)

        schedule_list = self.dbinterface.findDivisionSchedulePHMSARefFormat(startgameday)
        tabformat_list = [(_offset+x[match_id_CONST], x[match_id_CONST], tournMapGamedayIdToCalendar(x[gameday_id_CONST]), tournMapGamedayIdToDate(x[gameday_id_CONST]),
            datetime.strptime(x[start_time_CONST],"%H:%M").strftime("%I:%M %p"),
            x[age_CONST]+x[gen_CONST], x[round_CONST],
            x[home_CONST], x[away_CONST],
            self.fieldinfo[self.findexerGet(x[venue_CONST])]['name'],
            _reftrust_level[_rindexerGet(getTournDivID(x[age_CONST], x[gen_CONST]))]['cr'],
            _reftrust_level[_rindexerGet(getTournDivID(x[age_CONST], x[gen_CONST]))]['ar'],
            _reftrust_level[_rindexerGet(getTournDivID(x[age_CONST], x[gen_CONST]))]['ment'])
            for x in schedule_list] if prefix else [(mapGamedayIdToCalendar(x[gameday_id_CONST],format=1), 'Saturday',
                datetime.strptime(x[start_time_CONST],"%H:%M").strftime("%I:%M %p"),
                x[age_CONST]+x[gen_CONST],
                x[home_CONST], x[away_CONST],
                self.fieldinfo[self.findexerGet(x[venue_CONST])]['name'])
                for x in schedule_list]
        if prefix:
            atabformat_list = [(_offset+i, j[0], j[1], j[2], j[3], j[4], j[5], j[6], j[7], j[8], j[9], j[10], j[11], j[12]) for i,j in enumerate(tabformat_list)]
        else:
            atabformat_list = tabformat_list
        for tabformat in atabformat_list:
            datasheet.append(tabformat)
        sheet_xls_relpath = prefix+'_RefFormat.xls'
        sheet_xls_abspath = os.path.join('/home/henry/workspace/datagraph/bottle_baseball/download/xls',
                                         sheet_xls_relpath)
        with open(sheet_xls_abspath, 'wb') as f:
            f.write(datasheet.xls)
Example #22
File: views.py  Project: mgifford/connect
    def render_to_response(self, context, **response_kwargs):
        """If exporting, generate a csv."""
        if 'export' in self.request.GET:
            data = Dataset()
            data.headers = (
                'Name', 'Messages', 'Threads', 'Replies', 'Posters',
                'Category', 'Tags', 'State', 'Members', 'Admins', 'Private',
                'Published', 'Moderated', 'Featured', 'Member list published',
                'Created', 'Created By'
            )

            for group in self.get_queryset():
                data.append((
                    group.group.name, group.message_count, group.thread_count,
                    group.reply_count, group.posters,
                    group.category.name, groups_tags_string([group]),
                    group.state, group.member_count, group.owner_count,
                    group.private, group.published, group.moderated,
                    group.featured, group.member_list_published,
                    group.created_at, group.created_by
                ))

            response = HttpResponse(
                data.csv,
                content_type='text/csv'
            )
            response['Content-Disposition'] = 'attachment; filename=groups.csv'
            return response
        else:
            return super(GroupReportListView, self).render_to_response(
                context, **response_kwargs)
Example #23
    def render_to_response(self, context, **response_kwargs):
        """If exporting, generate a csv."""
        if 'export' in self.request.GET:
            data = Dataset()
            data.headers = (
                u'Name', u'Email', u'Phone', u'Zip', u'State', u'Joined',
                u'Last login', u'Total Groups Joined',
                u'Flags received', u'Messages sent', u'Staff?', u'Superuser?',
                u'Banned?', u'Visits'
            )

            for user in self.get_queryset():
                data.append((
                    user.get_real_name(), user.email, user.phone, user.zip_code,
                    user.state, user.date_joined, user.last_login,
                    user.total_groups_joined, user.flags_received,
                    user.messages_sent, user.is_staff, user.is_superuser,
                    user.is_banned, user.visit_count
                ))

            response = HttpResponse(
                data.csv,
                content_type='text/csv'
            )
            response['Content-Disposition'] = 'attachment; filename=users.csv'
            return response
        else:
            return super(UserReportListView, self).render_to_response(
                context, **response_kwargs)
Example #24
File: views.py  Project: mgifford/connect
    def render_to_response(self, context, **response_kwargs):
        """If exporting, generate a csv."""
        if 'export' in self.request.GET:
            data = Dataset()
            data.headers = (
                u'Name', u'Email', u'Phone', u'Zip', u'State', u'Joined',
                u'Last login', u'Total Groups Joined',
                u'Flags received', u'Messages sent', u'Staff?', u'Superuser?',
                u'Banned?', u'Visits'
            )

            for user in self.get_queryset():
                data.append((
                    user, user.email, user.phone, user.zip_code, user.state,
                    user.date_joined, user.last_login,
                    user.total_groups_joined,
                    user.flags_received, user.messages_sent,
                    user.is_staff, user.is_superuser, user.is_banned,
                    user.visit_count
                ))

            response = HttpResponse(
                data.csv,
                content_type='text/csv'
            )
            response['Content-Disposition'] = 'attachment; filename=users.csv'
            return response
        else:
            return super(UserReportListView, self).render_to_response(
                context, **response_kwargs)
Example #25
def dataset(data, headers=None):
    """ `data` is a list of dicts.
    """
    dataset = Dataset()
    dataset.dict = data
    if headers:
        dataset.headers = headers
    return dataset
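
Usage sketch for the helper above (the rows are made up):

rows = [{'name': 'Ada', 'age': 36}, {'name': 'Alan', 'age': 41}]
print(dataset(rows).csv)                            # headers inferred from the dict keys
print(dataset(rows, headers=['Name', 'Age']).csv)   # relabel the columns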
Example #26
def query_dataset(cursor, stmt, *args, **kwargs) -> Dataset:
    cursor.execute(stmt, args)
    table_name = kwargs.get("table_name", "")
    dataset = Dataset(title=table_name)
    dataset.headers = [desc[0] for desc in cursor.description]
    for row in cursor:
        dataset.append(row)
    return dataset
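
Usage sketch with sqlite3 (the table and rows are made up):

import sqlite3

conn = sqlite3.connect(':memory:')
cur = conn.cursor()
cur.execute('CREATE TABLE person (name TEXT, age INTEGER)')
cur.executemany('INSERT INTO person VALUES (?, ?)', [('Ada', 36), ('Alan', 41)])
print(query_dataset(cur, 'SELECT * FROM person WHERE age > ?', 30,
                    table_name='person').csv)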
Example #27
 def test_chunks(self):
     data = Dataset(('Matteo', 'Guadrini', 35), ('Arthur', 'Dent', 42),
                    ('Ford', 'Prefect', 42))
     data.extend([('Matteo', 'Guadrini', 35), ('Arthur', 'Dent', 42),
                  ('Ford', 'Prefect', 42)])
     data.headers = ['name', 'surname', 'age']
     self.assertEqual(
         list(pyreports.chunks(data, 4))[0][0], ('Matteo', 'Guadrini', 35))
Example #28
 def generate_refereexls(self):
     headers = [
         'Match#', 'Date', 'Day', 'Time', 'Division', 'Week#', 'Home',
         'Visitor', 'Field', 'cr_trust', 'ar_trust', 'm_trust'
     ]
     datasheet = Dataset(title="Referee Scheduler Compatible")
     datasheet.headers = list(headers)
     file_list = list()
     return file_list
Example #29
 def drop(self):
     n = len(self.data) - 1
     if n == -1:
         new_data = Dataset()
         new_data.headers = self.data[0]
         for row in self.data[1:]:
             new_data.append(row)
         self.data = new_data
     else:
         del self.data[n]
Example #30
 def table_to_dataset(self, table, exclude_columns):
     """Transform a table to a tablib dataset."""
     dataset = Dataset()
     for i, row in enumerate(
             table.as_values(exclude_columns=exclude_columns)):
         if i == 0:
             dataset.headers = row
         else:
             dataset.append(row)
     return dataset
Example #32
 def delete_row(self, n):
     # convert a 1-based sheet row number to a 0-based data index
     # (row 1 is the header row, so data rows start at n == 2)
     n = n - 2
     if n == -1:
         # deleting the header row: promote the first data row to the headers
         new_data = Dataset()
         new_data.headers = self.data[0]
         for row in self.data[1:]:
             new_data.append(row)
         self.data = new_data
     else:
         del self.data[n]
Example #33
def fetch_timetable(browser, link):
    # Utility method to return a nice Dataset from a timetable url
    if debug:
        puts('Fetching timetable from %s' % link)
    response = browser.follow_link(link)
    soup = BeautifulSoup(response.read())
    table = soup.find('table')
    timetable = []
    for row in table.findAll('tr'):
        title = None
        title_test = row.find('td')
        if title_test.find('span'):
            title = title_test.getText()
            values = []
            for col in row.findAll('td')[1:]:
                value = col.getText()
                if value == '&nbsp;':
                    value = None
                if isinstance(value, basestring) and ':' in value:
                    try:
                        time = value.strip().split(':')
                        time = datetime.time(*[int(s) for s in time])
                    except (ValueError, TypeError):
                        pass
                    else:
                        value = time
                values.append(value)
            timetable.append((title, values))

    while len(timetable):
        if 'TRAIN NO.' not in timetable[0][0]:
            del timetable[0]
        else:
            break

    train_nums = timetable[0]
    data = Dataset()
    data.headers = train_nums[1]
    if debug:
        puts(repr(data.headers))
    for place, times in timetable[1:]:
        if debug:
            puts(repr((place, times)))
        data.rpush(times, tags=[place.title().replace('`S', "'s")])

    # strip out the TRAIN NO. columns
    while True:
        try:
            del data['TRAIN NO.']
        except Exception:
            break

    return data
Example #34
def skip_rows(dataset: Dataset, num_rows: int = 0, col_skip: int = 1) -> None:
    if num_rows <= 0:
        return
    dataset_headers = dataset[num_rows - 1]
    dataset_data = dataset[num_rows:]
    dataset.wipe()
    dataset.headers = dataset_headers
    for r in dataset_data:
        vals = set(("" if c is None else c) for c in r[col_skip:])
        if len(vals) == 1 and "" in vals:
            continue
        dataset.append(tuple(
            str_normalize(c) if isinstance(c, str) else ("" if c is None else c)
            for c in r))
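
Usage sketch for skip_rows; str_normalize is a helper from the same module that is not shown in the excerpt, so a stand-in is defined here (the rows are made up):

from tablib import Dataset

str_normalize = str.strip  # stand-in for the module's helper

ds = Dataset()
ds.append(('junk', '', ''))    # preamble row above the real header
ds.append(('name', 'age', 'city'))
ds.append(('Ada', 36, 'London'))
ds.append((None, None, None))  # blank row that gets dropped
skip_rows(ds, num_rows=2, col_skip=0)
print(ds.headers)              # the promoted header row: name, age, city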
Example #35
    def test_file_to_dataset_incorrect(self):
        """If input file is not correctly decoded, returns an error."""
        dataset = Dataset()
        dataset.append_col(['row1', 'row2'])
        dataset.headers = ['col1']
        encoded_dataset = dataset.export('csv').encode('utf-16')

        bio = io.BytesIO(encoded_dataset)
        uploaded_file = self.create_uploaded_file(file=bio)
        uploaded_dataset, error_msg = self.file_to_dataset(uploaded_file)

        self.assertIsNone(uploaded_dataset)
        self.assertIsNotNone(error_msg)
Example #36
def to_dataset(observations: AnyObservations) -> Dataset:
    """Convert observations to a generic tabular dataset. This can be converted to any of the
    `formats supported by tablib <https://tablib.readthedocs.io/en/stable/formats>`_.
    """
    if isinstance(observations, Dataset):
        return observations

    flat_observations = flatten_observations(observations, flatten_lists=True)
    dataset = Dataset()
    headers, flat_observations = _fix_dimensions(flat_observations)
    dataset.headers = headers
    dataset.extend([item.values() for item in flat_observations])
    return dataset
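
The returned Dataset can then be serialized to any tablib output format, as the docstring notes; a minimal sketch with a hand-built dataset:

from tablib import Dataset

ds = Dataset()
ds.headers = ['id', 'species']
ds.append((1, 'Pica pica'))
print(ds.export('csv'))
print(ds.export('json'))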
Example #37
 def help():
     helps = Dataset()
     helps.headers = ['Input Form', 'Description']
     helps.append(['?', 'Print this help document.'])
     helps.append(['help', 'Print this help document.'])
     helps.append(['.limit <INTEGER>', 'Set the query limit.'])
     helps.append(['.tables [DATABASE]',
                   'Fetch the list of tables in current database or from the specified database.'])
     helps.append(['.ddl <[DATABASE.]TABLE>',
                   'Fetch the DDL of the table in current database or other if it is fully qualified.'])
     helps.append(['.quit', 'Quit.'])
     helps.append(['<QUERY>', 'Any SQL query, can span multiple lines, and end with a ";".'])
     print(tabulate(helps, tablefmt='fancy_grid'))
Example #38
    def seven_largest_single_data_sheet(self):
        work_sheet = Dataset(title="7 Largest Disbursements")
        work_sheet.headers = ['iso', 'country', 'position', 'shown amount', 'shown donor']

        for country in models.Recipient.objects.all():
            table = LargestDisbursementTable(country=country).as_dictionary()["table"]

            for position, disbursement in enumerate(table):
                formatted = disbursement["disbursement"]
                donor = disbursement["donor"]
                work_sheet.append([country.iso3, country.name, position + 1, formatted, donor])

        return work_sheet
Example #39
    def five_largest_graph_data_sheet(self):
        work_sheet = Dataset(title="Five Largest Graph")
        work_sheet.headers = ['iso', 'country', 'position', 'shown percentage', 'real percentage', 'donor']

        for country in models.Recipient.objects.all():
            table = FiveLargestGraph(country=country).as_list()
            for position, disbursement in enumerate(table):
                real = disbursement["percentage"]["real"]
                formatted = disbursement["percentage"]["formatted"]
                donor = disbursement["name"]
                work_sheet.append([country.iso3, country.name, position + 1, formatted, real, donor])

        return work_sheet
Example #41
def to_xls(root: Path, output_file: Path):
    data = Dataset()
    data.title = f"{root.name} CMS"
    data.headers = ['name', 'de', 'en', 'fr', 'it', 'uri']
    rows = to_dict_table(collect_all(root))

    for row in to_row_tuples(rows):
        data.append(row)

    if output_file is None:
        output_file = Path.cwd() / 'output.xls'

    with open(output_file, 'wb') as out:
        out.write(data.export('xls'))
Example #42
    def other_disbursements_data_sheet(self):
        work_sheet = Dataset(title="Other disbursements")
        work_sheet.headers = ['iso', 'country', 'amount of other disbursements vs 7 largest']

        for country in models.Recipient.objects.all():
            re_disb = re.compile("Other (\d+) Disb\s*")
            disbursements = models.Disbursement.objects.filter(country=country)
            other_disbursements = disbursements.get(donor__contains="Other ")
            ndisb = int(re_disb.match(other_disbursements.donor).groups()[0])

            total_disbursements_count = disbursements.count() - 1 + ndisb
            work_sheet.append([country.iso3, country.name, total_disbursements_count])

        return work_sheet
Example #43
    def multilateral_and_foundation_table_data_sheet(self):
        work_sheet = Dataset(title="Multilateral-Foundation")
        work_sheet.headers = ['iso', 'country', 'number of disbursements', 'total shown disbursement', 'total real disbursement']

        for country in models.Recipient.objects.all():
            table = MultilateralAndFoundationDisbursementSourcesTable(country=country).as_dictionary()

            if len(table):
                number = table["total"]["number_of_disbursements"]
                amount_formatted = table["total"]["amount"]["formatted"]
                amount_real = table["total"]["amount"].get("real", '')
                work_sheet.append([country.iso3, country.name, number, amount_formatted, amount_real])

        return work_sheet
Example #44
    def handle(self, *args, **options):
        empty_related_projects = RelatedProject.objects\
            .select_related('project', 'related_project')\
            .filter(related_project__isnull=True, related_iati_id__exact='')
        empty_relations = RelatedProject.objects\
            .select_related('project', 'related_project')\
            .filter(relation__exact='')

        if options['delete']:
            empty_related_projects.delete()
            empty_relations.delete()
        else:
            problematic_relations = empty_related_projects.union(
                empty_relations).order_by('-project_id')
            dataset = Dataset()
            dataset.headers = (
                'project_id',
                'project_title',
                'project_date_end',
                'project_status',
                'program_title',
                'related_project_id',
                'related_project_title',
                'related_project_date_end',
                'related_project_status',
                'related_iati_id',
                'relation',
                'id',
            )
            for item in problematic_relations:
                project = item.project
                related_project = item.related_project
                program = project.get_program()
                dataset.append([
                    project.id,
                    project.title,
                    project.date_end_planned,
                    project.show_plain_status(),
                    program.title if program else None,
                    related_project.id if related_project else None,
                    related_project.title if related_project else None,
                    related_project.date_end_planned
                    if related_project else None,
                    related_project.show_plain_status()
                    if related_project else None,
                    item.related_iati_id,
                    item.iati_relation_unicode(),
                    item.id,
                ])
            print(dataset.export('csv'))
Example #45
    def _import(self):
        """Execute the import."""
        import_dataset = Dataset()
        fieldtypes = self._prepare_dataset_to_import()
        import_dataset.headers = self.read_dataset.headers

        for row in self.read_dataset.dict:
            row = self._check_fieldtypes(row, fieldtypes)
            new = []
            for key in row:
                new.append(row[key])

            import_dataset.append(new)

        db = connection.cursor()
        import_dataset.headers = None

        # the csv export is text, so open the temp file in text mode;
        # delete=False keeps the file on disk afterwards (its path is printed)
        with tempfile.NamedTemporaryFile(mode='w+', delete=False) as f:
            print(f.name)
            f.write(import_dataset.get_csv(delimiter='\t'))
            f.seek(0)

            try:
                # Delete all elements before new import
                self.model.objects.all().delete()
                db.copy_from(f, self.model._meta.db_table,
                             columns=(self.read_dataset.headers),
                             sep="\t",
                             null='')
                self.response = {
                    'success': True,
                    'headers': self.read_dataset.headers
                }
            except Exception as e:
                error = str(e).replace('\n', ' ').replace('\r', '')
                self.response = {'success': False, 'err': error}
Example #46
def dump_connections():
    print('Running connection dump...')
    dataset = Dataset()
    dataset.headers = ['Identity', 'Backend']

    for connection in Connection.objects.filter(identity__startswith='+'):
        dataset.append([
            connection.identity,
            connection.backend.name
        ])

    with open('connections.csv', 'w') as f:
        f.write(dataset.csv)

    print('Done')
Example #47
    def get(self, request):
        work_sheet = Dataset()
        work_sheet.headers = ['iso', 'recipient', 'year', 'mdgpurpose', 'real value', 'shown value']

        for country in models.Recipient.objects.all():
            data = DisbursementPurposeTable(country).as_dictionary()
            for year in data['years']:
                for purpose in data['names']:
                    try:
                        real = data['data'][year][purpose]['real']
                    except KeyError:
                        real = ''
                    shown = data['data'][year][purpose]['formatted']
                    work_sheet.append([country.iso3, country.name, year, purpose, real, shown])

        response = HttpResponse(work_sheet.xls, content_type='application/ms-excel')
        response['Content-Disposition'] = 'attachment; filename=%s.xls' % u"purpose_disbursements"

        return response
Example #48
    def get(self, request):
        work_sheet = Dataset()
        work_sheet.headers = ['iso', 'country', 'indicator', 'year', 'real value', 'shown value']

        for country in models.Recipient.objects.all():
            data = IndicatorTable(country).as_dictionary()
            for year in data['years']:
                for indicator in data['names']:
                    indicator_name = IndicatorCreator.GENERIC_INDICATOR_NAMES[indicator]
                    try:
                        real = data['data'][year][indicator]['real']
                    except KeyError:
                        real = ''
                    shown = data['data'][year][indicator]['formatted']
                    work_sheet.append([country.iso3, country.name, indicator_name, year, real, shown])

        response = HttpResponse(work_sheet.xls, content_type='application/ms-excel')
        response['Content-Disposition'] = 'attachment; filename=%s.xls' % u"table_1"

        return response
Example #49
def dump_stockouts():
    print('Running stockout dump...')
    dataset = Dataset()
    dataset.headers = ['Site ID', 'Mobile', 'Timestamp', 'Items']

    for stockout in StockOutReport.objects.order_by('created'):
        if not stockout.reporter.mobile.startswith('+'):
            continue

        dataset.append([
            stockout.site.hcid,
            stockout.reporter.mobile,
            timegm(stockout.created.utctimetuple()),
            stockout.summary
        ])

    with open('stockouts.csv', 'w') as f:
        f.write(dataset.csv)

    print('Done')
Example #50
def _incident_csv(form_pk, location_type_pk, location_pk=None):
    """Given an incident form id, a location type id, and optionally
    a location id, return a CSV file of the number of incidents of each
    type (form field tag) that has occurred, either for the entire
    deployment or under the given location for each location of the
    specified location type. Only submissions sent in by participants
    are used for generating the data.

    Sample output would be:

    LOC | A | B | ... | Z | TOT
    NY  | 2 | 0 | ... | 5 |  7

    :param form_pk: a :class:`Form` id
    :param location_type_pk: a :class:`LocationType` id
    :param location_pk: an optional :class:`Location` id; if given, only
        submissions under that location will be queried.

    :returns: a string of bytes (str) containing the CSV data.
    """
    form = services.forms.get_or_404(pk=form_pk, form_type='INCIDENT')
    location_type = services.location_types.objects.get_or_404(
        pk=location_type_pk)
    if location_pk:
        location = services.locations.get_or_404(pk=location_pk)
        qs = services.submissions.find(submission_type='O', form=form) \
            .filter_in(location)
    else:
        qs = services.submissions.find(submission_type='O', form=form)

    event = get_event()
    tags = [fi.name for group in form.groups for fi in group.fields]
    qs = qs(created__lte=event.end_date, created__gte=event.start_date)
    df = qs.dataframe()
    ds = Dataset()
    ds.headers = ['LOC'] + tags + ['TOT']

    for summary in incidents_csv(df, location_type.name, tags):
        ds.append([summary.get(heading) for heading in ds.headers])

    return ds.csv
Example #51
def dump_personnel():
    print('Running worker dump...')
    dataset = Dataset()
    dataset.headers = ['Site ID', 'Name', 'Position', 'Email', 'Mobile']

    for worker in Personnel.objects.all():
        if not worker.mobile.startswith('+'):
            continue

        dataset.append([
            worker.site.hcid,
            worker.name,
            worker.position.code,
            worker.email,
            worker.mobile
        ])

    with open('workers.csv', 'w') as f:
        f.write(dataset.csv)

    print('Done')
Example #52
def mergeDebaters(dataset):
    dataset.headers = IN_FIELDS
    debaters_old = dataset.dict
    english_debaters, ihs_debaters = [], []
    for d in debaters_old:
        if d['course_id'] == ENGLISH_COURSE_ID:
            english_debaters.append(d)
        elif d['course_id'] == IHS_COURSE_ID:
            ihs_debaters.append(d)
    dataset_new = Dataset()
    dataset_new.headers = OUT_FIELDS
    debaters_zipped = zipDebaters(english_debaters, ihs_debaters)
    debaters_zipped.reverse()
    for d in debaters_zipped:
        debater = splitName(d)
        # convert the dictionary to a tuple and append it to the dataset
        row = []
        for key in OUT_FIELDS:
            row.append(debater[key])
        dataset_new.append(tuple(row))
    return dataset_new
Example #53
def dump_messages():
    print('Running message dump...')
    dataset = Dataset()
    dataset.headers = ['Connection', 'Backend', 'Direction', 'Timestamp', 'Text']

    for message in Message.objects.all():
        if not message.connection.identity.startswith('+'):
            continue

        dataset.append([
            message.connection.identity,
            message.connection.backend.name,
            message.direction,
            timegm(message.date.utctimetuple()),
            message.text
        ])

    with open('messages.csv', 'w') as f:
        f.write(dataset.csv)

    print('Done')
Example #54
def dump_stock_reports():
    print('Running stock report dump...')
    dataset = Dataset()
    dataset.headers = ['Site ID', 'Mobile', 'Timestamp', 'Items']

    for stock_report in StockReport.objects.order_by('created'):
        if not stock_report.reporter.mobile.startswith('+'):
            continue

        summary = '; '.join(['{} {} {}'.format(log.item.code, log.last_quantity_received, log.current_holding) for log in stock_report.logs.all()])

        dataset.append([
            stock_report.site.hcid,
            stock_report.reporter.mobile,
            timegm(stock_report.created.utctimetuple()),
            summary
        ])

    with open('stock_reports.csv', 'w') as f:
        f.write(dataset.csv)

    print('Done')
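The three dump functions above all convert naive UTC datetimes to epoch seconds via timegm(...utctimetuple()). The import is not shown in these snippets, but timegm comes from the standard library's calendar module; a quick check of the idiom:

from calendar import timegm
from datetime import datetime, timezone

# timegm() treats the time tuple as UTC and returns seconds since the
# epoch, i.e. the UTC counterpart of time.mktime().
now = datetime.now(timezone.utc)
print(timegm(now.utctimetuple()))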
Example #55
    def exportFieldSchedule(self, startgameday, prefix=""):
        headers = ['Game#', 'Date', 'Day', 'Time', 'Division', 'Round', 'Home', 'Visitor']
        datasheet_list = []
        for field in self.fieldinfo:
            field_name = field['name']
            field_id = field['field_id']
            datasheet = Dataset(title=field_name)
            datasheet.headers = list(headers)
            fielddata_list = self.dbinterface.findFieldSchedule(
                field_id, min_game_id=startgameday, tourntype='E')
            tabformat_list = [
                (x[match_id_CONST],
                 tournMapGamedayIdToCalendar(x[gameday_id_CONST]),
                 tournMapGamedayIdToDate(x[gameday_id_CONST]),
                 datetime.strptime(x[start_time_CONST], "%H:%M").strftime("%I:%M %p"),
                 x[age_CONST] + x[gen_CONST],
                 x[round_CONST], x[home_CONST], x[away_CONST])
                for x in fielddata_list]
            for tabformat in tabformat_list:
                datasheet.append(tabformat)
            #datasheet.append_separator("Prefix Legend: 'S'-Seeded Team#, 'W'-Winning Team (See Match ID), 'L'-Losing Team)")
            datasheet_list.append(datasheet)
        book = Databook(datasheet_list)
        cdir = os.path.dirname(__file__)
        bookname_xls = prefix + '_byField.xls'
        bookname_html = prefix + '_byField.html'
        booknamefull_xls = os.path.join(
            '/home/henry/workspace/datagraph/bottle_baseball/download/xls', bookname_xls)
        booknamefull_html = os.path.join(
            os.path.expanduser('~/workspace/datagraph/bottle_baseball/download/html'),
            bookname_html)
        # The with block closes the file; an extra f.close() is redundant.
        with open(booknamefull_xls, 'wb') as f:
            f.write(book.xls)
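This example (and Example #57 below) collects one Dataset per sheet into a tablib Databook, whose title attribute becomes each worksheet's tab name. A self-contained sketch of that multi-sheet export (venue names and rows invented; writing .xls assumes the xlwt-backed format support is installed):

from tablib import Databook, Dataset

sheets = []
for venue in ('North Field', 'South Field'):
    sheet = Dataset(title=venue)  # title -> worksheet tab name
    sheet.headers = ['Game#', 'Home', 'Visitor']
    sheet.append([1, 'A1', 'B1'])
    sheets.append(sheet)

book = Databook(sheets)
with open('schedule_byField.xls', 'wb') as f:
    f.write(book.xls)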
Example #56
    def exportTeamSchedules(self, div_id, age, gen, numteams, prefix=""):
        headers = ['Gameday#', 'Game Date', 'Start Time', 'Venue', 'Home Team', 'Away Team']
        cdir = os.path.dirname(__file__)
        for team_id in range(1, numteams + 1):
            team_str = age + gen + str(team_id)
            datasheet = Dataset(title=team_str)
            datasheet.headers = list(headers)
            teamdata_list = self.dbinterface.findTeamSchedule(age, gen, team_id)
            tabformat_list = [(x[gameday_id_CONST], mapGamedayIdToCalendar(x[gameday_id_CONST]),
                               datetime.strptime(x[start_time_CONST], "%H:%M").strftime("%I:%M %p"),
                               self.fieldinfo[self.findexerGet(x[venue_CONST])]['name'],
                               x[home_CONST], x[away_CONST]) for x in teamdata_list]
            for tabformat in tabformat_list:
                datasheet.append(tabformat)
            # Zero-pad single-digit team numbers so filenames sort correctly.
            team_id_str = str(team_id).zfill(2)
            sheet_xls_relpath = prefix + age + gen + team_id_str + '_schedule.xls'
            sheet_xls_abspath = os.path.join(
                '/home/henry/workspace/datagraph/bottle_baseball/download/xls',
                sheet_xls_relpath)
            # The with block closes the file; an extra f.close() is redundant.
            with open(sheet_xls_abspath, 'wb') as f:
                f.write(datasheet.xls)
Example #57
    def generate_divteamxls(self, div_id_property):
        headers = ['Game Date', 'Day', 'Time', 'Division', 'Home',
                   'Visitor', 'Venue']
        file_list = list()
        for divinfo in self.divinfo_list:
            div_id = divinfo[div_id_property]
            div_age = divinfo['div_age']
            div_gen = divinfo['div_gen']
            div_str = div_age + div_gen
            totalteams = divinfo['totalteams']
            datasheet_list = list()
            teamrange = range(1, totalteams + 1)
            for team_id in teamrange:
                team_str = div_str + str(team_id)
                datasheet = Dataset(title=team_str)
                datasheet.headers = list(headers)
                match_list = self.sdbinterface.get_schedule('team_id',
                    div_age=div_age, div_gen=div_gen, team_id=team_id)
                tabformat_list = [(x['game_date'],
                    parser.parse(x['game_date']).strftime("%a"),
                    datetime.strptime(x['start_time'], "%H:%M").strftime("%I:%M%p"),
                    div_str, x['home'], x['away'],
                    self.fieldinfo_list[self.findexerGet(x['venue'])]['field_name'])
                    for x in match_list]
                for tabformat in tabformat_list:
                    datasheet.append(tabformat)
                datasheet_list.append(datasheet)
            book = Databook(datasheet_list)
            bookname_xls_relpath = self.schedcol_name + div_str + "_byTeam.xls"
            bookname_xls_fullpath = os.path.join(self.dir_path,
                bookname_xls_relpath)
            # The with block closes the file; an extra f.close() is redundant.
            with open(bookname_xls_fullpath, 'wb') as f:
                f.write(book.xls)
            file_list.append({'path': bookname_xls_relpath, 'mdata': div_str})
        return file_list
Example #58
    def export_view(self, request, form_url=''):
        """The 'export' admin view for this model."""

        info = self.opts.app_label, self.opts.model_name

        if not self.has_export_permission(request):
            raise PermissionDenied

        form = SubmissionExportForm(data=request.POST if request.method == 'POST' else None)

        if form.is_valid():
            data = form.cleaned_data
            queryset = self.get_queryset(request) \
                .filter(plugin_id=data.get('form')) \
                .select_related('created_by', 'plugin', )

            from_date, to_date = data.get('from_date'), data.get('to_date')
            headers = data.get('headers', [])

            if from_date:
                queryset = queryset.filter(creation_date__gte=from_date)
            if to_date:
                queryset = queryset.filter(creation_date__lt=to_date + datetime.timedelta(days=1))

            if not queryset.exists():
                message = _('No matching %s found for the given criteria. '
                            'Please try again.') % self.opts.verbose_name_plural
                self.message_user(request, message, level=messages.WARNING)
                if request.is_ajax():
                    data = {
                        'reloadBrowser': True,
                        'submissionCount': 0,
                    }
                    return JsonResponse(data)
                return redirect('admin:%s_%s_export' % info)

            latest_submission = queryset[:1].get()
            dataset = Dataset(title=Truncator(latest_submission.plugin.name).chars(31))

            if not headers:
                headers = [field['label'].strip() for field in latest_submission.form_data]
                for submission in queryset:
                    for field in submission.form_data:
                        label = field['label'].strip()
                        if label not in headers:
                            headers.append(label)

                if request.is_ajax():
                    data = {
                        'reloadBrowser': False,
                        'submissionCount': queryset.count(),
                        'availableHeaders': headers,
                    }
                    return JsonResponse(data)

            headers.extend(['Submitted on', 'Sender IP', 'Referrer URL'])
            dataset.headers = headers

            def humanize(field):
                value = field['value']
                field_type = field['type']

                if value in (None, '', [], (), {}):
                    return None

                if field_type == 'checkbox':
                    value = yesno(bool(value), u'{0},{1}'.format(_('Yes'), _('No')))
                if field_type == 'checkbox_multiple':
                    value = ', '.join(list(value))
                return value

            for submission in queryset:
                row = [None] * len(headers)
                for field in submission.form_data:
                    label = field['label'].strip()
                    if label in headers:
                        row[headers.index(label)] = humanize(field)
                # Fill the trailing metadata columns once per submission,
                # not on every field iteration.
                row[-3] = submission.creation_date.strftime(
                    settings.DJANGOCMS_FORMS_DATETIME_FORMAT)
                row[-2] = submission.ip
                row[-1] = submission.referrer
                dataset.append(row)

            mimetype = {
                'xls': 'application/vnd.ms-excel',
                'csv': 'text/csv',
                'html': 'text/html',
                'yaml': 'text/yaml',
                'json': 'application/json',
            }

            file_type = data.get('file_type', 'xls')
            filename = settings.DJANGOCMS_FORMS_EXPORT_FILENAME.format(
                form_name=slugify(latest_submission.plugin.name))
            filename = timezone.now().strftime(filename)
            filename = '%s.%s' % (filename, file_type)

            response = HttpResponse(
                getattr(dataset, file_type), {
                    'content_type': mimetype.get(file_type, 'application/octet-stream')
                })

            response['Content-Disposition'] = 'attachment; filename=%s' % filename
            return response

        # Wrap in all admin layout
        fieldsets = ((None, {'fields': form.fields.keys()}),)
        adminform = AdminForm(form, fieldsets, {}, model_admin=self)
        media = self.media + adminform.media

        context = {
            'title': _('Export %s') % force_text(self.opts.verbose_name_plural),
            'adminform': adminform,
            'is_popup': (IS_POPUP_VAR in request.POST or IS_POPUP_VAR in request.GET),
            'media': mark_safe(media),
            'errors': AdminErrorList(form, ()),
            'app_label': self.opts.app_label,
        }
        return self.render_export_form(request, context, form_url)
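A closing note on the getattr(dataset, file_type) trick in Example #58: tablib exposes each registered export format as an attribute on Dataset, which is what makes the runtime format dispatch work. A minimal sketch with invented sample data (binary formats such as xls additionally require their optional dependencies):

from tablib import Dataset

ds = Dataset()
ds.headers = ['name', 'email']
ds.append(['Ada', 'ada@example.com'])

# Each registered format (csv, json, html, yaml, ...) is reachable as
# an attribute, so the caller can pick the format at runtime.
for file_type in ('csv', 'json'):
    payload = getattr(ds, file_type)
    print(file_type, payload)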