Example #1
    def post(self, request, *args, **kwargs):

        form = self.form_class(request.POST)

        if form.is_valid():
            instance = form.cleaned_data
            pnt = instance['query_point']
            distance = instance['distance']
            queryset = Observation.objects.filter(gispoint__distance_lte=(pnt, D(km=distance)))

            # Measurements queryset
            qs = Measurement.objects.filter(
                interpolated=False,
                observation__id__in=queryset
            )
            query_path = f'media/tmp/{id_generator()}.csv'
            with open(query_path, 'wb') as csv_file:
                write_csv(qs, csv_file)

            return render(request, self.template_name, {
                'query': queryset,
                'instance': instance,
                'query_path': '/' + query_path
            })

        return render(request, self.template_name, {'form': form})
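
A note on Example #1: the view references self.form_class without showing it. Below is a minimal sketch of a form consistent with the cleaned_data keys used above; only query_point and distance come from the example, the field types and the SearchForm name are assumptions.

# Hypothetical form backing Example #1 (names beyond query_point and
# distance are assumed, not taken from the source project).
from django import forms
from django.contrib.gis.forms import PointField

class SearchForm(forms.Form):
    query_point = PointField()                  # centre of the radius search
    distance = forms.FloatField(min_value=0.0)  # radius in km, fed to D(km=...)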
Example #2
def make_file(session_id, query):
    # query the database and save the results as CSV
    users_set = Users.objects.filter(query=query,
                                     session_id=session_id).values(
                                         'creation_date', 'username', 'name',
                                         'location', 'friends', 'followers',
                                         'description')
    with open('users.csv', 'ab') as csv_file:
        write_csv(users_set, csv_file)

    tweets_set = Tweets.objects.filter(query=query,
                                       session_id=session_id).values(
                                           'tweet_date', 'tweet_username',
                                           'tweet_text')
    with open('tweets.csv', 'ab') as csv_file:
        write_csv(tweets_set, csv_file)

    name = datetime.now().strftime('%Y-%m-%d %H-%M-%S')
    tweets_df = pandas.read_csv('tweets.csv')
    user_info_df = pandas.read_csv('users.csv')
    with pandas.ExcelWriter(name + '.xlsx') as writer:
        tweets_df.to_excel(writer, sheet_name='tweets')
        user_info_df.to_excel(writer, sheet_name='user_info')
    with zipfile.ZipFile(name + '.zip', 'w') as zip_file:
        zip_file.write(name + '.xlsx')
    os.remove(name + '.xlsx')
    os.remove('tweets.csv')
    os.remove('users.csv')
Example #3
def simple_async_csv(job_pk, qs):
    job = ExportJob.objects.get(pk=job_pk)

    file_obj = TemporaryFile()
    write_csv(qs, file_obj)
    job.complete_with(generate_filename(qs), File(file_obj))
    job.save()
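
complete_with is a project helper that this snippet does not define. Example #29 below performs the same steps inline (job.outfile.save(...) followed by a status update), so a plausible shape, offered as an assumption rather than the project's actual code, is:

# Assumed implementation of ExportJob.complete_with, inferred from the
# inline version in Example #29; the field definitions are illustrative.
from django.db import models

class ExportJob(models.Model):
    COMPLETE = 'complete'  # assumed status constant
    status = models.CharField(max_length=32)
    outfile = models.FileField(upload_to='exports/')

    def complete_with(self, filename, file_obj):
        self.outfile.save(filename, file_obj)  # persist the CSV under MEDIA_ROOT
        self.status = self.COMPLETE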
Example #4
def simple_async_csv(job_pk, qs):
    job = ExportJob.objects.get(pk=job_pk)

    file_obj = TemporaryFile()
    write_csv(qs, file_obj)
    job.complete_with(generate_filename(qs), File(file_obj))
    job.save()
Example #5
def export_to_csv(modeladmin, request, queryset):
    """
    add an action to export the queryset behind an admin summary page to csv
    """
    field_names = []
    if hasattr(queryset.model, 'csv_fields'):
        field_names = queryset.model.csv_fields

    csv_file_name = (f'{settings.EXPORT_CSV_MEDIA_ROOT}'
                     f'{timezone.localtime():%Y_%m_%d-%H_%M_%S}-'
                     f'{queryset.model._meta.verbose_name}.csv')

    with open(csv_file_name, 'wb') as csv_file:
        try:
            write_csv(queryset.values(*field_names), csv_file)
        except Exception as error:
            modeladmin.message_user(
                request, f'cannot export the data in the'
                f' {queryset.model._meta.verbose_name} queryset to csv:'
                f' {str(error)}',
                level=messages.ERROR)
            return

    modeladmin.message_user(
        request,
        f'Data exported to {socket.getfqdn()}:/{csv_file_name}',
        level=messages.INFO)
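
For context, an admin action like export_to_csv is normally registered on a ModelAdmin. This is standard Django wiring, not part of the example; MyModel is a placeholder.

# Hypothetical registration of the admin action above.
from django.contrib import admin
from myapp.models import MyModel  # placeholder model

@admin.register(MyModel)
class MyModelAdmin(admin.ModelAdmin):
    actions = [export_to_csv]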
Example #6
def dump_model(fq_name, fields, dump_id):
    _, temp_file_path = tempfile.mkstemp()
    Model = _model_from_fq_name(fq_name)

    if fields is None:
        queryset = Model.objects.all()
    else:
        queryset = Model.objects.all().values(*fields)

    dt_format = get_dt_formatter()

    field_serializer_map = {
        field.name: dt_format for field in Model._meta.fields
        if isinstance(field, (DateField, TimeField, DateTimeField))}

    with open(temp_file_path, 'w') as f:
        # We specify QUOTE_NONNUMERIC here but the current version of
        # djqscsv coerces everything to a string. Overquoting is better
        # than underquoting.
        write_csv(queryset, f, quoting=csv.QUOTE_NONNUMERIC,
                  field_serializer_map=field_serializer_map)

    model_name = Model.__name__.lower()
    file_name = 'dump/{}/{}.csv'.format(dump_id, model_name)

    with open(temp_file_path, 'r') as f:
        destination_path = _storage.save(file_name, f)
    os.remove(temp_file_path)

    return [destination_path]
Example #7
    def handle(self, *args, **kwargs):
        path = kwargs['file_location']

        all_entries = Squirrel.objects.all()

        with open(path, 'wb') as csv_file:
            write_csv(all_entries, csv_file)

        msg = f'You have successfully exported data to {path}.'
        self.stdout.write(self.style.SUCCESS(msg))
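
The command above reads kwargs['file_location'], so its argument parser presumably looks like the sketch below; add_arguments is not shown in the source, so this is an assumption.

    # Assumed argument declaration for the management command above.
    def add_arguments(self, parser):
        parser.add_argument('file_location', type=str,
                            help='destination path for the exported CSV')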
Example #8
def gerar_csv_completo_mp():
    try:
        queryset = Responsavel.objects.filter(enviado_para_mercado_pago=True)
        queryset_to_csv = queryset.annotate(
            get_celular=Concat('ddd_celular', V(' '), 'celular')).values(
                'nome', 'alunos__nome', 'codigo_eol_aluno', 'cpf', 'email',
                'get_celular', 'vinculo', 'data_nascimento', 'nome_mae',
                'status', 'nao_possui_celular', 'nao_possui_email',
                'responsavel_alterado')
        qtd_linhas_qs = queryset_to_csv.count()
        hoje = date.today()
        nome_arquivo = f'responsavel_export_completo_ate_{hoje}.csv'
        path = join(MEDIA_ROOT, nome_arquivo)
        zip_obj = zipfile.ZipFile(path.replace('.csv', '.zip'), 'w')

        log.info('Inicia geração de arquivo CSV.')
        with open(path, 'wb') as csv_file:
            write_csv(queryset_to_csv,
                      csv_file,
                      field_header_map={
                          'nome': 'nome_responsavel',
                          'alunos__nome': 'nome_aluno',
                          'get_celular': 'celular',
                          'nome_mae': 'nome_mae_responsavel'
                      },
                      use_verbose_names=False)

        with open(path) as csv_file:
            reader = csv.reader(csv_file)
            qtd_linhas_arquivo = len(list(reader)) - 1  # line count minus the header
        log.info(f'CSV gerado: Quantidade de linhas: {qtd_linhas_arquivo}')
        log.info('Comprimindo arquivo')
        zip_obj.write(path, basename(path))
        zip_obj.close()

        if qtd_linhas_qs == qtd_linhas_arquivo and qtd_linhas_qs > 0:
            log.info('Inicia envio de e-mail para o MP')
            enviar_email_mp(
                assunto=f'Lista completa novos beneficiarios - {hoje}',
                mensagem=(f'E-mail automático. Não responda. ' +
                          f'Clique neste link para fazer download do csv: ' +
                          f'{url(nome_arquivo)}'),
                csv=url(nome_arquivo))
        else:
            # TODO: find a way to store this information in the database
            log.info(
                f'Divergencia no número de linhas da query ({qtd_linhas_qs}) com o número de '
                f'linhas do arquivo gerado ({qtd_linhas_arquivo}) ou query sem registro. '
                'Registros não foram atualizados e e-mail não foi enviado.')
    except Exception as e:
        log.error(
            'Falha no processo de geração de arquivo e envio de e-mail: ' +
            str(e))
Example #9
    def handle(self, *args, **options):
        filename = options['filename'][0]
        qset = Changeset.objects.filter(
            harmful=True).select_related('user_detail').values(
                'id', 'user', 'editor', 'powerfull_editor', 'comment',
                'source', 'imagery_used', 'date', 'reasons', 'reasons__name',
                'create', 'modify', 'delete', 'bbox', 'is_suspect', 'harmful',
                'checked', 'check_user', 'check_date')
        with open(filename, 'wb') as csv_file:
            write_csv(qset, csv_file)

        self.stdout.write('File {} created.'.format(filename))
Example #10
    def handle(self, *args, **options):
        values = 'id fdid state name dist_model_score dist_model_score_quartile population ' \
                 'population_class risk_model_deaths risk_model_deaths_quartile risk_model_injuries ' \
                 'risk_model_injuries_quartile risk_model_fires risk_model_fires_quartile risk_model_fires_size0 ' \
                 'risk_model_fires_size0_quartile risk_model_fires_size1 risk_model_fires_size1_quartile ' \
                 'risk_model_fires_size2 risk_model_fires_size2_quartile ' \
                 'risk_model_size1_percent_size2_percent_sum_quartile risk_model_deaths_injuries_sum ' \
                 'risk_model_deaths_injuries_sum_quartile'.split()

        field_map_header = {'risk_model_fires_size0': 'risk_model_fires_size1',
                            'risk_model_fires_size1': 'risk_model_fires_size2',
                            'risk_model_fires_size2': 'risk_model_fires_size3',
                            'risk_model_size1_percent_size2_percent_sum_quartile': 'risk_model_size2_percent_size3_percent_sum',
                            }

        def dump(queryset, path):
            # write one CSV per queryset and close the file handle afterwards
            with open(path, 'wb') as csv_file:
                djqscsv.write_csv(queryset, csv_file, use_verbose_names=False,
                                  field_header_map=field_map_header)

        departments = FireDepartment.objects.filter(archived=False)

        for clazz in range(0, 10):
            dump(departments.filter(population_class=clazz).as_quartiles().values(*values),
                 '/tmp/population_class_{0}.csv'.format(clazz))

        # by region
        for region in ['West', 'South', 'Midwest', 'Northeast']:
            dump(departments.filter(population_class__lte=8, region=region).as_quartiles().values(*values),
                 '/tmp/non_metropolitan_{0}.csv'.format(region))

        for region in ['West', 'South', 'Midwest', 'Northeast']:
            for clazz in range(0, 9):
                dump(departments.filter(population_class=clazz, region=region).as_quartiles().values(*values),
                     '/tmp/{0}_population_class_{1}.csv'.format(region, clazz))

        dump(departments.filter(population_class__in=[7, 8], region='Northeast').as_quartiles().values(*values),
             '/tmp/Northeast_population_class_7_8.csv')
Example #11
    def post(self, request, *args, **kwargs):
        serializer = ExportsSerializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        data = serializer.data
        workspace = data.get('workspace')
        filename = data.get('filename')
        qs = self.get_queryset(workspace)

        if filename == 'False' or not filename:
            return render_to_csv_response(qs)
        else:
            filename = utils.clean_path(filename)
            with open(filename, 'wb') as csv_file:
                write_csv(qs, csv_file)
            return common.message(200, filename)
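
For contrast with the file-writing branch above: render_to_csv_response returns an HttpResponse with CSV content and an attachment disposition, and forwards the same keyword arguments write_csv accepts. A minimal sketch based on djqscsv's documented API:

# Hedged sketch; export_view and its qs argument are placeholders.
from djqscsv import render_to_csv_response

def export_view(request, qs):
    return render_to_csv_response(qs, use_verbose_names=False)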
Example #12
File: views.py Project: ihm49/issf
def write_file_csv(filename: str, records: QuerySet, zipfile: ZipFile) -> None:
    """
    Writes records to a specified file in a zipfile.

    :param filename: The filename to write to
    :param records: The records to write
    :param zipfile: The zipfile to write to
    """
    if len(records) > 0:
        with open('/issf/issf_prod/' + filename, 'wb+') as csvfile:
            djqscsv.write_csv(records, csvfile, use_verbose_names=False)
        zipfile.write(csvfile.name, os.path.basename(csvfile.name))
Example #13
    def build_msg(self, order: Order, address: str) -> EmailMessage:
        body_init = "New order has just been saved on store. Details in attached file\n"
        body_addr = f"Address: {address}\n"
        body_price = f"Amount to pay: {order.total_amount}\n"
        msg = EmailMessage(
            f"New Order {order.id}",
            f"{body_init} Order Data:\n - {body_addr} - {body_price}",
            "Bernini Store <*****@*****.**>",
            ["Orders <*****@*****.**>"])
        products = order.products.all()
        with open("products.csv", "wb") as attachment:
            write_csv(products, attachment)
        msg.attach_file("products.csv")

        return msg
Example #14
def get_report():
    """Generates a csv file that stores student name, creation date of
    homework result and teacher name who created homework for all
    completed homeworks.
    """

    qs = HomeworkResult.objects.filter(done=True).values(
        "author__first_name", "created", "homework__author__first_name")

    headers = {
        "author__first_name": "Student_name",
        "created": "Creation_date",
        "homework__author__first_name": "Teacher_name",
    }

    with open("report.csv", "wb") as csv_file:
        write_csv(qs, csv_file, field_header_map=headers)
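
A quick sanity check of the header mapping above, offered as a sketch: it assumes get_report() has just written report.csv to the working directory.

import csv

with open("report.csv", newline="") as f:
    header = next(csv.reader(f))
# the renamed headers should appear instead of the raw lookup paths
assert set(header) == {"Student_name", "Creation_date", "Teacher_name"}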
Example #15
def cadastro_de_processos(request):

    # Sort the processes by their last-update date
    processos = Processos.objects.order_by('data_atualizacao')

    # Check whether the "cadastrar" button was pressed. If so, save the request
    # inputs plus the results of the helpers in utils.py to the model, write
    # them to a .csv, and render the template.
    form = ProcessosForm(request.POST)
    if request.method == 'POST' and 'run_script' in request.POST:
        if form.is_valid():
            classe = request.POST.get("classe", None)
            numero = request.POST.get("numero", None)
            descricao = request.POST.get("descricao", None)
            emails = request.POST.get("emails", None)
            incidente_id = get_incidente_id(classe, numero)
            # check whether the process is already registered in the database:
            try:
                process_sel = Processos.objects.get(incidente_id=incidente_id)
            except Processos.DoesNotExist:
                data_atualizacao = get_data_atualizacao(incidente_id)
                descricao_atualizacao = get_descricao_atualizacao(incidente_id)
                url = "http://portal.stf.jus.br/processos/detalhe.asp?incidente=" + incidente_id
                b4 = Processos(classe=str(classe), numero=str(numero),
                               descricao=str(descricao), emails=str(emails),
                               incidente_id=str(incidente_id),
                               data_atualizacao=str(data_atualizacao),
                               descricao_atualizacao=str(descricao_atualizacao),
                               url=str(url))
                b4.save()

                # write one row to resultados.csv
                fields = [classe, numero, incidente_id, descricao,
                          data_atualizacao, descricao_atualizacao, emails, url]
                with open('resultados.csv', 'a', encoding='utf-8', newline='') as f:
                    writer = csv.writer(f)
                    writer.writerow(fields)
                return redirect('/cadastro_de_processos/')
            messages.error(request, 'Processo já cadastrado.')
            return redirect('/cadastro_de_processos/')
                         
        else:
            form = ProcessosForm()
    # save the full queryset to a csv
    qs = Processos.objects.all()
    with open('lista_de_processos.csv', 'wb') as csv_file:
        write_csv(qs, csv_file)
    return render(request, 'consulta_a_processos/cadastro_de_processos.html')       
Example #16
def generate_task_csv(report, filename):
    queryset = Task.objects.filter(entity=report.entity).filter(
        is_deleted=False)
    if report.employee:
        queryset = queryset.filter(employee=report.employee)
    ranged_queryset = queryset.filter(due_date__range=(report.start_date,
                                                       report.end_date))
    ordered_queryset = ranged_queryset.order_by('status', 'due_date')
    values_queryset = ordered_queryset.values(
        'subject', 'lead__first_name', 'lead__last_name', 'lead__company_name',
        'customer__first_name', 'customer__last_name', 'company__name',
        'status', 'priority', 'due_date', 'deal__name',
        'employee__user__first_name', 'description', 'created_at',
        'notes__note')
    field_header_map = {
        'lead__first_name': 'lead first name',
        'lead__last_name': 'lead last name',
        'lead__company_name': 'lead company name',
        'customer__first_name': 'customer first name',
        'customer__last_name': 'customer last name',
        'deal__name': 'deal name',
        'employee__user__first_name': 'employee name',
        'notes__note': 'note'
    }
    status_dict = {'O': 'Open', 'C': 'Closed', 'P': 'Progress', '': ''}
    priority_dict = {'H': 'High', 'M': 'Medium', 'L': 'Low', '': ''}

    WIB = Zone(WIB_ZONE, False, 'WIB')
    field_serializer_map = {
        'status': (lambda x: status_dict[x]),
        'priority': (lambda x: priority_dict[x]),
        'due_date': (lambda x: x.strftime('%d/%m/%Y')),
        'created_at': (lambda x: x.astimezone(WIB).strftime('%d/%m/%Y %H:%M:%S %Z'))
    }

    with open(filename, 'w') as csv_file:
        write_csv(values_queryset,
                  csv_file,
                  field_header_map=field_header_map,
                  field_serializer_map=field_serializer_map)
Example #17
def generate_lead_csv(report, filename):
    queryset = Lead.objects.filter(entity=report.entity).filter(
        is_deleted=False)
    if report.employee:
        queryset = queryset.filter(employee=report.employee)
    ranged_queryset = queryset.filter(created_at__range=(report.start_date,
                                                         report.end_date))
    ordered_queryset = ranged_queryset.order_by('status', 'lead_source')
    values_queryset = ordered_queryset.values(
        'first_name', 'last_name', 'status', 'lead_source', 'company_name',
        'phone', 'mobile_phone', 'email', 'fax', 'position',
        'employee__user__first_name', 'street', 'city', 'state', 'country',
        'pos_code', 'description', 'created_at')

    field_header_map = {'employee__user__first_name': 'employee name'}
    status_dict = {'O': 'Open', 'C': 'Closed', 'CV': 'Converted', '': ''}
    lead_source_dict = {
        'OFA': 'Offline Ads',
        'ONA': 'Online Ads',
        'CC': 'Cold Call',
        'IR': 'Internal Referral',
        'ER': 'External Referral',
        'P': 'Partner',
        'S': 'Sales',
        'TS': 'Trade Show',
        'SR': 'Seminar',
        '': ''
    }
    WIB = Zone(WIB_ZONE, False, 'WIB')
    field_serializer_map = {
        'status': (lambda x: status_dict[x]),
        'lead_source': (lambda x: lead_source_dict[x]),
        'created_at': (lambda x: x.astimezone(WIB).strftime('%d/%m/%Y %H:%M:%S %Z'))
    }

    with open(filename, 'w') as csv_file:
        write_csv(values_queryset,
                  csv_file,
                  field_header_map=field_header_map,
                  field_serializer_map=field_serializer_map)
Example #18
def generate_deal_csv(report, filename):
    queryset = Deal.objects.filter(entity=report.entity).filter(
        is_deleted=False)
    if report.employee:
        queryset = queryset.filter(employee=report.employee)
    ranged_queryset = queryset.filter(
        expected_closing_date__range=(report.start_date, report.end_date))
    ordered_queryset = ranged_queryset.order_by('status',
                                                'expected_closing_date')
    values_queryset = ordered_queryset.values(
        'name', 'customer__first_name', 'customer__last_name', 'company__name',
        'status', 'expected_closing_date', 'expected_revenue',
        'employee__user__first_name', 'description', 'created_at')
    field_header_map = {
        'customer__first_name': 'customer first name',
        'customer__last_name': 'customer last name',
        'company__name': 'company name',
        'employee__user__first_name': 'employee name'
    }
    status_dict = {
        'O': 'Open',
        'P': 'Progress',
        'CW': 'Won',
        'CL': 'Lost',
        '': ''
    }
    WIB = Zone(WIB_ZONE, False, 'WIB')
    field_serializer_map = {
        'status': (lambda x: status_dict[x]),
        'expected_closing_date': (lambda x: x.strftime('%d/%m/%Y')),
        'created_at': (lambda x: x.astimezone(WIB).strftime('%d/%m/%Y %H:%M:%S %Z'))
    }

    with open(filename, 'w') as csv_file:
        write_csv(values_queryset,
                  csv_file,
                  field_header_map=field_header_map,
                  field_serializer_map=field_serializer_map)
Example #19
    def export_book_loan(self, data):
        # getting report path declared in settings
        base_path = EXPORT_REPORT_URL
        # Initial file name
        file_name = "export_book_loan_"
        try:
            # this if else block defines if the report will be for specific status of loan or for all
            if data.get("status"):
                queryset = BookLoan.objects.filter(status=data["status"])
                file_name += str(data.get("status")) + "_"
            else:
                queryset = BookLoan.objects.all()
                file_name += "all_"

            # adding timestamp to file name to make unique file name
            timestamp = round(datetime.datetime.now().timestamp())
            file_name += str(timestamp) + ".csv"
            file_path = base_path + file_name
            # file_path carries a leading character (presumably a URL-style
            # '/' or '.') that is stripped to open it as a filesystem path
            with open(file_path[1:], 'wb') as csv_file:
                write_csv(queryset, csv_file)
            return {"file_path": file_path}
        except Exception:
            return {"error": "Invalid Book Loan Data or file generation failed"}
Example #20
def downloadDB(request):
	""" Download database contents as a zipped folder of CSVs
	"""
	#magically get a list of all models
	models = DBManager.get_model_list()
	home_dir = FileSystemManager.getHomeDir()
	file_list = []

	for model in models:
		if model._meta.verbose_name == "intersection task":
			#it doesn't make sense to dump these
			continue
		#get all objects for this model
		queryset = DBManager.dump_model(model)
		#write 1 csv per model, temporarily to homedir
		with open(home_dir + "/" + model._meta.verbose_name + ".csv", "wb") as file:
			djqscsv.write_csv(queryset, file, use_verbose_names=False)
			file_list.append(file.name)

	#make sure there isn't an old copy of this file laying around
	if os.path.exists(home_dir + "/db.zip"):
		os.remove(home_dir + "/db.zip")

	#create zip
	FileSystemManager.zip(file_list, "db.zip")

	#wrap up and respond
	with open(home_dir + "/db.zip", "rb") as file:
		wrapper = FileWrapper(file)
		response = HttpResponse(wrapper)
		response['Content-Type'] = "application/zip"
		response['Content-Disposition'] = 'attachment; filename=db.zip'
		#clean up
		for file in file_list:
			os.remove(file)
		return response
Example #21
def export(filename, export_type, task_id=None):
    """ export pillbox data in json and csv format """

    accepted_types = ['json', 'yaml', 'xml', 'csv']

    if export_type in accepted_types:

        pills = PillBoxData.objects.all()
        export_path = join(settings.MEDIA_ROOT, 'export')
        check_create_folder(export_path)
        export_file = join(export_path, '%s.%s' % (filename, export_type))

        if exists(export_file):
            i = 0
            while exists(export_file):
                i += 1
                export_file = join(
                    export_path, '%s_%s.%s' % (filename, str(i), export_type))

        with open(export_file, 'w') as e_file:
            if export_type == 'csv':
                write_csv(pills, e_file)
            else:
                data = serializers.serialize(export_type, pills)
                e_file.write(data)

        return 'export/' + ntpath.basename(e_file.name)

    else:
        raise Exception(
            "Incorrect Export Type. Accepted types: 'json', 'yaml', 'xml', 'csv'"
        )
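
check_create_folder is a project helper not shown in Example #21; a minimal version consistent with how it is called might be:

import os

def check_create_folder(path):
    # create the export directory if it does not exist yet
    os.makedirs(path, exist_ok=True)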
Example #22
    def _prepare_csv(self):
        """
        generate a comma-separated file with the values in the
        :attr:`Email.data` if required via the :attr:`Email.add_csv` attribute
        value

        If the :attr:`data` is empty, the comma-separated file will not be
        created.

        The file will be named by linking the value of the :attr:`email subject
        <p_soc_auto_base.models.Subscription.email_subject>` attribute of the
        :attr:`Email.subscription` instance member with a time stamp.
        The file will be saved under the path described by
        :attr:`p_soc_auto.settings.CSV_MEDIA_ROOT`.
        """
        if not self.add_csv or not self.data:
            return

        filename = 'no_name'
        if self.subscription_obj.email_subject:
            filename = self.subscription_obj.email_subject.\
                replace('in less than', 'soon').\
                replace(' ', '_')

        filename = '{}{:%Y_%m_%d-%H_%M_%S}_{}.csv'.format(
            settings.CSV_MEDIA_ROOT, timezone.localtime(value=timezone.now()),
            filename)

        with open(filename, 'wb') as csv_file:
            write_csv(self.data.values(*self.headers.keys()),
                      csv_file,
                      field_header_map=self.headers)

        LOG.debug('attachment %s ready', filename)

        self.csv_file = filename
Example #23
def async_csv_export(job, model, query, display_filters):
    instance = job.instance

    if model == 'species':
        initial_qs = (Species.objects.filter(instance=instance))

        extra_select, values = extra_select_and_values_for_model(
            instance, job, 'treemap_species', 'Species')
        ordered_fields = values + list(extra_select.keys())
        limited_qs = initial_qs.extra(select=extra_select)\
                               .values(*ordered_fields)
    else:
        # model == 'tree'

        # TODO: if an anonymous job with the given query has been
        # done since the last update to the audit records table,
        # just return that job

        # get the plots for the provided
        # query and turn them into a tree queryset
        initial_qs = Filter(query, display_filters, instance)\
            .get_objects(Tree)

        extra_select_tree, values_tree = extra_select_and_values_for_model(
            instance, job, 'treemap_tree', 'Tree')
        extra_select_plot, values_plot = extra_select_and_values_for_model(
            instance, job, 'treemap_mapfeature', 'Plot', prefix='plot')
        extra_select_sp, values_sp = extra_select_and_values_for_model(
            instance, job, 'treemap_species', 'Species', prefix='species')

        if 'plot__geom' in values_plot:
            values_plot = [f for f in values_plot if f != 'plot__geom']
            values_plot += ['plot__geom__x', 'plot__geom__y']

        get_ll = 'ST_Transform(treemap_mapfeature.the_geom_webmercator, 4326)'
        extra_select = {
            'plot__geom__x': 'ST_X(%s)' % get_ll,
            'plot__geom__y': 'ST_Y(%s)' % get_ll
        }

        extra_select.update(extra_select_tree)
        extra_select.update(extra_select_plot)
        extra_select.update(extra_select_sp)

        ordered_fields = (sorted(values_tree) + sorted(values_plot) +
                          sorted(values_sp))

        if ordered_fields:
            limited_qs = initial_qs.extra(select=extra_select)\
                                   .values(*ordered_fields)
        else:
            limited_qs = initial_qs.none()

    if not initial_qs.exists():
        job.status = ExportJob.EMPTY_QUERYSET_ERROR

    # if the initial queryset was not empty but the limited queryset
    # is empty, it means that there were no fields which the user
    # was allowed to export.
    elif not limited_qs.exists():
        job.status = ExportJob.MODEL_PERMISSION_ERROR
    else:
        csv_file = TemporaryFile()
        write_csv(limited_qs, csv_file, field_order=ordered_fields)
        job.complete_with(generate_filename(limited_qs), File(csv_file))

    job.save()
Example #24
def simple_async_csv(job, qs):
    file_obj = TemporaryFile()
    write_csv(qs, file_obj)
    job.complete_with(generate_filename(qs), File(file_obj))
    job.save()
Example #25
def async_csv_export(job, model, query, display_filters):
    instance = job.instance

    select = OrderedDict()
    select_params = []
    field_header_map = {}
    field_serializer_map = {}
    if model == 'species':
        initial_qs = (Species.objects.
                      filter(instance=instance))
        values = _values_for_model(instance, job, 'treemap_species',
                                   'Species', select, select_params)
        field_names = values + list(select.keys())
        limited_qs = (initial_qs
                      .extra(select=select,
                             select_params=select_params)
                      .values(*field_names))
    else:
        # model == 'tree'

        # TODO: if an anonymous job with the given query has been
        # done since the last update to the audit records table,
        # just return that job

        # get the plots for the provided
        # query and turn them into a tree queryset
        initial_qs = Filter(query, display_filters, instance)\
            .get_objects(Plot)

        tree_fields = _values_for_model(
            instance, job, 'treemap_tree', 'Tree',
            select, select_params,
            prefix='tree')
        plot_fields = _values_for_model(
            instance, job, 'treemap_mapfeature', 'Plot',
            select, select_params)
        species_fields = _values_for_model(
            instance, job, 'treemap_species', 'Species',
            select, select_params,
            prefix='tree__species')

        if 'geom' in plot_fields:
            plot_fields = [f for f in plot_fields if f != 'geom']
            plot_fields += ['geom__x', 'geom__y']

        if tree_fields:
            select['tree_present'] = "treemap_tree.id is not null"
            plot_fields += ['tree_present']

        get_ll = 'ST_Transform(treemap_mapfeature.the_geom_webmercator, 4326)'
        select['geom__x'] = 'ST_X(%s)' % get_ll
        select['geom__y'] = 'ST_Y(%s)' % get_ll

        plot_fields += ['updated_by__username']

        field_names = set(tree_fields + plot_fields + species_fields)

        if field_names:
            field_header_map = _csv_field_header_map(field_names)
            field_serializer_map = _csv_field_serializer_map(instance,
                                                             field_names)
            limited_qs = (initial_qs
                          .extra(select=select,
                                 select_params=select_params)
                          .values(*field_header_map.keys()))
        else:
            limited_qs = initial_qs.none()

    if not initial_qs.exists():
        job.status = ExportJob.EMPTY_QUERYSET_ERROR

    # if the initial queryset was not empty but the limited queryset
    # is empty, it means that there were no fields which the user
    # was allowed to export.
    elif not limited_qs.exists():
        job.status = ExportJob.MODEL_PERMISSION_ERROR
    else:
        csv_file = TemporaryFile()
        write_csv(limited_qs, csv_file,
                  field_order=field_header_map.keys(),
                  field_header_map=field_header_map,
                  field_serializer_map=field_serializer_map)
        filename = generate_filename(limited_qs).replace('plot', 'tree')
        job.complete_with(filename, File(csv_file))

    job.save()
Example #26
def simple_async_csv(job, qs):
    file_obj = TemporaryFile()
    write_csv(qs, file_obj)
    job.complete_with(generate_filename(qs), File(file_obj))
    job.save()
Example #27
from djqscsv import render_to_csv_response, write_csv
from mpa.models import Mpa

qs = Mpa.objects.filter(country='PLW')

with open('/Users/russmo/Desktop/palau_mpatlas_20141128.csv', 'w') as csv_file:
	write_csv(qs, csv_file, use_verbose_names=False)

Example #28
def daterange(request):
    try:
        datetime.datetime.strptime(request.POST['datepicker'], '%m/%d/%Y')
    except (KeyError, ValueError):
        return render(
            request,
            'vaccine/dashboard2.html',
            {
                # 'error_message': "Select date",
            })
    try:
        one = request.POST['datepicker']
        # print(request.POST['datepicker2'])
        #two = request.POST['datepicker2']
        one = datetime.datetime.strptime(one, '%m/%d/%Y')
        # two = datetime.datetime.strptime(two,'%m/%d/%Y')
    except KeyError:
        # Redisplay the form with an error message.
        return render(request, 'vaccine/dashboard2.html', {
            'error_message':
            "Please select dates from the calendar options.",
        })

    def filterData(dateStart, dateEnd):
        filteredOnRange = Personmini.objects.filter(
            datevaccinatednumone__range=[dateStart, dateEnd])
        return filteredOnRange

    def makeDate(datetimeobj):
        # format as zero-padded YYYY-MM-DD
        return datetimeobj.strftime('%Y-%m-%d')

    def filterDate(date):
        # match records whose first-dose date starts with the YYYY-MM-DD string
        x = makeDate(date)
        filteredDate = Personmini.objects.filter(
            datevaccinatednumone__startswith=x)
        return filteredDate

    # vaccinated_people = filterData(one, two)
    # print(makeDate(one))
    vaccinated_people = filterDate(one)
    ## ADD TABLE QUERIES AND MAKE CSV
    # error_message = " "
    # context = {'error_message': error_message}
    with open('vaccinated_data.csv', 'wb') as csv_file:
        write_csv(vaccinated_people, csv_file)
    return render_to_csv_response(vaccinated_people)
Example #29
File: tasks.py Project: atogle/OTM2
def csv_export(job_pk, model, query, display_filters):
    job = ExportJob.objects.get(pk=job_pk)
    instance = job.instance

    if model == 'species':
        initial_qs = (Species.objects.
                      filter(instance=instance))

        extra_select, values = extra_select_and_values_for_model(
            instance, job, 'treemap_species', 'species')
        ordered_fields = values + list(extra_select.keys())
        limited_qs = initial_qs.extra(select=extra_select)\
                               .values(*ordered_fields)
    else:
        # model == 'tree'

        # TODO: if an anonymous job with the given query has been
        # done since the last update to the audit records table,
        # just return that job

        # get the plots for the provided
        # query and turn them into a tree queryset
        initial_qs = Filter(query, display_filters, instance)\
            .get_objects(Tree)

        extra_select_tree, values_tree = extra_select_and_values_for_model(
            instance, job, 'treemap_tree', 'Tree')
        extra_select_plot, values_plot = extra_select_and_values_for_model(
            instance, job, 'treemap_mapfeature', 'Plot',
            prefix='plot')
        extra_select_sp, values_sp = extra_select_and_values_for_model(
            instance, job, 'treemap_species', 'Species',
            prefix='species')

        if 'plot__geom' in values_plot:
            values_plot = [f for f in values_plot if f != 'plot__geom']
            values_plot += ['plot__geom__x', 'plot__geom__y']

        extra_select = {'plot__geom__x':
                        'ST_X(treemap_mapfeature.the_geom_webmercator)',
                        'plot__geom__y':
                        'ST_Y(treemap_mapfeature.the_geom_webmercator)'}

        extra_select.update(extra_select_tree)
        extra_select.update(extra_select_plot)
        extra_select.update(extra_select_sp)

        ordered_fields = (sorted(values_tree) +
                          sorted(values_plot) +
                          sorted(values_sp))

        if ordered_fields:
            limited_qs = initial_qs.extra(select=extra_select)\
                                   .values(*ordered_fields)
        else:
            limited_qs = initial_qs.none()

    if not initial_qs.exists():
        job.status = ExportJob.EMPTY_QUERYSET_ERROR

    # if the initial queryset was not empty but the limited queryset
    # is empty, it means that there were no fields which the user
    # was allowed to export.
    elif not limited_qs.exists():
        job.status = ExportJob.MODEL_PERMISSION_ERROR
    else:
        csv_file = TemporaryFile()

        write_csv(limited_qs, csv_file, field_order=ordered_fields)

        csv_name = generate_filename(limited_qs)
        job.outfile.save(csv_name, File(csv_file))
        job.status = ExportJob.COMPLETE

    job.save()
Example #30
def async_csv_export(job, model, query, display_filters):
    instance = job.instance

    select = OrderedDict()
    select_params = []
    field_header_map = {}
    if model == 'species':
        initial_qs = (Species.objects.filter(instance=instance))
        values = _values_for_model(instance, job, 'treemap_species', 'Species',
                                   select, select_params)
        field_names = values + list(select.keys())
        limited_qs = (initial_qs.extra(
            select=select, select_params=select_params).values(*field_names))
    else:
        # model == 'tree'

        # TODO: if an anonymous job with the given query has been
        # done since the last update to the audit records table,
        # just return that job

        # get the plots for the provided
        # query and turn them into a tree queryset
        initial_qs = Filter(query, display_filters, instance)\
            .get_objects(Plot)

        tree_fields = _values_for_model(instance,
                                        job,
                                        'treemap_tree',
                                        'Tree',
                                        select,
                                        select_params,
                                        prefix='tree')
        plot_fields = _values_for_model(instance, job, 'treemap_mapfeature',
                                        'Plot', select, select_params)
        species_fields = _values_for_model(instance,
                                           job,
                                           'treemap_species',
                                           'Species',
                                           select,
                                           select_params,
                                           prefix='tree__species')

        if 'geom' in plot_fields:
            plot_fields = [f for f in plot_fields if f != 'geom']
            plot_fields += ['geom__x', 'geom__y']

        if tree_fields:
            select['tree_present'] = "treemap_tree.id is not null"
            plot_fields += ['tree_present']

        get_ll = 'ST_Transform(treemap_mapfeature.the_geom_webmercator, 4326)'
        select['geom__x'] = 'ST_X(%s)' % get_ll
        select['geom__y'] = 'ST_Y(%s)' % get_ll

        field_names = set(tree_fields + plot_fields + species_fields)

        if field_names:
            field_header_map = _csv_field_header_map(field_names)
            limited_qs = (initial_qs.extra(
                select=select,
                select_params=select_params).values(*field_header_map.keys()))
        else:
            limited_qs = initial_qs.none()

    if not initial_qs.exists():
        job.status = ExportJob.EMPTY_QUERYSET_ERROR

    # if the initial queryset was not empty but the limited queryset
    # is empty, it means that there were no fields which the user
    # was allowed to export.
    elif not limited_qs.exists():
        job.status = ExportJob.MODEL_PERMISSION_ERROR
    else:
        csv_file = TemporaryFile()
        write_csv(limited_qs,
                  csv_file,
                  field_order=field_header_map.keys(),
                  field_header_map=field_header_map)
        filename = generate_filename(limited_qs).replace('plot', 'tree')
        job.complete_with(filename, File(csv_file))

    job.save()
Example #31
def async_csv_export(job, model, query, display_filters):
    instance = job.instance

    select = OrderedDict()
    select_params = []
    if model == "species":
        initial_qs = Species.objects.filter(instance=instance)
        values = values_for_model(instance, job, "treemap_species", "Species", select, select_params)
        ordered_fields = values + list(select.keys())
        limited_qs = initial_qs.extra(select=select, select_params=select_params).values(*ordered_fields)
    else:
        # model == 'tree'

        # TODO: if an anonymous job with the given query has been
        # done since the last update to the audit records table,
        # just return that job

        # get the plots for the provided
        # query and turn them into a tree queryset
        initial_qs = Filter(query, display_filters, instance).get_objects(Tree)

        values_tree = values_for_model(instance, job, "treemap_tree", "Tree", select, select_params)
        values_plot = values_for_model(
            instance, job, "treemap_mapfeature", "Plot", select, select_params, prefix="plot"
        )
        values_sp = values_for_model(
            instance, job, "treemap_species", "Species", select, select_params, prefix="species"
        )

        if "plot__geom" in values_plot:
            values_plot = [f for f in values_plot if f != "plot__geom"]
            values_plot += ["plot__geom__x", "plot__geom__y"]

        get_ll = "ST_Transform(treemap_mapfeature.the_geom_webmercator, 4326)"
        select["plot__geom__x"] = "ST_X(%s)" % get_ll
        select["plot__geom__y"] = "ST_Y(%s)" % get_ll

        ordered_fields = sorted(values_tree) + sorted(values_plot) + sorted(values_sp)

        if ordered_fields:
            limited_qs = initial_qs.extra(select=select, select_params=select_params).values(*ordered_fields)
        else:
            limited_qs = initial_qs.none()

    if not initial_qs.exists():
        job.status = ExportJob.EMPTY_QUERYSET_ERROR

    # if the initial queryset was not empty but the limited queryset
    # is empty, it means that there were no fields which the user
    # was allowed to export.
    elif not limited_qs.exists():
        job.status = ExportJob.MODEL_PERMISSION_ERROR
    else:
        csv_file = TemporaryFile()
        write_csv(limited_qs, csv_file, field_order=ordered_fields)
        job.complete_with(generate_filename(limited_qs), File(csv_file))

    job.save()
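
Taken together, the examples above exercise most of write_csv's keyword arguments. The sketch below collects them in one place; the queryset, path, and field names are placeholders, and the per-argument comments point back to representative examples that use them.

import csv
from djqscsv import write_csv

def export_sketch(qs, path):
    # hedged one-stop sketch of the options demonstrated across this page
    with open(path, 'wb') as fh:
        write_csv(
            qs, fh,
            use_verbose_names=False,                 # Examples #12, #27
            field_order=['id', 'name'],              # Examples #23, #25
            field_header_map={'name': 'Full name'},  # Examples #8, #14
            field_serializer_map={'id': str},        # Examples #6, #16
            quoting=csv.QUOTE_NONNUMERIC,            # Example #6
        )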