Example #1
    def save(self, content):
        prepared = imap(
            lambda tupl: tuple(unicode(x).encode('utf-8') for x in tupl),
            content
        )
        try:
            with open(self.get_output_filename(), 'wb') as output:
                csvwriter(output).writerows(prepared)
        except IOError:
            stderr.write('Cannot write to {0}'.format(self.output_file))
            exit(1)
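
This first snippet is Python 2 code (imap, unicode, 'wb' mode). A minimal sketch of the imports it appears to assume, inferred from the names it uses rather than taken from the original module:

from itertools import imap           # Python 2 only; map() is already lazy in Python 3
from csv import writer as csvwriter
from sys import stderr, exit         # exit(1) may equally be the builtin in the original
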
    def getSavedFormInputForEdit(self, header=False, delimiter=','):
        """ returns saved as CSV text """
        sbuf = StringIO()
        writer = csvwriter(sbuf, delimiter=delimiter)
        names = self.getColumnNames()
        titles = self.getColumnTitles()

        if header:
            encoded_titles = []
            for t in titles:
                if isinstance(t, unicode):
                    t = t.encode('utf-8')
                encoded_titles.append(t)
            writer.writerow(encoded_titles)
        for row in self.getSavedFormInput():
            def get_data(row, i):
                data = row.get(i, '')
                if self._is_file_data(data):
                    return data.filename
                if isinstance(data, unicode):
                    return data.encode('utf-8')
                return data
            writer.writerow([get_data(row, i) for i in names])
        res = sbuf.getvalue()
        sbuf.close()
        return res
    def get_data_buffer(self, sheetsinfo, policy=None):
        string_buffer = StringIO()
        csvhandler = csvwriter(string_buffer, dialect='excel', delimiter=';')
        sheetsinfo = [s for s in sheetsinfo if len(s['exportables']) > 0]
        for sheetnum, sheetinfo in enumerate(sheetsinfo):
            # title if several tables
            if len(sheetsinfo) >= 2:
                if sheetnum != 0:
                    csvhandler.writerow([''])
                sheet_title = self._format_render(sheetinfo['title'])
                csvhandler.writerow([sheet_title])

            # headers
            headerline = []
            for exportable in sheetinfo['exportables']:
                render = exportable.render_header()
                render = self._format_render(render)
                headerline.append(render)

            csvhandler.writerow(headerline)

            # values
            for obj in sheetinfo['objects']:
                valuesline = []
                for exportable in sheetinfo['exportables']:
                    bound_obj = exportable.field.bind(obj).context if hasattr(exportable, 'field') else obj
                    render = exportable.render_value(bound_obj)
                    render = self._format_render(render)
                    valuesline.append(render)

                if any((v != '' for v in valuesline)):
                    # write the row only if at least one value is non-empty
                    csvhandler.writerow(valuesline)

        return string_buffer
    def getSavedFormInputForEdit(self, header=False, delimiter=","):
        """Returns saved as CSV text"""
        sbuf = StringIO()
        writer = csvwriter(sbuf, delimiter=delimiter)
        names = self.getColumnNames()
        titles = self.getColumnTitles()

        if header:
            encoded_titles = []
            for t in titles:
                if six.PY2 and isinstance(t, six.text_type):
                    t = t.encode("utf-8")
                encoded_titles.append(t)
            writer.writerow(encoded_titles)
        for row in self.getSavedFormInput():

            def get_data(row, i):
                data = row.get(i, "")
                if is_file_data(data):
                    data = data.filename
                if six.PY2 and isinstance(data, six.text_type):
                    return data.encode("utf-8")
                return data

            row_data = [get_data(row, i) for i in names]
            writer.writerow(row_data)
        res = sbuf.getvalue()
        sbuf.close()
        return res
Example #5
def factory(filename, cfg):
    # TODO use cfg to set up columns
    fp = open(filename, 'a')
    writer = csvwriter(fp)

    def handler(event):
        if event.name != cfg.name:
            return  # Not the droid we're looking for

        # If it is the equipment we're looking for, check whether
        # we need to record a value for any of the columns
        try:
            for column, structure in cfg.columns.items():
                if column != event.unit:
                    continue
                structure.record(event.timestamp, event.value)

                if structure.ts == 0:  # First time
                    structure.ts = event.timestamp
                    continue
                if structure.ts >= event.timestamp - cfg.interval:  # Not time yet
                    continue

                if structure.treat is not None:
                    value = structure.treat()
                else:
                    value = event.value
                writer.writerow([event.timestamp, column, value])
                structure.ts = event.timestamp
            fp.flush()
        except Exception, e:
            logger.error(str(e))
Example #7
    def getSavedFormInputForEdit(self, header=False, delimiter=','):
        """ returns saved as CSV text """
        sbuf = StringIO()
        writer = csvwriter(sbuf, delimiter=delimiter)
        names = self.getColumnNames()
        titles = self.getColumnTitles()

        if header:
            encoded_titles = []
            for t in titles:
                if isinstance(t, unicode):
                    t = t.encode('utf-8')
                encoded_titles.append(t)
            writer.writerow(encoded_titles)
        for row in self.getSavedFormInput():

            def get_data(row, i):
                data = row.get(i, '')
                if is_file_data(data):
                    data = data.filename
                if isinstance(data, unicode):
                    return data.encode('utf-8')
                return data

            writer.writerow([get_data(row, i) for i in names])
        res = sbuf.getvalue()
        sbuf.close()
        return res
Example #8
    def get_attachments(self, fields, request):
        """Return all attachments uploaded in form.
        """

        attachments = []

        # if requested, generate CSV attachment of form values
        sendCSV = getattr(self, 'sendCSV', None)
        if sendCSV:
            csvdata = []
        sendXML = getattr(self, 'sendXML', None)
        if sendXML:
            xmlRoot = ET.Element("form")
        for fname in fields:
            field = fields[fname]
            showFields = getattr(self, 'showFields', []) or []

            if sendCSV:
                if not is_file_data(field) and (getattr(self, 'showAll', True)
                                                or fname in showFields):
                    csvdata.append(field)

            if sendXML:
                if not is_file_data(field) and (getattr(self, 'showAll', True)
                                                or fname in showFields):
                    ET.SubElement(xmlRoot, "field", name=fname).text \
                        = self.serialize(field)

            if is_file_data(field) and (getattr(self, 'showAll', True)
                                        or fname in showFields):
                data = field.data
                filename = field.filename
                mimetype, enc = guess_content_type(filename, data, None)
                attachments.append((filename, mimetype, enc, data))

        if sendCSV:
            output = BytesIO()
            writer = csvwriter(output)
            writer.writerow(csvdata)
            csv = output.getvalue()
            now = DateTime().ISO().replace(' ', '-').replace(':', '')
            filename = 'formdata_{0}.csv'.format(now)
            attachments.append((filename, 'text/plain', 'utf-8', csv))

        if sendXML:
            xmlstr = ET.tostring(xmlRoot, encoding='utf8', method='xml')
            now = DateTime().ISO().replace(' ', '-').replace(':', '')
            filename = 'formdata_{0}.xml'.format(now)
            attachments.append((filename, 'text/xml', 'utf-8', xmlstr))

        return attachments
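
A note on the CSV step above: writing csv rows into a BytesIO only works under Python 2, because Python 3's csv.writer expects a text stream. A minimal, self-contained Python 3 sketch of the same step — the field values are placeholders, not data from the form library:

from io import StringIO
from csv import writer as csvwriter

csvdata = ['value one', 'value two']           # placeholder form values
output = StringIO()
csvwriter(output).writerow(csvdata)
csv_bytes = output.getvalue().encode('utf-8')  # encode before adding as an attachment
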
Example #9
def csvlist(request):

    LOGGER.debug('CSV list accessed', request, request.GET)
    reqd = request.GET.copy()
    try:
        par = g2p(reqd)
        resins = DocumentIndex.objects.using('sphinx').filter(**par).order_by(
            'posted', 'id')
    except:
        raise Http404
    total = resins.count()
    if total > EXLIM:
        return render(
            request, 'exlim.xhtml', {
                'app': APP,
                'page_title': EXLIM_TITLE,
                'limit': EXLIM,
                'total': total,
                'back': reverse('uds:mainpage')
            })
    resins = list(resins.values_list('id', flat=True))
    res = Document.objects.filter(id__in=resins).order_by('posted',
                                                          'id').distinct()
    response = HttpResponse(content_type='text/csv; charset=utf-8')
    response['Content-Disposition'] = 'attachment; filename=Dokumenty.csv'
    writer = csvwriter(response)
    hdr = (
        'Datum vyvěšení',
        'Soud/státní zastupitelství',
        'Popis dokumentu',
        'Spisová značka/číslo jednací',
        'Agenda',
        'Soubory',
    )
    writer.writerow(hdr)
    for item in res:
        files = File.objects.filter(
            document=item).order_by('fileid').distinct()
        dat = (
            '{:%d.%m.%Y}'.format(item.posted),
            item.publisher.name,
            item.desc,
            item.ref,
            item.agenda.desc,
            ';'.join([
                join(REPO_PREFIX, str(fil.fileid), fil.name) for fil in files
            ]),
        )
        writer.writerow(dat)
    return response
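
The Django views in these examples (csvlist, partyexport, procexport, insexport, debtorexport) all exploit the fact that HttpResponse is file-like — it has a write() method — so csv.writer can stream rows straight into the response. A stripped-down sketch of that shared pattern; the view name, filename and column labels are placeholders, not taken from the projects above:

from csv import writer as csvwriter
from django.http import HttpResponse

def export_csv(request):
    response = HttpResponse(content_type='text/csv; charset=utf-8')
    response['Content-Disposition'] = 'attachment; filename=export.csv'
    writer = csvwriter(response)   # HttpResponse exposes write(), so the writer can target it directly
    writer.writerow(('column 1', 'column 2'))
    return response
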
Example #10
def partyexport(request):

    LOGGER.debug('Party export page accessed', request)
    uid = request.user.id
    uname = request.user.username
    res = Party.objects.filter(uid=uid).order_by('party', 'party_opt', 'id').distinct()
    response = HttpResponse(content_type='text/csv; charset=utf-8')
    response['Content-Disposition'] = 'attachment; filename=sur.csv'
    writer = csvwriter(response)
    for item in res:
        dat = (item.party + TEXT_OPTS_CA[item.party_opt],)
        writer.writerow(dat)
    LOGGER.info('User "{}" ({:d}) exported parties'.format(uname, uid), request)
    return response
def writeCsvFile(filename, data, delimiter=","):
    dirname = ospath.dirname(filename)
    directory = dirname if dirname != "" else "."
    if ospath.exists(directory):
        try:
            filehandle = open(filename, "w")
            writer = csvwriter(filehandle, delimiter=delimiter)
            writer.writerows(data)
            filehandle.close()
            return True, "File written."
        except:
            return False, "ERROR: %s" % str(format_exc())
    else:
        return False, "ERROR: Directory \"%s\" does not exist." % directory
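
A short usage example for writeCsvFile; the file name and rows are invented for illustration:

ok, message = writeCsvFile("example.csv", [("name", "count"), ("alpha", 1), ("beta", 2)])
if not ok:
    print(message)
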
Example #12
def csvlist(request):

    LOGGER.debug('CSV list accessed', request, request.GET)
    reqd = request.GET.copy()
    try:
        par = g2p(reqd)
        res = Hearing.objects.filter(**par).order_by('time', 'pk').distinct()
    except:
        raise Http404
    total = res.count()
    if total > EXLIM:
        return render(
            request,
            'exlim.xhtml',
            {'app': APP,
             'page_title': EXLIM_TITLE,
             'limit': EXLIM,
             'total': total,
             'back': reverse('psj:mainpage')})
    response = HttpResponse(content_type='text/csv; charset=utf-8')
    response['Content-Disposition'] = 'attachment; filename=Jednani.csv'
    writer = csvwriter(response)
    hdr = (
        'Soud',
        'Jednací síň',
        'Datum',
        'Čas',
        'Spisová značka',
        'Řešitel',
        'Účastníci řízení',
        'Druh jednání',
        'Neveřejné',
        'Zrušeno',
    )
    writer.writerow(hdr)
    for item in res:
        dat = (
            item.courtroom.court.name,
            item.courtroom.desc,
            '{:%d.%m.%Y}'.format(item.time),
            '{:%H:%M}'.format(item.time),
            composeref(item.senate, item.register, item.number, item.year),
            item.judge.name,
            ';'.join([p['name'] for p in item.parties.values()]),
            item.form.name,
            'ano' if item.closed else 'ne',
            'ano' if item.cancelled else 'ne',
        )
        writer.writerow(dat)
    return response
Example #13
def datawriter(exportpath, exportdata):
    global currplane
    writeme = (exportpath, currplane + 1, cellnum, indexnum + 1) + exportdata
    try:
        with open(savedir, 'a', newline="\n", encoding="utf-8") as f:
            mainwriter = csvwriter(f)
            mainwriter.writerow(writeme)
    except AttributeError:
        logevent("Directory appears to be invalid")
    except PermissionError:
        logevent(
            "Unable to write to save file. Please check write permissions.")
    except OSError:
        logevent("OSError, failed to write to save file.")
Example #14
 def getSavedFormInputForEdit(self, header=False, delimiter=','):
     """ returns saved as CSV text """
     sbuf = StringIO()
     writer = csvwriter(sbuf, delimiter=delimiter)
     names = self.getColumnNames()
     if header:
         writer.writerow(names)
     for row in self.getSavedFormInput():
         writer.writerow([
             row[i].filename if INamedFile.providedBy(
                 row.get(i, '')) else row.get(i, '')
             for i in names])
     res = sbuf.getvalue()
     sbuf.close()
     return res
    def exportToFile(self, targetFile, clearFile=True):
        if len(self.values) == 0:
            return 0

        with open(targetFile, 'wb' if clearFile else 'ab') as f:
            csvfile = csvwriter(f, delimiter=self.fielddelimiter, quotechar=self.quotechar)

            quotename = '{0}_{1}'.format(self.quoteprefix, self.__serie)
            data = [(quotename, qdate.strftime(self.dateformat), qvalue)
                    for qdate, qvalue in sorted(self.values.items())]

            # write CSV
            csvfile.writerows(data)

            return len(data)
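
The 'wb'/'ab' modes mark exportToFile as Python 2 code; under Python 3, csv.writer needs a text-mode file opened with newline=''. A standalone sketch of the same idea adapted for Python 3 — the function name and the delimiter/quotechar defaults are assumptions, not taken from the original class:

from csv import writer as csvwriter

def export_rows(target_file, rows, clear_file=True, delimiter=';', quotechar='"'):
    # Python 3 analogue of exportToFile above: text mode plus newline='' replaces 'wb'/'ab'.
    mode = 'w' if clear_file else 'a'
    with open(target_file, mode, newline='') as f:
        csvwriter(f, delimiter=delimiter, quotechar=quotechar).writerows(rows)
    return len(rows)
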
Example #16
 def getSavedFormInputForEdit(self, header=False, delimiter=','):
     """ returns saved as CSV text """
     sbuf = StringIO()
     writer = csvwriter(sbuf, delimiter=delimiter)
     names = self.getColumnNames()
     if header:
         writer.writerow(names)
     for row in self.getSavedFormInput():
         writer.writerow([
             row[i].filename
             if INamedFile.providedBy(row.get(i, '')) else row.get(i, '')
             for i in names
         ])
     res = sbuf.getvalue()
     sbuf.close()
     return res
Example #17
def comprobar_rutas(data_file, out_dir, log_file):

    if not os.path.exists(data_file):
        print('\n ¡¡¡no existe el fichero de datos!!! \n')
        exit(0)

    if not os.path.exists(out_dir):
        os.mkdir(out_dir)

    # if the log file does not exist, create it
    if not os.path.exists(log_file):
        logs_aux = open_io(log_file, "x")
        logger_aux = csvwriter(logs_aux, delimiter=',')
        logger_aux.writerow(
            ["Tipo", "Texto", "Clave_Imagen", "URL", "Nombre_Fichero"])
        logs_aux.close()
Example #18
def partyexport(request):

    LOGGER.debug('Party export page accessed', request)
    uid = request.user.id
    uname = request.user.username
    res = Party.objects.filter(uid=uid).order_by('party', 'party_opt',
                                                 'id').distinct()
    response = HttpResponse(content_type='text/csv; charset=utf-8')
    response['Content-Disposition'] = 'attachment; filename=sur.csv'
    writer = csvwriter(response)
    for item in res:
        dat = (item.party + TEXT_OPTS_CA[item.party_opt], )
        writer.writerow(dat)
    LOGGER.info('User "{}" ({:d}) exported parties'.format(uname, uid),
                request)
    return response
Example #19
def procexport(request):

    LOGGER.debug('Proceedings export page accessed', request)
    uid = request.user.id
    uname = request.user.username
    res = Proceedings.objects.filter(uid=uid).order_by('desc', 'pk').distinct()
    response = HttpResponse(content_type='text/csv; charset=utf-8')
    response['Content-Disposition'] = 'attachment; filename=szr.csv'
    writer = csvwriter(response)
    for proc in res:
        dat = (proc.desc, proc.court.id,
               composeref(proc.senate, proc.register, proc.number, proc.year))
        writer.writerow(dat)
    LOGGER.info('User "{}" ({:d}) exported proceedings'.format(uname, uid),
                request)
    return response
Example #20
    def _parse_prv(self, prv_path):

        temp_data = [StringIO() for _ in range(len(PRV.record_types))]
        temp_writers = [csvwriter(x) for x in temp_data]

        line_processors = {
            k: getattr(self, "_process_{}line".format(v))
            for k, v in PRV.record_types.items()
        }

        with zipopen(prv_path, "rt") as prv_fh:
            headerline = next(prv_fh)
            self.metadata = _parse_paraver_headerline(headerline)

            # Skip the communicator lines for now
            try:
                skiplines = int(headerline[headerline.rfind("),") + 2 :])
            except ValueError:
                skiplines = 0
            for i in range(skiplines):
                next(prv_fh)

            for line in prv_fh:
                # Skip comment lines
                if line.startswith("#"):
                    continue
                line = [int(x) for x in line.split(":")]
                line_processors[line[0]](line, temp_writers[line[0] - 1])

            for iattr, attrname in PRV.record_types.items():
                temp_data[iattr - 1].seek(0)
                try:
                    setattr(
                        self,
                        attrname,
                        pd.read_csv(
                            temp_data[iattr - 1],
                            names=PRV.colnames[attrname],
                            dtype=PRV.coltypes[attrname],
                        ),
                    )
                    getattr(self, attrname).set_index(
                        ["task", "thread", "time"], inplace=True
                    )
                    getattr(self, attrname).sort_index(inplace=True)
                except pd.errors.EmptyDataError:
                    setattr(self, attrname, None)
Example #21
    def export_time_series():
        q = session.query(ExperimentData).filter_by(experiment_id = config['experiment_id'])

        if q.count() == 0:
            print "Could not find any non-empty time series with experiment id \"%s\"" % config['experiment_id']
            return

        with open(config['output_filename'], "wb") as f:
            print "Exporting data in csv format to file \"%s\"..." % config['output_filename']
            from csv import writer as csvwriter
            w = csvwriter(f, delimiter=",")
            i = 0
            for r in q:
                w.writerow([r.time, r.voltage, r.current, r.power, r.temperature, r.cpus_online])
                i += 1
        print "Exported %i samples." % i
        print "Row order: time, voltage, current, power, temperature, cpus_online"
    def __call__(self):
        string_buffer = StringIO()
        csvhandler = csvwriter(string_buffer, dialect='excel', delimiter=';')
        policy = self.request.get('excelexport.policy', '')
        datasource = getMultiAdapter((self.context, self.request),
                                     interface=IDataSource, name=policy)
        self.set_headers(datasource)
        sheetsinfo = datasource.get_sheets_data()

        sheetsinfo = [s for s in sheetsinfo if len(s['exportables']) > 0]
        for sheetnum, sheetinfo in enumerate(sheetsinfo):
            # title if several tables
            if len(sheetsinfo) >= 2:
                if sheetnum != 0:
                    csvhandler.writerow([''])
                sheet_title = self._format_render(sheetinfo['title'])
                csvhandler.writerow([sheet_title])

            # headers
            headerline = []
            for exportable in sheetinfo['exportables']:
                render = exportable.render_header()
                render = self._format_render(render)
                headerline.append(render)

            csvhandler.writerow(headerline)

            # values
            for obj in sheetinfo['objects']:
                valuesline = []
                for exportable in sheetinfo['exportables']:
                    bound_obj = obj
                    if hasattr(exportable, 'field'):
                        bound_obj = exportable.field.bind(obj).context

                    render = exportable.render_value(bound_obj)
                    render = self._format_render(render)
                    valuesline.append(render)

                if any((v != '' for v in valuesline)):
                    # write the row only if at least one value is non-empty
                    csvhandler.writerow(valuesline)

        return string_buffer.getvalue()
Example #23
def csvlist(request):

    LOGGER.debug('CSV list accessed', request, request.GET)
    reqd = request.GET.copy()
    try:
        par = g2p(reqd)
        res = Decision.objects.filter(**par).order_by('date', 'pk').distinct()
    except:
        raise Http404
    total = res.count()
    if total > EXLIM:
        return render(
            request,
            'exlim.xhtml',
            {'app': APP,
             'page_title': EXLIM_TITLE,
             'limit': EXLIM,
             'total': total,
             'back': reverse('udn:mainpage')})
    response = HttpResponse(content_type='text/csv; charset=utf-8')
    response['Content-Disposition'] = 'attachment; filename=Rozhodnuti.csv'
    writer = csvwriter(response)
    hdr = (
        'Soud',
        'Datum',
        'Číslo jednací',
        'Oblast',
        'Účastníci řízení',
        'Zkrácené znění',
        'Anonymisované znění',
    )
    writer.writerow(hdr)
    for item in res:
        dat = (
            SUPREME_ADMINISTRATIVE_COURT_NAME,
            '{:%d.%m.%Y}'.format(item.date),
            composeref(item.senate, item.register, item.number, item.year, item.page),
            item.agenda.desc,
            ';'.join([par['name'] for par in item.parties.values()]),
            join(REPO_PREFIX, item.filename),
            join(REPO_PREFIX, item.anonfilename) if item.anonfilename else '',
        )
        writer.writerow(dat)
    return response
Example #24
def insexport(request):

    LOGGER.debug('Proceedings export page accessed', request)
    uid = request.user.id
    uname = request.user.username
    res = Insolvency.objects.filter(uid=uid).order_by('desc', 'pk').distinct()
    response = HttpResponse(content_type='text/csv; charset=utf-8')
    response['Content-Disposition'] = 'attachment; filename=sir.csv'
    writer = csvwriter(response)
    for idx in res:
        dat = (
            idx.desc,
            str(idx.number),
            str(idx.year),
            'ano' if idx.detailed else 'ne',
        )
        writer.writerow(dat)
    LOGGER.info('User "{}" ({:d}) exported proceedings'.format(uname, uid), request)
    return response
Example #25
async def run(r):
    url = "http://api.myntra.com/assorted/v1/urlrouter/?path={}"
    baseUrl = 'https://www.jabong.com/'
    tasks = []

    # Fetch all responses within one Client session,
    # keep connection alive for all requests.
    connector = TCPConnector(verify_ssl=False)
    async with ClientSession(connector=connector) as session:
        data = list()
        dataappend = data.append
        # with open('2000URLs', 'r', newline='', encoding='utf-8') as csvfile:
        #     reader = csvreader(csvfile, lineterminator='\n', delimiter=',', quotechar='"',
        #                        escapechar='\\', doublequote=False, quoting=QUOTE_NONE, strict=True)
        reader = pandas.read_csv('./SeoData(4473).csv')
        #rows = reader['Old URL']
        #fetcher = len(rows) / 1000
        #while i <= fetcher:
        for row in reader['Old URL']:
            dataappend(row)
            #queryparams = (quote(i) for i in row)
            urlAppendStr = row.replace(baseUrl, '')
            updatedUrl = url.format(quote_plus(urlAppendStr.strip('/')))
            #print(updatedUrl)
            task = asyncio.ensure_future(
               fetch(updatedUrl, session))
            tasks.append(task)

        responses = await asyncio.gather(*tasks)
        # you now have all response bodies in this variable
        with open('OldUrlToNewUrl.csv', 'w+', newline='', encoding='utf-8', buffering=1) as csvoutfile:
            writer = csvwriter(csvoutfile, lineterminator='\n', delimiter=',', quotechar='"',
                               escapechar='\\', doublequote=False, quoting=QUOTE_NONE, strict=True)
            index = 0
            for x in data:
                try:
                    jsonStr = jsonload(responses[index])
                    if jsonStr["data"]:
                        a = True
                    writer.writerow([x, getNewUrl(baseUrl, jsonStr["data"])])
                except:
                    writer.writerow([x, "failed"])
                index += 1
Example #26
def insexport(request):

    LOGGER.debug('Proceedings export page accessed', request)
    uid = request.user.id
    uname = request.user.username
    res = Insolvency.objects.filter(uid=uid).order_by('desc', 'pk').distinct()
    response = HttpResponse(content_type='text/csv; charset=utf-8')
    response['Content-Disposition'] = 'attachment; filename=sir.csv'
    writer = csvwriter(response)
    for idx in res:
        dat = (
            idx.desc,
            str(idx.number),
            str(idx.year),
            'ano' if idx.detailed else 'ne',
        )
        writer.writerow(dat)
    LOGGER.info('User "{}" ({:d}) exported proceedings'.format(uname, uid),
                request)
    return response
Example #27
def headers(logfile):
    global savedir
    savedir = logfile
    headings = ('File', 'Plane', 'Cell ID', 'Spot ID', 'Region Area',
                'Spot Area', 'Spot Average Intensity',
                'Spot Integrated Intensity', 'Perimeter -> Centroid',
                'Perimeter -> Spot', 'Spot -> Centroid', 'Percent Migration')

    try:
        with open(savedir, 'w', newline="\n", encoding="utf-8") as f:
            headerwriter = csvwriter(f)
            headerwriter.writerow(headings)
    except AttributeError:
        logevent("Directory appears to be invalid")
    except PermissionError:
        logevent(
            "Unable to write to save file. Please check write permissions.")
    except OSError:
        logevent("OSError, failed to write to save file.")
Example #28
def csvlist(request):

    LOGGER.debug('CSV list accessed', request, request.GET)
    reqd = request.GET.copy()
    try:
        par = g2p(reqd)
        res = Vec.objects.filter(**par).order_by('firstAction', 'rocnik', 'bc', 'idOsobyPuvodce').distinct()
    except:
        raise Http404
    total = res.count()
    if total > EXLIM:
        return render(
            request,
            'exlim.xhtml',
            {'app': APP,
             'page_title': EXLIM_TITLE,
             'limit': EXLIM,
             'total': total,
             'back': reverse('pir:mainpage')})
    response = HttpResponse(content_type='text/csv; charset=utf-8')
    response['Content-Disposition'] = 'attachment; filename=Insolvence.csv'
    writer = csvwriter(response)
    hdr = (
        'Soud',
        'Spisová značka',
        'Stav řízení',
    )
    writer.writerow(hdr)
    for item in res:
        dat = (
            L2N[item.idOsobyPuvodce],
            '{}{} INS {:d}/{:d}'.format(
                L2S[item.idOsobyPuvodce],
                ' {:d}'.format(item.senat) if item.senat else '',
                item.bc,
                item.rocnik),
            (S2D[item.druhStavRizeni.desc]
             if item.druhStavRizeni else '(není známo)'),
        )
        writer.writerow(dat)
    return response
Example #29
def csvlist(request):

    LOGGER.debug('CSV list accessed', request, request.GET)
    reqd = request.GET.copy()
    try:
        par = g2p(reqd)
        res = Vec.objects.filter(**par).order_by('firstAction', 'rocnik', 'bc',
                                                 'idOsobyPuvodce').distinct()
    except:
        raise Http404
    total = res.count()
    if total > EXLIM:
        return render(
            request, 'exlim.xhtml', {
                'app': APP,
                'page_title': EXLIM_TITLE,
                'limit': EXLIM,
                'total': total,
                'back': reverse('pir:mainpage')
            })
    response = HttpResponse(content_type='text/csv; charset=utf-8')
    response['Content-Disposition'] = 'attachment; filename=Insolvence.csv'
    writer = csvwriter(response)
    hdr = (
        'Soud',
        'Spisová značka',
        'Stav řízení',
    )
    writer.writerow(hdr)
    for item in res:
        dat = (
            L2N[item.idOsobyPuvodce],
            '{}{} INS {:d}/{:d}'.format(
                L2S[item.idOsobyPuvodce],
                ' {:d}'.format(item.senat) if item.senat else '', item.bc,
                item.rocnik),
            (S2D[item.druhStavRizeni.desc]
             if item.druhStavRizeni else '(není známo)'),
        )
        writer.writerow(dat)
    return response
def check_connection():
    ts = time()
    #https://github.com/sivel/speedtest-cli
    a = popen("python "+resource_filename("homenetcontrol", "check_connection/speedtest-cli.py")+" --simple --server 6474").read()
    lines = a.split('\n')
    if "Cannot" in a:
        d = '0'
        u = '0'
    else:
        d = lines[1][10:14]
        u = lines[2][8:12]
    date = datetime.fromtimestamp(ts).strftime('%Y.%m.%d %H:%M')
    out_file = open("/var/local/homenetcontrol/data.csv", 'a')
    writer = csvwriter(out_file)
    writer.writerow((ts,d,u))
    out_file.close()
    logger.info(lang.get("check_connection","msg_log") % (date, d, u))
    if "Cannot" in a:
        notif(lang.get("check_connection","msg_nocon") % date)
    elif (eval(d)<5) or (eval(u)<0.5):
        notif(lang.get("check_connection","msg_notif") % (date, d, u))
Example #31
def procexport(request):

    LOGGER.debug('Proceedings export page accessed', request)
    uid = request.user.id
    uname = request.user.username
    res = Proceedings.objects.filter(uid=uid).order_by('desc', 'pk').distinct()
    response = HttpResponse(content_type='text/csv; charset=utf-8')
    response['Content-Disposition'] = 'attachment; filename=szr.csv'
    writer = csvwriter(response)
    for proc in res:
        dat = (
            proc.desc,
            proc.court.id,
            composeref(
                proc.senate,
                proc.register,
                proc.number,
                proc.year)
        )
        writer.writerow(dat)
    LOGGER.info('User "{}" ({:d}) exported proceedings'.format(uname, uid), request)
    return response
    def get_data_buffer(self, sheetsinfo, policy=None):
        string_buffer = StringIO()
        csvhandler = csvwriter(string_buffer, dialect="excel", delimiter=";")
        sheetsinfo = [s for s in sheetsinfo if len(s["exportables"]) > 0]
        for sheetnum, sheetinfo in enumerate(sheetsinfo):
            # title if several tables
            if len(sheetsinfo) >= 2:
                if sheetnum != 0:
                    csvhandler.writerow([""])
                sheet_title = self._format_render(sheetinfo["title"])
                csvhandler.writerow([sheet_title])

            # headers
            headerline = []
            for exportable in sheetinfo["exportables"]:
                render = exportable.render_header()
                render = self._format_render(render)
                headerline.append(render)

            csvhandler.writerow(headerline)

            # values
            for obj in sheetinfo["objects"]:
                valuesline = []
                for exportable in sheetinfo["exportables"]:
                    bound_obj = (
                        exportable.field.bind(obj).context
                        if hasattr(exportable, "field")
                        else obj
                    )
                    render = exportable.render_value(bound_obj)
                    render = self._format_render(render)
                    valuesline.append(render)

                if any((v != "" for v in valuesline)):
                    # write the row only if at least one value is non-empty
                    csvhandler.writerow(valuesline)

        return string_buffer
def check_connection():
    ts = time()
    #https://github.com/sivel/speedtest-cli
    a = popen("python " + resource_filename(
        "homenetcontrol", "check_connection/speedtest-cli.py") +
              " --simple --server 6474").read()
    lines = a.split('\n')
    if "Cannot" in a:
        d = '0'
        u = '0'
    else:
        d = lines[1][10:14]
        u = lines[2][8:12]
    date = datetime.fromtimestamp(ts).strftime('%Y.%m.%d %H:%M')
    out_file = open("/var/local/homenetcontrol/data.csv", 'a')
    writer = csvwriter(out_file)
    writer.writerow((ts, d, u))
    out_file.close()
    logger.info(lang.get("check_connection", "msg_log") % (date, d, u))
    if "Cannot" in a:
        notif(lang.get("check_connection", "msg_nocon") % date)
    elif (eval(d) < 5) or (eval(u) < 0.5):
        notif(lang.get("check_connection", "msg_notif") % (date, d, u))
Example #34
    def export_time_series():
        q = session.query(ExperimentData).filter_by(
            experiment_id=config['experiment_id'])

        if q.count() == 0:
            print "Could not find any non-empty time series with experiment id \"%s\"" % config[
                'experiment_id']
            return

        with open(config['output_filename'], "wb") as f:
            print "Exporting data in csv format to file \"%s\"..." % config[
                'output_filename']
            from csv import writer as csvwriter
            w = csvwriter(f, delimiter=",")
            i = 0
            for r in q:
                w.writerow([
                    r.time, r.voltage, r.current, r.power, r.temperature,
                    r.cpus_online
                ])
                i += 1
        print "Exported %i samples." % i
        print "Row order: time, voltage, current, power, temperature, cpus_online"
Example #35
async def run(r):
    url = "https://gw.jabong.com/v1/search/?url={}"
    tasks = []

    # Fetch all responses within one Client session,
    # keep connection alive for all requests.
    connector = TCPConnector(verify_ssl=False)
    async with ClientSession(connector=connector) as session:
        data = list()
        dataappend = data.append
        # with open('2000URLs', 'r', newline='', encoding='utf-8') as csvfile:
        #     reader = csvreader(csvfile, lineterminator='\n', delimiter=',', quotechar='"',
        #                        escapechar='\\', doublequote=False, quoting=QUOTE_NONE, strict=True)
        reader = pandas.read_csv('./OldUrlToNewUrl.csv')
        for row in reader['old_urls']:
            dataappend(row)
            #queryparams = (quote(i) for i in row)
            urlAppendStr = row.replace('https://www.jabong.com', '')
            task = asyncio.ensure_future(
                fetch(url.format(urlAppendStr), session))
            tasks.append(task)

        responses = await asyncio.gather(*tasks)
        # you now have all response bodies in this variable
        with open('OldUrlCounts.csv', 'w', newline='', encoding='utf-8', buffering=1) as csvoutfile:
            writer = csvwriter(csvoutfile, lineterminator='\n', delimiter=',', quotechar='"',
                               escapechar='\\', doublequote=False, quoting=QUOTE_NONE, strict=True)
            index = 0
            for x in data:
                try:
                    jsonStr = jsonload(responses[index])
                    if jsonStr["data"]:
                        a = True
                    writer.writerow([x, jsonStr["data"]["summary"]["productCnt"]])
                except:
                    writer.writerow([x, "failed"])
                index += 1
Example #36
def debtorexport(request):

    LOGGER.debug('Debtor export page accessed', request)
    uid = request.user.id
    uname = request.user.username
    debtors = Debtor.objects.filter(uid=uid).order_by('desc', 'pk').distinct()
    response = HttpResponse(content_type='text/csv; charset=utf-8')
    response['Content-Disposition'] = 'attachment; filename=dir.csv'
    writer = csvwriter(response)
    for debtor in debtors:
        dat = [debtor.desc]
        if debtor.court:
            dat.append('soud={}'.format(L2S[debtor.court]))
        if debtor.name:
            dat.append('název={}'.format(
                debtor.name + TEXT_OPTS_CA[debtor.name_opt]))
        if debtor.first_name:
            dat.append('jméno={}'.format(
                debtor.first_name + TEXT_OPTS_CA[debtor.first_name_opt]))
        if debtor.genid:
            dat.append('IČO={}'.format(debtor.genid))
        if debtor.taxid:
            dat.append('DIČ={}'.format(debtor.taxid))
        if debtor.birthid:
            dat.append('RČ={}/{}'.format(
                debtor.birthid[:6],
                debtor.birthid[6:]))
        if debtor.date_birth:
            dat.append('datumNarození={0.day:02d}.{0.month:02d}.{0.year:d}'.format(debtor.date_birth))
        if debtor.year_birth_from:
            dat.append('rokNarozeníOd={:d}'.format(debtor.year_birth_from))
        if debtor.year_birth_to:
            dat.append('rokNarozeníDo={:d}'.format(debtor.year_birth_to))
        writer.writerow(dat)
    LOGGER.info('User "{}" ({:d}) exported debtors'.format(uname, uid), request)
    return response
    def get_attachments(self, fields, request):
        """Return all attachments uploaded in form.
        """

        attachments = []

        # if requested, generate CSV attachment of form values
        sendCSV = getattr(self, 'sendCSV', None)
        if sendCSV:
            csvdata = []

        for fname in fields:
            field = fields[fname]
            showFields = getattr(self, 'showFields', []) or []

            if sendCSV:
                if not is_file_data(field) and (getattr(self, 'showAll', True)
                                                or fname in showFields):
                    csvdata.append(field)

            if is_file_data(field) and (getattr(self, 'showAll', True)
                                        or fname in showFields):
                data = field.data
                filename = field.filename
                mimetype, enc = guess_content_type(filename, data, None)
                attachments.append((filename, mimetype, enc, data))

        if sendCSV:
            output = BytesIO()
            writer = csvwriter(output)
            writer.writerow(csvdata)
            csv = output.getvalue()
            now = DateTime().ISO().replace(' ', '-').replace(':', '')
            filename = 'formdata_{0}.csv'.format(now)
            attachments.append((filename, 'text/plain', 'utf-8', csv))
        return attachments
Example #38
def export_to_csv(csv_file, columns, data):

    with open(csv_file, "w", newline='', encoding='utf-8-sig') as csv_file:
        csv_writer = csvwriter(csv_file, delimiter=",", dialect='excel')
        csv_writer.writerow([i for i in columns])
        csv_writer.writerows(data)
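
A brief usage example for export_to_csv; the path, column names and rows are made up for illustration:

columns = ("id", "name")
data = [(1, "alpha"), (2, "beta")]
export_to_csv("report.csv", columns, data)
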
Example #39
    def generate_usgs_avg_daily_flows_opt(self, 
                                          reach_id_gage_id_file,
                                          start_datetime, 
                                          end_datetime,
                                          out_streamflow_file, 
                                          out_stream_id_file):
        """
        Generate daily streamflow file and stream id file required for calibration 
        or for substituting flows based on USGS gage ids associated with stream ids.
        
        Parameters:
            reach_id_gage_id_file(str): Path to reach_id_gage_id file.
            start_datetime(datetime): A datetime object with the start date to download data.
            end_datetime(datetime): A datetime object with the end date to download data.
            out_streamflow_file(str): The path to output the streamflow file for RAPID.
            out_stream_id_file(str): The path to output the stream ID file associated with the streamflow file for RAPID.

        Example *reach_id_gage_id_file*::
        
            COMID, USGS_GAGE_ID
            2000, 503944
            ...

        .. warning:: Overuse will get you blocked from downloading data from USGS.
        
        .. warning:: This code does not clean the data in any way. Thus, you
                     are likely to run into issues if you simply use the raw data.
                     
        .. warning:: The code skips gages that do not have data
                     for the entire time period.
        
        Simple Example:
        
        .. code:: python
        
            import datetime
            from os.path import join
            from RAPIDpy import RAPID
            
            main_path = "/home/username/data"

            rapid_manager = RAPID()
            rapid_manager.generate_usgs_avg_daily_flows_opt(reach_id_gage_id_file=join(main_path,"mississippi_usgsgage_id_comid.csv"),
                                                            start_datetime=datetime.datetime(2000,1,1),
                                                            end_datetime=datetime.datetime(2014,12,31),
                                                            out_streamflow_file=join(main_path,"streamflow_2000_2014.csv"), 
                                                            out_stream_id_file=join(main_path,"streamid_2000_2014.csv"))
                                                            
        
        Complex Example:
        
        .. code:: python
        
            import datetime
            from os.path import join
            from RAPIDpy import RAPID
            
            main_path = "/home/username/data"
                
            rapid_manager = RAPID(rapid_executable_location='~/work/rapid/run/rapid',
                                  use_all_processors=True,
                                  ZS_TauR=24*3600, 
                                  ZS_dtR=15*60, 
                                  ZS_TauM=365*24*3600, 
                                  ZS_dtM=24*3600
                                 )

            rapid_manager.update_parameters(rapid_connect_file='../rapid-io/input/rapid_connect.csv',
                                            Vlat_file='../rapid-io/input/m3_riv.nc',
                                            riv_bas_id_file='../rapid-io/input/riv_bas_id.csv',
                                            k_file='../rapid-io/input/k.csv',
                                            x_file='../rapid-io/input/x.csv',
                                            Qout_file='../rapid-io/output/Qout.nc',
                                            )
                
            rapid_manager.update_reach_number_data()
            rapid_manager.update_simulation_runtime()
            rapid_manager.generate_usgs_avg_daily_flows_opt(reach_id_gage_id_file=join(main_path,"mississippi_usgsgage_id_comid.csv"),
                                                            start_datetime=datetime.datetime(2000,1,1),
                                                            end_datetime=datetime.datetime(2014,12,31),
                                                            out_streamflow_file=join(main_path,"streamflow_2000_2014.csv"), 
                                                            out_stream_id_file=join(main_path,"streamid_2000_2014.csv"))
            rapid_manager.run()

        """
        log("Generating avg streamflow file and stream id file required for calibration ...",
            "INFO")
        reach_id_gage_id_list = csv_to_list(reach_id_gage_id_file)
# USGS not returning tzinfo anymore, so removed tzinfo operations
#       if start_datetime.tzinfo is None or start_datetime.tzinfo.utcoffset(start_datetime) is None:
#            start_datetime = start_datetime.replace(tzinfo=utc)
#        if end_datetime.tzinfo is None or end_datetime.tzinfo.utcoffset(end_datetime) is None:
#            end_datetime = end_datetime.replace(tzinfo=utc)
        gage_data_matrix = []
        valid_comid_list = []
        
        #add extra day as it includes the start date (e.g. 7-5 is 2 days, but have data for 5,6,7, so +1)
        num_days_needed = (end_datetime-start_datetime).days + 1

        gage_id_list = []
        for row in reach_id_gage_id_list[1:]:
            station_id = row[1]
            if len(row[1]) == 7:
                station_id = '0' + row[1]
            gage_id_list.append(station_id)
        
        num_gage_id_list = np.array(gage_id_list, dtype=np.int32)
        log("Querying Server for Data ...", "INFO")
    
        query_params = {
                        'format': 'json',
                        'sites': ",".join(gage_id_list),
# USGS not returning tzinfo anymore, so removed tzinfo operations 
#                        'startDT': start_datetime.astimezone(tzoffset(None, -18000)).strftime("%Y-%m-%d"),
#                        'endDT': end_datetime.astimezone(tzoffset(None, -18000)).strftime("%Y-%m-%d"),
                        'startDT': start_datetime.strftime("%Y-%m-%d"),
                        'endDT': end_datetime.strftime("%Y-%m-%d"),
                        'parameterCd': '00060', #streamflow
                        'statCd': '00003' #average
                       }
        response = get("http://waterservices.usgs.gov/nwis/dv", params=query_params)
        if response.ok:
            data_valid = True
            try:
                requested_data = response.json()['value']['timeSeries']
            except (KeyError, IndexError):
                data_valid = False
            
            if data_valid:
                for time_series in enumerate(requested_data):
                    usgs_station_full_name = time_series[1]['name']
                    usgs_station_id = usgs_station_full_name.split(":")[1]
                    gage_data = []
                    for time_step in time_series[1]['values'][0]['value']:
                        local_datetime = parse(time_step['dateTime'])
                        if local_datetime > end_datetime:
                            break
                        
                        if local_datetime >= start_datetime:
                            if not time_step['value']:
                                log("MISSING DATA for USGS Station {0} {1} {2}".format(usgs_station_id,
                                                                                       local_datetime,
                                                                                       time_step['value']),
                                    "WARNING")
                            gage_data.append(float(time_step['value'])/35.3146667)
    
                    try:
                        # get the index of the stream id associated with this USGS station id
                        streamid_index = np.where(num_gage_id_list == int(float(usgs_station_id)))[0][0] + 1
                    except Exception:
                        log("USGS Station {0} not found in list ...".format(usgs_station_id),
                            "WARNING")
                        raise
                        
                    if len(gage_data) == num_days_needed:
                        gage_data_matrix.append(gage_data)
                        valid_comid_list.append(reach_id_gage_id_list[streamid_index][0])
                    else:
                        log("StreamID {0} USGS Station {1} MISSING {2} DATA VALUES".format(reach_id_gage_id_list[streamid_index][0],
                                                                                           usgs_station_id,
                                                                                           num_days_needed-len(gage_data)),
                            "WARNING")

            if gage_data_matrix and valid_comid_list:
                log("Writing Output ...",
                    "INFO")
                np_array = np.array(gage_data_matrix).transpose()  
                with open_csv(out_streamflow_file, 'w') as gage_data:
                    wf = csvwriter(gage_data)
                    for row in np_array:
                        wf.writerow(row)
                        
                with open_csv(out_stream_id_file, 'w') as comid_data:
                    cf = csvwriter(comid_data)
                    for row in valid_comid_list:
                        cf.writerow([int(float(row))])
                        
                #set parameters for RAPID run
                self.IS_obs_tot = len(valid_comid_list)
                self.obs_tot_id_file = out_stream_id_file
                self.Qobs_file = out_streamflow_file
                self.IS_obs_use = len(valid_comid_list)
                self.obs_use_id_file = out_stream_id_file
            else:
                log("No valid data returned ...",
                    "WARNING")
        else:
            log("USGS query error ...",
                "WARNING")
Example #40
def resolution(s):
    try:
        w, h = map(int, s.split('x'))
        return w, h
    except:
        raise argparse.ArgumentTypeError(
                "Resolution must be <width>x<height>. E.g.: 1200x800")

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Rearrange points in ascending order from bottom center of frame")
    parser.add_argument('-d', '--data', help="Input data file from digitization", 
                        required=True, dest='data_file')
    parser.add_argument('-o', '--output', help="Output rearranged data file", 
                        required=False, dest='output_file', default=None)
    parser.add_argument('-r', '--resolution', help="Frame resolution", 
                        required=False, dest='resolution', default=(1200,800), 
                        type=resolution)
    args = parser.parse_args()
    
    data = readDataCSV(args.data_file, args.resolution[0], args.resolution[1])
    if args.output_file is None:
        output_file = args.data_file[:-4] + ".rearranged.csv"
    else:
        output_file = args.output_file
    with open(output_file, 'w') as output_file_handle:
        out = csvwriter(output_file_handle, delimiter=',')
        out.writerows(data)

# vim: set ai nu et ts=4 sw=4:
Example #41
    def process_line(self, csv, line, settings, fields=None):
        """
        Parse a line: compute added fields, apply filters, pick or delete columns, then split and write to a file.
        """
        try:
            if not fields:
                fields = line.copy()

                if len(fields) != len(csv.first_line_fields):
                    raise ValueError(
                        "Invalid number of line fields: {}".format(
                            len(fields)))

                # add fields
                list_lengths = []
                for col in settings[
                        "addByMethod"]:  # [("netname", 20, [lambda x, lambda x...]), ...]
                    val = fields[col[1]]
                    for l in col[2]:
                        if isinstance(val, list):
                            # resolve all items, while flattening any list encountered
                            val = [
                                y for x in (l(v) for v in val)
                                for y in (x if type(x) is list else [x])
                            ]
                        else:
                            val = l(val)
                    fields.append(val)
                    if isinstance(val, list):
                        list_lengths.append(len(val))
                if list_lengths:  # duplicate rows because we received lists amongst scalars
                    row_count = prod(list_lengths)
                    for i, f in enumerate(
                            fields
                    ):  # 1st col returns 3 values, 2nd 2 values → both should have 3*2 = 6 values
                        if type(f) is not list:
                            fields[i] = [f]
                        fields[i] *= row_count // len(fields[i])
                    it = zip(*fields)
                    fields = it.__next__()
                    for v in it:
                        self.process_line(
                            csv, line, settings, v
                        )  # duplicate row because single lambda produced a list

            # inclusive filter
            for f in settings[
                    "filter"]:  # list of tuples (col, value): [(23, "passed-value"), (13, "another-value")]
                if f[1] != fields[f[0]]:
                    return False

            # unique columns
            if settings["unique"]:
                for u in settings[
                        "unique"]:  # list of uniqued columns [2, 3, 5, ...]
                    if fields[u] in self.unique_sets[u]:  # skip line
                        return False
                else:  # do not skip line
                    for u in settings["unique"]:
                        self.unique_sets[u].add(fields[u])

            # pick or delete columns
            if settings["chosen_cols"]:
                chosen_fields = [fields[i] for i in settings["chosen_cols"]
                                 ]  # chosen_cols = [3, 9, 12]
            else:
                chosen_fields = fields

            # determine location
            if type(settings["split"]) == int:
                location = fields[settings["split"]].replace(
                    "/",
                    "-")  # split ('/' is a forbidden char in linux file names)
                if not location:
                    location = Config.UNKNOWN_NAME
                    chosen_fields = line  # reset to the original line (location will be reprocessed)
            else:
                location = settings["target_file"]
        except BdbQuit:  # BdbQuit and KeyboardInterrupt caught higher
            raise
        except Quota.QuotaExceeded:
            csv.queued_lines_count += 1
            location = Config.QUEUED_NAME
            chosen_fields = line  # reset the original line (location will be reprocessed)
        except Exception as e:
            if Config.is_debug():
                traceback.print_exc()
                Config.get_debugger().set_trace()
            else:
                logger.warning(e, exc_info=True)
            csv.invalid_lines_count += 1
            location = Config.INVALID_NAME
            chosen_fields = line  # reset the original line (location will be reprocessed)

        if not location:
            return
        elif location in self.files_created:
            method = "a"
        else:
            method = "w"
            # print("File created", location, csv.delimiter.join(chosen_fields))
            self.files_created.add(location)

        # choose the right file descriptor for saving
        # (we do not close descriptors immediately, if needed we close the one the least used)
        if location not in self.descriptorsStatsOpen:
            if self.descriptors_count >= self.descriptors_max:  # too many descriptors open, we have to close the least used
                key = min(self.descriptorsStatsOpen,
                          key=self.descriptorsStatsOpen.get)
                self.descriptors[key][0].close()
                # print("Closing", key, self.descriptorsStatsOpen[key])
                del self.descriptorsStatsOpen[key]
                self.descriptors_count -= 1
            # print("Opening", location)

            if location == 2:  # this signals that we store raw data to stdout (not through a CSVWriter)
                t = w = csv.stdout  # custom object that simulates a CSVWriter; it provides .writerow and .close methods
            else:
                if location == 1:  # this signals that we output CSV data to stdout
                    t = io.StringIO()
                else:
                    t = open(Path(Config.get_cache_dir(), location), method)
                w = csvwriter(t, dialect=settings["dialect"])
            self.descriptors[location] = t, w
            self.descriptors_count += 1
        # print("Printing", location)
        self.descriptorsStatsAll[location] += 1
        self.descriptorsStatsOpen[location] = self.descriptorsStatsAll[
            location]
        f = self.descriptors[location]
        if method == "w" and csv.has_header:
            f[0].write(csv.header)
        f[1].writerow(chosen_fields)
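
The comments above describe the row-duplication step: when some of the lambda-computed fields return lists, the row is expanded into row_count = product-of-list-lengths rows by cyclic replication and re-zipping. A standalone illustration of that mechanism (illustrative values, not part of the snippet):

from math import prod

fields = ["a", [1, 2, 3], ["x", "y"]]            # one scalar field, two list-valued fields
row_count = prod(len(f) for f in fields if isinstance(f, list))   # 3 * 2 = 6
columns = [f if isinstance(f, list) else [f] for f in fields]     # wrap scalars into lists
columns = [col * (row_count // len(col)) for col in columns]      # replicate to a common length
print(list(zip(*columns)))                       # 6 rows, e.g. ('a', 1, 'x'), ('a', 2, 'y'), ...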
Beispiel #42
0
print "Content-Type: text/csv\n"

cgienv = cgi.FieldStorage()
try:
    disaster_id = int(cgienv['disaster'].value)
except (KeyError, ValueError):
    print "Missing disaster id"
    sys.exit(1)

conn = pymysql.connect(host='localhost', port=3306, user='******',
                       passwd='1q2w3e4r', db='disastg1_dap')
cursor = conn.cursor()
cursor.execute("""SELECT `T2`.`name_organization`,
                         `T2`.`id`,
                         `T3`.`name_service`,
                         `T3`.`id`,
                         `T1`.`Lat`,
                         `T1`.`Lon`,
                         `T1`.`Magnitude`
                  FROM disasters_organizations_services T1
                  LEFT OUTER JOIN organizations T2
                  ON T1.organization_ID = T2.ID
                  LEFT OUTER JOIN services T3
                  ON T1.service_ID = T3.ID
                  WHERE (`T1`.`disaster_id` = %s)""", (disaster_id,))

spreadsheet = csvwriter(sys.stdout)
spreadsheet.writerow('org_name org_id service_name service_id lat lon mag'.split())
for row in cursor.fetchall():
    spreadsheet.writerow(row)
Beispiel #43
0
async def run(r):
    url = 'http://api.myntra.com/v1/search/'
    tasks = []
    headers = {}
    headers['Accept'] = 'application/json'
    headers['Content-type'] = 'application/json'
    headers['postman-token'] = 'ca64e43a-1444-41bf-099e-f604ac515b52'
    headers['clientid'] = 'jabong-a6aafa2f-ed53-4cb0-8db0-46f8c94f99b5'
    headers[
        'at'] = 'ZXlKaGJHY2lPaUpJVXpJMU5pSXNJbXRwWkNJNklqZ2lMQ0owZVhBaU9pSktWMVFpZlEuZXlKdWFXUjRJam9pTWpSaU56Wm1PV0l0TldVM09DMHhNV1U0TFdKa09XTXRNREl3TVRCaE5UUm1OVGRsSWl3aVkybGtlQ0k2SW1waFltOXVaeTFoTm1GaFptRXlaaTFsWkRVekxUUmpZakF0T0dSaU1DMDBObVk0WXprMFpqazVZalVpTENKaGNIQk9ZVzFsSWpvaWFtRmliMjVuSWl3aWMzUnZjbVZKWkNJNklqUTJNRE1pTENKbGVIQWlPakUxTkRJMk1qVXdOek1zSW1semN5STZJa2xFUlVFaWZRLlZZS1gzZVNlOVRSakVtVkVFX0kxcThvb2l6RWZnMm5IR085Z3ZKeUExV1U='
    headers['X-MYNTRA-KNUTH'] = "yes"

    # Fetch all responses within one Client session,
    # keep connection alive for all requests.
    connector = TCPConnector(verify_ssl=False)
    async with ClientSession(connector=connector, headers=headers) as session:
        data = list()
        dataappend = data.append
        reader = pandas.read_csv('./Top_Search_ga.csv')
        rows = reader['Event Label']
        length = len(rows)
        fetcher = ceil(length / counter)
        i = 0
        offset = 0
        while i < fetcher:
            for j in range(1, counter + 1):
                if (i * counter + j) >= length:
                    break
                row = rows[i * counter + j]
                dataappend(row)
                updatedKey = row.replace('https://www.jabong.com/', '')
                updatedKey = updatedKey.replace(' ', '-')
                task = asyncio.ensure_future(
                    fetch(url + updatedKey + '?o=0&rows=52', session))
                tasks.append(task)

            responses = await asyncio.gather(*tasks)
            # you now have all response bodies in this variable
            with open('NewUrlCounts.csv',
                      'w',
                      newline='',
                      encoding='utf-8',
                      buffering=1) as csvoutfile:
                writer = csvwriter(csvoutfile,
                                   lineterminator='\n',
                                   delimiter=',',
                                   quotechar='"',
                                   escapechar='\\',
                                   doublequote=False,
                                   quoting=QUOTE_NONE,
                                   strict=True)
                index = 0
                for x in data:
                    try:
                        jsonStr = jsonload(responses[index])
                        #print(x, jsonStr.keys())
                        if jsonStr["totalCount"]:
                            a = True
                        writer.writerow([x, jsonStr["totalCount"]])
                    except:
                        writer.writerow([x, responses[index]])
                    index += 1
            i += 1
Beispiel #44
0
def write_to_file(data, f_name):
    writer = csvwriter(open(f_name, 'wb'))
    writer.writerows(data)
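
The helper above follows the Python 2 convention of opening the CSV output in binary mode. A minimal Python 3 equivalent would open a text file with newline='' instead (a sketch with a hypothetical name):

from csv import writer as csvwriter

def write_to_file_py3(data, f_name):
    # csv.writer in Python 3 expects a text-mode file opened with newline=''
    with open(f_name, 'w', newline='', encoding='utf-8') as f:
        csvwriter(f).writerows(data)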
Beispiel #45
0
    def plot_pairs():
        from numpy import array
        from pandas import DataFrame
        from pandas.tools.plotting import scatter_matrix
        from csv import writer as csvwriter
        import matplotlib.pyplot as plt
        from subprocess import call

        q = session.query(ExperimentInfo)

        if config['selected_load'] is not None:
            q = q.filter_by(target_load_level = config['selected_load'])
        q = list(q)

        data = []

        for r in q:
            r.avg_power = get_avg_power(r.experiment_id)
            r.energy = r.avg_power * r.total_time
            r.the_class = 'bad'
            r.target_load_level = float(r.target_load_level)
            # ts = list(session.query(ExperimentData).filter_by(experiment_id = r.experiment_id))
            # for t in ts:
            #     data.append([t.power,
            #                  get_n_cpus(t),
            #                  float(r.target_load_level),
            #                  r.up_threshold])

        get_key_fn = lambda l: {'power': lambda r: ((l - r.target_load_level)**2, r.avg_power),
                                'energy': lambda r: ((l - r.target_load_level)**2, r.energy),
                                'time': lambda r: ((l - r.target_load_level)**2, r.total_time)}[config['selected_column']]

        for l in [0.2, 0.4, 0.6, 0.8]:
            q.sort(key=get_key_fn(l))
            for i in range(10):
                q[i].the_class = 'good'

        for i in range(len(q)):
            r = q[i]
            data.append([r.energy,
                         r.avg_power,
                         r.total_time,
                         r.hotplug_in_load_limit,
                         r.hotplug_out_load_limit,
                         r.hotplug_in_sampling_period,
                         r.up_threshold,
                         r.the_class
                         ])

        data.reverse() # Reverse plotting order in R

        out_data_file = "/tmp/asd.csv"
        out_code_file = "/tmp/asd2.r"
        out_pdf_file  = "/tmp/asd.pdf" if (config['selected_load'] is None) else ("/tmp/asd-%f.pdf" % config['selected_load'])

        r_code = \
"""
d <- read.csv("%s")
pdf("%s")
pairs(d[,1:6] ,pch=19,col=c("red","blue")[unclass(d$c)])
""" % (out_data_file, out_pdf_file)

        # data = map(lambda r: [r.energy,
        #                       r.avg_power,
        #                       r.total_time,
        #                       # float(r.target_load_level),
        #                       r.hotplug_in_load_limit,
        #                       r.hotplug_out_load_limit,
        #                       # r.hotplug_in_sampling_period,
        #                       # r.hotplug_out_sampling_period,
        #                       r.up_threshold,
        #                       # r.sched_olord_lb_upper_limit,
        #                       ], q)

        cols = ['energy',
                'avg. power',
                'total time',
                # 'target load level',
                'hotplug in load limit',
                'hotplug out load limit',
                'hotplug sampling period',
                # 'hotplug in sampling period',
                # 'hotplug out sampling period',
                'up threshold',
                'class'
                # 'sched olord lb upper limit',
                ]

        # cols = ['power',
        #         'cpus online',
        #         'target load level',
        #         'up threshold']

        cols = map(lambda s: ''.join(map(lambda q: q[0], s.split(" "))), cols)

        with open(out_data_file, "wb") as f:
            w = csvwriter(f)
            w.writerow(cols)
            for r in data:
                w.writerow(r)

        with open(out_code_file, "wb") as f:
            f.write(r_code)

        call(["R", '-f', out_code_file])
Beispiel #46
0
from GF_fundo import Fundo
from datetime import date, timedelta
from csv import writer as csvwriter

if __name__ == '__main__':

    csv_delimiter  = ';'
    quote_char     = '"'
    interval       = 20 # years
    
    # Last 20 years interval
    iniDate = date.today() - timedelta(days= 365 * interval)
    endDate = date.today()

    with open('FUNDOS.csv','wb') as f:

        arqcsv = csvwriter(f, delimiter=csv_delimiter,quotechar=quote_char)

        for idfundo in Fundo.FUNDS:
        
            fundoatual = Fundo(idfundo)
            fundoatual.updatePrices(iniDate, endDate)
            
            data = [ ( [idfundo, x] + y ) for x,y in fundoatual.prices.items() ]
            data = sorted(data,key=lambda x: x[1])
            
            arqcsv.writerows(data)
                
            print("Download %s successful!" % ( idfundo ))
        
Beispiel #47
0
    def process_line(self, csv, line, settings):
        """
        Parse a line: compute the added fields, apply filters, pick or delete columns, split and write to a file.
        """
        try:
            fields = line.copy()

            if len(fields) != len(csv.first_line_fields):
                raise ValueError("Invalid number of line fields: {}".format(len(fields)))

            # add fields
            whois = None
            for col in settings["addByMethod"]:  # [("netname", 20, [lambda x, lambda x...]), ...]
                val = fields[col[1]]
                for l in col[2]:
                    val = l(val)
                if isinstance(val, tuple):  # we get whois info-tuple
                    whois = val[0]
                    fields.append(val[1])
                else:
                    fields.append(val)

            # inclusive filter
            for f in settings["filter"]:  # list of tuples (col, value): [(23, "passed-value"), (13, "another-value")]
                if f[1] != fields[f[0]]:
                    return False

            # unique columns
            if settings["unique"]:
                for u in settings["unique"]:  # list of uniqued columns [2, 3, 5, ...]
                    if fields[u] in self.unique_sets[u]:  # skip line
                        return False
                else:  # do not skip line
                    for u in settings["unique"]:
                        self.unique_sets[u].add(fields[u])

            # pick or delete columns
            if settings["chosen_cols"]:
                chosen_fields = [fields[i] for i in settings["chosen_cols"]]  # chosen_cols = [3, 9, 12]
            else:
                chosen_fields = fields

            if whois:
                csv.stats["ipsUnique"].add(whois.ip)
                mail = whois.get[2]
                if whois.get[1] == "local":
                    if mail == "unknown":
                        chosen_fields = line  # reset to the original line (will be reprocessed)
                        csv.stats["ipsCzMissing"].add(whois.ip)
                        csv.stats["czUnknownPrefixes"].add(whois.get[0])
                    else:
                        csv.stats["ipsCzFound"].add(whois.ip)
                        csv.stats["ispCzFound"].add(mail)
                else:
                    country = whois.get[5]
                    if country not in Contacts.csirtmails:
                        csv.stats["ipsWorldMissing"].add(whois.ip)
                        csv.stats["countriesMissing"].add(country)
                    else:
                        csv.stats["countriesFound"].add(country)
                        csv.stats["ipsWorldFound"].add(whois.ip)
                # XX invalidLines if raised an exception

            # split
            location = fields[settings["split"]] if type(settings["split"]) == int else settings["target_file"]
        except Exception as e:
            if isinstance(e, BdbQuit):
                raise  # BdbQuit and KeyboardInterrupt caught higher
            else:
                if Config.is_debug():
                    traceback.print_exc()
                    ipdb.set_trace()
                else:
                    logger.warning(e, exc_info=True)
                csv.invalid_lines_count += 1
                location = Config.INVALID_NAME
                chosen_fields = line  # reset the original line (will be reprocessed)

        if not location:
            return
        elif location in self.files_created:
            method = "a"
        else:
            method = "w"
            # print("File created", location, csv.delimiter.join(chosen_fields))
            self.files_created.add(location)

        # choose the right file descriptor for saving
        # (we do not close descriptors immediately, if needed we close the one the least used)
        if location not in self.descriptorsStatsOpen:
            if self.descriptors_count >= self.descriptors_max:  # too many descriptors open, we have to close the least used
                key = min(self.descriptorsStatsOpen, key=self.descriptorsStatsOpen.get)
                self.descriptors[key][0].close()
                # print("Closing", key, self.descriptorsStatsOpen[key])
                del self.descriptorsStatsOpen[key]
                self.descriptors_count -= 1
            # print("Opening", location)
            t = open(Config.get_cache_dir() + location, method)
            self.descriptors[location] = t, csvwriter(t, dialect=settings["dialect"])
            self.descriptors_count += 1
        # print("Printing", location)
        self.descriptorsStatsAll[location] += 1
        self.descriptorsStatsOpen[location] = self.descriptorsStatsAll[location]
        f = self.descriptors[location]
        if method == "w" and Config.has_header:
            f[0].write(Config.header)
        f[1].writerow(chosen_fields)
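
Both process_line variants keep a bounded pool of open file descriptors and, as their comments note, close the least-used one when the limit is reached. A self-contained sketch of that caching idea (hypothetical class, simplified from the snippets above):

from csv import writer as csvwriter

class DescriptorCache:
    """Keep at most max_open CSV writers open; evict the least-used one on overflow."""

    def __init__(self, max_open=256):
        self.max_open = max_open
        self.open_files = {}   # location -> (file object, csv writer)
        self.usage = {}        # location -> rows written so far

    def writer_for(self, location):
        if location not in self.open_files:
            if len(self.open_files) >= self.max_open:
                # close the open descriptor with the fewest rows written
                least_used = min(self.open_files, key=self.usage.get)
                self.open_files.pop(least_used)[0].close()
            f = open(location, "a", newline="")
            self.open_files[location] = (f, csvwriter(f))
        self.usage[location] = self.usage.get(location, 0) + 1
        return self.open_files[location][1]

# usage: DescriptorCache(max_open=2).writer_for("out-a.csv").writerow(["x", 1])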
Beispiel #48
0
from csv import QUOTE_NONE

OldUrlCounts = pandas.read_csv('./OldUrlCounts.csv')
NewUrlCounts = pandas.read_csv('./NewUrlCounts.csv')

oldUrls = OldUrlCounts['old_urls']
newUrls = NewUrlCounts['new_urls']
oldUrlCount = OldUrlCounts['count']
newUrlCount = NewUrlCounts['count']

with open('golden_count.csv', 'w', newline='', encoding='utf-8',
          buffering=1) as csvoutfile:
    writer = csvwriter(csvoutfile,
                       lineterminator='\n',
                       delimiter=',',
                       quotechar='"',
                       escapechar='\\',
                       doublequote=False,
                       quoting=QUOTE_NONE,
                       strict=True)
    index = 0
    for x in oldUrls:
        try:
            writer.writerow([
                oldUrls[index], oldUrlCount[index], newUrls[index],
                newUrlCount[index]
            ])
        except:
            writer.writerow([x, "failed"])
        index += 1
Beispiel #49
0
def saveCSV(inRow=[None]):
    suffix = datetime.now().strftime("%y%m%d_%H%M%S")
    fileName = '_'.join(['out', suffix]) + '.csv'
    with open(fileName, 'a') as csvfile:
        writer = csvwriter(csvfile, delimiter=';')
        writer.writerow(inRow)
Beispiel #50
0
# File paths
DATA_PATH = Path('datastash')
SRC_PATH = DATA_PATH.joinpath('originals', '647_Global_Temperature_Data_File.txt')
DEST_PATH = DATA_PATH.joinpath('parsed', 'nasa_global_temps.csv')
DEST_PATH.parent.mkdir(parents=True, exist_ok=True)

# parsing meta info
FIELD_NAMES = ['year',
               'annual_mean',
               'lowess_smoothed',]

FIELD_PATTERN = rxcompile(
                    r'(^\d{4})'
                    r'\s+'
                    r'(.+?)'
                    r'\s+'
                    r'(.+)$')


with open(DEST_PATH, 'w') as _d:
    dest = csvwriter(_d)
    dest.writerow(FIELD_NAMES)

    with open(SRC_PATH, 'r') as src:
        for line in src:
            m = FIELD_PATTERN.match(line)
            if m:
                dest.writerow(m.groups())
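
FIELD_PATTERN splits each data line into a four-digit year, the annual mean and the LOWESS-smoothed value; with an illustrative line (made-up numbers, not taken from the NASA file) it behaves like this:

from re import compile as rxcompile

pattern = rxcompile(r'(^\d{4})\s+(.+?)\s+(.+)$')
print(pattern.match('1880    -0.16    -0.08').groups())   # ('1880', '-0.16', '-0.08')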

Beispiel #51
0
def find_goodness_of_fit(rapid_qout_file, reach_id_file, observed_file,
                         out_analysis_file, daily=False, steps_per_group=1):
    """
    Finds the goodness of fit comparing simulated streamflow in a RAPID Qout file
    with observed flows in a csv file.
    
    Args:
        rapid_qout_file(str): Path to the RAPID Qout file.
        reach_id_file(str): Path to file with river reach IDs associated with the RAPID Qout file. It is in the format of the RAPID observed flows reach ID file.
        observed_file(str): Path to input csv with observed flows corresponding to the RAPID Qout. It is in the format of the RAPID observed flows file.
        out_analysis_file(str): Path to the analysis output csv file.
        daily(Optional[bool]): If True and the file is CF-Compliant, it will compare the *observed_file* with daily average flow from Qout. Default is False. 

    Example with CF-Compliant RAPID Qout file:
    
    .. code:: python
    
        import os
        from RAPIDpy.postprocess import find_goodness_of_fit
    
        INPUT_DATA_PATH = '/path/to/data'
        reach_id_file = os.path.join(INPUT_DATA_PATH, 'obs_reach_id.csv') 
        observed_file = os.path.join(INPUT_DATA_PATH, 'obs_flow.csv') 
    
        cf_input_qout_file = os.path.join(COMPARE_DATA_PATH, 'Qout_nasa_lis_3hr_20020830_CF.nc')
        cf_out_analysis_file = os.path.join(OUTPUT_DATA_PATH, 'cf_goodness_of_fit_results-daily.csv') 
        find_goodness_of_fit(cf_input_qout_file, reach_id_file, observed_file,
                             cf_out_analysis_file, daily=True)
    
    """
    reach_id_list = np.loadtxt(reach_id_file, delimiter=",", usecols=(0,), ndmin=1, dtype=np.int32)
   
    data_nc = RAPIDDataset(rapid_qout_file)
    
    #analyze and write
    observed_table = np.loadtxt(observed_file, ndmin=2, delimiter=",", usecols=tuple(range(reach_id_list.size)))
    with open(out_analysis_file, 'w') as outcsv:
        writer = csvwriter(outcsv)
        writer.writerow(["reach_id",
                         "percent_bias",
                         "abs_percent_bias",
                         "rmse",
                         "mae",
                         "bias",
                         "NSE",
                         "likelihood",
                         "correlation_coeff",
                         "index_agreement",
                         "KGE"])
     
        for index, reach_id in enumerate(reach_id_list):
            observed_array = observed_table[:, index]
            simulated_array = data_nc.get_qout(reach_id, daily=daily)
            #make sure they are the same length
            simulated_array = simulated_array[:len(observed_array)]
            observed_array = observed_array[:len(simulated_array)]
            simulated_array,observed_array = filter_nan(simulated_array,observed_array)
            writer.writerow([reach_id,
                             pc_bias(simulated_array,observed_array),
                             apb(simulated_array,observed_array),
                             rmse(simulated_array,observed_array),
                             mae(simulated_array,observed_array),
                             bias(simulated_array,observed_array),
                             NS(simulated_array,observed_array),
                             L(simulated_array,observed_array),
                             correlation(simulated_array,observed_array),
                             index_agreement(simulated_array,observed_array),
                             KGE(simulated_array,observed_array)[0]])
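
The metric functions written into the CSV (pc_bias, rmse, NS, KGE, ...) come from RAPIDpy's helper module; as a rough illustration only, two of them can be sketched with numpy like this (assumed textbook formulas, not the library's implementations):

import numpy as np

def pc_bias_sketch(simulated, observed):
    # relative bias of simulated versus observed flows, in percent (one common convention)
    return 100.0 * np.sum(simulated - observed) / np.sum(observed)

def rmse_sketch(simulated, observed):
    return np.sqrt(np.mean((simulated - observed) ** 2))

sim = np.array([1.0, 2.0, 3.0])
obs = np.array([1.1, 1.9, 3.2])
print(pc_bias_sketch(sim, obs), rmse_sketch(sim, obs))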
Beispiel #52
0
 def generate_usgs_avg_daily_flows_opt(self, reach_id_gage_id_file,
                                       start_datetime, end_datetime,
                                       out_streamflow_file, out_stream_id_file):
     """
     Generate streamflow file and stream id file required for optimization 
     based on usgs gage ids associated with stream ids
     """
     print "Generating avg streamflow file and stream id file required for optimization ..."
     reach_id_gage_id_list = csv_to_list(reach_id_gage_id_file) 
     if start_datetime.tzinfo is None or start_datetime.tzinfo.utcoffset(start_datetime) is None:
         start_datetime = start_datetime.replace(tzinfo=utc)
     if end_datetime.tzinfo is None or end_datetime.tzinfo.utcoffset(end_datetime) is None:
         end_datetime = end_datetime.replace(tzinfo=utc)
         
     gage_data_matrix = []
     valid_comid_list = []
     num_days_needed = (end_datetime-start_datetime).days
 
     gage_id_list = []
     for row in reach_id_gage_id_list[1:]:
         station_id = row[1]
         if len(row[1]) == 7:
             station_id = '0' + row[1]
         gage_id_list.append(station_id)
     
     num_gage_id_list = np.array(gage_id_list, dtype=np.int32)
     print "Querying Server for Data ..."                            
 
     #print station_id
     query_params = {
                     'format': 'json',
                     'sites': ",".join(gage_id_list),
                     'startDT': start_datetime.astimezone(tzoffset(None, -18000)).strftime("%Y-%m-%d"),
                     'endDT': end_datetime.astimezone(tzoffset(None, -18000)).strftime("%Y-%m-%d"),
                     'parameterCd': '00060', #streamflow
                     'statCd': '00003' #average
                    }
     response = get("http://waterservices.usgs.gov/nwis/dv", params=query_params)
     if response.ok:
         data_valid = True
         try:
             requested_data = response.json()['value']['timeSeries']
         except IndexError:
             data_valid = False
             pass
         
         if data_valid:
             for time_series in enumerate(requested_data):
                 usgs_station_full_name = time_series[1]['name']
                 usgs_station_id = usgs_station_full_name.split(":")[1]
                 gage_data = []
                 for time_step in time_series[1]['values'][0]['value']:
                     local_datetime = parse(time_step['dateTime'])
                     if local_datetime > end_datetime:
                         break
                     
                     if local_datetime >= start_datetime:
                         if not time_step['value']:
                             print "MISSING DATA", station_id, local_datetime, time_step['value']
                         gage_data.append(float(time_step['value'])/35.3146667)
 
                 try:
                     # get the index of the stream id associated with this USGS station id
                     streamid_index = np.where(num_gage_id_list==int(float(usgs_station_id)))[0][0]+1
                 except Exception:
                     print "USGS Station", usgs_station_id, "not found in list ..."
                     raise
                     
                 if len(gage_data) == num_days_needed:
                     gage_data_matrix.append(gage_data)
                     valid_comid_list.append(reach_id_gage_id_list[streamid_index][0])
                 else:
                     print "StreamID", reach_id_gage_id_list[streamid_index][0], "USGS Station", \
                           usgs_station_id, "MISSING", num_days_needed-len(gage_data), "DATA VALUES"
         if gage_data_matrix and valid_comid_list:
             print "Writing Output ..."                            
             np_array = np.array(gage_data_matrix).transpose()  
             with open(out_streamflow_file, 'wb') as gage_data:
                 wf = csvwriter(gage_data)
                 for row in np_array:
                     wf.writerow(row)
                     
             with open(out_stream_id_file, 'wb') as comid_data:
                 cf = csvwriter(comid_data)
                 for row in valid_comid_list:
                     cf.writerow([int(float(row))])
                     
             #set parameters for RAPID run
             self.IS_obs_tot = len(valid_comid_list)
             self.obs_tot_id_file = out_stream_id_file
             self.Qobs_file = out_streamflow_file
             self.IS_obs_use = len(valid_comid_list)
             self.obs_use_id_file = out_stream_id_file
         else:
             print "No valid data returned ..."
     else:
         print "USGS query error ..."