Example #1
    def __init__(self, http, xmlRepGenerator):
        Attack.__init__(self, http, xmlRepGenerator)
        user_config_dir = os.getenv("HOME") or os.getenv("USERPROFILE")
        user_config_dir += "/config"
        if not os.path.isdir(user_config_dir):
            os.makedirs(user_config_dir)
        try:
            fd = open(os.path.join(user_config_dir, self.CONFIG_FILE))
            reader = csv.reader(fd)
            self.nikto_db = [l for l in reader if l != [] and l[0].isdigit()]
            fd.close()
        except IOError:
            try:
                print(_("Problem with local nikto database."))
                print(_("Downloading from the web..."))
                nikto_req = HTTP.HTTPResource("http://cirt.net/nikto/UPDATES/2.1.5/db_tests")
                resp = self.HTTP.send(nikto_req)
                page = resp.getRawPage()

                csv.register_dialect("nikto", quoting=csv.QUOTE_ALL, doublequote=False, escapechar="\\")
                reader = csv.reader(page.split("\n"), "nikto")
                self.nikto_db = [l for l in reader if l != [] and l[0].isdigit()]

                fd = open(os.path.join(user_config_dir, self.CONFIG_FILE), "w")
                writer = csv.writer(fd)
                writer.writerows(self.nikto_db)
                fd.close()
            except socket.timeout:
                print(_("Error downloading Nikto database"))
Example #2
def load_geo_defs(csv_file):
    'load datum definitions, ellipses, projections from a file'
    defs = {
        'proj':{},
        'datum':{},
        'ellps':{},
    }
    try:
        csv.register_dialect('strip', skipinitialspace=True)
        with open(os.path.join(data_dir(),csv_file),'rb') as data_f:
            data_csv=csv.reader(data_f,'strip')
            for row in data_csv:
                row=[s.decode('utf-8') for s in row]
                try:
                    rec_type = row[0]
                    rec_id = row[1]
                    rec_data = row[2:]
                    if not rec_type in defs:
                        defs[rec_type] = {}
                    defs[rec_type][rec_id.upper()] = rec_data
                except IndexError:
                    pass
                except KeyError:
                    pass
    except IOError:
        pass
    return defs
Example #3
def read_students_list():
    """
    Read students list from CSV file and return list with datamodel.Student
    objects.
    """
    students = []
    csv.register_dialect('students', quoting=csv.QUOTE_NONE)
    try:
        with open(common.get_config().get_students_csv_file_path(),
                  encoding="u8",
                  newline='') as f:
            reader = csv.reader(f, 'students')
            for row in reader:
                if len(row) != 2:
                    logger.print_error("incorrect students list CSV file")
                    return None
                else:
                    student = datamodel.Student()
                    student.set_login(row[0])
                    student.set_name(row[1])
                    students += [student]
    except IOError:
        logger.print_error("cannot open students list " + 
                           common.get_config().get_students_csv_file_path())
        return None
    if len(students) < 1:
        logger.print_error("none students has been found")
    else:
        logger.print_msg("students list successfully loaded")
    return students
Example #4
    def handle(self, *args, **options):

        Bankleitzahl.objects.all().delete()
        csv.register_dialect('bundesbank', delimiter='|', quoting=csv.QUOTE_NONE)

        with open(options['csvfile'], 'rb') as csvfile:
            reader = csv.reader(csvfile, 'bundesbank')
            for line in reader:
                data = unicode(''.join(line), "ISO-8859-1")
                satz = Bankleitzahl.objects.create()

                satz.bankleitzahl = unicode(data[0:8]).strip()
                satz.zahlungsdienstleister = unicode(data[8:9]).strip()
                satz.bezeichnung = unicode(data[9:67]).strip()
                satz.postleitzahl = unicode(data[67:72]).strip()
                satz.ort = unicode(data[72:107]).strip()
                satz.kurzbezeichnung = unicode(data[107:134]).strip()
                satz.pan = unicode(data[134:139]).strip()
                satz.bic = unicode(data[139:150]).strip()
                satz.pruefzifferberechnungsmethode = unicode(data[150:152]).strip()
                satz.datensatz = unicode(data[152:158]).strip()
                satz.indikator_geaendert = unicode(data[158:159]).strip()
                satz.indikator_loeschung = unicode(data[159:160]).strip()
                satz.nachfolge_bankleitzahl = unicode(data[160:168]).strip()
                satz.iban_regel = unicode(data[168:174]).strip()

                satz.save()

            print "Import abgeschlossen"
Example #5
    def doExport(self):
        reader = KSpread.reader()
        reader.setSheets( self.sheetslistview.sheets() )

        #if len(reader.sheetNames()) == 0:
            #raise "No sheet to export selected"

        csvfilename = self.savewidget.selectedFile()
        if not csvfilename:
            raise Exception, T.i18n("No CSV file chosen")
        if os.path.splitext(csvfilename)[1] == '':
            csvfilename += '.csv'

        csv.register_dialect("custom", self.getCustomDialect())

        csvfile = open(csvfilename,'w')
        csvwriter = csv.writer(csvfile, dialect="custom")

        def changedSheet(sheetname):
            print "changedSheet sheetname=%s" % sheetname
            #csvfile.write("# %s\n" % sheetname)

        def changedRow(row):
            values = reader.currentValues()
            #print "changedRow row=%i values=%s" % (row,values)
            csvwriter.writerow(values)

        reader.connect("changedSheet(QString)",changedSheet)
        reader.connect("changedRow(int)",changedRow)
        reader.start()
        csvfile.close()
Example #6
    def writeheader(self):
        """
        When inserting the header using FlexT, the header gets repeated every time a file is processed, due to the
        way the config is currently sent over. This (hacky) solution writes the header to the file before any
        configuration data is sent over to FlexT. After the header is written, the header_line value is set to
        False so that the header isn't rewritten when the configuration is passed to FlexT.

        if self._config.header_line:
            quote_style = None
            if self._config.quote_style.lower() == 'none':
                quote_style = csv.QUOTE_NONE
            elif self._config.quote_style.lower() == 'nonnumeric':
                quote_style = csv.QUOTE_NONNUMERIC
            elif self._config.quote_style.lower() == 'all':
                quote_style = csv.QUOTE_ALL
            elif self._config.quote_style.lower() == 'minimal':
                quote_style = csv.QUOTE_MINIMAL

            csv.register_dialect('flext',
                                 delimiter=self._config.delimiter,
                                 quotechar=self._config.quote_char,
                                 escapechar=bytes(self._config.escape_char, "utf-8").decode("unicode_escape"),
                                 doublequote=self._config.double_quote,
                                 lineterminator='\r\n',
                                 quoting=quote_style
                                 )

            writer = csv.DictWriter(self._file_obj, self._config.fields, dialect='flext')

            writer.writeheader()
            self._config.header_line = False
Example #7
def write_csv_file(csv_file_path, headers_to_write, data_to_write):
    """
    :param csv_file_path: pass in full_file_path var pointing to desired csv output
    :param headers_to_write: pass in list of headers
    :param data_to_write: pass in list of data
    :return:
    """
    csv.register_dialect('excel', delimiter=',', quoting=csv.QUOTE_ALL)

    # grab dict keys from data_to_write var as row iterator
    row_iterator = list(data_to_write)

    with open(csv_file_path, 'wt', newline='') as f:
        writer = csv.writer(f, dialect='excel')

        # write headers
        writer.writerow(headers_to_write)

        # write data
        for row in row_iterator:
            writer.writerow(data_to_write[row])

    # report success only after the with-block has written and closed the file
    print('CSV file written successfully.')
Example #8
def zdecode(data):
    import csv

    csv.register_dialect('js', delimiter=',', quotechar="'", escapechar='\\')

    keys_regex = r'''eval\(.*?function\(([^\)]+)\){'''
    keys = [re.search(keys_regex, data).groups()[0]]

    values_regex = r'''.*(\w+)\s*=\s*\w+\((.*?)\);\s*eval\(\1'''
    values = [re.search(values_regex, data, re.DOTALL).groups()[1].replace('\n','')]

    key_list = [l for l in csv.reader(keys, dialect='js')][0]
    value_list = [l for l in csv.reader(values, dialect='js')][0]

    dictionary = dict(zip(key_list, value_list))

    symtab_regex = r'''\w+\[\w+\]=(\w+)\[\w+\]\|\|\w+'''
    sym_key = re.search(symtab_regex, data).groups()[0]
    symtab = dictionary[sym_key]

    split_regex = r'''(.*)\.split\('(.*)'\)'''
    _symtab, _splitter = re.search(split_regex, symtab).groups()
    splitter = re.sub(r"""'\s*\+\s*'""", '', _splitter)
    symtab = _symtab.split(splitter)

    tab_regex = r'''(\w+)=\1\.replace'''
    tab_key = re.search(tab_regex, data).groups()[0]
    tab = dictionary[tab_key]

    def lookup(match):
        return symtab[int(match.group(0))] or str(match.group(0))

    return re.sub(ur'\w+', lookup, tab)
Example #9
    def handle_label(self, file, **options):
        csv.register_dialect('tabs', delimiter='\t')
        for row in csv.reader(open(file), dialect='tabs'):
            postcode = row[0].strip().replace(' ', '')
#            print "'%s' '%s' '%s'" % (row, row[6:7], row[7:8])
            if row[6] == 'Lat': continue # skip header
            lat = float(row[6])
            lon = float(row[7])
            location = Point(lon, lat, srid=4326)
            # Want to compare co-ordinates so can't use straightforward
            # update_or_create
            try:
                pc = Postcode.objects.get(postcode=postcode)
                if pc.location[0] != location[0] or \
                        pc.location[1] != location[1]:
                    pc.location = location
                    pc.save()
                    self.count['updated'] += 1
                else:
                    self.count['unchanged'] += 1
            except Postcode.DoesNotExist:
                Postcode.objects.create(postcode=postcode, location=location)
                self.count['created'] += 1
            self.count['total'] += 1
            if self.count['total'] % 1000 == 0:
                self.print_stats()
        self.print_stats()
Example #10
    def _read_native_snap_format(cls, filename):
        """
        :param filename: path to ITKSnap label file
        :type filename: string

        :returns: snap_label_description -- object with labels included in the file
        """

        csvfile = open(filename).readlines()
        csv.register_dialect("spaces", delimiter=" ")

        # Determine on which line the header ends
        first_row = len(filter(lambda x: csvfile[x][0] == "#",
                           range(len(csvfile))))
        reader = csv.reader(csvfile[first_row:], dialect="spaces")

        # We will collect label in this list
        list_of_labels = []

        for row in reader:
            # filtering out empty strings returned by csv.reader object
            row = filter(None, row)
            label = snap_label.from_row(row)
            list_of_labels.append(label)

        return cls(list_of_labels)
Example #11
def stat_freq(files, threshold=0.5):
    ''' If matplotlib is not available, use csv module to print some information
    '''
    import csv
    import sys
    from math import sqrt
    
    csv.register_dialect('support', delimiter='\t', skipinitialspace=True)
    on = open('freq_stat.dat', 'w')
    for filename in files:
        data = csv.reader(open(filename), dialect='support')
        support = []
        freq = []
        labels = []
        data.next()
        for row in data:
            labels.append(row[0])
            support.append(int(row[1]))
            freq.append(map(int, row[2:]))
        
        coverage = sum([f[0] for f in freq])
        history = len(freq[0])
        reads = {}
        std = {}
        
        on.write('#input file=%s, coverage=%d, haplotypes with > %d %% support:\n' % (filename, coverage, int(threshold*100)))
        for i, sup in enumerate(support):
            if float(support[i])/history > threshold:
                reads[labels[i]] = float(sum(freq[i]))/history
                std[labels[i]] = sqrt(float(sum([ (d-reads[labels[i]])**2 for d in freq[i] ]))/(history-1))
                on.write('%s has support %4.2f %% and frequency %5.2E  +/- %5.2E \n'%
                         (labels[i], 100*float(support[i])/history, reads[labels[i]]/coverage, std[labels[i]]/coverage))
        on.write('\n')
    on.close()
Example #12
def parse_input(filename):
    csv.register_dialect('unixpwd', delimiter=',', quoting=csv.QUOTE_NONE)
    with open(filename, 'rb') as csvfile:
        tweetreader = csv.reader(csvfile, delimiter=',', quotechar='|',  dialect=csv.excel_tab)
        tweets = compute_tweet_info(tweetreader)
    csvfile.close()     
    return tweets
Example #13
def convert_bom_table_to_eagle(headers, table, xref_dict):
    """
    Routine that takes the column headers and data table of the input BOM file,
    replaces the GME CRDs in the table with Eagle CRDs,
    and returns the resulting table as a CSV-formatted string.
    """
    inmemory_file = StringIO.StringIO()
    csv.register_dialect('bomDialect', lineterminator='\n', quoting=csv.QUOTE_NONNUMERIC)
    csv_dict_writer = csv.DictWriter(inmemory_file, headers, dialect='bomDialect')
    csv_dict_reader = csv.DictReader(table)
    for table_row_dictionary in csv_dict_reader:
        if m_reference_designator_key in table_row_dictionary.keys():
            original_crd_list = [x.strip() for x in table_row_dictionary[m_reference_designator_key].split(',')]
            new_crd_list = []
            for refDes in original_crd_list:
                if refDes in xref_dict.keys():
                    print("In the BOM, should replace '{0}' with '{1}'.".format(refDes, xref_dict[refDes]))
                    new_crd_list.append(xref_dict[refDes])
                else:
                    new_crd_list.append(refDes)
            new_sorted_crd_list = sorted(new_crd_list)

            # Use spaces to separate multiple reference designators in a CSV cell
            # if the resulting string will fit OK in the cell.
            if(sum([len(x) + 2 for x in new_sorted_crd_list]) < 32):
                table_row_dictionary[m_reference_designator_key] = ', '.join(new_sorted_crd_list)
            else:
                table_row_dictionary[m_reference_designator_key] = ',\n'.join(new_sorted_crd_list)

        csv_dict_writer.writerow(table_row_dictionary)
    return inmemory_file.getvalue()
Example #14
    def from_file(self, input_file=None, delimiter=None):
        input_file = str(input_file) if input_file is not None else None

        if not os.path.exists(input_file):
            raise Exception('TLD Input file Not Found')

        LOG.info("Importing TLDs from %s", input_file)

        error_lines = []
        tlds_added = 0

        with open(input_file) as inf:
            csv.register_dialect('import-tlds', delimiter=str(delimiter))
            reader = csv.DictReader(inf,
                                    fieldnames=['name', 'description'],
                                    restkey='extra_fields',
                                    dialect='import-tlds')
            for line in reader:
                # check if there are more than 2 fields
                if 'extra_fields' in line:
                    error_lines.append("InvalidLine --> " +
                                       self._convert_tld_dict_to_str(line))
                else:
                    tlds_added += self._validate_and_create_tld(line,
                                                                error_lines)

        LOG.info("Number of tlds added: %d", tlds_added)

        errors = len(error_lines)
        if errors > 0:
            LOG.error("Number of errors: %d", errors)
            # Sorting the errors and printing them so that it is easier to
            # read the errors
            LOG.error("Error Lines:\n%s", '\n'.join(sorted(error_lines)))
Example #15
 def __init__(self, line, lineno):
     self.line = line
     self.lineno = lineno
     self.missedTask = None
     parseline = line.replace('!"', '! "') # csv parser chokes on ! or * in front of " (e.g. !" or *")
     parseline = parseline.replace('*"', '* "')
     csv.register_dialect("SpaceDialect", SpaceDialect)
     parser = csv.reader([parseline], "SpaceDialect")
     words = parser.next()
     for item in words[:]:
         if item == "":
             words.remove(item)
     if len(words)<6:
         raise InvalidLineException, self.lineno
     if words[5] == "!" or words[5] == "*":
         self.missedTask = words[5]
         del words[5]
     self.minute, self.hour, self.day, self.month, self.dow = words[:5]
     self.command = self._parse_env_vars(words[5])
     if self.command.startswith("!") or self.command.startswith("*"):
         self.missedTask = self.command[0]
         self.command = self.command[1:]
     self.parameters = None
     if len(words)>6:
         self.parameters = []
         for parameter in words[6:]:
             if len(parameter.split())>1:
                 self.parameters += ['"%s"' % self._parse_env_vars(parameter)]
             else:
                 self.parameters += [self._parse_env_vars(parameter)]
Example #16
def main(ticker):
    soup = get_soup('http://finance.yahoo.com/q/op?s={}'.format(ticker))
    if soup:
        options_menu = soup.find('div', id='options_menu')
        if options_menu:
            select = options_menu.find('select')
            expire_dates = [[datetime.strptime(option.text, '%B %d, %Y'), option['data-selectbox-link']]
                            for option in select.find_all('option')]
            if expire_dates:
                headers = get_headers(soup)
                csv.register_dialect('yahoo', delimiter=',', quoting=csv.QUOTE_NONE, lineterminator='\n')
                if os.path.isfile('{}.csv'.format(ticker)):
                    with open('{}.csv'.format(ticker), 'r') as f:
                        current_headers = csv.DictReader(f).fieldnames
                else:
                    current_headers = None
                with open('{}.csv'.format(ticker), 'a') as f:
                    my_writer = csv.DictWriter(f, fieldnames=current_headers, dialect='yahoo')
                    # add headers if new file
                    if not my_writer.fieldnames:
                        my_writer.fieldnames = headers
                        my_writer.writeheader()
                    # loop through all expire dates
                    for exp_date, link in expire_dates:
                        print('Expire date and link: {}, {}'.format(datetime.strftime(exp_date,  '%d.%m.%Y'), link))
                        soup = get_soup('http://finance.yahoo.com{}'.format(link))
                        my_writer.writerows([dict(list(zip(headers, quotes))) for quotes in get_quotes(soup, exp_date)])
                        time.sleep(1)
                        # break
            else:
                print('It looks like there are no options for ticker: {}'.format(ticker))
        else:
            print('It looks like there is no such ticker: {}'.format(ticker))
    else:
        print('Unable to retrieve html. Check the Internet connection and/or try again later.')
Example #17
    def handle(self, *args, **options):
        global obj
        obj = self

        if not os.path.isfile(options['postnr_csv']):
            raise CommandError("Fila finst ikkje ({0})".format(args[0]).encode('utf8'))

        # 0       1         2                3            4         5      6       7        8      9    10   11            12                       13
        # POSTNR, POSTSTAD, POSTNR- OG STAD, BRUKSOMRÅDE, FOLKETAL, BYDEL, KOMMNR, KOMMUNE, FYLKE, LAT, LON, DATAKVALITET, DATAKVALITETSFORKLARING, SIST OPPDATERT
        csv.register_dialect('tabs', delimiter='\t')
        read = csv.reader(open(options['postnr_csv']), dialect='tabs')
        row = read.next()
        if row[0] != 'POSTNR' or row[11] != 'DATAKVALITET':
            raise CommandError("Ser ikkje ut som den korrekte type fila")

        for row in read:
            p = PostNummer()
            p.postnr = row[0].strip().replace(' ', '')
            p.poststad = row[1]
            p.bruksomrade = row[3]
            if (row[4] != ""):
                p.folketal = int(row[4].strip())
            p.bydel = row[5]
            p.kommnr = row[6]
            p.kommune = row[7]
            p.fylke = row[8]
            p.lat = float(row[9])
            p.lon = float(row[10])
            p.datakvalitet = int(row[11])
            if row[-1][0] == "2":
                p.sist_oppdatert = nmu.dt(row[-1])

            p.save()
Example #18
	def setUp(self):
		self.bnapi='your_key'
		self.bnsecret='your_secret'
		self.redirectUrl = 'insert_some_url_here'
		self.userId = 'your_spotify_user_id'
		self.playlistId = 'your_spotify_playlist_id'
		
		self.mainUrl = "http://thecmf.com/"
		
		self.driver = webdriver.Firefox()
		self.driver.implicitly_wait(30)
		
		self.verificationErrors = []
		
		self.bandList = []
		self.songList = []
		
		self.doDebug = True
		
		#Filenames and folder name
		self.now = datetime.datetime.now()
		self.dateappend = str(self.now.strftime("%m-%d_%H-%M"))
		savefolder="cmf"+self.dateappend
		csv.register_dialect('pipes', delimiter='|')
		thisfoldertwo = os.getcwd()
		self.allSourceFilesFolder = os.path.join(thisfoldertwo, "allCmf")
		self.outputFolder = os.path.join(thisfoldertwo, savefolder)
		
		if not os.path.exists(self.outputFolder):
			os.makedirs(self.outputFolder)
		if not os.path.exists(self.allSourceFilesFolder):
			os.makedirs(self.allSourceFilesFolder)
Example #19
    def get_dialect(self):
        """Returns a new dialect that implements the current selection"""

        parameters = {}

        for parameter in self.csv_params[1:]:
            pname, ptype, plabel, phelp = parameter

            widget = self._widget_from_p(pname, ptype)

            if ptype is types.StringType or ptype is types.UnicodeType:
                parameters[pname] = str(widget.GetValue())
            elif ptype is types.BooleanType:
                parameters[pname] = widget.GetValue()
            elif pname == 'quoting':
                choice = self.choices['quoting'][widget.GetSelection()]
                parameters[pname] = getattr(csv, choice)
            else:
                raise TypeError(_("{type} unknown.").format(type=ptype))

        has_header = parameters.pop("self.has_header")

        try:
            csv.register_dialect('user', **parameters)

        except TypeError, err:
            msg = _("The dialect is invalid. \n "
                    "\nError message:\n{msg}").format(msg=err)
            dlg = wx.MessageDialog(self.parent, msg, style=wx.ID_CANCEL)
            dlg.ShowModal()
            dlg.Destroy()
            raise TypeError(err)
Example #20
def csv2trade(inputString):
    # Using stringIO since csv reader only works with files.
    if len(inputString) == 0:
        return {"buys": "", "sells": ""}

    csv.register_dialect('withoutSpaces', skipinitialspace=True)
    reader = csv.reader(StringIO.StringIO(inputString), "withoutSpaces")

    if not (inputString[0].isdigit()):
        reader.next()
    
    # What strings are accepted as indicating a buy or sell.
    buySet = ["b", "B", "buy", "Buy", "BUY"]
    sellSet = ["s", "S", "sell", "Sell", "SELL"]

    buys = []
    sells = []

    for line in reader:
        try:
            trade = parseDateString(line[0])
            if type(trade) is str:   # Implies that an error message was sent
                continue
            trade['price'] = line[1]
            trade['number'] = line[2]
            if line[3] in buySet:
                buys.append(trade)
            elif line[3] in sellSet:
                sells.append(trade)
        except IndexError:
            continue
    return {"buys": buys, "sells": sells}
Example #21
def run():
    place_data = dict()

    csv.register_dialect('pipes', delimiter='|', quoting=csv.QUOTE_NONE)

    csv_stuff = csv.reader(open(PLACE_DATA_FILE,'rb'), dialect='pipes')
    #csv_stuff.next() #drop header line


    for line in csv_stuff:
        if len(line) < 10:
            print "Skipping %s" % line[1]
            continue

        data = PlaceData(line)
        place_data[data.name] = data

    for place_name in place_data.keys():
        print "Creating %s" % place_name
        new = place_data[place_name]
        # print new.title
        place = Place(name = new.name, 
                      title = new.title, 
                      body = new.body,
                      now_text = new.now_text,
                      then_text= new.then_text,
                      start_date = new.start_date, 
                      end_date = new.end_date,
                      latitude = new.lat,
                      longitude = new.lng,
                      )
Example #22
File: shards.py Project: fnava/grae
def process_shards_table():
	filename = 'TAB_filters.txt'
	csv.register_dialect('shards', delimiter='\t', quoting=csv.QUOTE_NONE)
	rows = []
	with open(filename, newline='') as f:
		next(f)
		next(f)
		next(f)
		#dialect = csv.Sniffer().sniff(f.read(1024), delimiters='\t ')
		reader = csv.reader(f, 'shards')
		try:
			for row in reader:
				if len(row) == 21:
					rows.append(row)
					#print(row)
		except csv.Error as e:
			sys.exit('file {}, line {}: {}'.format(filename, reader.line_num, e))

	shard_filters = []
	shard_dict_keys=list(map(str.strip,rows[0]))
	for row in rows[1:]:
		d = dict(zip(shard_dict_keys,map(str.strip, row)))
		shard_filters.append(d)
		#print(shard_dict_keys[-1])
	return shard_filters
Example #23
def removeOldFiles():
    print 'Removing old files from target directory'
    for dir in directories:
        for filename in os.listdir(targetDir + dir):
            targetFile = targetDir + dir + '/' + filename
            date = datetime.today() # initialize object
            if dir == 'Nabel':
                dateString = filename[17:25]
                date = datetime.strptime(dateString, '%d%m%Y')
            elif dir == 'Combilog':
                if len(filename) == 19:
                    dateString = filename[7:15]
                elif len(filename) == 18:
                    dateString = filename[6:14]
                date = datetime.strptime(dateString, '%Y%m%d')
            elif dir == 'SwissMetNet':
                dateString = filename[7:19]
                date = datetime.strptime(dateString, '%Y%m%d%H%M')
            elif dir == 'SamWi':
                f = open(targetFile)
                csv.register_dialect('SamWi', delimiter=',', skipinitialspace=1)
                reader = csv.reader(f, dialect='SamWi')
                l1 = reader.next()
                reader.next()
                reader.next()
                dateString = str(reader.next()[0])
                date = datetime.strptime(dateString, '%d.%m.%Y')
                f.close()
            if datetime.today() - date > timedelta(days = (nbDays + 1)):
                os.remove(targetFile)
                print 'Removed old file : ' + targetFile + ' from directory'
    print 'directory cleaning finished'
Example #24
    def test_valid_csv_with_a_custom_dialect(self):
        """Should be valid if all data is passed"""
        piped_template = VALID_TEMPLATE_STR.replace(',', '|')
        csv_data = """
title|category|subcategory|currency|price|url|image_url
{iphone_data}
{ipad_data}
            """.format(
            iphone_data=piped_template.format(**IPHONE_DATA),
            ipad_data=piped_template.format(**IPAD_DATA),
        )

        csv.register_dialect('pipes', delimiter='|')

        reader = smartcsv.reader(
            StringIO(csv_data), dialect='pipes', columns=COLUMNS_1)
        iphone = next(reader)
        ipad = next(reader)

        self.assertRaises(StopIteration, lambda: list(next(reader)))

        self.assertTrue(
            isinstance(iphone, dict) and isinstance(ipad, dict))

        self.assertModelsEquals(iphone, IPHONE_DATA)
        self.assertModelsEquals(ipad, IPAD_DATA)
Example #25
 def read_company_file(self, csv_file):
     csv.register_dialect('norm', delimiter=',', quoting=csv.QUOTE_MINIMAL)
     csv_file.seek(0)
     reader = csv.reader(csv_file, 'norm')
     reader.next()
     reader.next()
     companies = []
     for rowDef in reader:
         cur_comp = CompanyCsvLine()
         set = True
         try:
             i = 0
             for val in rowDef:
                 val_stp = val.strip()
                 if val_stp != "NULL":
                     if i > 13:
                         setattr(cur_comp, self.fieldOrdinals[13], val_stp)
                     else:
                         setattr(cur_comp, self.fieldOrdinals[i], val_stp)
                 i += 1
         except Exception as ex1:
             print(ex1.message)
             set = False
         if set:
             try:
                 new_comp = Company(cur_comp.reg_no, cur_comp.name, cur_comp.turnover, cur_comp.post_code)
                 companies.append(new_comp)
             except Exception as ex:
                 print(ex.message)
                 pass
     return companies
Example #26
def convert_files(file_name=None):
    if file_name is None or len(file_name) == 0:
        csv_file = sys.stdin
    else:
        csv_file = open(file_name, 'rb')
    csv.register_dialect('tab_delim', delimiter="\t",
                         quoting=csv.QUOTE_NONE)
    reader = csv.DictReader(csv_file, field_list, dialect='tab_delim')
    for d in reader:
        try:
            start = datetime.strptime(d['start'], '%m/%d/%Y')
            finish = datetime.strptime(d['finish'], '%m/%d/%Y')
            d.update({'bug ref': get_bug_ref(d['bugid']),
                      'bug_url_span': get_bug_url_span(d['bug url']),
                      'build_notes_url': get_build_notes_url(d['release']),
                      'event_id': "%s%s" % (d['release'].replace(' ', ''),
                                            d['driver']),
                      'start_h': start.strftime('%a %b %d'),
                      'start_iso': start.isoformat()[0:10],
                      'finish_h': finish.strftime('%a %b %d'),
                      'finish_iso': finish.isoformat()[0:10], })
            print event_template.format(**d)
        except ValueError as e:
            # assume bad line - get them on at least osx
            print >> sys.stderr, "Skipped line %d %s" % (reader.line_num, e)
            pass
Example #27
def writeTSV(newsList, category):
	filename = category + 'News.tsv'
	with open(filename, 'w') as the_file:
		csv.register_dialect("custom", delimiter=" ", skipinitialspace=True)
		writer = csv.writer(the_file, dialect="custom")
		for tup in newsList:
			writer.writerow(tup)
Example #28
    def saveDataAsCsv(self, path):
        """
        Save current data as CSV file.
        """
        try:
            orientationBefore = self.orientation
            self.orientation = ORIENTATION_HORIZONTAL

            if self.data:
                self._updateDataTable(self.data)

            csv.register_dialect("SimulationData", delimiter="\t", quotechar='"', skipinitialspace=True)
            writer = csv.writer(open(path, "wb"), dialect="SimulationData")

            header = self.dataTableHeaders
            writer.writerow(header)

            for row in xrange(self.dataTableRowCount):
                rowData = []
                for col in xrange(len(self.dataTableHeaders)):
                    rowData.append(self.dataTableColumnData[col][row])
                writer.writerow(rowData)

            self.orientation = orientationBefore

        except Exception, e:
            logging.error("Error while trying to write CSV file: %s\nError: %s" % (path, e))
Example #29
    def doImport(self):
        currentSheet = self.sheetslistview.sheet()
        if not currentSheet:
            raise Exception, T.i18n("No current sheet.")

        writer = KSpread.writer()
        if not writer.setSheet(currentSheet):
            self.forms.showMessageBox("Sorry", T.i18n('Invalid sheet "%1" defined.', [currentSheet]))

        cell = self.sheetslistview.editor()
        if not writer.setCell(cell):
            self.forms.showMessageBox("Sorry", T.i18n('Invalid cell "%1" defined.', [cell]))

        csvfilename = self.openwidget.selectedFile()
        if not os.path.isfile(csvfilename):
            self.forms.showMessageBox("Sorry", T.i18n("File '%1' not found.", [csvfilename]))

        # writer.connect("valueChanged()",writer.next)

        csv.register_dialect("custom", self.getCustomDialect())

        csvfile = open(csvfilename, "r")
        try:
            csvreader = csv.reader(csvfile, dialect="custom")
            try:
                while True:
                    record = csvreader.next()
                    if not writer.setValues(record):
                        print "Failed to set all of '%s' to cell '%s'" % (record, writer.cell())
                    # writer.insertValues(record)
                    writer.next()
            except StopIteration:
                pass
        finally:
            csvfile.close()
Example #30
def crossValidation(directory, csvfile):
    """Calculate a running average for each confusion-matrix element across all matrices.

    The program can be called with 1 or 3 command-line arguments. The first argument is
    'test' or 'train' and indicates which matrices to average and print. The optional
    second and third arguments are the row and column numbers of the matrix element to
    plot; the running average is then plotted after every iteration to show the
    convergence of the cross-validation.

    Example: cross_validation.py train 1 1
    """
    curAvg=np.zeros((5,5))
    n=0.0
    if len(sys.argv)==4:
        plt.figure()
        plt.ion()

    for i in range(len(os.listdir(directory))):
        csv.register_dialect('commadot', delimiter=';')
        f = open(csvfile+str(i+1)+'.csv', 'rb')
        reader = csv.reader(f, dialect='commadot')
        #generate confusion matrix of type numpy.array
        vocalization_labels=reader.next()[1::]
        matrix_list = []
        for rows in reader:
            matrix_list.append([ int(x) for x in rows[1::] ])
        matrix_list = np.asarray(matrix_list)
        f.close()
        #calculating moving average for every confusion matrix element
        curAvg = curAvg + (matrix_list - curAvg)/(n+1.0)
        n += 1
        if len(sys.argv)==4:
            plt.scatter(i, curAvg[int(sys.argv[2])][int(sys.argv[3])])

    np.set_printoptions(precision=2)
    plt.figure()
    plot_confusion_matrix(curAvg, classes=vocalization_labels, data_type=sys.argv[1], title='')
    plt.show()

    if len(sys.argv)==4:
        while True:
            plt.pause(0.05)
Example #31
from django.contrib.gis.utils import LayerMapping
from world.models import Country, Town
from django.utils.translation import gettext_lazy as _


class geonames_csv_dialect(csv.Dialect):
    """Describe the usual properties of Geonames.org-generated CSV files."""
    delimiter = '\t'
    quotechar = '"'
    doublequote = True
    skipinitialspace = False
    lineterminator = '\n'
    quoting = csv.QUOTE_MINIMAL


csv.register_dialect("geonames_csv_dialect", geonames_csv_dialect)


class Command(BaseCommand):
    help = _('Import towns')
    fields_names = [
        'geonameid',
        'name',
        'asciiname',
        'alternatenames',
        'latitude',
        'longitude',
        'feature_class',
        'feature_code',
        'country_code',
        'cc2',
Example #32
class TappedOutDialect(csv.Dialect):
    '''
    Specifies the CSV dialect used by TappedOut (http://tappedout.net/).
    The parameters were determined by inspecting exports. As a test case, a deck containing "Ach! Hans, Run!" was used.
    (Note that the actual card name contains both a comma and the quotation marks.)
    It is exported as """Ach! Hans, Run!""", therefore TappedOut uses the doublequote option.
    '''
    delimiter = ","
    quotechar = '"'
    doublequote = True
    skipinitialspace = False
    lineterminator = "\r\n"
    quoting = csv.QUOTE_MINIMAL


_CSV_DIALECT_NAME = "tappedout_net"
csv.register_dialect(_CSV_DIALECT_NAME, TappedOutDialect)
# Foiling indicators used in the CSV.
# "foil": Regular foil, "pre": Pre-release (and similar event) promo card. Always (?) foil with a golden date stamp.
csv_foil_indicators = {"foil", "pre"}
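
# Illustrative sketch (not from the original module): shows how the doublequote
# option round-trips the '"Ach! Hans, Run!"' example from the dialect docstring.
import io  # assumed extra import, for this demonstration only
_buf = io.StringIO()
csv.writer(_buf, dialect=_CSV_DIALECT_NAME).writerow(['"Ach! Hans, Run!"'])
assert _buf.getvalue() == '"""Ach! Hans, Run!"""\r\n'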


def parse_deck(csv_file_path: Path) -> MTGDeckConverter.model.Deck:
    logger.info(
        f"Parsing Tappedout.com CSV exported deck from location {csv_file_path}"
    )
    deck = MTGDeckConverter.model.Deck()
    # These are the four categories/boards supported by TappedOut.
    card_categories = {
        "main": deck.add_to_main_deck,
        "side": deck.add_to_side_board,
        "maybe": deck.add_to_maybe_board,
Example #33
# stored as a dataframe which allows the use of functions like `.to_csv`. The resulting csv is not a clean
# representation of the data. Therefore, the file can be read and transformed to eliminate null columns and multi-row
# headers. While the `tabula` package provides the opportunity to extract data from a PDF, the output needs to be
# investigated and cleaned before it can be turned into a usable data product.
from tabula import read_pdf
import csv
import os

# The `read_pdf()` function stores the contents of the file as a dataframe in a list object. There are methods of
# cleaning the data without needing to write the data in the current format to a csv, but it is the easiest method based
# on the functions already discussed to this point in the course. Therefore, a temporary csv is created to store the
# uncleaned data.
census = read_pdf("MontcoCensus.pdf")
census.to_csv("census_temp.csv", mode="w", sep="|", index=False)

csv.register_dialect("pipe-delim", delimiter="|", lineterminator="\n")

# Since only the age and census numbers are needed, the program writes the new header and then bypasses the old header
# through the use of an `if` statement. The required data from the four rows are then written to the file. The resulting
# csv is in the proper format and the data can then be used for future analyses.
with open("census_temp.csv", "rt") as census_in, open("census_20100401.csv",
                                                      "wt") as census_out:
    writer = csv.writer(census_out, delimiter="|", lineterminator="\n")
    writer.writerow(("age", "both_sexes", "male", "female"))
    for row in csv.reader(census_in, dialect="pipe-delim"):
        if row[0] == "Unnamed: 0" or row[0] == "":
            continue
        else:
            writer.writerow([row[0], row[2], row[3], row[4]])

# Once the temporary file is no longer needed, it can be removed.
os.remove("census_temp.csv")
Example #34
# -*- coding: utf-8 -*-
"""
Created on Wed May 15 15:19:06 2019

@author: AcharyaA
"""

import csv
import cx_Oracle

con = cx_Oracle.connect('***/****@*****')
cursor = con.cursor()

csv.register_dialect('myDialect',
                     delimiter='|',
                     lineterminator="\n",
                     quoting=csv.QUOTE_NONE,
                     skipinitialspace=True)

csv_file = open("info.csv", "w")
writer = csv.writer(csv_file, dialect='myDialect')
r = cursor.execute("SELECT * FROM db_info")

cols = []
for col in r.description:
    cols.append(col[0])
writer.writerow(cols)

for row in cursor:
    writer.writerow(row)
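
# Tidy-up sketch (assumed addition; the excerpt ends before any cleanup):
csv_file.close()
cursor.close()
con.close()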
Example #35
import csv

# -= Reading =-

# Create a dialect
csv.register_dialect('excel-semicolon', delimiter=';')

f = open('employees.csv', 'rt')
try:
    reader = csv.reader(f, dialect="excel-semicolon")
    for row in reader:
        print(row)
finally:
    f.close()

# Auto-detect the file's dialect
with open('employees.csv') as csvfile:
    dialect = csv.Sniffer().sniff(csvfile.read(1024))
    csvfile.seek(0)
    reader = csv.reader(csvfile, dialect)
    for row in reader:
        print(row)

# Read into a namedtuple
from collections import namedtuple
Employee = namedtuple('Employee', 'name, age, department, pay')

with open('employees.csv') as csvfile:
    for emp in map(Employee._make, csv.reader(csvfile, dialect)):
        print(emp.name, emp.pay)
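
# A small writing counterpart (illustrative addition, assuming an output file
# 'employees_out.csv'): the registered dialect works for csv.writer too.
with open('employees_out.csv', 'w', newline='') as out:
    writer = csv.writer(out, dialect='excel-semicolon')
    writer.writerow(['name', 'age', 'department', 'pay'])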
Example #36
import csv

# create a dialect
csv.register_dialect('mojDialekt', delimiter=',', quoting=csv.QUOTE_ALL)

# the file
plik_csv = open('sample.csv', 'r')
czytaj = csv.reader(plik_csv, dialect='mojDialekt')

for i, rekord in enumerate(czytaj):
    if i == 0:
        naglowek = rekord
    else:
        for j, pole in enumerate(rekord):
            print(naglowek[j], ':', pole)
    print('')

plik_csv.close()
Example #37
    }
    ''' % d)

    return ffi, lib


def fastcsv_reader(f, dialect):
    dialect = csv.get_dialect(dialect)
    try:
        ffi, lib = dialect2ffi[dialect]
    except KeyError:
        ffi, lib = dialect2ffi[dialect] = _make_ffi_from_dialect(dialect)
    #
    linelen = -1
    for line in f:
        if linelen <= len(line):
            linelen = 2 * len(line)
            rawline = ffi.new("char[]", linelen)
        ffi.buffer(rawline, len(line))[:] = line
        n = lib.parse_line(rawline, len(line))
        assert n >= 0
        yield ffi.buffer(rawline, n)[:].split('\x00')


if __name__ == '__main__':
    csv.register_dialect('unixpwd', delimiter=':', quoting=csv.QUOTE_NONE)
    with open('/etc/passwd', 'rb') as f:
        reader = fastcsv_reader(f, 'unixpwd')
        for row in reader:
            print row
Example #38
""" 自定义CSV格式
@Author: Bao Wenjie
@Email: [email protected]
@Date: 2020/10/29
"""
import csv
from csv import excel

class tsv(excel):
    """ \t分隔符 """
    delimiter = '\t'
    lineterminator = '\n'

csv.register_dialect('tsv', tsv)
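
# Minimal usage sketch (assumes a tab-separated file named 'sample.tsv';
# not part of the original snippet):
if __name__ == '__main__':
    with open('sample.tsv', newline='') as f:
        for row in csv.reader(f, dialect='tsv'):
            print(row)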
Example #39
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import csv

csv.register_dialect('marvel',
                     delimiter=',',
                     quoting=csv.QUOTE_NONE,
                     escapechar="\\")
filename = "marvel-wikia-data.csv"
with open(filename, 'r') as f:
    reader = csv.reader(f, dialect="marvel")
    next(reader)  # skip the first line
    for i, row in enumerate(reader):
        year = int(row[-1] if row[-1] else 0)
        if 1972 <= year <= 1974:
            print(row[1], year)
        if i == 24:
            break
Example #40
outfile = "contacts.vcf"

import csv, base64, sys, time, re
o = open(outfile, 'wb')


class Dialect(csv.Dialect):
    delimiter = ';'
    # the rest is not really needed, but some versions of
    # csv won't run without them:
    quoting = csv.QUOTE_MINIMAL
    quotechar = '"'
    lineterminator = '\n'


csv.register_dialect('csc', Dialect)

cscMap = {
    "Name": "FN",  # = "File as" in WM6 Contacts
    "Title": ('N', 4, 3),
    "First Name": ('N', 4, 1),
    "Middle Name": ('N', 4, 2),
    "Last Name": ('N', 4, 0),
    # NickName
    # Suffix
    "Display Name": None,  # = "Name" (NOT "File as"!) in WM6 Contacts
    "Picture": "PHOTO;JPEG;ENCODING=BASE64",
    "Job Title": "TITLE",
    "Department": ("ORG", 2, 1),
    "Company": ("ORG", 2, 0),
    "Business Phone": "TEL;TYPE=work",
Example #41
import csv
import io
import socket
from contextlib import closing
from pathlib import Path
from typing import List, Dict, Any

from .haproxy_comm_enums import ServerState, ServerAdmin, CheckStatus, CheckResult, CheckState

csv.register_dialect('haproxy', delimiter=' ', quoting=csv.QUOTE_NONE)

T_SERVERS_STATE = Dict[str, Dict[str, Any]]

class HAProxyComm:
	def __init__(self, socket_name: str):
		self._socket = socket_name

	def has_socket(self):
		return Path(self._socket).is_socket()

	def get_backends(self):
		response = self._command('show backend')
		assert response[0] == '# name', 'Unexpected response from HAProxy'
		return response[1:]

	def get_servers_state(self, backend: str) -> T_SERVERS_STATE:
		response = self._command('show servers state %s' % backend)
		assert response[0] == '1', "Unsupported version of HAProxy output"

		reader = csv.DictReader(response[1:], dialect='haproxy')
		reader.fieldnames = reader.fieldnames[1:]  # HAProxy marks fields with '#'
Example #42
    'es_VE': 'Spanish_Spain',
    'sv_SE': 'Swedish_Sweden',
    'ta_IN': 'English_Australia',
    'th_TH': 'Thai_Thailand',
    'tr_TR': 'Turkish_Turkey',
    'uk_UA': 'Ukrainian_Ukraine',
    'vi_VN': 'Vietnamese_Viet Nam',
    'tlh_TLH': 'Klingon',

}


class UNIX_LINE_TERMINATOR(csv.excel):
    lineterminator = '\n'

csv.register_dialect("UNIX", UNIX_LINE_TERMINATOR)

#
# Warning: better use self.pool.get('ir.translation')._get_source if you can
#
def translate(cr, name, source_type, lang, source=None):
    if source and name:
        cr.execute('select value from ir_translation where lang=%s and type=%s and name=%s and src=%s', (lang, source_type, str(name), source))
    elif name:
        cr.execute('select value from ir_translation where lang=%s and type=%s and name=%s', (lang, source_type, str(name)))
    elif source:
        cr.execute('select value from ir_translation where lang=%s and type=%s and src=%s', (lang, source_type, source))
    res_trans = cr.fetchone()
    res = res_trans and res_trans[0] or False
    return res
Example #43
import csv
import uuid
import datetime

csv.register_dialect('myDialect', delimiter=',', quoting=csv.QUOTE_NONE)
csv.register_dialect('myDialectOut', delimiter=',', quoting=csv.QUOTE_ALL, lineterminator='\n')

def save_csv(fnameIn, inOrEx, fnameOut, inNum):
    with open(fnameIn, newline='', encoding='utf-8') as myFileIn:
        reader = csv.reader(myFileIn, dialect='myDialect')
    #    for row in reader:
    #        print(row)
        today = datetime.date.today()
        myFile = open(fnameOut, 'a', encoding='utf-8')
        with myFile:
            myFile.write('"Windows Excel","AndroMoney",{}\n'.format(today.strftime("%Y%m%d")))
            myFile.write('"Id","Currency","Amount","Category","Sub-Category","Date","Expense(Transfer Out)","Income(Transfer In)","Note","Periodic","Project","Payee/Payer","uid","Time"\n')
            writer = csv.writer(myFile, dialect='myDialectOut')
            i = inNum
            for row in reader:
                finalrow = []
                i +=1
                finalrow.append(f'{i}')
                finalrow.append('EUR')
                finalrow.append(row[1])
                if inOrEx == 'Income':
                    finalrow.append(inOrEx)
                    finalrow.append(row[2])
                    finalrow.append((datetime.datetime.strptime(row[0], '%m-%d-%Y')).strftime("%Y%m%d"))
                    finalrow.append('')
                    finalrow.append('Bank')
Example #44
# motifAnalysis.py
# 12/30/14
# Motif_analysis.ipynb, in script form

import sys, os, re, cmath, math, glob, subprocess, csv, matplotlib, argparse
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import pandas as pd

matplotlib.rcParams['savefig.dpi'] = 2 * matplotlib.rcParams['savefig.dpi']
csv.register_dialect("textdialect", delimiter='\t')

# *** INPUT : Sample to process. Please fill these in
# ---------------------------------------------------

### Parsing arguments ###

parser = argparse.ArgumentParser(
    description="motifAnalysis.py: use this to look for motifs in CLIP data",
    epilog=
    "Example: python motifAnalysis.py -n ddx3wt --hg19 -l 12,14 -f 15 -m 25")
group = parser.add_mutually_exclusive_group()
parser.add_argument('-n',
                    metavar='SAMPLENAME',
                    help="Sample name; name of folder inside results/",
                    required=True)
parser.add_argument('-p',
                    metavar='PREFIX',
                    help="Prefix for folders where motifs will be stored",
                    default="homer")
Example #45
"""

import csv
import six

from translate.misc import csv_utils
from translate.storage import base


class DefaultDialect(csv.excel):
    skipinitialspace = True
    quoting = csv.QUOTE_NONNUMERIC
    escapechar = '\\'


csv.register_dialect('default', DefaultDialect)


def from_unicode(text, encoding='utf-8'):
    if encoding == 'auto':
        encoding = 'utf-8'
    if isinstance(text, six.text_type):
        return text.encode(encoding)
    return text


def to_unicode(text, encoding='utf-8'):
    if encoding == 'auto':
        encoding = 'utf-8'
    if isinstance(text, six.text_type):
        return text
Example #46
ans = crsr.fetchall()

# Day Bar Chart
weekday = [['Country', 'Value'], ['Monday', 0], ['Tuesday', 0],
           ['Wednesday', 0], ['Thursday', 0], ['Friday', 0], ['Saturday', 0],
           ['Sunday', 0]]

x = 0
for i in ans:
    current_row = ans[x]
    current_weekday_int = current_row[3]
    weekday[int(current_weekday_int) + 1][1] += 1
    x += 1

csv.register_dialect('myDialect')
with open('C:/Users/Rory/Desktop/bar_chart_weekday_data.csv', 'wb') as f:
    writer = csv.writer(f, dialect='myDialect')
    for row in weekday:
        writer.writerow(row)

f.close()

# Hour Bar Chart

hour = [['Country', 'Value'], ['00:00', 0], ['01:00', 0], ['02:00', 0],
        ['03:00', 0], ['04:00', 0], ['05:00', 0], ['06:00', 0], ['07:00', 0],
        ['08:00', 0], ['09:00', 0], ['10:00', 0], ['11:00', 0], ['12:00', 0],
        ['13:00', 0], ['14:00', 0], ['15:00', 0], ['16:00', 0], ['17:00', 0],
        ['18:00', 0], ['19:00', 0], ['20:00', 0], ['21:00', 0], ['22:00', 0],
        ['23:00', 0]]
Example #47
def main():

    csv.register_dialect('myDialect', delimiter=',', skipinitialspace=True)

    with open(
            '/Users/samee/Documents/Shot Danger Project/pure_shot_data/per20teamspureshots.csv',
            'r') as csvFile:
        reader = csv.reader(csvFile, dialect='myDialect')
        for row in reader:
            if not row:
                e = 1
            else:
                if row[0] != 'team':
                    f = open(
                        '/Users/samee/Documents/Shot Danger Project/pure_shot_data/teamshtml/'
                        + row[0] + '.html', 'w')

                    SS = '<td>' + str(round(float(row[1]), 3)) + '</td>'
                    S = '<td>' + str(round(float(row[2]), 3)) + '</td>'
                    A = '<td>' + str(round(float(row[3]), 3)) + '</td>'
                    B = '<td>' + str(round(float(row[4]), 3)) + '</td>'
                    C = '<td>' + str(round(float(row[5]), 3)) + '</td>'
                    D = '<td>' + str(round(float(row[6]), 3)) + '</td>'
                    E = '<td>' + str(round(float(row[7]), 3)) + '</td>'
                    F = '<td>' + str(round(float(row[8]), 3)) + '</td>'

                    opening = """
                    <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN"
                    "http://www.w3.org/TR/html4/strict.dtd">
                    <html>
                    <head>
                    <title>""" + row[0] + """</title>
                    <link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/twitter-bootstrap/3.3.7/css/bootstrap.min.css" />

                    <link rel="stylesheet" href="https://drvic10k.github.io/bootstrap-sortable/Contents/bootstrap-sortable.css" />
                  
                    <script src="https://cdnjs.cloudflare.com/ajax/libs/jquery/3.2.1/jquery.js"></script>
                  
                    <script src="https://cdnjs.cloudflare.com/ajax/libs/twitter-bootstrap/3.3.7/js/bootstrap.min.js"></script>
                  
                    <script src="https://cdnjs.cloudflare.com/ajax/libs/moment.js/2.19.1/moment.js"></script>
                  
                    <script src="https://drvic10k.github.io/bootstrap-sortable/Scripts/bootstrap-sortable.js"></script>
                  
                    <link href="//netdna.bootstrapcdn.com/font-awesome/4.0.3/css/font-awesome.css" rel="stylesheet">
                    </head>
                    <body>
                    <h1>""" + row[0] + """</h1>
                    </br>
                    <table>
                    <tr><td>S+ shots/20</td><td>S shots/20</td><td>A shots/20</td><td>B shots/20</td><td>C shots/20</td><td>D shots/20</td><td>E shots/20</td><td>F shots/20</td><td></tr>
                    <tr>""" + SS + S + A + B + C + D + E + F + """</tr>
                    </table>
                    </br>
                    """

                    closing = """
                    </body>
                    </html>
                    """

                    players = getplayers(row)

                    html = opening + players + closing

                    f.write(html)
                    f.close()

    csvFile.close()
Example #48
        csvreader = csv.reader(csvfile)
        next(csvreader)

        # extracting each data row one by one and append it
        for line in csvreader:
            # print(line)
            full_data_rows_list.append(line)

        # uncomment the code below if you would like to get total number of rows
# print(len(full_data_rows_list))
# uncomment the code below if you would like to check to see what the list of event data rows will look like
# print(full_data_rows_list)

# creating a smaller event data csv file called event_datafile_full csv that will be used to insert data into the \
# Apache Cassandra tables
csv.register_dialect('myDialect', quoting=csv.QUOTE_ALL, skipinitialspace=True)

with open('event_datafile_new.csv', 'w', encoding='utf8', newline='') as f:
    writer = csv.writer(f, dialect='myDialect')
    writer.writerow([
        'artist', 'firstName', 'gender', 'itemInSession', 'lastName', 'length',
        'level', 'location', 'sessionId', 'song', 'userId'
    ])
    for row in full_data_rows_list:
        if row[0] == '':
            continue
        writer.writerow((row[0], row[2], row[3], row[4], row[5], row[6],
                         row[7], row[8], row[12], row[13], row[16]))

# check the number of rows in your csv file
with open('event_datafile_new.csv', 'r', encoding='utf8') as f:
Example #49
 def create_csv(self,result_sheet):
     if (os.path.isfile(os.getcwd()+result_sheet)):
         os.remove(os.getcwd()+result_sheet)
     csv.register_dialect('csv',lineterminator = '\n',skipinitialspace=True,escapechar='')
     output_file=open(os.getcwd()+result_sheet,"w")
     return output_file
Example #50
def getplayers(teamdata):

    csv.register_dialect('myDialect', delimiter=',', skipinitialspace=True)

    players = []
    with open(
            '/Users/samee/Documents/Shot Danger Project/pure_shot_data/per20playerspureshotsteams.csv',
            'r') as csvFile:
        reader = csv.reader(csvFile, dialect='myDialect')
        for row in reader:
            if not row:
                e = 1
            else:
                for item in row:
                    if teamdata[0] in item:
                        players.append(row)

    tablestart = """<table class="table table-bordered sortable">
                        <thead>
                            <tr><th>Player Name</th><th>Position</th><th>S+ shots/20 REL</th><th>S shots/20 REL</th><th>A shots/20 REL</th><th>B shots/20 REL</th><th>C shots/20 REL</th><th>D shots/20 REL</th><th>E shots/20 REL</th><th>F shots/20 REL</th></tr>
                        </thead>
                        <tbody id="myTable">
    """
    tableforwards = ''
    tabledefense = ''
    tableend = """
                        </tbody></table>"""

    for row in players:
        name = getplayername(row[0])
        # per-20 rates relative to the team, one <td> per shot grade (S+ through F)
        rel_cells = ''.join(
            '<td>' + str(round(float(row[i]) - float(teamdata[i]), 3)) + '</td>'
            for i in range(1, 9))
        row_html = ('<tr><td>' + name[0] + ' ' + name[1] + '</td>'
                    '<td>' + name[2] + '</td>' + rel_cells + '</tr>')

        if name[2] == 'D':
            tabledefense += row_html
        else:
            tableforwards += row_html

    fulltable = """
    <br/>
    <h2>Defensemen</h2>
    """ + tablestart + tabledefense + tableend + """
    <br/>
    <h2>Forwards</h2>
    """ + tablestart + tableforwards + tableend

    return fulltable
Example #51
0
def get_tmp_path(arg=''):
    mkdir_p(get_generated_path('tmp'))
    return get_generated_path('tmp', arg)


def get_dated_tmp_path(prefix):
    assert '/' not in prefix, prefix
    time_str = datetime.datetime.isoformat(datetime.datetime.now()).replace(
        ':', '-')
    return get_tmp_path(prefix + '-' + time_str)


csv.register_dialect(
    'pheweb-internal-dialect',
    delimiter='\t',
    doublequote=False,
    escapechar='\\',
    lineterminator='\n',
    quotechar='"',
    skipinitialspace=False,
    strict=True,
)

## Readers


@contextmanager
def VariantFileReader(filepath, only_per_variant_fields=False):
    '''
    Reads variants (as dictionaries) from an internal file.  Iterable.  Exposes `.fields`.

        with VariantFileReader('a.tsv') as reader:
            print(reader.fields)
            for variant in reader:
                print(variant)
    '''
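
The rest of the class body is cut off in this snippet; purely as a sketch of the idea, a reader wrapping csv.reader with the 'pheweb-internal-dialect' registered above might look like this (not the real pheweb implementation):

import csv
from contextlib import contextmanager

@contextmanager
def variant_file_reader_sketch(filepath):
    # yield dicts keyed by the header row, using the strict tab-delimited dialect
    with open(filepath, 'rt') as f:
        reader = csv.reader(f, 'pheweb-internal-dialect')
        fields = next(reader)
        yield (dict(zip(fields, row)) for row in reader)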
Example #52
0
import sys
import csv  # For CSV file reading, parsing and writing
import re  # For regular expression

if __name__ == '__main__':
    verbose = 0

    # This script needs one argument: the input file to parse
    if len(sys.argv) < 2:
        print("Please, specify the filename you want to parse")
        print("Example: python %s crelan-export.csv" % sys.argv[0])
        sys.exit()

    print("Opening file %s..." % sys.argv[1])
    csv.register_dialect('myDialect', delimiter=';', quoting=csv.QUOTE_MINIMAL)

    with open(sys.argv[1], 'r', newline='') as fin:
        with open('out.csv', 'w', newline='') as fout:
            reader = csv.reader(fin, dialect='myDialect')
            writer = csv.writer(fout, dialect='myDialect')

            # Create the header of the output file
            header = [
                'Date de valeur', 'Date operation', 'Montant', 'Contrepartie',
                "Type d'operation", 'Communication'
            ]
            writer.writerow(header)

            # Display it on the screen
            print(
                "Date de valeur;Date de l'operation;Montant;Contrepartie;Type d'operation;Communication"
            )
Example #53
0
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
''' 2019/01/08 flip (reorder) columns in csv with header
nested commas tolerated! '''
import os
import sys
import csv
from misc import *
import time

t = time.time()
print(t)
csv.register_dialect('my',
                     delimiter=",",
                     quoting=csv.QUOTE_ALL,
                     skipinitialspace=True)

infile = sys.argv[1]
with open(infile) as f:
    before = [s.strip() for s in f.readline().strip().split(",")]

# infile = "Merged_SDPR_for_clustering.csv"
#before = ['"FILEID"','"YM"','"STUDYID"','"PROGRAM"','"AGE"','"FAMTYPE"','"DPNO"','"PAY"','"POSTCD_3D"']
#after = ['"FILEID"','"YM"','"PROGRAM"','"AGE"','"FAMTYPE"','"DPNO"','"PAY"','"POSTCD_3D"','"STUDYID"']

after = [s.strip() for s in sys.argv[2:]]
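
The remainder of the script is cut off here; the core reordering step would look roughly like this (a sketch, assuming infile and the 'my' dialect from above, writing to a hypothetical out.csv):

# hypothetical continuation: re-emit rows with columns in the 'after' order
with open(infile) as fin, open('out.csv', 'w', newline='') as fout:
    reader = csv.DictReader(fin, dialect='my')
    writer = csv.DictWriter(fout, fieldnames=after, dialect='my')
    writer.writeheader()
    for row in reader:
        writer.writerow({k: row[k] for k in after})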
Example #54
0
import csv
from pathlib import Path

from loguru import logger
from polypyus.annotation_parser import CSV_KEYS_LONG
from polypyus.models import Binary
from polypyus.tools import serialize

csv.register_dialect("space_delimiter",
                     delimiter=" ",
                     quoting=csv.QUOTE_MINIMAL)


def export_matches_csv(binary: Binary, path: Path):
    logger.info(f"exporting matches for {binary.name} csv to {path}")
    stream = serialize(binary.matches, export=True)
    with open(path, "w", newline="") as csv_file:
        writer = csv.DictWriter(csv_file,
                                fieldnames=CSV_KEYS_LONG,
                                dialect="space_delimiter")
        writer.writeheader()

        for match in stream:
            match["addr"] = hex(match["addr"])
            match["name"] = match["name"].split(", ")[0]
            writer.writerow({
                key: value
                for key, value in {
                    **match, "type": "FUNC"
                }.items() if key in CSV_KEYS_LONG
            })
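
A small read-back sketch under the same dialect (not part of polypyus itself):

def read_matches_csv(path: Path):
    # sketch: parse a file written by export_matches_csv back into dicts
    with open(path, newline="") as csv_file:
        reader = csv.DictReader(csv_file, dialect="space_delimiter")
        return [dict(row) for row in reader]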
Example #55
0
#!/usr/bin/python

import glob, gzip, csv, sys, os, copy
csv.register_dialect('tab', delimiter='\t', quoting=csv.QUOTE_NONE)

dir = None
reg = {'chr': None, 'beg': 0, 'end': (1 << 32) - 1}
min_length = 0
min_markers = 0
min_qual = 0
interactive = False
if len(sys.argv) < 2:
    print('Usage: plot.py [OPTIONS] <dir>')
    print('Options:')
    print('   -i, --interactive                 Run interactively')
    print('   -l, --min-length <num>            Filter input regions shorter than this [0]')
    print('   -n, --min-markers <num>           Filter input regions with fewer markers than this [0]')
    print('   -q, --min-qual <num>              Filter input regions with quality smaller than this [0]')
    print('   -r, --region <chr|chr:beg-end>    Plot this chromosome/region only')
    sys.exit(1)
args = sys.argv[1:]
while len(args):
    if args[0] == '-r' or args[0] == '--region':
        args = args[1:]
        x = args[0].split(':')
        reg['chr'] = x[0]
        if len(x) > 1:
            (reg['beg'], reg['end']) = x[1].split('-')
            reg['beg'] = float(reg['beg'])
            reg['end'] = float(reg['end'])
    elif args[0] == '-i' or args[0] == '--interactive':
        interactive = True
Example #56
0
    def test_register_kwargs(self):
        name = 'fedcba'
        csv.register_dialect(name, delimiter=';')
        self.addCleanup(csv.unregister_dialect, name)
        self.assertEqual(csv.get_dialect(name).delimiter, ';')
        self.assertEqual([['X', 'Y', 'Z']], list(csv.reader(['X;Y;Z'], name)))
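
Alongside register_dialect, the registry this test exercises can be inspected and cleaned up directly with the standard csv functions:

import csv

csv.register_dialect('demo', delimiter='|')
print(csv.list_dialects())                # includes 'demo' alongside 'excel', 'unix', ...
print(csv.get_dialect('demo').delimiter)  # '|'
csv.unregister_dialect('demo')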
Example #57
0
                "--columns",
                required=True,
                nargs="*",
                help="Provide a list of column indexes you wish to keep")
ap.add_argument("-ctr",
                "--country",
                required=True,
                help="Provide a countryname you wish to add")

args = vars(ap.parse_args())

csv.register_dialect(
    'mydialect',
    delimiter=',',
    quotechar='"',
    doublequote=True,
    skipinitialspace=True,
    lineterminator='\r\n',
    #quoting = csv.QUOTE_ALL
    quoting=csv.QUOTE_NONNUMERIC)

csvfile = args["input"]
outputfile = args["output"]
columns = args["columns"]
country = args["country"]

with open(csvfile, 'r', newline='') as fin, open(outputfile, 'w', newline='') as fout:
    # note: csv.reader ignores lineterminator; only the delimiter matters here
    reader = csv.reader(fin, delimiter=",")
    #writer = csv.writer(fout, delimiter=",", quotechar='"', lineterminator='\n')
    #writer = csv.writer(fout, delimiter=",", lineterminator='\n')
    writer = csv.writer(fout, dialect="mydialect")
Example #58
0
from tweepy.streaming import StreamListener
from tweepy import OAuthHandler
from tweepy import Stream
from tweepy import API
import json
import csv

#Setting the right variables and conecting to the API
fieldnames = ['UserLoation','UserCountry', 'UserName', 'text']
csv.register_dialect('dialect', delimiter=';', quoting=csv.QUOTE_NONE)
auth = OAuthHandler("t3l8llVayMUCRLwh217fUY1p3", "JwqWePyGZuUJzXaGId7jv2iPe3EmLhkFoIbWBDGTf3W0I89lef")
auth.set_access_token("849205479331033088-WdPOGAMZ7dPLuP7WnpejEN62wEsVzQn", "e3FXvPQeWlmmBQ5ylxTzuCZS6WdpVNLKeG1MYsLwMKpMa")
#Writing the header in the csv file
with open('data.csv', 'w',newline='\n') as csvfile:
	writer = csv.DictWriter(csvfile, fieldnames=fieldnames, dialect='dialect')
	writer.writeheader()
#Setting up the listener and the stream.
#NOTE: in a runnable script the CSVListener class below must be defined
#before these three lines execute, or this raises NameError.
listener = CSVListener()
stream = Stream(auth, listener)
stream.filter(track=['@realDonaldTrump'])


class CSVListener(StreamListener):
	def on_data(self, data):
		global fieldnames
		# decode from json
		decoded = json.loads(data)
		exportData={}
		# testing if the tweet has a "place" and a "user"
		if("user" in decoded and "place" in decoded):
			# testing if we can use the values, meaning they are not null
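
The class body is truncated at this point; purely as a sketch, the rest of on_data might append one row per qualifying tweet under the registered 'dialect'. The key names mirror the fieldnames above (including the original 'UserLoation' spelling), but the JSON lookups are illustrative, not the original author's code:

class CSVListenerSketch(StreamListener):
	def on_data(self, data):
		decoded = json.loads(data)
		# only handle tweets that carry both a user and a non-null place
		if "user" in decoded and "place" in decoded and decoded["place"]:
			exportData = {
				'UserLoation': decoded["place"].get("full_name"),  # (sic) matches fieldnames
				'UserCountry': decoded["place"].get("country"),
				'UserName': decoded["user"].get("screen_name"),
				'text': decoded.get("text"),
			}
			# note: with QUOTE_NONE and no escapechar, a literal ';' in a field raises csv.Error
			with open('data.csv', 'a', newline='\n') as csvfile:
				writer = csv.DictWriter(csvfile, fieldnames=fieldnames, dialect='dialect')
				writer.writerow(exportData)
		return True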
Example #59
0
    parsed_source = html.fromstring(html_source, 'https://therapists.psychologytoday.com/')
    parsed_source.make_links_absolute()
    
    state_code = "".join(re.findall(r'Therapists in .*? \((.*?)\)', "".join(parsed_source.xpath("//div[@id='results-right']/h1/text()")).strip()))
    if len(state_code) == 0:
        city_codes = extract_more_city_codes(url)
        city_codes = [x.replace(' ', '+') for x in city_codes]
        return city_codes
    else:
        return [state_code]
    
if __name__ == '__main__':
    
    csv_main_file_name = 'data/main_table.txt'
    fileObject = open(csv_main_file_name, 'a', newline='')
    # quotechar must be a 1-character string or None; with QUOTE_NONE, None is the safe choice
    csv.register_dialect('MyDialect', delimiter='\t', doublequote=False, quotechar=None, lineterminator='\n', escapechar=' ', quoting=csv.QUOTE_NONE)
    mywriter = csv.writer(fileObject, 'MyDialect')
    
    """
    mywriter.writerow(['profid', 'profurl', 'name', 'female', 'photo', 'btn_sendfriend', 'btn_emailme', 'btn_emailus', 'btn_videocall', 'btn_perweb', 'perweburl',
                               'phone', 'state', 'zipcode', 'freeconsult', 'occupation', 'specialties', 'issues1', 'menthealth', 'issues2', 'cf_relig', 'cf_ethnic', 'cf_language',
                               'cf_age', 'sexuality', 'cf_categ', 'trt_orient', 'trt_modal', 'fin_mincost', 'fin_maxcost', 'fin_avgcost', 'fin_slidescale', 'fin_paymethod',
                               'fin_insur', 'qual_yrpractice', 'qual_school', 'qual_yrgrad', 'qual_licensenum', 'qual_licensestate', 'datemod', 'groups', 'connections', 'verified'])
    """
    txt_personal_statements_file_name = 'data/personalStatements.txt'
    personal_statements_data_writer = open(txt_personal_statements_file_name, 'a')
    
    
    gender_list = ['1', '2']
    start_url = 'https://therapists.psychologytoday.com/rms/prof_search.php'
    states_urls = extract_states_urls(start_url)
Example #60
0
import sys

import csv
import pickle


class excel_french(csv.Dialect):
    delimiter = ';'
    quotechar = '"'
    doublequote = True
    skipinitialspace = False
    lineterminator = '\n'
    quoting = csv.QUOTE_MINIMAL


csv.register_dialect('excel_french', excel_french)


def object_to_csv(obj_file, csv_file):
    """Generate CSV file."""
    dic_obj = open(obj_file, "rb")  # pickle files are binary
    if options.verbose:
        print("Loading dictionary...")
    dic_ip = pickle.load(dic_obj)

    c = csv.writer(open(csv_file, "w", newline=""), 'excel_french')

    if options.verbose:
        print("Writing CSV file...")
    for ip_src in dic_ip:
        for ip_dst in dic_ip[ip_src]: