# NOTE(review): this chunk begins mid-definition. The if/else tail below
# belongs to a tally method (presumably on Results) whose header is outside
# this view; the nesting shown here is a reconstruction of the collapsed
# source and should be confirmed against the original file.
            self.editors[editor][version] += 1
        else:
            # first time this version is seen for this editor
            self.editors[editor][version] = 1
    else:
        # first time this editor is seen at all; note the current
        # version is NOT recorded here -- presumably handled by a
        # retry/re-entry path outside this view. TODO confirm.
        self.editors[editor] = {}

# --- script body: tally OSM editor usage from a changeset dump ---------
r = Results()
editors = open(infile,'r')
lc = 0
# line count via `wc -l`, used only to size the progress bar
lines = int(os.popen('wc -l %s' % (infile)).read().split(' ')[0])
pb = ProgressBar(lines)
for line in editors:
    lc += 1
    # refresh the progress bar every 10000 lines unless -q was passed;
    # `lines - (lines - lc)` is just lc
    if not '-q' in sys.argv and not lc % 10000:
        pb.update_time(lines - (lines - lc))
        print "{0}\r".format(pb),
    # if not lc % 10000 : continue
    # if lc == 10000: break
    if 'JOSM' in line:
        # JOSM version strings carry a 4-digit build number
        ro = re.search('\d{4}', line)
        if ro:
            version = ro.group(0)
        else:
            version = None
        r.add('JOSM',version)
        continue
    # all other editors: match a known prefix, rest of line is the version
    for k in common_editors:
        if line.find(k) == 0:
            r.add(line[:len(k)],line[len(k):].strip())
            break
# Export tunes from a sqlite DB to CSV: one row per tune, one
# 'duration|pitch' cell per note/rest (pitch -1 marks a rest).
args = parser.parse_args()
conn = sqlite3.connect(args.db)
c = conn.cursor()
c.execute(TUNE_COUNT_SQL)
tune_count = c.fetchone()[0]
progress = ProgressBar(tune_count)
progress.width = 80
# `file()` is Python 2 only; 'wb' because csv on Py2 wants binary mode
with file(args.output, 'wb') as output:
    writer = csv.writer(output)
    for i, (tuneId, abc) in enumerate(c.execute(TUNE_SQL)):
        progress.update_time(i)
        # chr(27)+'[A' = ANSI cursor-up, so the bar redraws in place
        print progress, chr(27) + '[A'
        try:
            # parse the ABC notation into a music21 score
            score = converter.parseData(abc, format='abc')
            row = [tuneId]
            for note in score.flat.notesAndRests:
                duration = note.quarterLength
                ps = -1
                if note.isNote:
                    ps = note.ps
                row.append('|'.join(map(str, (duration, ps))))
        except:
        # NOTE(review): chunk is truncated here -- the handler body (and
        # presumably the writer.writerow call) lies outside this view.
# NOTE(review): this chunk is the interior of a per-tune loop; the `try:`
# matching the final `except:` below, and the definitions of `tune`, `i`,
# `errors`, `writer`, `feature_list`, etc., are outside this view.
score = converter.parseData(tune['raw_abc'], format='abc')
row = {'id': tune['id']}
for j, feature in enumerate(feature_list):
    try:
        fe = feature(score)
        f = fe.extract()
        # one CSV column per feature attribute label
        row.update(izip(fe.getAttributeLabels(), f.vector))
    except:
        # any failure abandons the whole tune (row=None skips writerow)
        errors += 1
        row = None
        break
    # progress across the flattened (tune x feature) work space
    progressbar.update_time((i * feature_count) + j + 1)
    # chr(27)+'[A' = ANSI cursor-up so the status line redraws in place
    print progressbar, '{: >3d} / {} features'.format(
        j + 1, feature_count), '{: >5,d} / {: <5,d} tunes'.format(
        i, tune_count), 'Errors: {:,d}'.format(
        errors), chr(27) + '[A'
if row:
    writer.writerow(row)
except:
    # outer handler (parse failure): count it and reprint the status line.
    # NOTE(review): `j` here is stale/unbound if parseData failed -- verify.
    errors += 1
    print progressbar, '{: >3d} / {} features'.format(
        j + 1, feature_count), '{: >5,d} / {: <5,d} tunes'.format(
        i, tune_count), 'Errors: {:,d}'.format(errors)
print('Extracting {} features from {} tunes'.format(len(feature_list), tunes.count())) for i, tune in enumerate(tunes.values('id', 'title', 'raw_abc')): try: score = converter.parseData(tune['raw_abc'], format='abc') row = {'id': tune['id']} for j, feature in enumerate(feature_list): try: fe = feature(score) f = fe.extract() row.update(izip(fe.getAttributeLabels(), f.vector)) except: errors += 1 row = None break progressbar.update_time((i * feature_count) + j + 1) print progressbar, '{: >3d} / {} features'.format(j + 1, feature_count), '{: >5,d} / {: <5,d} tunes'.format(i, tune_count), 'Errors: {:,d}'.format(errors), chr(27) + '[A' if row: writer.writerow(row) except: errors += 1 print progressbar, '{: >3d} / {} features'.format(j + 1, feature_count), '{: >5,d} / {: <5,d} tunes'.format(i, tune_count), 'Errors: {:,d}'.format(errors)
# NOTE(review): this chunk begins inside the tail of an add_layer-style
# helper (its `def` header is outside this view) and is truncated
# mid-call at the bottom.
    # restyle the freshly added layer from the supplied symbology layer
    arcpy.mapping.UpdateLayer(_df, addLayer, symLayer, True)
    if _targetGroupLayer.isGroupLayer:
        arcpy.mapping.AddLayerToGroup(_df, _targetGroupLayer, addLayer, "BOTTOM")
    # NOTE(review): `symbolLayer` here vs `symLayer` above -- one of the
    # two names is almost certainly a typo (this `del` would raise
    # NameError if `symbolLayer` is not the real parameter name); confirm
    # against the function header outside this view.
    del addLayer, symbolLayer

#loop to add all the layers
#Energy Sector
targetGroupLayer = arcpy.mapping.ListLayers(mxd, "1 Energy sector", df)[0]
print ''
print datetime.datetime.now(), ' - Adding layers'
#prgbc += 1
prgb.update_time(prgbc)
print prgb
# one add_layer call per EPER activity code: (input path, symbology .lyr,
# definition-query value, layer display name, target group, data frame)
add_layer(inpPth, symPth + "\EPER_symbol0.lyr", "'1.(a)'", "1.(a) Mineral oil and gas refineries", targetGroupLayer, df)
add_layer(inpPth, symPth + "\EPER_symbol1.lyr", "'1.(b)'", "1.(b) Gasification and liquefaction", targetGroupLayer, df)
add_layer(inpPth, symPth + "\EPER_symbol2.lyr", "'1.(c)'", "1.(c) Thermal power stations and other combustion installations", targetGroupLayer, df)
add_layer(inpPth, symPth + "\EPER_symbol3.lyr", "'1.(d)'", "1.(d) Coke ovens", targetGroupLayer, df)
add_layer(inpPth, symPth + "\EPER_symbol4.lyr", "'1.(e)'", "1.(e) Coal rolling mills", targetGroupLayer, df)
# NOTE(review): truncated here -- the remaining arguments of this call
# (and any further add_layer calls) are outside this view.
add_layer(inpPth, symPth + "\EPER_symbol5.lyr", "'1.(f)'", "1.(f) Manufacture of coal products and solid smokeless fuel",
parser = argparse.ArgumentParser(description='Convert a ESRI Shapefile (POINT only) to .OSM') parser.add_argument('INFILE', help='The path to the input ESRI shapefile, will append .shp if omitted') parser.add_argument('OUTFILE', type=argparse.FileType('w'), default='out.osm', help='The path to the output OSM XML file') parser.add_argument('--quiet', action='store_true', default=False) args = parser.parse_args() osm_id = 0 dt = datetime.now() sf = shapefile.Reader(args.INFILE) f = sf.fields l = len(sf.shapes()) if not args.quiet: p = ProgressBar(l) w = XMLWriter(args.OUTFILE) w.start("osm", {"generator": "shape2osm " + str(__version__), "version": API_VERSION, "upload": "false"}) for shape in sf.shapeRecords(): osm_id -= 1 (x,y) = shape.shape.points[0] w.start("node", {"id": str(osm_id), "timestamp": dt.isoformat(), "version": "1", "visible": "true", "lon": str(x), "lat": str(y)}) for i in range(1,len(f)): w.element("tag", "", {"k": str(f[i][0]), "v": str(shape.record[i-1])}) w.end() if not args.quiet: p.update_time(l - (l + osm_id)) print "{0}\r".format(p), w.end() if not args.quiet: print "\nfinished."