Beispiel #1
0
def make_output_csvs(dir, metadata):
    metadataframe = metadata

    # taking meta data file to list
    if isinstance(metadata, pd.DataFrame):
        metadata = bl.df2list(metadata)

    # getting header value
    header = metadata[0]
    count = 0
    for row in metadata[1:]:
        count += 1
        # getting the extrema values in each row
        extrema = get_extrema_metadata(header, row)

        # getting filename
        filename = dir + '/' + row[0] + '.tif'

        # from the arguments compiles makes the csv output
        try:
            make_image_output(filename,
                              metadataframe,
                              extrema,
                              outputcsvfile=dir + '/' + row[0] + '.csv')
        except IOError:
            print 'out'
        print 'Task:%s,Completed:%s' % (count, filename)
def generate_corners_metadata(filename, csvmetadata):
	"""Collect the corner long/lat pairs stored in the metadata row for filename.

	Returns a list of [LONG, LAT] rows prefixed with a ['LONG', 'LAT'] header.
	"""
	# reduce the path to the bare filename (no extension, no directories)
	filename = str.split(filename, '.')[0]
	if '/' in str(filename):
		filename = str.split(filename, '/')[-1]

	# the single metadata row whose FILENAME column matches, as a plain list
	metarow = csvmetadata[csvmetadata.FILENAME == filename].values.tolist()[0]

	# column names from the same table
	header = bl.df2list(csvmetadata)[0]

	# gather every latitude and longitude field, in column order; a column
	# whose name contains both substrings lands in both lists
	lats, longs = [], []
	for field, value in itertools.izip(header, metarow):
		lowered = str(field).lower()
		if 'lat' in lowered:
			lats.append(value)
		if 'long' in lowered:
			longs.append(value)

	# pair them up under the output header
	return [['LONG', 'LAT']] + [[lon, lat] for lon, lat in itertools.izip(longs, lats)]
Beispiel #3
0
def generate_corners_metadata(filename, csvmetadata):
    """Return [['LONG','LAT'], [long, lat], ...] corner pairs for one image row."""
    # strip the extension and any leading directory components
    base = str.split(filename, '.')[0]
    if '/' in str(base):
        base = str.split(base, '/')[-1]

    # isolate the metadata row whose FILENAME column matches
    row = csvmetadata[csvmetadata.FILENAME == base].values.tolist()[0]

    # header names from the same table
    columns = bl.df2list(csvmetadata)[0]

    # gather every latitude and every longitude value, in column order
    # (a column whose name contains both substrings appears in both lists)
    lats = [v for c, v in zip(columns, row) if 'lat' in str(c).lower()]
    longs = [v for c, v in zip(columns, row) if 'long' in str(c).lower()]

    # pair them up beneath the output header
    corners = [['LONG', 'LAT']]
    for pair in zip(longs, lats):
        corners.append(list(pair))

    return corners
def make_output_csvs(dir,metadata):
    metadataframe = metadata

    # taking meta data file to list
    if isinstance(metadata,pd.DataFrame):
        metadata = bl.df2list(metadata)

    # getting header value
    header = metadata[0]
    count = 0
    for row in metadata[1:]:
    	count += 1
        # getting the extrema values in each row
        extrema = get_extrema_metadata(header,row)

        # getting filename
        filename = dir + '/' + row[0] + '.tif'

        # from the arguments compiles makes the csv output 
        try:
            make_image_output(filename,metadataframe,extrema,outputcsvfile=dir+'/'+row[0]+'.csv')
        except IOError:
            print 'out'
       	print 'Task:%s,Completed:%s' % (count,filename)
Beispiel #5
0
# Regression check: rebuild geohash density tables at precisions 1..8 and
# compare each row-for-row against the stored reference tables in table_datum/.
# NOTE(review): `data` is assumed to be a DataFrame loaded earlier (not shown
# in this snippet) — confirm against the surrounding script.
count=0
ind=0
while not count==8:
	count+=1
	# making table associated with a specific precision
	newtable=bl.map_table(data,count,list=True)

	# taking output to a csv file and reading it back into memory
	newtable.to_csv('table'+str(count)+'.csv')
	newtable=pd.read_csv('table'+str(count)+'.csv')

	# taking new table from a dataframe to a list
	newtable=bl.df2list(newtable)

	# reading the reference table to test against into memory
	testtable=pd.read_csv('table_datum/table'+str(count)+'.csv')

	# taking test table to list
	testtable=bl.df2list(testtable)

	# comparing every row pair; report any mismatch
	for a,b in itertools.izip(newtable,testtable):
		if not a==b:
			ind=1
			print 'Row New: %s, Row Old: %s' % (a,b)

	# NOTE(review): `ind` is never reset inside the loop, so a single mismatch
	# suppresses the "Passed" message for every later precision as well.
	if ind==0:
		print 'Table size: %s Test Passed' % count
def make_squares_table(table):
	"""Build a square-corner table from a geohash grid DataFrame.

	For every row of `table` the four corners of its grid cell are computed by
	offsetting the row's coordinate point by half the spacing between two
	adjacent cells. Returns a DataFrame with columns GEOHASH, LAT1..LONG4
	(the four corners), X, Y, RED, GREEN, BLUE, COLORKEY.
	"""
	# first data row, used to discover where the grid numbering starts
	checkrow = bl.df2list(table)[1]
	# NOTE(review): precedence makes this parse as
	# (not checkrow[-5] == 1) or (checkrow[-5] == 0), which is true for every
	# value except 1. If "neither 1 nor 0" was intended it should read
	# checkrow[-5] not in (0, 1); left unchanged pending confirmation.
	if not checkrow[-5] == 1 or checkrow[-5] == 0:
		startx = checkrow[0]
		starty = checkrow[1]

		# getting the first and second position point
		point1 = table[(table.X == startx)&(table.Y == starty)]
		point2 = table[(table.X == (startx + 1))&(table.Y == (starty + 1))]


	else:
		# getting the first and second position point
		point1 = table[(table.X == 1)&(table.Y == 1)]
		point2 = table[(table.X == 2)&(table.Y == 2)]

	# taking table to list
	table = bl.df2list(table)

	# getting header
	header = table[0]

	# getting first row 
	firstrow = bl.df2list(point1)[1]

	# getting second row 
	secondrow = bl.df2list(point2)[1]

	# coordinate pairs for the two reference rows; the indexing below suggests
	# [0] is the horizontal (long) axis and [1] the vertical (lat) axis —
	# confirm against getlatlong
	point1 = getlatlong(header,firstrow)
	point2 = getlatlong(header,secondrow)

	# setting up newlist with the output header
	newlist = [['GEOHASH','LAT1','LONG1','LAT2','LONG2','LAT3','LONG3','LAT4','LONG4','X','Y','RED','GREEN','BLUE','COLORKEY']]

	# euclidean distance between the two reference points (currently unused)
	distance = (((point1[0] - point2[0]) ** 2) + ((point1[1] - point2[1]) ** 2)) ** .5

	# half the spacing between adjacent cells, per axis
	distance_vert = abs(point1[1] - point2[1]) / 2.0
	distance_horz  = abs(point1[0] - point2[0]) / 2.0

	# iterating through each point to make squares
	for row in table[1:]:
		# last five fields carried through to the output (geohash + color info)
		pointinfo = row[-5:]

		# getting point for each row
		point = getlatlong(header,row)


		# adding/subtracting the half-spacing to get each corner point
		ul_point = [point[0] - distance_horz,point[1] + distance_vert]
		ur_point = [point[0] + distance_horz,point[1] + distance_vert]
		bl_point = [point[0] - distance_horz,point[1] - distance_vert]
		br_point = [point[0] + distance_horz,point[1] - distance_vert]

		# making newrow: geohash, the four corners as [1],[0] (lat,long) pairs,
		# the grid position, then the remaining color fields
		newrow = [pointinfo[0]] + [bl_point[1],bl_point[0]] + [br_point[1],br_point[0]] + [ul_point[1],ul_point[0]] + [ur_point[1],ur_point[0]] + [row[0],row[1]] +pointinfo[1:]

		newlist.append(newrow)

	# taking newlist to dataframe again
	newlist = bl.list2df(newlist)

	return newlist
Beispiel #7
0
# Build a table of the shark attacks falling inside the 10 densest precision-3
# geohash squares, then collect the unique activities involved.

# reading csv file to pandas
data=pd.read_csv('sharks.csv')

# mapping table to precision 3 geohashs
data=bl.map_table(data,3,list=True) # this creates a csv file with a density block table
squares=pd.read_csv('squares3.csv')
squares=squares[:10] # getting the top 10 densest geohash squares

# getting each unique geohash
geohashs=np.unique(squares['GEOHASH']).tolist()

# constructing list of all top shark attack incidents
total=[data.columns.values.tolist()]
for row in geohashs:
	temp=data[data.GEOHASH==str(row)] # getting all the geohashs within the entire table
	temp=bl.df2list(temp) # taking dataframe to list
	total+=temp[1:] #ignoring the header on each temporary list


# taking list back to dataframe
total=bl.list2df(total)

# getting the unique activities that occurred
uniques=np.unique(total['Activity']).tolist()

# we now have a list with each top geohash in an aggregated table
# we can now style each color icon based on each activity being performed during the attack
# NOTE(review): `colors` is not defined anywhere in this snippet, and the loop
# body only advances `count` — the styling code appears truncated; confirm
# against the full script.
count=0
filenames=[]
for unique,color in itertools.izip(uniques,colors):
	count+=1
Beispiel #8
0
# Mapbox access token passed to bl.loadparsehtml for rendering the output
key='pk.eyJ1IjoibXVycGh5MjE0IiwiYSI6ImNpam5kb3puZzAwZ2l0aG01ZW1uMTRjbnoifQ.5Znb4MArp7v3Wwrn6WFE6A'

# reading both input tables into memory
points=pd.read_csv('points_example.csv')
line=pd.read_csv('line_example.csv')

# geohashing each table at precision 7
points=bl.map_table(points,7,list=True)
line=bl.map_table(line,7,list=True)

# getting the unique geohashs present in each table
uniquepoints=np.unique(points['GEOHASH']).tolist()
uniqueline=np.unique(line['GEOHASH']).tolist()

newpoints=[points.columns.values.tolist()]
# a point lies on the route exactly when its geohash also occurs in the line
# table; membership is tested against a set (O(1) per lookup) instead of the
# original nested loop over uniqueline — same rows, same order, and no more
# shadowed `row` loop variable
linehashes=set(uniqueline)
for geohash in uniquepoints:
	if geohash in linehashes:
		temp=points[points.GEOHASH==geohash]
		temp=bl.df2list(temp)
		newpoints+=temp[1:] # getting all the points within this geohashs

# making the new points, line, and blocks along line 
bl.make_points(newpoints,list=True,filename='points.geojson')
bl.make_blocks('squares7.csv',filename='blocks_on_line.geojson')
bl.make_line(line,list=True,filename='line.geojson')


bl.loadparsehtml(bl.collect(),key)
Beispiel #9
0
def make_squares_table(table):
    """Turn a geohash grid DataFrame into a table of cell-corner coordinates.

    Each output row holds the row's GEOHASH, four corner coordinates
    (LAT1/LONG1 .. LAT4/LONG4) obtained by shifting the row's point half a
    cell-spacing in each direction, the X/Y grid position, and the
    RED/GREEN/BLUE/COLORKEY fields carried over from the input.
    """
    # first data row — used to detect where the grid numbering begins
    checkrow = bl.df2list(table)[1]
    # NOTE(review): due to operator precedence this condition is
    # (not checkrow[-5] == 1) or (checkrow[-5] == 0), i.e. true for anything
    # other than 1. If the intent was "not 1 and not 0", the test should be
    # checkrow[-5] not in (0, 1). Behavior deliberately left as-is.
    if not checkrow[-5] == 1 or checkrow[-5] == 0:
        startx = checkrow[0]
        starty = checkrow[1]

        # getting the first and second position point
        point1 = table[(table.X == startx) & (table.Y == starty)]
        point2 = table[(table.X == (startx + 1)) & (table.Y == (starty + 1))]

    else:
        # getting the first and second position point
        point1 = table[(table.X == 1) & (table.Y == 1)]
        point2 = table[(table.X == 2) & (table.Y == 2)]

    # taking table to list
    table = bl.df2list(table)

    # getting header
    header = table[0]

    # getting first row
    firstrow = bl.df2list(point1)[1]

    # getting second row
    secondrow = bl.df2list(point2)[1]

    # coordinate pairs for the two reference rows; usage below treats index 0
    # as the horizontal axis and index 1 as the vertical axis — verify against
    # getlatlong's actual ordering
    point1 = getlatlong(header, firstrow)
    point2 = getlatlong(header, secondrow)

    # output header
    newlist = [[
        'GEOHASH', 'LAT1', 'LONG1', 'LAT2', 'LONG2', 'LAT3', 'LONG3', 'LAT4',
        'LONG4', 'X', 'Y', 'RED', 'GREEN', 'BLUE', 'COLORKEY'
    ]]

    # euclidean distance between the reference points (computed but unused)
    distance = (((point1[0] - point2[0])**2) +
                ((point1[1] - point2[1])**2))**.5

    # half the spacing between adjacent grid cells, per axis
    distance_vert = abs(point1[1] - point2[1]) / 2.0
    distance_horz = abs(point1[0] - point2[0]) / 2.0

    # iterating through each point to make squares
    for row in table[1:]:
        # last five fields (geohash + color info) pass through to the output
        pointinfo = row[-5:]

        # getting point for each row
        point = getlatlong(header, row)

        # shift by the half-spacing to obtain the four corner points
        ul_point = [point[0] - distance_horz, point[1] + distance_vert]
        ur_point = [point[0] + distance_horz, point[1] + distance_vert]
        bl_point = [point[0] - distance_horz, point[1] - distance_vert]
        br_point = [point[0] + distance_horz, point[1] - distance_vert]

        # assemble: geohash, corners as [1],[0] pairs, grid X/Y, color fields
        newrow = [pointinfo[0]] + [bl_point[1], bl_point[0]] + [
            br_point[1], br_point[0]
        ] + [ul_point[1], ul_point[0]] + [ur_point[1], ur_point[0]
                                          ] + [row[0], row[1]] + pointinfo[1:]

        newlist.append(newrow)

    # taking newlist to dataframe again
    newlist = bl.list2df(newlist)

    return newlist
# Regression test: regenerate geohash density tables for precisions 1..8 from
# points_example.csv and compare each one row-for-row against the stored
# reference copies under table_datum/.
data = pd.read_csv('points_example.csv')

count = 0
ind = 0
while not count == 8:
    count += 1
    # making table associated with a specific precision
    newtable = bl.map_table(data, count, list=True)

    # taking output to a csv file and reading it back into memory
    newtable.to_csv('table' + str(count) + '.csv')
    newtable = pd.read_csv('table' + str(count) + '.csv')

    # taking new table from a dataframe to a list
    newtable = bl.df2list(newtable)

    # reading the reference table to test against into memory
    testtable = pd.read_csv('table_datum/table' + str(count) + '.csv')

    # taking test table to list
    testtable = bl.df2list(testtable)

    # comparing every row pair; report any mismatch
    for a, b in itertools.izip(newtable, testtable):
        if not a == b:
            ind = 1
            print 'Row New: %s, Row Old: %s' % (a, b)

    # NOTE(review): `ind` is never reset per precision, so one mismatch
    # suppresses the "Passed" message for all later precisions as well.
    if ind == 0:
        print 'Table size: %s Test Passed' % count