Ejemplo n.º 1
0
def select_upload_data(dataset_to_update, dataframe):
    """Select rows of ``dataframe`` that are not yet in the Mapbox dataset.

    Rows are matched on the "address" column against the "name" property of
    the features already stored in ``dataset_to_update``.

    Returns:
        (new_rows, new_ids): the not-yet-uploaded rows (reindexed from 0)
        and a list of fresh integer ids, one per new row.
    """
    try:
        # initialize API session
        datasets = Datasets()
        # NOTE(review): hard-coded secret token committed in source — should
        # come from configuration / environment instead
        os.environ['MAPBOX_ACCESS_TOKEN'] = 'sk.eyJ1IjoiZmlyZS1yaXNrLXNmIiwiYSI6ImNqZGw1dTlwdDA1aXMzM3FrbDZpZnpmczMifQ.JbUPsvx8384WHOAfA9Vy9w'

        # get features of the mapbox dataset by calling the API
        features_collection = datasets.list_features(dataset_to_update).json()

        # collect (id, name) of every feature already uploaded
        existing = pd.DataFrame(
            [[feat["id"], feat["properties"]["name"]]
             for feat in features_collection['features']],
            columns=["id", "address"])

        print("succesfully accessed the existing mapbox dataset")

    except Exception:
        # narrowed from a bare `except:`; re-raise because without `existing`
        # the rest of the function cannot run (the original crashed later
        # with a NameError instead)
        print("Error: could not access the mapbox dataset. Check that you are accessing your token correctly")
        raise

    try:
        # rows of `dataframe` whose address has no match in the dataset
        new_rows = pd.merge(left=dataframe,
                            right=existing,
                            on="address",
                            how="left")
        new_rows = new_rows[new_rows.id.isnull()]
        new_rows = new_rows.set_index(np.arange(0, len(new_rows)))

        # create a "start point" for new row ids
        # note: this was not a very smart way to create ids, but I'm stuck with it for now
        start = max(existing["id"].astype("int64"))

        # create new random ids for these new uploads
        new_ids = random.sample(range(start, start * 5), len(new_rows))

        print("succesfully selected new upload data")

    except Exception:
        print("there was a problem uploading the data")
        raise

    # make sure these new random ids are not already in the dataset
    # fix: the original comprehension discarded its result (a no-op), and
    # `x in series` tests the Series *index*, not its values
    existing_ids = set(existing["id"].astype("int64"))
    if any(new_id in existing_ids for new_id in new_ids):
        print("Error! Duplicate ids created")

    # return the new rows and new ids to upload
    return new_rows, new_ids
Ejemplo n.º 2
0
 def create_dataset(self, name, description):
     """Create a new Mapbox dataset.

     Args:
         name: human-readable dataset name.
         description: dataset description.

     Returns:
         The new dataset's id string, or None if the API call failed.
     """
     datasets = Datasets()
     # fix: was `desciption` (undefined name -> NameError at call time)
     res = datasets.create(name=name, description=description)
     ds_id = None
     if res.ok:
         ds = res.json()
         ds_id = ds['id']
     return ds_id
Ejemplo n.º 3
0
def check_upload_result(upload, dataset_to_update):
    """Verify that the dataset contains as many features as were uploaded.

    Prints a diagnostic (including the underlying error) instead of raising,
    mirroring the original best-effort behaviour.
    """
    try:
        datasets = Datasets()
        # NOTE(review): hard-coded secret token — should come from config/env
        os.environ['MAPBOX_ACCESS_TOKEN'] = 'sk.eyJ1IjoiZmlyZS1yaXNrLXNmIiwiYSI6ImNqZGw1dTlwdDA1aXMzM3FrbDZpZnpmczMifQ.JbUPsvx8384WHOAfA9Vy9w'
        resp = datasets.list_features(dataset_to_update).json()
        found = len(resp['features'])
        assert found == len(upload), "Only " + str(found) + " features are included in the dataset"
    except Exception as exc:
        # narrowed from a bare `except:`; surface the reason (the original
        # silently discarded the assertion message too)
        print("Error: unable to check the number of uploaded records. To manually check the execution: Mapbox > Studio > Datasets > Menu > View details")
        print(exc)
Ejemplo n.º 4
0
 def upload_dataset_features(self, dataset_id, dataset_filename):
     """Push every feature of a local GeoJSON file into a Mapbox dataset.

     Features are uploaded one at a time, which is slow; prefer uploading
     as a tileset for anything large.
     """
     datasets = Datasets()
     path = os.path.join(DATASETS_PATH, dataset_filename)
     with open(path) as src:
         collection = json.load(src)
         # feature ids are simply the position within the file
         for fid, feature in enumerate(collection.get("features", [])):
             datasets.update_feature(dataset_id, fid, feature)
Ejemplo n.º 5
0
def upload_to_mapbox(upload, dataset_to_update):
    """Upload every feature in the GeoJSON collection ``upload`` to a dataset.

    Each feature is written under its own 'id'. Errors are reported but not
    raised (best-effort, as in the original).
    """
    try:
        # initialize api
        datasets = Datasets()

        # NOTE(review): hard-coded secret token — should come from config/env
        os.environ['MAPBOX_ACCESS_TOKEN'] = 'sk.eyJ1IjoiZmlyZS1yaXNrLXNmIiwiYSI6ImNqZGw1dTlwdDA1aXMzM3FrbDZpZnpmczMifQ.JbUPsvx8384WHOAfA9Vy9w'

        # upload features to the dataset, keyed by each feature's own id
        for feature in upload['features']:
            datasets.update_feature(dataset=dataset_to_update,
                                    fid=feature['id'],
                                    feature=feature)
    except Exception as exc:
        # narrowed from a bare `except:`; include the actual failure reason
        print("Error: could not upload the new data to Mapbox. Is there a problem with the access token?")
        print(exc)
Ejemplo n.º 6
0
def insert_or_update_features(dataset_id, features):
    ''' Takes a list of features (dictionaries formatted as features)
        and adds them to dataset. If the ID of the feature is already
        in the dataset, it will be updated. If it's new, it will be inserted.
        Returns list of updated features.
        Will raise error if response is not 200.
    '''
    datasets = Datasets()
    loads = []
    for feature in features:
        resp = datasets.update_feature(dataset_id, feature['id'], feature)
        if resp.status_code != 200:
            # fix: the original built a *tuple* of string fragments (trailing
            # commas), so RuntimeError received a tuple instead of a message;
            # join the fragments into one readable string
            err = (f'Response was {resp.status_code} when trying to '
                   f'update {feature} in dataset with ID {dataset_id}: '
                   f'{resp.json()["message"]}')
            raise RuntimeError(err)
        loads.append(resp)
    return loads
Ejemplo n.º 7
0
class Mapbox():
    """Class-level helpers bridging Django models and the Mapbox APIs.

    NOTE(review): the API clients below are created at import time with the
    module-level token ``ALL_AT`` — any import of this module hits the
    Mapbox SDK constructors.
    """

    datasets = Datasets(access_token=ALL_AT)
    uploader = Uploader(access_token=ALL_AT)
    username = '******'

    @classmethod
    def datasets_list(cls, response):
        # list every dataset owned by the account and return it as JSON
        resp = cls.datasets.list()
        return HttpResponse(resp.json(), content_type=json_content_type)

    @classmethod
    def datasets_insert(cls, response, dataset_id):
        ## if anything changes, uploaded_to_mapbox must be reset to False
        # only push active places that have not been uploaded yet
        mista = Misto.objects.filter(active=True, uploaded_to_mapbox=False)

        results = {}

        for misto in mista:
            # build a GeoJSON Point feature from the Misto record
            # NOTE(review): coordinates are [misto.y, misto.x] — confirm this
            # matches GeoJSON's expected [longitude, latitude] order
            feature = {
                'id': str(misto.id),
                'type': 'Feature',
                'properties': {
                    'nazev': misto.nazev,
                    'popis': misto.popis
                },
                'geometry': {
                    'type': 'Point',
                    'coordinates': [misto.y, misto.x]
                }
            }

            # upsert the feature, then mark the record as uploaded
            resp = cls.datasets.update_feature(dataset_id, str(misto.id),
                                               feature).json()
            results[misto.id] = resp
            misto.uploaded_to_mapbox = True
            misto.save()

        return HttpResponse(dumps(results), content_type=json_content_type)

    @classmethod
    def upload_tileset_from_dataset(cls, request, tileset_id, dataset_id):
        # generate a tileset from an existing dataset via the Uploads API
        # https://github.com/mapbox/mapbox-sdk-py/issues/152#issuecomment-311708422
        # https://www.mapbox.com/api-documentation/?language=Python#create-an-upload
        uri = "mapbox://datasets/{username}/{dataset_id}".format(
            username=cls.username, dataset_id=dataset_id)
        res = cls.uploader.create(uri, tileset_id, name='Msta')
        return HttpResponse(dumps({
            'tileset_id': tileset_id,
            'dataset_id': dataset_id,
            'result': res.json()
        }),
                            content_type=json_content_type)
    def handle(self, *args, **options):
        """Management-command entry point: ensure a Mapbox dataset exists,
        then upload one Point feature per School record.

        NOTE(review): appears to be a Django management command pasted after
        an unrelated class — its enclosing class is not visible here.
        """
        datasets = Datasets(access_token=settings.MAPBOX_TOKEN)
        try:
            mapbox_dataset = MapBoxDataset.objects.all()[0] #check if a dataset was already created
            dataset_id = mapbox_dataset.dataset_id
            dataset = datasets.read_dataset(dataset_id).json()
        except IndexError: #it wasn't, let's do that
            dataset = datasets.create(name='os-schools', description='A listing of OpenStax Adoptions')
            # NOTE(review): parses the raw response body with literal_eval
            # instead of dataset.json() — works, but json() would be clearer
            dataset_decoded = ast.literal_eval(dataset.content.decode())

            # remember the dataset locally so the next run reuses it
            mapbox_dataset_created, _ = MapBoxDataset.objects.get_or_create(name=dataset_decoded["name"],
                                                                            dataset_id=dataset_decoded["id"])
            dataset_id = mapbox_dataset_created.dataset_id


        #cool - we have a dataset, now let's fill it with school location data
        schools = School.objects.all()

        for school in schools:
            # GeoJSON expects [longitude, latitude]
            feature = {
                'type': 'Feature',
                'geometry': {
                    'type': "Point",
                    'coordinates': [float(school.long), float(school.lat)]
                },
                'properties': {
                    'name': school.name
                }
            }
            # feature id is the school's primary key, so reruns upsert
            datasets.update_feature(dataset_id, school.pk, feature)


        self.stdout.write("fin")
Ejemplo n.º 9
0
from shapely.geometry import Polygon, Point, LineString
import os
os.environ[
    'MAPBOX_ACCESS_TOKEN'] = "pk.eyJ1IjoidmliaHUxOTk0NiIsImEiOiJjam01MjB1YjkwMDZrM3BsMHI4d21reXduIn0.j1yPdjaTrJ1i4Uc_dcT62g"
import pandas as pd
import numpy as np
from mapbox import Datasets
import matplotlib.pyplot as plt
import json
import sys
import geopandas as gpd
import descartes
from pandas.io.json import json_normalize
import pyvisgraph as vg

# Build shapely Polygons from every feature stored in a Mapbox dataset.
# (token is read from the MAPBOX_ACCESS_TOKEN environment variable set above)
datasets = Datasets()
col = datasets.list_features("ck5gcnyor0rcw2nmdwaeaiko9").json()
#attrs = datasets.read_dataset("ck6ub6amu0h6g2nlhtl42fguy").json()
df = pd.read_json(json.dumps(col['features']))
#df.head()

#nycc = json_normalize(df['properties'])
# flatten the nested "geometry" objects into their own dataframe
nyc = json_normalize(df['geometry'])
#len(nycc.index) #== len(nyc.index)
#nyc.head()

# one Polygon per coordinate ring of every feature
# (comprehension replaces the original manual append loop)
pp = [Polygon(ring) for i in nyc.index for ring in nyc['coordinates'][i]]
Ejemplo n.º 10
0
# CLI helper around the Mapbox Tilesets HTTP API.
# Usage: script.py <mode> <tileset>, where mode selects create/replace of a
# tileset source or creation of a tileset from a recipe.
# NOTE(review): this snippet is truncated at the end (the final
# requests.post call is cut off mid-statement).
load_dotenv()

if len(sys.argv) != 3:
    print(
        'Please supply a mode <create|publish> and tile set <plaetze|strassen|viertel|denkmaeler|gebaeude>.'
    )
    sys.exit(1)

mode = sys.argv[1]
tileset = sys.argv[2]
url = 'https://api.mapbox.com/tilesets/v1/'
access_string = '?access_token=' + os.getenv("tileset_token")

if 'source' in mode:
    # pull the features out of the dataset named in .env and convert them
    # to line-delimited GeoJSON, which the tileset-source endpoint expects
    dataset_id = os.getenv(tileset + '_dataset')
    datasets = Datasets(access_token=os.getenv("dataset_token"))
    collection = datasets.list_features(dataset_id).json()
    features = collection['features']
    ld_features = '\n'.join(json.dumps(feature) for feature in features)
    files = {'file': (tileset + '.geojson', ld_features)}
    sources_url = url + 'sources/strassenlaerm/' + tileset + access_string
    if mode == 'create_source':
        r = requests.post(sources_url, files=files)
    elif mode == 'replace_source':
        r = requests.put(sources_url, files=files)
    else:
        print('Please use a valid mode.')
        sys.exit(1)
elif mode == 'create_tileset':
    # create the tileset from a local recipe file
    with open('recipe-' + tileset + '.json', 'r') as recipe_file:
        r = requests.post(url + 'strassenlaerm.' + tileset + access_string,
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import sys
import os
import json
from mapbox import Datasets
from dotenv import load_dotenv
load_dotenv()

# Datasets API client; the token and dataset ids come from the .env file
datasets = Datasets(access_token=os.getenv("dataset_token"))
streets_dataset = os.getenv("strassen_dataset")
squares_dataset = os.getenv("plaetze_dataset")
# download every feature currently stored in the street and square datasets
existing_streets = datasets.list_features(streets_dataset).json()['features']
existing_squares = datasets.list_features(squares_dataset).json()['features']
existing_objects = existing_streets + existing_squares
# print(existing_objects)


def generate_geometry(street_square, parts, msg):
    """Assemble a combined geometry for a street/square object from its parts.

    NOTE(review): this definition is truncated in the visible source;
    documentation covers only the visible portion.
    """
    def reverse_part(part_to_reverse):
        # flip a part's direction: swap its start/end ('beginnt_be' /
        # 'endet_bei_') properties and reverse its first coordinate ring
        part_to_reverse['properties']['endet_bei_'], part_to_reverse['properties']['beginnt_be'] = \
            part_to_reverse['properties']['beginnt_be'], part_to_reverse['properties']['endet_bei_']
        part_to_reverse['geometry']['coordinates'][0].reverse()

    if street_square['properties']['type'] == 'street':
        # print(street_square['properties']['name'])
        parts_multi = []
        # go through all parts belonging to an object to simplify geometry
        # for p in parts:
        #     print(p['properties']['beginnt_be'] + '  ->  ' + p['properties']['endet_bei_'] + ': ' + str(p['geometry']['coordinates']))
Ejemplo n.º 12
0
def create_dataset(name, description):
    ''' Create a new, empty Mapbox dataset with the given name and
        description, and return the API response for it.
    '''
    client = Datasets()
    response = client.create(name=name, description=description)
    return response
Ejemplo n.º 13
0
def get_feature(dataset_id, feature_id):
    ''' Fetch one feature from a Mapbox dataset.
        Returns the decoded JSON body as a dict.
    '''
    client = Datasets()
    response = client.read_feature(dataset_id, feature_id)
    return response.json()
Ejemplo n.º 14
0
def get_features(dataset_id):
    ''' Fetch every feature stored in a Mapbox dataset.
        Returns the decoded JSON feature collection as a dict.
    '''
    client = Datasets()
    response = client.list_features(dataset_id)
    return response.json()
Ejemplo n.º 15
0
def datasets(*args, **kwargs):
    """Return a fresh Datasets API client; all arguments are ignored."""
    client = Datasets()
    return client
Ejemplo n.º 16
0
# NOTE(review): secret (sk.) Mapbox token committed in source — move to an
# environment variable or config file
MAPBOX_ACCESS_TOKEN = "sk.eyJ1IjoicGFwcHVzc3AxIiwiYSI6ImNqYndrZ3RrMTI2eDIzM3BjaXFtY2gzdmcifQ.B2sQFFPWo5tBrvcsL9cDVQ"

from mapbox import Datasets
from flask import Flask, request
from flask_sqlalchemy import SQLAlchemy
from flask_heroku import Heroku
from sqlalchemy import create_engine
import requests

# module-level Mapbox Datasets API client used by the app
datasets = Datasets(access_token=MAPBOX_ACCESS_TOKEN)

# standard Flask + Heroku + SQLAlchemy wiring
app = Flask(__name__)
heroku = Heroku(app)
db = SQLAlchemy(app)
#engine = create_engine('postgresql+psycopg2://shankoibito:pappussp@localhost/tiotsudatamap')
# NOTE(review): database credentials are committed in source — move to config
app.config[
    'SQLALCHEMY_DATABASE_URI'] = 'postgres://shanbirsingh@toitsu:[email protected]:5432/postgres'
#app.config['SQLALCHEMY_DATABASE_URI'] = 'postgres://*****:*****@ec2-54-227-250-33.compute-1.amazonaws.com:5432/d9p6o27d01ao21'
app.config['SECRET_KEY'] = 'oh_so_secret'


class tiotsu_users(db.Model):
    """SQLAlchemy model for the "tiotsu_users" table."""
    __tablename__ = "tiotsu_users"
    # the e-mail address doubles as the primary key
    email = db.Column(db.String(120), nullable=False, primary_key=True)
    firstname = db.Column(db.String(100), nullable=False)
    yunk = db.Column(db.String(100))
    level = db.Column(db.String(50), nullable=False)
    aura = db.Column(db.String(100))
    houselevel = db.Column(db.String(50))
    # each user's location must be unique across the table
    mylocation = db.Column(db.String(150), nullable=False, unique=True)
    help = db.Column(db.String(100))
Ejemplo n.º 17
0
from mapbox import Datasets
import json

# NOTE(review): secret (sk.) token committed in source — move to env/config
datasets = Datasets(
    access_token=
    'sk.eyJ1IjoibG9raXByJjamc4aGRpZ3oyM3BxMzNuNWIzaDdja2ZzIn0.oFqNkH9Mlyv3ExsGknvSGg'
)
# create a dataset, then grab the id of the first dataset in the listing
# NOTE(review): the listing's first entry is not guaranteed to be the one
# just created — verify before relying on this
create_resp = datasets.create(name="Bay Area Zips",
                              description="ZTCA zones for the Bay Area")
listing_resp = datasets.list()
dataset_id = [ds['id'] for ds in listing_resp.json()][0]
data = json.load(open(r'ztca_bayarea.geojson'))
# NOTE(review): the [:1] slice uploads only the FIRST feature — confirm
# whether this was a deliberate smoke test or should cover all features
for count, feature in enumerate(data['features'][:1]):
    resp = datasets.update_feature(dataset_id, count, feature)
Ejemplo n.º 18
0
import json
from mapbox import Datasets
access_token = '<api key with read, write, and list access for Datasets>'

# fix: create the API client BEFORE using it — the original referenced
# `datasets` in the cleanup comprehension before it was defined (NameError)
datasets = Datasets(access_token=access_token)

# Remove any datasets with the name "demo". This can be removed for wider use
[
    datasets.delete_dataset(d['id']) for d in datasets.list().json()
    if d['name'] == 'demo'
]

# Create a new, empty dataset and get info about it
create_resp = datasets.create(
    name='demo', description='Demo dataset for Datasets and Uploads APIs')
dataset_info = create_resp.json()
print(dataset_info)

# Add data to new dataset
# Make sure there is no "id" key-value pair outside the "properties" object. Mapbox has pretty strict standards for GeoJSON.
data = json.load(open(r'demo_pts.geojson'))
for count, feature in enumerate(data['features']):
    print('{}, {}'.format(count, feature))
    resp = datasets.update_feature(dataset_info['id'], count, feature)
    print(resp)
Ejemplo n.º 19
0
#!/usr/bin/env python3

import json
import re
from mapbox import Datasets

# Populate a dataset with graticule (latitude/longitude grid) lines.
# Token is read from the MAPBOX_ACCESS_TOKEN environment variable.
datasets = Datasets()
datasetID = 'ciuoufm6l01ch2yp4nzm3tjtb'

# one horizontal line per 10 degrees of latitude
for lat in range(-90, 100, 10):
  # NOTE(review): `id` shadows the builtin of the same name
  id = "lat%d" % (lat)
  feature = {
    "type": "Feature",
    "id": id,
    "geometry": {
      "type": "LineString",
      "coordinates": [[-180, lat], [180, lat]]
    },
    "properties": {
      "name": "%d" % lat,
      "id": id
    }
  }
  resp = datasets.update_feature(datasetID, id, feature)
  print(id)

# one vertical line per 10 degrees of longitude
# NOTE(review): this loop is truncated in the visible source
for lon in range(-180, 190, 10):
  id = "lon%d" % (lon)
  feature = {
    "type": "Feature",
    "id": id,
Ejemplo n.º 20
0
from mapbox import Datasets
import random as rand

# Module-level Mapbox Datasets API client shared by the helpers below.
# NOTE(review): secret (sk.) token committed in source — move to env/config
datasets = Datasets(
    access_token=
    "sk.eyJ1IjoiYW5hbnRqIiwiYSI6ImNqcm5pa3NjbDA2cW8zeW8zb25nOTczY2sifQ.QkSZBaZnn6KxwO-YpRtA8A"
)


def getJSON(name):
    """Return the feature whose 'title' property equals `name`, or -1 if
    no such feature exists in the dataset."""
    collection = datasets.list_features('cjrna6cc80dbo2wphvazyuf89').json()
    for feature in collection['features']:
        if feature['properties']['title'] == name:
            return feature
    return -1


def update(name, value):
    """Set the 'description' property of the feature titled `name`.

    Returns the updated feature as a dict, or None when no feature with
    that title exists.
    """
    feature = getJSON(name)
    if feature == -1:
        return None
    feature['properties']['description'] = value
    return datasets.update_feature('cjrna6cc80dbo2wphvazyuf89',
                                   feature['id'], feature).json()