def handle(self):
    """Try to find and play a requested song via Subsonic.

    Reads the "album", "artist", "genre" and "song" skill params.  When the
    song text embeds the artist (e.g. "yesterday by the beatles") and no
    explicit artist was given, the text is split apart.  Returns the result
    of ``self.media.play()``, or False when nothing could be found/loaded.
    """
    log_message("{} skill trying to respond.".format(self.name))
    retn = False
    ss = Subsonic()
    album = self.get_param("album")   # read but not used below
    artist = self.get_param("artist")
    genre = self.get_param("genre")   # read but not used below
    song = self.get_param("song")

    # FIX: split on " by " (space-delimited) instead of the bare substring
    # "by", which mangled titles containing "by" inside a word such as
    # "goodbye".  Also strip stray whitespace from both halves.
    if song and not artist and " by " in song.lower():
        parts = song.lower().split(" by ")
        artist = parts[-1].strip()
        song = " by ".join(parts[:-1]).strip()

    songs = []
    if song and artist:
        songs = ss.search(song, artist)
    elif song:
        songs = ss.search(song)

    if len(songs) > 0:
        # Take the best (first) match and stream it.
        the_song = songs[0]
        stream_url = ss.stream_url(the_song.get("id"))
        try:
            self.media.load(stream_url)
        except Exception as e:
            log_message(e)
        else:
            retn = self.media.play()
    return retn
def load(self, path):
    """Create a VLC media player for *path*, logging success or failure."""
    try:
        player = vlc.MediaPlayer(path)
    except Exception as exc:
        log_message(exc)
    else:
        self.player = player
        log_message("Loaded {}".format(path))
def link_place_api(self):
    """Resolve this postal code's nearest place via the GeoNames web API.

    On a JSON decode failure the API search flag is switched off so later
    calls skip the remote lookup; otherwise the first returned place is
    matched against the local GeoName table and linked.
    """
    resp = requests.get(
        "http://api.geonames.org/findNearbyPlaceNameJSON",
        # NOTE(review): username is a redacted placeholder -- confirm the
        # real credential is supplied elsewhere before relying on this.
        params=dict(lat=self.postal_code.latitude,
                    lng=self.postal_code.longitude,
                    username="******"),
    )
    try:
        resp_json = resp.json()
    except JSONDecodeError:
        # A non-JSON response is treated as "API unusable": disable
        # API lookups from here on.
        log_message("Turning off api search.")
        self.use_api_search = False
    else:
        if "geonames" in resp_json:
            # make_list presumably normalizes a single dict into a list
            # -- TODO confirm helper semantics.
            places_json = make_list(resp_json.get("geonames"))
            place_json = places_json[0]  # nearest match first
            place_id = place_json.get("geonameId")
            try:
                self.place = GeoName.objects.get(geonameid=place_id)
            except GeoName.DoesNotExist:
                log_message("Place not found")
            else:
                # Persist the resolved name on both records.
                self.postal_code.place_name = place_json.get("name")
                self.postal_code.save()
                self.save()
def delete(self, using=None, keep_parents=False):
    """Delete this row, then remove the matching Square catalog object."""
    super().delete(using, keep_parents)
    result = square_client.catalog.delete_catalog_object(
        object_id=self.object_id)
    if result.is_success():
        response = result.body
    else:
        response = result.errors
    log_message(response, pretty=True)
def download_new_file(file_link, target_file):
    """Fetch *file_link* into *target_file*, replacing any existing copy.

    Returns the path that was written.
    """
    # Drop any stale copy first so the download starts from scratch.
    if os.path.exists(target_file):
        log_message("Removing file: {}".format(target_file))
        os.remove(target_file)

    log_message("Downloading new file.")
    urlretrieve(file_link, target_file)

    return target_file
def file_put_contents(file_name, file_content):
    """Write *file_content* to *file_name*.

    Returns True on success; logs the exception and returns False when the
    file cannot be written.
    """
    try:
        # FIX: the original called fh.close() inside the with-block, which
        # is redundant -- the context manager already closes the file.
        with open(file_name, "w") as fh:
            fh.write(file_content)
    except Exception as e:
        log_message(e)
        retn = False
    else:
        retn = True
    return retn
def handle(self, *args, **options):
    """Generate a random RFID id and POST it to the local rfid-lookup endpoint."""
    rfid_id = str(uuid0.generate())
    endpoint_path = reverse("rfid-lookup")
    endpoint_url = "http://127.0.0.1:8000{}".format(endpoint_path)
    # endpoint_url = "https://firefox.vryhof.net{}".format(endpoint_path)
    log_message("talking to: {}".format(endpoint_url))

    payload = dict(catalog_id=1, rfid_id=rfid_id)
    resp = requests.post(endpoint_url, data=payload)
    print(resp.text)
def date_or_none(value):
    """Convert a compact "YYYYMMDD" string into a timestamp, or None.

    Returns ``timestamp_or_none()`` applied to the "YYYY-MM-DD"
    re-formatting of *value*; returns None when *value* is falsy or the
    re-formatting raises.
    """
    retn = None
    if value:
        try:
            # Re-slice "YYYYMMDD" into "YYYY-MM-DD".
            # NOTE(review): string slicing never raises IndexError in
            # Python, so that handler is effectively dead; a non-sliceable
            # value raises TypeError and lands in the generic handler.
            new_value = "%s-%s-%s" % (value[0:4], value[4:6], value[6:8])
        except IndexError:
            pass
        except Exception as e:
            log_message(e)
        else:
            retn = timestamp_or_none(new_value)
    return retn
def link_place_api(self):
    """Resolve and link this postal code's place via the remote zipcode API.

    Fetches the zipcode record and, when it carries a "place", looks up the
    matching local GeoName row, links it, and saves.
    """
    resp = requests.get(
        "https://firefox.vryhof.net/api/rest/zipcode/{}/".format(
            self.postal_code))
    resp_json = resp.json()
    if "place" in resp_json:
        place_id = resp_json.get("place").get("geonameid")
        # FIX: the ORM lookup must happen inside the try block.  Previously
        # GeoName.objects.get() ran before the try, so GeoName.DoesNotExist
        # propagated uncaught and the except branch was unreachable.
        try:
            self.place = GeoName.objects.get(geonameid=place_id)
        except GeoName.DoesNotExist:
            log_message("Place not found")
        else:
            self.save()
def convert_csv_to_xlsx(csv_file, xlsx_file):
    """Convert a CSV file into an XLSX workbook, row by row.

    Returns True when the workbook is saved successfully; logs the
    exception and returns False otherwise.
    """
    wb = Workbook()
    ws = wb.active
    # FIX: per the csv module docs, csv input files should be opened with
    # newline="" so embedded newlines inside quoted fields parse correctly.
    with open(csv_file, "r", newline="") as f:
        for row in csv.reader(f):
            ws.append(row)
    try:
        wb.save(xlsx_file)
    except Exception as e:
        log_message(e)
        retn = False
    else:
        retn = True
    return retn
def handle(self, *args, **options):
    """Bulk-delete every ITEM / ITEM_VARIATION object from the Square catalog."""
    result = square_client.catalog.list_catalog(types="ITEM,ITEM_VARIATION")
    if result.is_success():
        objects = result.body.get("objects")
        ids = [obj.get("id") for obj in objects]
        del_result = square_client.catalog.batch_delete_catalog_objects(
            body={"object_ids": ids})
        if del_result.is_success():
            log_message(del_result.body, pretty=True)
        elif del_result.is_error():
            print(del_result.errors)
    elif result.is_error():
        print(result.errors)
def save(self, force_insert=False, force_update=False, using=None,
         update_fields=None):
    """Save the item locally, then upsert it into the Square catalog.

    Backfills abbreviation/key/ids before the first save; when the item has
    variants it is pushed to Square and the returned catalog version and
    object id are persisted with a second save.
    """
    # Backfill derived identifiers before persisting.
    if not self.item_abbreviation or self.item_abbreviation is None:
        self.item_abbreviation = generate_abbreviation(self.name)
    if not self.item_key or self.item_key is None:
        self.item_key = str(uuid0.generate())
    if not self.item_id:
        # Square temporary ids are "#"-prefixed slugs.
        self.item_id = "#{}".format(slugify(self.name))
        self.object_id = "#{}".format(slugify(self.name))
    super().save(force_insert, force_update, using, update_fields)
    itemid = self.item_id
    if not self._state.adding:
        # Updating an existing row: prefer the real Square object id.
        if self.object_id is not None:
            itemid = self.object_id
    body = self.as_dict(itemid)
    log_message(body, pretty=True)
    if self.variants.count() > 0:
        result = square_client.catalog.upsert_catalog_object(body=body)
        # On failure catalog_item is the error list, not a dict.
        catalog_item = result.body if result.is_success() else result.errors
        if "catalog_object" in catalog_item:
            self.catalog.version = catalog_item.get("catalog_object").get(
                "version")
            self.object_id = catalog_item.get("catalog_object").get("id")
            super().save(force_insert, force_update, using, update_fields)
        try:
            id_mappings = catalog_item.get("id_mappings", [])
        except AttributeError:
            # .get() on an error list raises AttributeError; just log it.
            log_message(catalog_item, pretty=True)
        else:
            self.update_id_mappings(id_mappings)
def handle(self, *args, **options):
    """Import the ZCTA population-density CSV into PopulationDensity rows."""
    self._timer()  # start timing (paired with the call at the end)
    media_root_normalized = os.path.join(*os.path.split(settings.MEDIA_ROOT))
    data_dir = os.path.join(media_root_normalized, "density-data")
    if not os.path.exists(data_dir):
        log_message("Creating directory: {}".format(data_dir))
        os.makedirs(data_dir)
    density_file = os.path.join(data_dir, "ZCTA-DENSITY.csv")
    # Re-download the source file when the cached copy is over 30 days old.
    if file_is_expired(density_file, 30):
        download_new_file(
            "https://s3.amazonaws.com/SplitwiseBlogJB/Zipcode-ZCTA-Population-Density-And-Area-Unsorted.csv",
            density_file,
        )
    zips = csv_to_dicts(density_file)
    for zipcode in zips:
        zip_code = zipcode.get("Zip/ZCTA")
        population = int(zipcode.get("2010 Population"))
        land_area = float(zipcode.get("Land-Sq-Mi"))
        density = float(zipcode.get("Density Per Sq Mile"))
        try:
            zc = PopulationDensity.objects.get(zip_code=zip_code)
        except PopulationDensity.DoesNotExist:
            # New ZCTA: create it with the parsed figures.
            zc = PopulationDensity.objects.create(
                zip_code=zip_code, population=population,
                land_miles=land_area, density=density
            )
        else:
            # Existing ZCTA: refresh the figures in place.
            zc.population = population
            zc.land_miles = land_area
            zc.density = density
            zc.save()
        zc.set_classification()
        zc.link_postal_code()
    self._timer()  # stop timing
def link_postal_code(self, zip_code=False):
    """Link this row to its PostalCode record.

    Uses *zip_code* when given, otherwise falls back to ``self.zip_code``.
    Returns True when a postal code is (or already was) linked, False when
    the lookup fails.
    """
    retn = False
    if not self.postal_code:
        if not zip_code:
            zip_code = self.zip_code
        try:
            self.postal_code = PostalCode.objects.get(postal_code=zip_code)
        except PostalCode.DoesNotExist:
            log_message("Postal code not found: {}".format(zip_code))
        else:
            retn = True
            if not self.state:
                self.state = self.postal_code.state
            # FIX: save unconditionally on a successful lookup.  Previously
            # save() only ran when self.state was empty, so a freshly
            # linked postal_code was silently dropped whenever state was
            # already populated.
            self.save()
    else:
        retn = True
    return retn
def _pdl_call(self, params):
    """GET the PDL endpoint with *params* plus the api key; return the JSON body."""
    call_params = dict(api_key=self.api_key)
    call_params.update(params)

    log_message(self.endpoint)
    resp = requests.get(self.endpoint, params=call_params, verify=True)
    log_message(resp)
    log_message(call_params, pretty=True)

    return resp.json()
def handle(self, *args, **options):
    """Download and import the ZIP-to-ZCTA crosswalk from two public sources."""
    self._timer()  # start timing (paired with the call at the end)
    media_root_normalized = os.path.join(
        *os.path.split(settings.MEDIA_ROOT))
    data_dir = os.path.join(media_root_normalized, "zcta-data")
    if not os.path.exists(data_dir):
        log_message("Creating directory: {}".format(data_dir))
        os.makedirs(data_dir)
    xls_crosswalk_link = "https://udsmapper.org/wp-content/uploads/2020/09/Zip_to_zcta_crosswalk_2020.xlsx"
    csv_crosswalk_link = "https://raw.githubusercontent.com/censusreporter/acs-aggregate/master/crosswalks/zip_to_zcta/zip_zcta_xref.csv"
    xls_crosswalk_file = os.path.join(data_dir, "crosswalk.xlsx")
    csv_crosswalk_file = os.path.join(data_dir, "crosswalk.csv")
    # Refresh cached copies older than 30 days.
    if file_is_expired(xls_crosswalk_file, 30):
        download_new_file(xls_crosswalk_link, xls_crosswalk_file)
    if file_is_expired(csv_crosswalk_file, 30):
        download_new_file(csv_crosswalk_link, csv_crosswalk_file)
    # Pass 1: UDS mapper spreadsheet (richer columns).
    log_message("Loading: {}".format(xls_crosswalk_file))
    xls_zctas = excel_to_dicts(xls_crosswalk_file)
    log_message("Importing {} data points.".format(len(xls_zctas)))
    for xls_zcta in xls_zctas:
        zip_code = xls_zcta.get("ZIP_CODE")
        po_name = xls_zcta.get("PO_NAME")
        state = xls_zcta.get("STATE")
        zip_type = xls_zcta.get("ZIP_TYPE")
        zcta_value = xls_zcta.get("ZCTA")
        zip_join_type = xls_zcta.get("zip_join_type")
        try:
            zcta = ZCTACrossWalk.objects.get(zcta=zcta_value)
        except ZCTACrossWalk.DoesNotExist:
            # Only rows not already present are created.
            zcta = ZCTACrossWalk.objects.create(
                zcta=zcta_value,
                zip_code=zip_code,
                po_name=po_name,
                state=state,
                zip_type=zip_type,
                zip_join_type=zip_join_type)
        zcta.link_postal_code()
    # Pass 2: census reporter CSV (fewer columns) fills remaining gaps.
    log_message("Loading: {}".format(csv_crosswalk_file))
    csv_zctas = csv_to_dicts(csv_crosswalk_file)
    log_message("Importing {} data points.".format(len(csv_zctas)))
    for csv_zcta in csv_zctas:
        zip_code = csv_zcta.get("zip_code")
        zcta_value = csv_zcta.get("zcta")
        zip_type = csv_zcta.get("source")
        try:
            zcta = ZCTACrossWalk.objects.get(zcta=zcta_value)
        except ZCTACrossWalk.DoesNotExist:
            zcta = ZCTACrossWalk.objects.create(
                zcta=zcta_value,
                zip_code=zip_code,
                zip_type=zip_type,
            )
        zcta.link_postal_code()
    self._timer()  # stop timing
def import_geonames_csv(data_file_path, **kwargs):
    """Bulk-import GeoName rows from a geonames dump file.

    Keyword args:
        delimiter: field delimiter (default tab, the geonames format).
        insert_threshold: rows to buffer before each bulk_create (default 10000).

    Only rows with a positive population (column 14) whose geonameid is not
    already present are inserted.
    """
    delimiter = kwargs.get("delimiter", "\t")
    insert_threshold = kwargs.get("insert_threshold", 10000)

    insert_list = []
    # FIX: the "rU" open mode was removed in Python 3.11 (universal
    # newlines are the default in text mode); the with-block also
    # guarantees the file is closed even on error.
    with open(data_file_path, "r", encoding="utf8") as data_file:
        rows = csv.reader(data_file, delimiter=delimiter)
        for row in rows:
            if len(row) > 0 and int(row[14]) > 0:
                try:
                    GeoName.objects.get(geonameid=row[0])
                except GeoName.DoesNotExist:
                    insert_list.append(
                        GeoName(
                            geonameid=row[0],
                            name=row[1],
                            asciiname=row[2],
                            alternatenames=row[3],
                            latitude=row[4],
                            longitude=row[5],
                            feature_class=row[6],
                            feature_code=row[7],
                            country_code=row[8],
                            cc2=row[9],
                            admin1_code=row[10],  # 1. order subdivision (state) varchar(20)
                            admin2_code=row[11],  # 2. order subdivision (county/province)
                            admin3_code=row[12],  # 3. order subdivision (community) varchar(20)
                            admin4_code=row[13],  # 4. order subdivision varchar(20)
                            population=row[14],
                            elevation=int_or_none(row[15]),  # in meters
                            dem=row[16],  # digital elevation model, srtm3 or gtopo30
                            timezone=row[17],
                            modification_date=row[18],
                        ))
            # Flush in batches to bound memory on large dumps.
            if len(insert_list) >= insert_threshold:
                GeoName.objects.bulk_create(insert_list)
                # FIX: this message was previously split by a stray literal
                # newline inside the string.
                log_message(
                    "Inserted {} places. Database contains {} places.".format(
                        len(insert_list), GeoName.objects.all().count()))
                insert_list = []

    # Flush whatever remains after the loop.
    GeoName.objects.bulk_create(insert_list)
    log_message(
        "Inserted {} places. Database contains {} places. Final Insert.".
        format(len(insert_list), GeoName.objects.all().count()))
def callback_view(request, *args, **kwargs):
    """Log the callback URL that was hit, then bounce the user home."""
    log_message(request.build_absolute_uri())
    home_url = resolve_link("home")
    return redirect(home_url)
def handle(self, *args, **options):
    """Import ACS5 2019 population counts for Census places and ZCTAs."""
    self._timer()  # start timing (paired with the call at the end)
    media_root_normalized = os.path.join(
        *os.path.split(settings.MEDIA_ROOT))
    data_dir = os.path.join(media_root_normalized, "zcta-data")
    csv_places = os.path.join(data_dir, "processed",
                              "acs5_2019_population_places.csv")
    csv_zctas = os.path.join(data_dir, "processed",
                             "acs5_2019_population_zctas.csv")
    # Re-download both files when either cached copy is stale.
    if file_is_expired(csv_places) or file_is_expired(csv_zctas):
        log_message("Downloading files")
        downloader = PopulationDownloader(settings.CENSUS_API_KEY,
                                          data_dir=data_dir)
        downloader.download_zctas()
        downloader.download_places()
    # Phase 1: Census "place" rows.
    log_message("Loading Places")
    places = csv_to_dicts(csv_places)
    insert_list = []
    for place in places:
        try:
            ZCTAPlace.objects.get(geoid=place.get("geoid"))
        except ZCTAPlace.DoesNotExist:
            # Only rows not already present are queued for bulk insert.
            insert_list.append(
                ZCTAPlace(geoid=place.get("geoid"),
                          name=place.get("name"),
                          universe=decimal_or_null(place.get("universe")),
                          universe_annotation=decimal_or_null(
                              place.get("universe_annotation")),
                          universe_moe=place.get("universe_moe"),
                          universe_moe_annotation=place.get(
                              "universe_moe_annotation"),
                          state=place.get("state"),
                          place=place.get("place")))
        # Flush in batches of 10000 to bound memory.
        if len(insert_list) == 10000:
            ZCTAPlace.objects.bulk_create(insert_list)
            insert_list = []
    ZCTAPlace.objects.bulk_create(insert_list)
    insert_list = []
    # Phase 2: ZCTA rows.
    log_message("Loading ZCTAs")
    zctas = csv_to_dicts(csv_zctas)
    for zcta in zctas:
        try:
            zcta_zcta = ZCTAZcta.objects.get(geoid=zcta.get("geoid"))
        except ZCTAZcta.DoesNotExist:
            insert_list.append(
                ZCTAZcta(geoid=zcta.get("geoid"),
                         name=zcta.get("name"),
                         universe=decimal_or_null(zcta.get("universe")),
                         universe_annotation=decimal_or_null(
                             zcta.get("universe_annotation")),
                         universe_moe=zcta.get("universe_moe"),
                         universe_moe_annotation=zcta.get(
                             "universe_moe_annotation"),
                         state=zcta.get("state"),
                         zcta=zcta.get("zip code tabulation area")))
        if len(insert_list) == 10000:
            ZCTAZcta.objects.bulk_create(insert_list)
            insert_list = []
    ZCTAZcta.objects.bulk_create(insert_list)
    insert_list = []
    # Phase 3: link ZCTAs lacking a postal code, then rebuild states.
    log_message("Linking Postal codes.")
    for zcta in ZCTAZcta.objects.filter(geonames_postal_code__isnull=True):
        zcta.link_postal_code()
    log_message("Building State crosswalk")
    state_codes = list(
        set(ZCTAZcta.objects.all().values_list("state", flat=True)))
    for state_code in state_codes:
        try:
            state = ZCTAState.objects.get(state=state_code)
        except ZCTAState.DoesNotExist:
            state = ZCTAState.objects.create(state=state_code)
        state.link_names()
    self._timer()  # stop timing
def handle(self, *args, **options):
    """Pretty-print the Square catalog listing for every stored Catalog."""
    for catalog in Catalog.objects.all():
        log_message(catalog.list(), pretty=True)