def inventory_update(self, arguments, warnings, errors):
    """Validate and persist an inventory record.

    Args:
        arguments: form values; a real submit carries "product_id",
            "normal_price", "discounted_price" and "units".
        warnings: unused here; kept for the update-handler interface.
        errors: defaultdict(list) keyed by field name; validation
            messages are appended to it.

    Returns:
        The inventory record serialized to a dictionary; on a plain
        lookup the related product is embedded under "product".
    """
    inventory = db.Inventory()
    if "product_id" in arguments:
        inventory.product = db.Product.by_id(arguments["product_id"])
    inventory.date = datetime.datetime.now()

    def _require_positive(field, caster, message):
        # Append *message* unless the field parses to a positive number.
        # A non-numeric or empty value now counts as invalid instead of
        # raising an uncaught ValueError as the previous code did.
        try:
            value = caster(arguments[field].strip())
        except (ValueError, KeyError):
            errors[field].append(message)
            return
        if value <= 0:
            errors[field].append(message)

    if len(arguments) > 2:  # more than just the id fields: a real submit
        if arguments["product_id"].strip() == "":
            # NOTE(review): filed under "normal_price", presumably so the
            # message renders near that input — verify against the template.
            errors["normal_price"].append(u"No se seleccionó un producto")
        _require_positive("normal_price", float,
                          u"Es necesario entrar un valor positivo para el precio")
        _require_positive("discounted_price", float,
                          u"Es necesario entrar un valor positivo para el precio")
        _require_positive("units", int,
                          u"Es necesario entrar un valor positivo para las unidades")
        if len(errors) == 0:
            helpers.get(db.Inventory).to_record(arguments, inventory)
            db.session().add(inventory)
            db.session().commit()
            arguments = helpers.get(db.Inventory).to_dictionary(inventory)
    else:
        arguments = helpers.get(db.Inventory).to_dictionary(inventory)
        arguments["product"] = helpers.get(db.Product).to_dictionary(
            inventory.product)
    return arguments
def review_update( self, arguments, warnings, errors ):
    """Create or update a product review and e-mail administrators.

    Validates name, alias and a 10-word minimum content on submit;
    commits the review and notifies each admin by mail when clean.
    Returns the review serialized to a dictionary.
    """
    if arguments["id"] == "new":
        record = db.Review()
    else:
        record = db.Review.by_id( arguments["id"] )
    if len( arguments ) > 1:  # more than just "id": a real submit
        if arguments["name"].strip() == "":
            errors["name"].append( u"Es necesario asignar un nombre al comentario" )
        if arguments["alias"].strip() == "":
            errors["alias"].append( u"Es necesario capturar un nombre de usuario" )
        if len( arguments["content"].split() ) < 10:
            errors["content"].append( u"El comentario debe tener al menos 10 palabras" )
        if len( errors ) == 0:
            helpers.get( db.Review ).to_record( arguments, record )
            record.date = datetime.datetime.now()
            db.session().add( record )
            db.session().commit()
            arguments = helpers.get( db.Review ).to_dictionary( record )
            # NOTE(review): this queries db.User but filters on a
            # db.CatalogEntry column with no explicit join — confirm the
            # ORM relationship makes this select only administrators.
            admins = db.session().query( db.User )\
                .filter( db.CatalogEntry.value == "Administradores" )\
                .all()
            for user in admins:
                t = { "recipient": user.name }
                t["review"] = arguments
                t["product"] = helpers.get( db.Product ).to_dictionary( record.product )
                send_mail( "Nuevo comentario", "*****@*****.**", [user.email],
                           "mail/new_review.txt", t )
    else:
        arguments = helpers.get( db.Review ).to_dictionary( record )
    return arguments
def like_videos(service, videos: list):
    """Rate every video in *videos* with a 'like' via the YouTube API.

    Progress is printed per video; API failures are reported but do not
    abort the run.
    """
    if not videos:
        print('No videos to like.')
        return
    print('Liking {} videos.'.format(len(videos)))
    for position, entry in enumerate(videos):
        print('#{} '.format(position), end='')
        vid = get(entry, 'snippet.resourceId.videoId')
        title = get(entry, 'snippet.title')
        if not vid:
            print('wrong video format.')
            continue
        print('ID: {}, title: "{}"'.format(vid, title), end='')
        try:
            service.videos().rate(id=vid, rating='like').execute()
        except HttpError as e:
            print(' error:')
            print(e)
        else:
            print(' liked.')
def product_update( self, arguments, warnings, errors ):
    """Create or update a product, seeding statistics for new ones.

    Args:
        arguments: form values; "id" is "new" for creation, otherwise an
            existing product id. A submit also carries "name"/"images".
        warnings: unused here; kept for the update-handler interface.
        errors: defaultdict(list) keyed by field; messages are appended.

    Returns:
        The product serialized to a dictionary, or the unchanged
        arguments when validation failed.
    """
    if arguments["id"] == "new":
        product = db.Product( arguments["name"] )
        product.date = datetime.datetime.now().date()
    else:
        product = db.Product.by_id( arguments["id"] )
    if len( arguments ) > 1:  # more than just "id": a real submit
        if arguments["name"].strip() == "":
            errors["name"].append( u"Es necesario asignar un nombre al producto" )
        if "images" in arguments:
            if len( arguments["images"]["ids"] ) == 0:
                errors["images"].append( u"Es necesario dar de alta una imagen" )
            # any() replaces len(filter(...)) == 0, which raises TypeError
            # on Python 3 where filter() returns an iterator, not a list.
            if not any( arguments["images"]["values"] ):
                errors["images"].append( u"Es necesario dar de alta una imagen principal" )
        else:
            errors["images"].append( u"Es necesario dar de alta una imagen" )
        if len( errors ) == 0:
            helpers.get( db.Product ).to_record( arguments, product )
            db.session().add( product )
            if arguments["id"] == "new":
                # New products get unit statistics so ranking code has a
                # non-zero baseline.
                statistics = db.Statistics()
                statistics.product = product
                statistics.created = 1
                statistics.sold = 1
                statistics.view = 1
                statistics.timestamp = time.time()
                db.session().add( statistics )
            db.session().commit()
            arguments = helpers.get( db.Product ).to_dictionary( product )
    else:
        arguments = helpers.get( db.Product ).to_dictionary( product )
    return arguments
def download(search_term):
    '''Save Wikimedia Commons files matching *search_term* to disk.

    Don't call this to search the files: it resolves and fetches every
    match into the 'wikimedia' cache directory.
    '''
    filenames = _filenames(search_term)
    for filename in filenames:
        # Parenthesized print works on both Python 2 and 3; the bare
        # print statement was Python-2-only.
        print(u'Looking up %s' % filename)
        image_url = _image_url(filename)
        helpers.get(image_url, cachedir='wikimedia')
def gitorious():
    """Yield title/url/date/description dicts for tlevine's Gitorious repos."""
    listing = get('https://gitorious.org/tlevine.xml').read().encode('utf-8')
    project = fromstring(listing)
    for repository in project.xpath('//repository[owner[text()="tlevine"]]'):
        title = repository.xpath('name/text()')[0]
        detail_raw = get(
            'https://gitorious.org/tlevine/%s.xml' % title).read().encode('utf-8')
        repository2 = fromstring(detail_raw)
        pushed = repository2.xpath('last-pushed-at/text()')
        if pushed == []:
            date = None
        else:
            date = datetime.datetime.strptime(pushed[0], '%Y-%m-%dT%H:%M:%SZ')
        yield {
            'title': title,
            'url': 'https' + repository.xpath('clone_url/text()')[0][3:-4],
            'date': date,
            'description': '\n'.join(repository2.xpath('description/text()')),
        }
def new_user( self, user, email, email_confirm, password, password_confirm ):
    """Register a new user account.

    Returns a (user_dict, warnings, errors) triple: on success the
    serialized user and two empty dicts, on failure two empty dicts and
    the field-keyed error messages.
    """
    existing = db.User.by_email( email )
    # `is not None` instead of `!= None` (PEP 8 identity comparison).
    if existing is not None:
        return {}, {}, { "email": [u"La dirección '%s' ya está dada de alta, escoja otra." % email] }
    errors = defaultdict( list )
    if len( user.strip() ) < 4:
        errors["user"].append( "El nombre de usuario debe tener al menos 4 caracteres." )
    if email != email_confirm:
        errors["email_confirm"].append( "La dirección de correo y la confirmación no coinciden" )
    if len( password.strip() ) < 6:
        errors["password"].append( "La contraseña debe tener 6 caracteres o más" )
    if password != password_confirm:
        errors["password_confirm"].append( "La contraseña y la confirmación deben coincidir" )
    if len( errors ) == 0:
        u = db.User()
        u.email = email
        u.set_password( password, self.key )
        u.name = user
        # Every new account joins the default "Usuarios" group.
        u.groups.append( db.session().query( db.CatalogEntry )
                         .filter( db.CatalogEntry.catalog_name == "user_groups" )
                         .filter( db.CatalogEntry.value == "Usuarios" )
                         .first() )
        db.session().add( u )
        db.session().commit()
        return helpers.get( db.User ).to_dictionary( u ), {}, {}
    return {}, {}, dict( errors )
def pager( self, table, filter_field, filter, sort_by, descending, offset, limit, prefilter=[] ):
    """Generic paged query over a db model.

    Args:
        table: name of the model class inside the db module.
        filter_field: column name on the model, or a [model_name, column]
            pair to filter through a related model.
        filter: whitespace-separated search words ("" disables filtering).
        sort_by: column ordered on, case-insensitively.
        descending: sort direction flag.
        offset, limit: paging window.
        prefilter: (field, value) equality pairs applied first; read-only,
            so the mutable default is harmless here.

    Returns:
        (rows serialized to dictionaries, total matching row count).
    """
    table = db.__dict__[table]
    def build_query( query ):
        for field, value in prefilter:
            query = query.filter( getattr( table, field ) == value )
        if filter != "":
            if type( filter_field ) != list:
                for f in filter.split():
                    # Match each word; previously the whole filter string
                    # was reused for every word, making the split useless.
                    query = query.filter( getattr( table, filter_field ).like( "%%%s%%" % f ) )
            else:
                t = db.__dict__[filter_field[0]]
                for f in filter.split():
                    query = query.filter( getattr( t, filter_field[1] ).like( "%%%s%%" % f ) )
        if not descending:
            query = query.order_by( db.func.lower( getattr( table, sort_by ) ).asc() )
        else:
            query = query.order_by( db.func.lower( getattr( table, sort_by ) ).desc() )
        query = query.distinct()
        return query
    result = build_query( db.session().query( table ) )
    count = build_query( db.session().query( db.func.count( db.distinct( table.id ) ) ) ).one()[0]
    result = result.limit( limit ).offset( offset ).all()
    result = [ helpers.get( table ).to_dictionary( p ) for p in result ]
    return result, count
def products(self):
    """Return every product in the database as a plain dictionary."""
    serializer = helpers.get(db.Product)
    return [serializer.to_dictionary(p)
            for p in db.session().query(db.Product).all()]
def products( self ):
    """Serialize all products to dictionaries."""
    rows = db.session().query( db.Product ).all()
    converter = helpers.get( db.Product )
    return [ converter.to_dictionary( row ) for row in rows ]
def test_feature_links(driver, django_db_blocker):
    """Each criterion's explanation link must resolve to a MediaWiki page."""
    login(driver)
    set_all_features_enabled(django_db_blocker)
    print("Going to the expert interface...")
    driver.find_element_by_id('expert_interface').click()
    WebDriverWait(driver, TIME_WAIT).until(
        EC.presence_of_element_located((By.ID, 'id_expert_rating_page')))
    for field in VIDEO_FIELDS:
        elem_id = "id_explanation_" + field
        WebDriverWait(driver, TIME_WAIT).until(
            EC.presence_of_element_located((By.ID, elem_id)))
        link = driver.find_element_by_id(elem_id).get_attribute('href')
        assert link.startswith('http'), link
        resp = get(link)
        assert resp.ok
        assert resp.status_code == 200
        assert 'MediaWiki' in resp.text
        print(field, resp.text[:500])
    logout(driver)
def product_pager( self, filter_field, filter, sort_by, descending, page, limit, genre="", prefilter=[] ):
    """Paged, filtered, sorted product listing.

    Args:
        filter_field: db.Product column name the search words match on.
        filter: whitespace-separated search words ("" disables filtering).
        sort_by: column ordered on, case-insensitively.
        descending: sort direction flag.
        page, limit: page index and page size.
        genre: optional genre id to restrict to.
        prefilter: (field, value) equality pairs; read-only here.

    Returns:
        (rows serialized to dictionaries, total page count).
    """
    def build_query( query ):
        if genre != "":
            query = query.join( db.Product.genres )
            # NOTE(review): raw SQL fragment with %s interpolation —
            # genre should be validated/bound to avoid SQL injection.
            query = query.filter( "product_genre_1.genre_id = %s" % genre )
        for field, value in prefilter:
            query = query.filter( getattr( db.Product, field ) == value )
        if filter != "":
            for f in filter.split():
                # Match each word; previously the whole filter string was
                # reused for every word, making the split useless.
                query = query.filter( getattr( db.Product, filter_field ).like( "%%%s%%" % f ) )
        if not descending:
            query = query.order_by( db.func.lower( getattr( db.Product, sort_by ) ).asc() )
        else:
            query = query.order_by( db.func.lower( getattr( db.Product, sort_by ) ).desc() )
        query = query.distinct()
        return query
    result = build_query( db.session().query( db.Product ) )
    count = build_query( db.session().query( db.func.count( db.distinct( db.Product.id ) ) ) ).one()[0]
    result = result.limit( limit ).offset( page * limit ).all()
    result = [ helpers.get( db.Product ).to_dictionary( p ) for p in result ]
    return result, int( math.ceil( 1.0 * count / limit ) )
def reviews( self, product_id ):
    """Return the reviews of product *product_id* as dictionaries."""
    product = db.Product.by_id( product_id )
    serializer = helpers.get( db.Review )
    return [ serializer.to_dictionary( review ) for review in product.reviews ]
def new_user(self, user, email, email_confirm, password, password_confirm):
    """Register a new user account.

    Returns a (user_dict, warnings, errors) triple: on success the
    serialized user and two empty dicts, on failure two empty dicts and
    the field-keyed error messages.
    """
    existing = db.User.by_email(email)
    # `is not None` instead of `!= None` (PEP 8 identity comparison).
    if existing is not None:
        return {}, {}, {
            "email": [
                u"La dirección '%s' ya está dada de alta, escoja otra." %
                email
            ]
        }
    errors = defaultdict(list)
    if len(user.strip()) < 4:
        errors["user"].append(
            "El nombre de usuario debe tener al menos 4 caracteres.")
    if email != email_confirm:
        errors["email_confirm"].append(
            "La dirección de correo y la confirmación no coinciden")
    if len(password.strip()) < 6:
        errors["password"].append(
            "La contraseña debe tener 6 caracteres o más")
    if password != password_confirm:
        errors["password_confirm"].append(
            "La contraseña y la confirmación deben coincidir")
    if len(errors) == 0:
        u = db.User()
        u.email = email
        u.set_password(password, self.key)
        u.name = user
        # Every new account joins the default "Usuarios" group.
        u.groups.append(
            db.session().query(db.CatalogEntry)
            .filter(db.CatalogEntry.catalog_name == "user_groups")
            .filter(db.CatalogEntry.value == "Usuarios")
            .first())
        db.session().add(u)
        db.session().commit()
        return helpers.get(db.User).to_dictionary(u), {}, {}
    return {}, {}, dict(errors)
def orders(self):
    """Return all orders serialized to dictionaries."""
    orders = db.session().query(db.Order).all()
    orders = [
        helpers.get(db.Order).to_dictionary(order) for order in orders
    ]
    # Parenthesized print works on Python 2 and 3; the bare statement was
    # Python-2-only. Debug trace kept from the original — consider removing.
    print(orders)
    return orders
def ad_update( self, arguments, warnings, errors ):
    """Create or update an advertisement record.

    On a submit the name is validated and, if clean, the record is
    persisted. Always returns the record as a dictionary (or the
    unchanged arguments when validation failed).
    """
    if arguments["id"] == "new":
        ad = db.Ad()
    else:
        ad = db.Ad.by_id( arguments["id"] )
    is_submit = len( arguments ) > 1
    if is_submit:
        if not arguments["name"].strip():
            errors["name"].append( u"Es necesario asignar un nombre al anuncio" )
        if not errors:
            helpers.get( db.Ad ).to_record( arguments, ad )
            db.session().add( ad )
            db.session().commit()
            arguments = helpers.get( db.Ad ).to_dictionary( ad )
    else:
        arguments = helpers.get( db.Ad ).to_dictionary( ad )
    return arguments
def scrape_argentina():
    """Append today's per-province COVID figures to the Argentina CSVs.

    Fetches the JSON feed, writes one line per province into
    data/argentina/<iso>.csv (creating the header for new files), then
    runs the consistency check and regenerates the README.
    """
    base = getcwd()
    argentina_dir = path.join(base, 'data', 'argentina')
    ensure_dirs(argentina_dir, path.join(base, 'tmp'))
    feed = requests.get(URL).json()
    header = 'date,region_iso,region,province,city,place_type,cases,deaths,recovered\n'
    updated_files = []
    for dep in feed:
        if dep['provincia-key'] == 'totales':
            continue  # skip the country-wide aggregate row
        region = CODE_REGION[dep['provincia-key']]
        iso = REGION_ISO[region]
        parsed = datetime.datetime.strptime(dep['ultima-actualizacion'],
                                            '%d/%m/%Y')
        day = str(parsed)[:10]
        fields = [
            day,
            iso,
            region,
            '',
            '',
            'unknown' if iso == 'UNK' else 'provincia',
            str(get(dep, 'Afectados', '0')),
            str(get(dep, 'Muertos', '0')),
            str(get(dep, 'Recuperados', '0')),
        ]
        line = ','.join(fields)
        region_file = path.join(argentina_dir, f'{iso.lower()}.csv')
        is_empty = not path.exists(region_file)
        with open(region_file, 'a+') as f:
            if is_empty:
                f.write(header)
            f.write(f'{line}\n')
        if not is_empty:
            updated_files.append(region_file)
    ensure_consistency(updated_files, lambda row: row[:5])
    readme_path = path.join(getcwd(), 'data', 'argentina', 'README.md')
    with open(readme_path, 'w') as readme_f:
        readme_f.write(get_readme_contents())
def authenticate(self, email, password):
    """Check credentials for *email*.

    Returns False when no user has that address, the user's dictionary
    on success, and True otherwise.

    NOTE(review): returning True when the password check fails looks
    inverted; callers may rely on it meaning "user exists but wrong
    password" — confirm before changing.
    """
    user = db.User.by_email(email)
    if user == None:
        return False
    else:
        if user.authenticate(password, self.key):
            return helpers.get(db.User).to_dictionary(user)
        else:
            return True
def authenticate( self, email, password ):
    """Check credentials for *email*.

    Returns False when no user has that address, the user's dictionary
    on success, and True otherwise.

    NOTE(review): returning True on a failed password check looks
    inverted; callers may rely on the current tri-state — confirm
    before changing.
    """
    user = db.User.by_email( email )
    if user == None:
        return False
    else:
        if user.authenticate( password, self.key ):
            return helpers.get( db.User ).to_dictionary( user )
        else:
            return True
def _image_url(filename):
    """Resolve a Commons *filename* to its direct file URL via toolserver."""
    query = urlencode({u'image': filename.encode('utf-8')})
    url = u'http://toolserver.org/~magnus/commonsapi.php?' + query
    response = lxml.etree.parse(helpers.get(url, cachedir='wikimedia'))
    urls = response.xpath('//urls/file/text()')
    if len(urls) != 1:
        raise ValueError('No file urls')
    return urls[0]
def ad_update(self, arguments, warnings, errors):
    """Persist an ad after validating its name; return it as a dict."""
    ad = db.Ad() if arguments["id"] == "new" else db.Ad.by_id(arguments["id"])
    if len(arguments) <= 1:
        # Plain lookup: nothing to validate, just serialize.
        return helpers.get(db.Ad).to_dictionary(ad)
    if arguments["name"].strip() == "":
        errors["name"].append(u"Es necesario asignar un nombre al anuncio")
    if len(errors) == 0:
        helpers.get(db.Ad).to_record(arguments, ad)
        db.session().add(ad)
        db.session().commit()
        arguments = helpers.get(db.Ad).to_dictionary(ad)
    return arguments
def user_update( self, arguments, warnings, errors ):
    """Create or update a user record.

    Validates name and e-mail on submit; persists when clean and
    returns the record serialized to a dictionary.
    """
    if arguments["id"] == "new":
        user = db.User()
    else:
        user = db.User.by_id( arguments["id"] )
    if len( arguments ) > 1:  # more than just "id": a real submit
        if arguments["name"].strip() == "":
            errors["name"].append( u"Es necesario asignar un nombre al usuario" )
        if arguments["email"].strip() == "":
            # Fixed: this message was appended under errors["name"], so it
            # never appeared next to the e-mail field.
            errors["email"].append( u"Es necesario asignar un email al usuario" )
        if len( errors ) == 0:
            helpers.get( db.User ).to_record( arguments, user )
            db.session().add( user )
            db.session().commit()
            arguments = helpers.get( db.User ).to_dictionary( user )
    else:
        arguments = helpers.get( db.User ).to_dictionary( user )
    return arguments
def test_integrity_of_all_video_scores(driver, django_db_blocker):
    """ Test the integrity of the public file all_video_scores.csv.
    The file is considered correct if:
    - it contains all videos of the database
    - it contains only the expected columns
    - it contains a correct calculation of the Tournesol score
    """
    create_toy_data(django_db_blocker=django_db_blocker, driver=driver,
                    n_users=2, n_videos=4, n_ratings=2)
    open_tournesol(driver)
    WebDriverWait(driver, TIME_WAIT).until(
        EC.presence_of_element_located((By.ID, "id_public_database_download")))
    link = driver.find_element_by_id('id_public_database_download').get_attribute('href')
    data = get(link)
    assert data.ok
    assert data.content
    assert data.headers['content-type'] == 'application/zip'
    # Unpack every CSV in the downloaded archive into a DataFrame,
    # keyed by its filename inside the zip.
    zip_file = BytesIO(data.content)
    dfs = {}
    with zipfile.ZipFile(zip_file, 'r') as zf:
        for fileinfo in zf.infolist():
            content = zf.read(fileinfo).decode('ascii')
            df = pd.read_csv(StringIO(content))
            dfs[fileinfo.filename] = df
    # the file must be in the public zip archive
    assert "all_video_scores.csv" in dfs.keys()
    df = dfs['all_video_scores.csv']
    # Neutral preference vector: one default weight per criterion.
    default_features = [constants['DEFAULT_PREFS_VAL'] for _ in VIDEO_FIELDS]
    # the file must contain only expected columns
    assert set(df.columns) == set(["id", "video_id", "score"] + VIDEO_FIELDS)
    with django_db_blocker.unblock():
        # good ol' hack to make django-pandas work with annotations
        import django
        django.db.models.fields.FieldDoesNotExist = django.core.exceptions.FieldDoesNotExist
        # the file must contain all video in the database, with their correct
        # Tournesol score and value for each criterion
        video_df = read_frame(
            Video.objects.all().annotate(score=get_score_annotation(default_features)),
            fieldnames=['id', 'video_id', 'score'] + VIDEO_FIELDS
        )
    assert df.equals(video_df)
def project_information(project_id):
    '''Fetch the JSON record for one project by its id.

    This contains some stuff that isn't included in the CSV version, so
    this is good for historical data. Equivalent to::

        curl 'http://geo.usace.army.mil/egis/ORM2.map_api.get_project_json?p_id=4900595'
    '''
    endpoint = 'http://geo.usace.army.mil/egis/ORM2.map_api.get_project_json'
    return get(endpoint, params={'p_id': project_id})
def _filenames(search_term):
    """Search Wikimedia Commons; return matching file names (no 'File:')."""
    urlbase = u'https://commons.wikimedia.org/w/api.php?action=query&list=search&srnamespace=6&format=json&'
    searchparam = urlencode({'srsearch': search_term.encode('utf-8')})
    response = helpers.get(urlbase + searchparam, cachedir='wikimedia')
    hits = json.load(response)['query']['search']
    if not hits:
        raise ValueError('No results')
    return [re.sub(r'^File:', '', hit['title']) for hit in hits]
def ad(self, ad_type):
    """Pick one random enabled, unexpired ad of the given type."""
    type_entry = db.session().query(db.CatalogEntry)\
        .filter(db.CatalogEntry.catalog_name == "ad_types")\
        .filter(db.CatalogEntry.value == ad_type)\
        .first()
    today = datetime.datetime.now().date()
    candidates = db.session().query(db.Ad)\
        .filter(db.Ad.ad_type == type_entry)\
        .filter(db.Ad.valid_until > today)\
        .filter(db.Ad.enabled == True)\
        .all()
    chosen = random.sample(candidates, 1)[0]
    return helpers.get(db.Ad).to_dictionary(chosen)
def scraperwiki():
    """Yield title/url/date/description for tlevine's classic
    ScraperWiki code objects, scraped from profile pages 1-14."""
    for i in range(1,14 + 1):
        raw = get('https://classic.scraperwiki.com/profiles/tlevine/?page=%d' % i).read()
        html1 = fromstring(raw)
        for repository in html1.xpath('//li[@class="code_object_line"]'):
            a = repository.xpath('descendant::h3/a[position()=2]')[0]
            title = a.xpath('text()')[0]
            url = 'https://classic.scraperwiki.com' + a.xpath('@href')[0]
            # NOTE(review): this reads the date from the whole page
            # (html1), not from `repository`, so every item on a page
            # gets the same relative date — confirm whether this should
            # be repository.xpath(...).
            relative_date = html1.xpath('descendant::p[@class="context"][position()=2]/text()')[0].strip()
            date = scraperwiki_date(relative_date)
            # This scraper's detail page is skipped deliberately.
            if url == 'https://classic.scraperwiki.com/scrapers/tahrirsupplies_map/':
                continue
            # print url
            html2 = fromstring(get(url).read())
            description = html2.get_element_by_id('divAboutScraper').text_content()
            yield {
                'title': title,
                'url': url,
                'date': date,
                'description': description,
            }
def user_update(self, arguments, warnings, errors):
    """Create or update a user record.

    Validates name and e-mail on submit; persists when clean and
    returns the record serialized to a dictionary.
    """
    if arguments["id"] == "new":
        user = db.User()
    else:
        user = db.User.by_id(arguments["id"])
    if len(arguments) > 1:  # more than just "id": a real submit
        if arguments["name"].strip() == "":
            errors["name"].append(
                u"Es necesario asignar un nombre al usuario")
        if arguments["email"].strip() == "":
            # Fixed: this message was appended under errors["name"], so
            # it never appeared next to the e-mail field.
            errors["email"].append(
                u"Es necesario asignar un email al usuario")
        if len(errors) == 0:
            helpers.get(db.User).to_record(arguments, user)
            db.session().add(user)
            db.session().commit()
            arguments = helpers.get(db.User).to_dictionary(user)
    else:
        arguments = helpers.get(db.User).to_dictionary(user)
    return arguments
def ad( self, ad_type ):
    """Return one randomly chosen active ad of type *ad_type* as a dict."""
    entry = db.session().query( db.CatalogEntry )\
        .filter( db.CatalogEntry.catalog_name == "ad_types" )\
        .filter( db.CatalogEntry.value == ad_type )\
        .first()
    active = db.session().query( db.Ad )\
        .filter( db.Ad.ad_type == entry )\
        .filter( db.Ad.valid_until > datetime.datetime.now().date() )\
        .filter( db.Ad.enabled == True )\
        .all()
    pick = random.sample( active, 1 )[0]
    return helpers.get( db.Ad ).to_dictionary( pick )
def index(request):
    """Home page maker: renders the index template with a JSON config."""
    if settings.USE_FIXTURE:
        config = helpers_fixtures.getConfig(request)
    else:
        config = helpers.get(request)
    context = {
        'config': json.dumps(config, sort_keys=True, indent=4),
        'settings': settings,
    }
    return render(request, 'home/templates/index.htm', context)
def review_update(self, arguments, warnings, errors):
    """Create or update a product review and e-mail administrators.

    Validates name, alias and a 10-word minimum content on submit;
    commits the review and notifies each admin by mail when clean.
    Returns the review serialized to a dictionary.
    """
    if arguments["id"] == "new":
        record = db.Review()
    else:
        record = db.Review.by_id(arguments["id"])
    if len(arguments) > 1:  # more than just "id": a real submit
        if arguments["name"].strip() == "":
            errors["name"].append(
                u"Es necesario asignar un nombre al comentario")
        if arguments["alias"].strip() == "":
            errors["alias"].append(
                u"Es necesario capturar un nombre de usuario")
        if len(arguments["content"].split()) < 10:
            errors["content"].append(
                u"El comentario debe tener al menos 10 palabras")
        if len(errors) == 0:
            helpers.get(db.Review).to_record(arguments, record)
            record.date = datetime.datetime.now()
            db.session().add(record)
            db.session().commit()
            arguments = helpers.get(db.Review).to_dictionary(record)
            # NOTE(review): queries db.User but filters on a
            # db.CatalogEntry column without an explicit join — confirm
            # the ORM relationship selects only administrators.
            admins = db.session().query( db.User )\
                .filter( db.CatalogEntry.value == "Administradores" )\
                .all()
            for user in admins:
                t = {"recipient": user.name}
                t["review"] = arguments
                t["product"] = helpers.get(db.Product).to_dictionary(
                    record.product)
                send_mail("Nuevo comentario", "*****@*****.**", [user.email],
                          "mail/new_review.txt", t)
    else:
        arguments = helpers.get(db.Review).to_dictionary(record)
    return arguments
def product_update(self, arguments, warnings, errors):
    """Create or update a product, seeding statistics for new ones.

    Args:
        arguments: form values; "id" is "new" for creation, otherwise an
            existing product id. A submit also carries "name"/"images".
        warnings: unused here; kept for the update-handler interface.
        errors: defaultdict(list) keyed by field; messages are appended.

    Returns:
        The product serialized to a dictionary, or the unchanged
        arguments when validation failed.
    """
    if arguments["id"] == "new":
        product = db.Product(arguments["name"])
        product.date = datetime.datetime.now().date()
    else:
        product = db.Product.by_id(arguments["id"])
    if len(arguments) > 1:  # more than just "id": a real submit
        if arguments["name"].strip() == "":
            errors["name"].append(
                u"Es necesario asignar un nombre al producto")
        if "images" in arguments:
            if len(arguments["images"]["ids"]) == 0:
                errors["images"].append(
                    u"Es necesario dar de alta una imagen")
            # any() replaces len(filter(...)) == 0, which raises
            # TypeError on Python 3 where filter() returns an iterator.
            if not any(arguments["images"]["values"]):
                errors["images"].append(
                    u"Es necesario dar de alta una imagen principal")
        else:
            errors["images"].append(u"Es necesario dar de alta una imagen")
        if len(errors) == 0:
            helpers.get(db.Product).to_record(arguments, product)
            db.session().add(product)
            if arguments["id"] == "new":
                # New products start with unit statistics so ranking
                # code has a non-zero baseline.
                statistics = db.Statistics()
                statistics.product = product
                statistics.created = 1
                statistics.sold = 1
                statistics.view = 1
                statistics.timestamp = time.time()
                db.session().add(statistics)
            db.session().commit()
            arguments = helpers.get(db.Product).to_dictionary(product)
    else:
        arguments = helpers.get(db.Product).to_dictionary(product)
    return arguments
def add_videos_to_playlist(service, playlist_id: str, videos: list):
    """Insert each entry of *videos* into the playlist *playlist_id*.

    Progress is printed per video; API failures are reported but do not
    abort the run.
    """
    if not videos:
        print('No videos to add to playlist.')
        return
    print('Adding {} videos to playlist.'.format(len(videos)))
    for position, entry in enumerate(videos):
        print('#{} '.format(position), end='')
        vid = get(entry, 'snippet.resourceId.videoId')
        title = get(entry, 'snippet.title')
        if not vid:
            print('wrong video format.')
            continue
        print('ID: {}, title: "{}"'.format(vid, title), end='')
        body = {
            'snippet': {
                'playlistId': playlist_id,
                'resourceId': {
                    'kind': 'youtube#video',
                    'videoId': vid,
                },
            },
        }
        try:
            service.playlistItems().insert(body=body, part='snippet').execute()
        except HttpError as e:
            print(' error:')
            print(e)
        else:
            print(' added to playlist.')
def inventory_update( self, arguments, warnings, errors ):
    """Validate and persist an inventory record.

    When more than two arguments are submitted, price/unit fields are
    validated and, if clean, the new inventory row is committed.
    Otherwise the fresh record plus its product are serialized.

    NOTE(review): float()/int() on the raw field values raises
    ValueError for empty or non-numeric input instead of producing a
    validation message — confirm the caller traps that.
    """
    inventory = db.Inventory()
    if "product_id" in arguments:
        inventory.product = db.Product.by_id( arguments["product_id"] )
    inventory.date = datetime.datetime.now()
    if len( arguments ) > 2:
        if arguments["product_id"].strip() == "":
            # NOTE(review): filed under "normal_price", presumably so it
            # renders near that input — verify against the template.
            errors["normal_price"].append( u"No se seleccionó un producto" )
        if float( arguments["normal_price"].strip() ) <= 0:
            errors["normal_price"].append(
                u"Es necesario entrar un valor positivo para el precio" )
        if float( arguments["discounted_price"].strip() ) <= 0:
            errors["discounted_price"].append(
                u"Es necesario entrar un valor positivo para el precio" )
        if int( arguments["units"].strip() ) <= 0:
            errors["units"].append(
                u"Es necesario entrar un valor positivo para las unidades" )
        if len( errors ) == 0:
            helpers.get( db.Inventory ).to_record( arguments, inventory )
            db.session().add( inventory )
            db.session().commit()
            arguments = helpers.get( db.Inventory ).to_dictionary( inventory )
    else:
        arguments = helpers.get( db.Inventory ).to_dictionary( inventory )
        arguments["product"] = helpers.get( db.Product ).to_dictionary(
            inventory.product )
    return arguments
def test_get(self):
    """helpers.get returns stored values (including falsy ones) and defaults."""
    obj = {
        'with_value': 'ok',
        'with_true': True,
        'with_false': False,
        'with_obj': {'level_2': 'ok'},
        'with_empty_obj': {},
        'with_none': None,
    }
    expectations = [
        ('with_value', 'ok'),
        ('with_true', True),
        ('with_false', False),
        ('with_obj', {'level_2': 'ok'}),
        ('with_empty_obj', {}),
        ('with_none', None),
    ]
    for key, expected in expectations:
        self.assertEqual(helpers.get(obj, key), expected)
    # An explicit default wins over a stored None; missing keys yield None.
    self.assertEqual(helpers.get(obj, 'with_none', {}), {})
    self.assertEqual(helpers.get(obj, 'not_in_obj'), None)
def pager(self, table, filter_field, filter, sort_by, descending, offset,
          limit, prefilter=[]):
    """Generic paged query over a db model.

    Args:
        table: name of the model class inside the db module.
        filter_field: column name on the model, or a [model_name, column]
            pair to filter through a related model.
        filter: whitespace-separated search words ("" disables filtering).
        sort_by: column ordered on, case-insensitively.
        descending: sort direction flag.
        offset, limit: paging window.
        prefilter: (field, value) equality pairs applied first; read-only,
            so the mutable default is harmless here.

    Returns:
        (rows serialized to dictionaries, total matching row count).
    """
    table = db.__dict__[table]

    def build_query(query):
        for field, value in prefilter:
            query = query.filter(getattr(table, field) == value)
        if filter != "":
            if type(filter_field) != list:
                for f in filter.split():
                    # Match each word; previously the whole filter string
                    # was reused for every word, making the split useless.
                    query = query.filter(
                        getattr(table, filter_field).like("%%%s%%" % f))
            else:
                t = db.__dict__[filter_field[0]]
                for f in filter.split():
                    query = query.filter(
                        getattr(t, filter_field[1]).like("%%%s%%" % f))
        if not descending:
            query = query.order_by(
                db.func.lower(getattr(table, sort_by)).asc())
        else:
            query = query.order_by(
                db.func.lower(getattr(table, sort_by)).desc())
        query = query.distinct()
        return query

    result = build_query(db.session().query(table))
    count = build_query(db.session().query(
        db.func.count(db.distinct(table.id)))).one()[0]
    result = result.limit(limit).offset(offset).all()
    result = [helpers.get(table).to_dictionary(p) for p in result]
    return result, count
def test_data_download(driver, django_db_blocker):
    """The personal-data export must download as a non-empty zip archive."""
    login(driver)
    driver.find_element_by_id('personal_info_menu').click()
    WebDriverWait(driver, TIME_WAIT).until(
        EC.presence_of_element_located((By.ID, "id_my_data_download")))
    link = driver.find_element_by_id('id_my_data_download').get_attribute(
        'href')
    cookies_dict = get_cookies(driver)
    response = get(link, cookies=cookies_dict,
                   headers={'X-CSRFToken': cookies_dict.get('csrftoken')})
    assert response.ok
    assert response.content
    assert response.headers['content-type'] == 'application/zip'
    logout(driver)
def create_playlist(service, name: str):
    """Create a private YouTube playlist; return its id, or None on failure."""
    print('Creating a playlist with name "{}".'.format(name))
    body = {
        'snippet': {'title': name},
        'status': {'privacyStatus': 'private'},
    }
    try:
        result = service.playlists().insert(
            body=body, part='snippet,status').execute()
    except HttpError as e:
        print(e)
        return None
    print('Playlist created.')
    return get(result, 'id')
def recommendations( self, method, args = None ):
    """Return up to five products chosen by the given strategy.

    method is one of "new" (latest by date), "random" (shuffled sample
    of ten) or "recommended" (scoring based on args["user_id"]).
    """
    if method == "new":
        chosen = db.session().query( db.Product )\
            .order_by( db.Product.date )\
            .limit( 5 )\
            .all()
    elif method == "random":
        pool = db.session().query( db.Product )\
            .limit( 10 )\
            .all()
        random.shuffle( pool )
        chosen = pool[:5]
    elif method == "recommended":
        chosen = score.score( args["user_id"], 5 )
    else:
        chosen = []
    return [ helpers.get( db.Product ).to_dictionary( p ) for p in chosen ]
def recommendations(self, method, args=None):
    """Serialize up to five products picked by strategy *method*."""
    picks = []
    if method == "new":
        picks = db.session().query(db.Product)\
            .order_by(db.Product.date)\
            .limit(5)\
            .all()
    elif method == "random":
        picks = db.session().query(db.Product)\
            .limit(10)\
            .all()
        random.shuffle(picks)
        picks = picks[:5]
    elif method == "recommended":
        picks = score.score(args["user_id"], 5)
    serializer = helpers.get(db.Product)
    return [serializer.to_dictionary(p) for p in picks]
def github(username):
    """Yield metadata dicts for *username*'s GitHub repositories.

    Follows the paginated listing via the `link` response header and
    fetches each repository's README for the description.
    """
    url = "https://api.github.com/users/%s/repos" % username
    while True:
        r = requests.get(url + '?' + GITHUB_QUERYSTRING)
        for repository in json.loads(r.text):
            d = json.load(get(repository['url'] + '/readme?' + GITHUB_QUERYSTRING))
            if d.get('message') == 'Not Found':
                description = ''
            else:
                # b64decode replaces the deprecated base64.decodestring
                # (removed in Python 3.9); embedded newlines are
                # discarded by both.
                description = base64.b64decode(d.get('content', ''))
            yield {
                'title': repository['name'],
                'url': repository['url'].replace(
                    'https://api.github.com/repos/', 'https://github.com/'),
                'date': datetime.datetime.strptime(
                    repository['pushed_at'], '%Y-%m-%dT%H:%M:%SZ')
                if repository['pushed_at'] else None,
                'description': description,
            }
        # `repository` intentionally leaks from the loop: stop after the
        # sentinel repository or when no further page is linked.
        if repository['url'] == 'https://api.github.com/repos/tlevine/zoetrope' \
                or 'link' not in r.headers:
            break
        url = r.headers['link'].split(';')[0][1:-1]
def product_pager(self, filter_field, filter, sort_by, descending, page,
                  limit, genre="", prefilter=[]):
    """Paged, filtered, sorted product listing.

    Returns (rows serialized to dictionaries, total page count).
    """
    def build_query(query):
        if genre != "":
            query = query.join(db.Product.genres)
            # NOTE(review): raw SQL fragment with %s interpolation —
            # genre should be validated/bound to avoid SQL injection.
            query = query.filter("product_genre_1.genre_id = %s" % genre)
        for field, value in prefilter:
            query = query.filter(getattr(db.Product, field) == value)
        if filter != "":
            for f in filter.split():
                # Match each word; previously the whole filter string was
                # reused for every word, making the split useless.
                query = query.filter(
                    getattr(db.Product, filter_field).like("%%%s%%" % f))
        if not descending:
            query = query.order_by(
                db.func.lower(getattr(db.Product, sort_by)).asc())
        else:
            query = query.order_by(
                db.func.lower(getattr(db.Product, sort_by)).desc())
        query = query.distinct()
        return query

    result = build_query(db.session().query(db.Product))
    count = build_query(db.session().query(
        db.func.count(db.distinct(db.Product.id)))).one()[0]
    result = result.limit(limit).offset(page * limit).all()
    result = [helpers.get(db.Product).to_dictionary(p) for p in result]
    return result, int(math.ceil(1.0 * count / limit))
def recent_html():
    '''Download this month's HTML table. Archive daily.

    (Not spreadsheet because HTML is easier.)
    '''
    table_url = 'http://geo.usace.army.mil/egis/f?p=340:2:0::NO'
    return get(table_url)
def get_attribution_bodies():
    """Yield (link, fetched body) pairs for every attribution link."""
    for link in get_attribution_links():
        body = helpers.get(link, cachedir='attribution')
        yield link, body
def product_info(self, id):
    """Return product *id* serialized as a dictionary."""
    record = db.Product.by_id(id)
    return helpers.get(db.Product).to_dictionary(record)
def orders( self ):
    """Return all orders serialized to dictionaries."""
    orders = db.session().query( db.Order ).all()
    orders = [helpers.get( db.Order ).to_dictionary( order ) for order in orders]
    # Parenthesized print works on Python 2 and 3; the bare statement was
    # Python-2-only. Debug trace kept from the original — consider removing.
    print( orders )
    return orders
def complete_payment_paypal( self, user_id, address, fields, cart ):
    """Finalize a PayPal checkout for *user_id*.

    Creates the Order from the PayPal shipping *address* and payment
    *fields*, writes a negative Inventory movement, an OrderDetail and a
    Statistics row per cart item, commits, then mails the administrators
    and the buyer.  Returns the order serialized as a dictionary.
    """
    user = db.User.by_id( user_id )
    order = db.Order()
    order.date = datetime.datetime.now()
    order.user = user
    order.payment_type = db.Order.PAYMENT_PAYPAL
    order.status = db.Order.ORDER_PENDING
    # Shipping address comes straight from PayPal's GetExpressCheckout keys.
    order.name = address["SHIPTONAME"]
    order.country = address["SHIPTOCOUNTRYNAME"]
    order.state = address["SHIPTOSTATE"]
    order.city = address["SHIPTOCITY"]
    order.street = address["SHIPTOSTREET"]
    order.postal_code = address["SHIPTOZIP"]
    order.total_amount = float( fields["PAYMENTINFO_0_AMT"] )
    # NOTE(review): encode() runs before %-formatting, so this is bytes
    # formatting -- works on Python 3.5+ / Python 2; confirm intended.
    order.payment_info = """TRANSACTIONID: %(PAYMENTINFO_0_TRANSACTIONID)s TOKEN: %(TOKEN)s TIMESTAMP: %(TIMESTAMP)s CORRELATIONID: %(CORRELATIONID)s""".encode( "utf-8" ) % fields
    order.delivery_method = cart["delivery_method"]
    order.delivery_cost = cart["delivery_cost"]
    for i in cart["items"]:
        product = db.Product.by_id( int( i["id"] ) )
        # Negative inventory movement records the stock leaving; prices
        # are copied from the latest inventory entry for the product.
        inventory = db.Inventory()
        inventory.date = datetime.datetime.now()
        inventory.units = - float( i["quantity"] )
        inventory.normal_price = product.inventory[-1].normal_price
        inventory.discounted_price = product.inventory[-1].discounted_price
        inventory.product = product
        db.session().add( inventory )
        detail = db.OrderDetail()
        detail.product = product
        detail.quantity = int( i["quantity"] )
        detail.cost = float( i["price"] )
        order.detail.append( detail )
        statistics = db.Statistics()
        statistics.product = product
        statistics.user = user
        statistics.sold = detail.quantity
        statistics.timestamp = time.time()
        db.session().add( statistics )
    db.session().add( order )
    db.session().commit()
    # NOTE(review): this filters on CatalogEntry without an explicit join
    # to User -- verify the relationship makes this a valid admin query.
    admins = db.session().query( db.User )\
        .filter( db.CatalogEntry.value == "Administradores" )\
        .all()
    data = helpers.get( db.Order ).to_dictionary( order )
    # One notification mail per administrator, then a confirmation to the buyer.
    for a in admins:
        t = { "recipient": a.name }
        t.update( data )
        send_mail( "Nueva orden", "*****@*****.**", [a.email],
                   "mail/new_order.txt", t )
    t = { "recipient": user.name }
    t.update( data )
    send_mail( "Su orden", "*****@*****.**", [user.email],
               "mail/order_confirmation.txt", t )
    return data
def order( self, order_id ):
    """Look up one order by its id and return it as a dictionary."""
    record = db.Order.by_id( order_id )
    mapper = helpers.get( db.Order )
    return mapper.to_dictionary( record )
def complete_payment_paypal(self, user_id, address, fields, cart):
    """Finalize a PayPal checkout for *user_id*.

    Builds the Order from PayPal's shipping *address* and payment
    *fields*, records a negative Inventory movement, an OrderDetail and
    a Statistics row for every cart item, commits, then notifies the
    administrators and the buyer by mail.  Returns the serialized order.
    """
    user = db.User.by_id(user_id)
    order = db.Order()
    order.date = datetime.datetime.now()
    order.user = user
    order.payment_type = db.Order.PAYMENT_PAYPAL
    order.status = db.Order.ORDER_PENDING
    # Address fields use PayPal's GetExpressCheckout response keys.
    order.name = address["SHIPTONAME"]
    order.country = address["SHIPTOCOUNTRYNAME"]
    order.state = address["SHIPTOSTATE"]
    order.city = address["SHIPTOCITY"]
    order.street = address["SHIPTOSTREET"]
    order.postal_code = address["SHIPTOZIP"]
    order.total_amount = float(fields["PAYMENTINFO_0_AMT"])
    # NOTE(review): encode() binds before %, so this formats bytes
    # (Python 3.5+ / Python 2 behavior) -- confirm intended.
    order.payment_info = """TRANSACTIONID: %(PAYMENTINFO_0_TRANSACTIONID)s TOKEN: %(TOKEN)s TIMESTAMP: %(TIMESTAMP)s CORRELATIONID: %(CORRELATIONID)s""".encode("utf-8") % fields
    order.delivery_method = cart["delivery_method"]
    order.delivery_cost = cart["delivery_cost"]
    for i in cart["items"]:
        product = db.Product.by_id(int(i["id"]))
        # Negative units record stock leaving; prices mirror the latest
        # inventory entry for the product.
        inventory = db.Inventory()
        inventory.date = datetime.datetime.now()
        inventory.units = -float(i["quantity"])
        inventory.normal_price = product.inventory[-1].normal_price
        inventory.discounted_price = product.inventory[-1].discounted_price
        inventory.product = product
        db.session().add(inventory)
        detail = db.OrderDetail()
        detail.product = product
        detail.quantity = int(i["quantity"])
        detail.cost = float(i["price"])
        order.detail.append(detail)
        statistics = db.Statistics()
        statistics.product = product
        statistics.user = user
        statistics.sold = detail.quantity
        statistics.timestamp = time.time()
        db.session().add(statistics)
    db.session().add(order)
    db.session().commit()
    # NOTE(review): filters on CatalogEntry with no explicit join to
    # User -- verify the mapping makes this a valid admin lookup.
    admins = db.session().query( db.User )\
        .filter( db.CatalogEntry.value == "Administradores" )\
        .all()
    data = helpers.get(db.Order).to_dictionary(order)
    # Notify every administrator, then send the buyer a confirmation.
    for a in admins:
        t = {"recipient": a.name}
        t.update(data)
        send_mail("Nueva orden", "*****@*****.**", [a.email],
                  "mail/new_order.txt", t)
    t = {"recipient": user.name}
    t.update(data)
    send_mail("Su orden", "*****@*****.**", [user.email],
              "mail/order_confirmation.txt", t)
    return data
def reviews(self, product_id):
    """Return all reviews of the given product as dictionaries."""
    product = db.Product.by_id(product_id)
    mapper = helpers.get(db.Review)
    output = []
    for review in product.reviews:
        output.append(mapper.to_dictionary(review))
    return output
def import_subscriptions(
        service, channels: list,
        import_state_filename: str = 'imported_subscriptions.json'):
    """Subscribe the authenticated account to each channel in *channels*.

    Progress is persisted to *import_state_filename* (a JSON list of
    channel ids) so a re-run skips channels already handled and the
    script can resume after hitting YouTube's subscription rate limit.
    """
    # Previously processed channel ids; empty list on first run.
    import_state = read_json_file(import_state_filename) or []
    if len(import_state) > 0:
        channels = [
            channel for channel in channels
            if get(channel, 'snippet.resourceId.channelId') not in import_state
        ]
    if len(channels) == 0:
        print('No channels left to subscribe to.')
        return
    print('Subscribing to {} channels.'.format(len(channels)))
    for index, channel in enumerate(channels):
        print('#{} '.format(index), end='')
        channel_id = get(channel, 'snippet.resourceId.channelId')
        channel_name = get(channel, 'snippet.title')
        if not channel_id:
            print('wrong channel format.')
            continue
        print('ID: {}, name: "{}"'.format(channel_id, channel_name), end='')
        try:
            service.subscriptions().insert(body={
                'snippet': {
                    'resourceId': {
                        'channelId': channel_id
                    }
                }
            }, part='snippet').execute()
            import_state.append(channel_id)
            print(' subscribed.')
        except HttpError as e:
            print('.')
            # Non-400 errors (or errors with no body) are treated as
            # terminal for this channel: log and mark it processed.
            # NOTE(review): resp.status is compared against the *string*
            # '400'; HTTP statuses are usually ints -- confirm the `get`
            # helper stringifies it.
            if get(e, 'resp.status') != '400' or not e.content:
                print(e)
                import_state.append(channel_id)
                continue
            parsed_content = safe_json_parse(e.content)
            if not has(parsed_content, 'error.errors[0]'):
                print(e)
                import_state.append(channel_id)
                continue
            # Only the rate-limit message stops the run; any other 400
            # is logged and the channel is skipped.
            any_error_is_limit = any(
                e['message'] == 'Too many recent subscriptions. Please try again in a few hours.'
                for e in get(parsed_content, 'error.errors'))
            if not any_error_is_limit:
                print(e)
                import_state.append(channel_id)
                continue
            print(
                'Reached subscription limit, run this script again in about 4 hours (it will remember already '
                'subscribed channels).')
            break
    # Persist progress even when the loop ended early on the rate limit.
    if len(import_state) > 0:
        if not write_json_file(import_state_filename, import_state):
            print('Something went wrong when trying to save state file.')
def order(self, order_id):
    """Fetch one order by primary key and serialize it to a dict."""
    return helpers.get(db.Order).to_dictionary(db.Order.by_id(order_id))
def pull(db, course_id):
    """Fetch a course (including its syllabus body) and build a Course."""
    params = {"include[]": "syllabus_body"}
    data = helpers.get(COURSE_PATH.format(course_id), params=params)
    data["canvas_id"] = data["id"]
    return Course(data["id"], data["name"], data["course_code"],
                  data["workflow_state"], data["syllabus_body"])
def article(title):
    """Download the English Wikipedia article with the given title."""
    base = ('http://en.wikipedia.org/w/api.php?format=json&action=query'
            '&prop=revisions&rvprop=content&')
    query = urlencode({'titles': title.encode('utf-8')})
    return helpers.get(base + query, cachedir='wikipedia')
def test_download_privacy_public_database(driver, django_db_blocker):
    """Test that public database is a zip archive, and it only contains public info."""
    create_toy_data(django_db_blocker=django_db_blocker, driver=driver,
                    n_users=30, n_videos=100, n_ratings=30)
    open_tournesol(driver)
    WebDriverWait(driver, TIME_WAIT).until(
        EC.presence_of_element_located((By.ID, "id_public_database_download")))
    # Download the archive over HTTP using the link from the page.
    link = driver.find_element_by_id('id_public_database_download').get_attribute('href')
    data = get(link)
    assert data.ok
    assert data.content
    assert data.headers['content-type'] == 'application/zip'
    # with open('data.zip', 'wb') as f:
    #     f.write(data.content)
    # Read every CSV in the archive into a DataFrame keyed by filename.
    zip_file = BytesIO(data.content)
    dfs = {}
    with zipfile.ZipFile(zip_file, 'r') as zf:
        for fileinfo in zf.infolist():
            content = zf.read(fileinfo).decode('ascii')
            df = pd.read_csv(StringIO(content))
            dfs[fileinfo.filename] = df
    # print(data.content)
    assert set(dfs.keys()) == set(
        ['comparison_database.csv', 'contributors_public.csv', 'all_video_scores.csv']
    ), f"Wrong files in archive: {dfs.keys()}"
    # Checking comparisons privacy: every video referenced by an exported
    # comparison must be rated publicly by that user.
    df = dfs['comparison_database.csv']
    for _, row in df.iterrows():
        username = row['user__user__username']
        vid1 = row['video_1__video_id']
        vid2 = row['video_2__video_id']
        # both videos must be rated publicly!
        with django_db_blocker.unblock():
            for vid in [vid1, vid2]:
                qs = Video.objects.filter(video_id=vid)
                assert qs.count() == 1, (qs, qs.count())
                up = UserPreferences.objects.get(user__username=username)
                qs = VideoRatingPrivacy._annotate_privacy(
                    qs, prefix="videoratingprivacy", field_user=up)
                assert qs.count() == 1, (qs, qs.count())
                assert qs.get()._is_public, qs.values()
        print("Check for", username, vid1, vid2, "successful")
    # Checking user information privacy on the contributors export.
    df = dfs['contributors_public.csv']
    for _, row in df.iterrows():
        username = row['user__username']
        # checking certification status
        with django_db_blocker.unblock():
            qs = UserInformation.objects.filter(user__username=username)
            assert qs.count() == 1, qs
            qs = UserInformation._annotate_is_certified(qs)
            assert qs.count() == 1, qs
            ui = qs.get()
            assert ui._is_certified == row['_is_certified'], (dict(row), ui)
            # Hidden profiles must export NaN for every profile field.
            if not ui.show_my_profile:
                for f in UserInformation.PROFILE_FIELDS:
                    assert pd.isna(row[f]), row[f]
            # Same for online-presence fields when either flag is off.
            if not ui.show_online_presence or not ui.show_my_profile:
                for f in UserInformation.ONLINE_FIELDS:
                    assert pd.isna(row[f]), row[f]
            # Protected fields must never appear in the export at all.
            for f in UserInformation.PROTECTED_FIELDS:
                assert f not in row, (f, row)
        print("Check for", username, "successful")
def test_user_page(driver, django_db_blocker):
    """Check that a user's public profile page renders all of their data.

    Creates a fully-populated UserInformation (with avatar image,
    expertise, keywords, degree and online-presence links), loads the
    profile page and asserts each field is rendered, then cleans up.
    """
    # Generate a throwaway avatar image on disk for the profile.
    img = np.random.rand(300, 300, 3)
    test_img = os.path.join(BASE_DIR, 'media', 'profiles', 'test_image.png')
    plt.imshow(img)
    plt.savefig(test_img, bbox_inches='tight')
    login(driver)
    with django_db_blocker.unblock():
        # creating a user with data
        u = DjangoUser.objects.create_user(username=f"u{str(uuid1())}")
        ui = UserInformation.objects.create(user=u)
        accepted_domain = f"@{random_alphanumeric()}.com"
        EmailDomain.objects.create(domain=accepted_domain,
                                   status=EmailDomain.STATUS_ACCEPTED)
        VerifiableEmail.objects.create(
            email=f"{random_alphanumeric()}{accepted_domain}", user=ui,
            is_verified=True)
        ui.first_name = "FN"
        ui.last_name = "LN"
        ui.title = "T"
        ui.bio = "B"
        ui.website = "http://aba_w.xyz/"
        ui.orcid = "http://orcid.org/aaao"
        ui.twitter = "http://twitter.com/aaat"
        ui.linkedin = "http://linkedin.com/aaal"
        ui.researchgate = "http://researchgate.net/aaar"
        ui.youtube = "http://youtube.com/aaay"
        ui.google_scholar = "http://scholar.google.com/aaas"
        ui.avatar.name = 'profiles/test_image.png'  # test_img
        ui.show_online_presence = True
        e = Expertise(name="aba")
        ekw = ExpertiseKeyword(name="zzz")
        deg = Degree(level="aa", institution="zzz", domain="magic")
        deg.save()
        ekw.save()
        e.save()
        ui.expertises.set([e])
        ui.expertise_keywords.set([ekw])
        ui.degrees.set([deg])
        ui.save()
    driver.get(web_url + f'/user/{u.username}/')
    WebDriverWait(driver, TIME_WAIT).until(
        EC.presence_of_element_located(
            (By.ID, "id_first_last_name_certified_user")))
    # checking that data is valid
    assert driver.find_element_by_id(
        'id_first_last_name_certified_user').text == 'FN LN'
    assert driver.find_element_by_id('id_title_user').text == 'T'
    assert driver.find_element_by_id('id_bio_user').text == 'B'
    assert driver.find_element_by_id('id_website_user').get_attribute(
        'href') == ui.website
    assert driver.find_element_by_id('id_linkedin_user').get_attribute(
        'href') == ui.linkedin
    assert driver.find_element_by_id('id_google_scholar_user').get_attribute(
        'href') == ui.google_scholar
    assert driver.find_element_by_id('id_twitter_user').get_attribute(
        'href') == ui.twitter
    assert driver.find_element_by_id('id_orcid_user').get_attribute(
        'href') == ui.orcid
    assert driver.find_element_by_id('id_researchgate_user')\
        .get_attribute('href') == ui.researchgate
    assert driver.find_element_by_id('id_youtube_user').get_attribute(
        'href') == ui.youtube
    # The avatar must be served over HTTP and match the generated file
    # byte-for-byte (fetched with the session's cookies + CSRF header).
    img_src = driver.find_element_by_id('id_profile_user').get_attribute('src')
    assert img_src.startswith('http')
    cookies_dict = get_cookies(driver)
    headers = {'X-CSRFToken': cookies_dict.get('csrftoken')}
    r = get(img_src, cookies=cookies_dict, headers=headers)
    assert r.ok
    with open(test_img, 'rb') as f:
        assert r.content == f.read()
    exps = driver.find_elements_by_class_name('class_expertise_user')
    assert len(exps) == 1, exps
    assert exps[0].text == 'aba'
    exp_kws = driver.find_elements_by_class_name(
        'class_expertise_keyword_user')
    assert len(exp_kws) == 1, exp_kws
    assert exp_kws[0].text == 'zzz'
    degs = driver.find_elements_by_class_name('class_degree_user')
    assert len(degs) == 1, degs
    assert degs[0].text == 'aa, magic, zzz'
    # Clean up: delete the user and the temporary avatar file.
    with django_db_blocker.unblock():
        u.delete()
    logout(driver)
    os.unlink(test_img)
def product_info( self, id ):
    """Return a dictionary representation of the product with this id."""
    record = db.Product.by_id( id )
    mapper = helpers.get( db.Product )
    return mapper.to_dictionary( record )