def POST(self, name):
    """Add titles to an existing work.

    Expects a JSON body containing 'title' (string or list) and the
    work's 'UUID' (or 'uuid'). Returns the updated work as a
    single-element list of its attribute dict.

    Raises:
        Error(BADPARAMS): when title/UUID are missing or the work
            does not exist.
    """
    # Lazy %-args instead of eager formatting in the debug call.
    logger.debug("Data: %s", web.data())

    data = json.loads(web.data())
    title = data.get('title')
    work_id = data.get('UUID') or data.get('uuid')

    titles = strtolist(title)
    # Explicit validation instead of `assert`, which is stripped under -O.
    if not titles or not work_id:
        raise Error(BADPARAMS, msg="You must provide a (work) UUID"
                                   " and at least a title")

    try:
        work = Work(work_id, titles=titles)
        known = work.exists()
    except Exception:  # narrowed from bare `except:` — don't mask SystemExit
        known = False
    if not known:
        raise Error(BADPARAMS, msg="Unknown work '%s'" % (work_id))

    work.save()
    work.load_titles()
    work.load_identifiers()
    return [work.__dict__]
def new_work():
    """Add a new publication via the WorkForm.

    On a validated POST the new Work is persisted and the user is
    redirected home; otherwise the entry form is (re)rendered.
    """
    form = WorkForm(request.form)

    # Guard clause: anything other than a valid POST just shows the form.
    if request.method != 'POST' or not form.validate():
        return render_template('new_work.html', form=form)

    save_changes(Work(), form, new=True)
    flash('Work published successfully.')
    return redirect('/')
def import_work(self, works_page, publishers_page):
    """Imports a Work: gets the work name and heading; gets the order of
    impressions; walks through the work directory and creates an impression
    object for each PDF file.

    :param works_page: parent page the new Work is attached to as a child.
    :param publishers_page: forwarded to _import_impressions.
    :return: None; returns early (skipping the work) when no heading PDF
        is found in the work directory.
    """
    work = Work()
    work.code = self._work_code
    if work.code.find('Opus') >= 0:
        work.has_opus = True
        try:
            # Expect the opus number as the second token, e.g. "Opus 12".
            opus_n = int(work.code.split()[1].strip())
        except ValueError:
            # NOTE(review): 66 looks like a sentinel for unparsable opus
            # numbers — confirm the intended meaning; consider a named
            # constant in settings.
            opus_n = 66
        work.is_posthumous = (opus_n >= settings.POSTHUMOUS_WORKS_WITH_OPUS)
        work.sort_order = opus_n
    else:
        work.has_opus = False
        work.is_posthumous = (work.code in settings.POSTHUMOUS_WORKS_WITHOUT_OPUS)
        # NOTE(review): +74 presumably places opus-less works after all
        # opus-numbered ones in sort order — confirm against the catalogue.
        work.sort_order = settings.ALL_WORKS_WITHOUT_OPUS.index(
            work.code) + 74
    self.logger.debug('Work sort order: {}'.format(work.sort_order))
    # Heading filename: first PDF matching '*.heading*.pdf' in the work dir.
    try:
        heading_filename = glob.glob(
            os.path.join(self._work_path, '*.heading*.pdf'))[0]
    except IndexError:
        self.logger.error(
            'No heading file found; skipping work {0}'.format(work.code))
        return
    work.heading = self._import_heading(heading_filename)
    # Title is the first whitespace-separated token of the heading text.
    work.title = work.heading.split(' ')[0].strip()
    work.slug = safe_slugify(work.title, Work)
    # Create a Work PDF Document from the heading file.
    document = Document(title=work.title)
    with open(heading_filename, 'rb') as fh:
        pdf_file = File(fh)
        document.file.save(os.path.basename(heading_filename), pdf_file)
    document.tags.add('work')
    work.pdf = document
    # gets the order of impressions
    self._order_of_impressions = self._import_order_of_impressions()
    self.logger.debug(self._order_of_impressions)
    works_page.add_child(instance=work)
    self._import_impressions(work, publishers_page)
def get_work_entry(item):
    """Build a Work from a raw museum API record.

    Tolerates missing fields: absent years become '?', a missing
    'PrimaryImage' or 'Title' no longer raises (previously
    ``item.get('PrimaryImage').get('Raw')`` crashed with AttributeError
    when the key was absent).

    :param item: dict-like API record.
    :return: a populated Work instance (not persisted here).
    """
    work = Work()

    b_year = item.get('DateBeginYear')
    e_year = item.get('DateEndYear')
    b_year = str(b_year) if b_year else '?'
    e_year = str(e_year) if e_year else '?'

    description = item.get('Description')
    # Guard against a record with no primary image.
    primary_image = item.get('PrimaryImage') or {}
    image_url = primary_image.get('Raw')

    # Guard against a record with no title; cap at the column width.
    work.name = (item.get('Title') or '')[:128]
    work.date = '{}-{}'.format(b_year, e_year)
    work.description = description[:1024] if description else None
    work.image_url = image_url
    return work
def merge_work(dbsession, person, title, source):
    """Return the Work for (person, title), creating it when absent.

    For an existing work, attach the given source (creating the Source
    row if needed) or refresh the timestamp of an already-linked source.
    """
    existing = dbsession.query(Work).filter(
        and_(Work.person_id == person.id, Work.title == title)).first()

    if not existing:
        # First sighting of this title for this person.
        created = Work(person=person, title=title, status='unconfirmed')
        dbsession.add(created)
        dbsession.commit()
        return created

    link = dbsession.query(WorkSource).join(Source).filter(
        and_(WorkSource.work == existing,
             Source.url == source['url'])).first()
    if not link:
        src = dbsession.query(Source).filter(
            Source.url == source['url']).first()
        if not src:
            src = Source(label=source['label'], url=source['url'])
            dbsession.add(src)
        link = WorkSource(work=existing, source=src,
                          timestamp=source['timestamp'])
    else:
        # Already linked: just refresh when we last saw it.
        link.timestamp = source['timestamp']
    dbsession.add(link)
    dbsession.commit()
    return existing
def work(request, work_hash=None):
    """CRUD endpoint for Work objects addressed by a base62 hash.

    POST creates, PUT updates, DELETE removes, anything else reads.
    Raises Http404 when a hash is supplied but resolves to nothing.

    Fixes: a PUT/DELETE without an existing work previously crashed
    (UnboundLocalError on `data` / AttributeError on `None.delete()`);
    both now answer 404 like the GET path.
    """
    status = 200
    data = ''
    work = None
    if work_hash:
        # Avoid shadowing the builtin `id`.
        decoded = base62.to_decimal(work_hash)
        work = Work.get_by_id(decoded - BASE_ID)
        if not work:
            raise Http404

    if request.method == "POST":
        # create
        w = Work()
        w.put()
        data = json.dumps({'id': base62.from_decimal(BASE_ID + w.unique_id())})
    elif request.method == "PUT":
        # update
        if work:
            work.json = request.raw_post_data
            work.put()
            data = request.raw_post_data
        else:
            data = '{"error": "does not exist"}'
            status = 404
    elif request.method == "DELETE":
        # remove
        if work:
            work.delete()
            status = 204
        else:
            data = '{"error": "does not exist"}'
            status = 404
    else:
        # get
        if not work:
            data = '{"error": "does not exist"}'
            status = 404
        else:
            data = work.json
    return HttpResponse(data, status=status, mimetype='application/json')
def POST(self, name):
    """Add identifiers to an existing work.

    Expects a JSON body with 'URI' (or 'uri'), an optional 'canonical'
    flag, and the work's 'UUID' (or 'uuid'). Returns the updated work
    as a single-element list of its attribute dict.

    Raises:
        Error(BADPARAMS): on missing fields, an unparsable URI, an
            unknown URI scheme, or an unknown work.
    """
    # Lazy %-args instead of eager formatting in the debug call.
    logger.debug("Data: %s", web.data())

    data = json.loads(web.data())
    uri = data.get('URI') or data.get('uri')
    canonical = data.get('canonical') in (True, "true", "True")
    work_id = data.get('UUID') or data.get('uuid')

    # Explicit validation instead of `assert`, which is stripped under -O.
    if not uri or not work_id:
        raise Error(BADPARAMS, msg="You must provide a (work) UUID"
                                   " and a URI")

    try:
        scheme, value = Identifier.split_uri(uri)
    except Exception:  # narrowed from bare `except:`
        raise Error(BADPARAMS, msg="Invalid URI '%s'" % (uri))
    uris = [{'URI': uri, 'canonical': canonical}]

    try:
        scheme_known = UriScheme(scheme).exists()
    except Exception:
        scheme_known = False
    if not scheme_known:
        raise Error(BADPARAMS, msg="Unknown URI scheme '%s'" % (scheme))

    try:
        work = Work(work_id, uris=uris)
        work_known = work.exists()
    except Exception:
        work_known = False
    if not work_known:
        raise Error(BADPARAMS, msg="Unknown work '%s'" % (work_id))

    work.save()
    work.load_identifiers()
    return [work.__dict__]
def handler(request):
    """WeChat backend handler.

    GET: WeChat server signature verification handshake.
    POST: parses an incoming message and dispatches on its content:
    text beginning with a search keyword, a magnet link to queue, a
    status-check keyword, or a fallback reply.

    :param request: Django request object.
    :return: HttpResponse with the WeChat-formatted reply.
    """
    if request.method == "GET":
        # wechat server signature handshake: echo `echostr` back when
        # the signature checks out, otherwise answer 'INVALID'.
        signature = request.GET.get('signature', '')
        timestamp = request.GET.get('timestamp', '')
        nonce = request.GET.get('nonce', '')
        echostr = request.GET.get('echostr', '')
        wechat = WechatBasic(token=appToken)
        if wechat.check_signature(signature=signature, timestamp=timestamp, nonce=nonce):
            return HttpResponse(echostr)
        else:
            return HttpResponse('INVALID')
    # text from user (POST body is the raw WeChat XML payload)
    body_text = request.body
    wechat = WechatBasic(token=appToken)
    wechat.parse_data(body_text)
    # get wechat message
    message = wechat.get_message()
    # check message type: only plain text is supported
    if message.type != 'text':
        return HttpResponse(wechat.response_text(u'说人话'))
    content = message.content
    # if search: strip the 2-char search prefix and resolve a magnet link
    if content.startswith(u"搜:"):
        content = get_magnet_from_keyword(content[2:])
    # check if magnet: queue a download task unless already present
    if content.startswith("magnet:?xt=urn:btih:"):
        if Work.objects.filter(magnet=content):
            return HttpResponse(wechat.response_text(u'已经添加过这个链接了'))
        work = Work(magnet=content, operate=Operator.DOWNLOAD)
        work.save()
        return HttpResponse(
            wechat.response_text(u'链接已添加!回复【%s】显示详情。' % keyword_check))
    # user check: list all live tasks, newest first
    if content == keyword_check:
        works = Work.objects.filter(is_removed=False).order_by('-create_time')
        work_list = u'任务详情:\n\n'
        for index, work in enumerate(works):
            name = work.name if work.name else u'名字解析中'
            speed = work.down_speed
            progress = work.progress
            operate = work.get_operate_name()
            work_list += "%d. %s [%s] [%s] [%s]\n" % (index + 1, name,
                                                      speed, progress,
                                                      operate)
        work_list += u'\n回复【%s】下载,【%s】暂停,【%s】删除,后跟相应数字' % (
            keyword_download, keyword_pause, keyword_remove)
        return HttpResponse(wechat.response_text(work_list))
    # fallback for unrecognised commands
    return HttpResponse(wechat.response_text(u'待开发'))
def scrape_works():
    """Scrape Cooper Hewitt works for a fixed set of people and stage them.

    Queries the Cooper Hewitt collection API per person/role, resolves
    or creates ArtType/Medium rows, and stages qualifying Work rows on
    the session (commit is expected elsewhere).

    Fixes: the constructed Work was previously discarded — it was never
    added to the session (unlike the sibling Finnish-gallery importer,
    which does `db.session.add(work)`); a response with no "objects"
    also crashed the loop.
    """
    PERSON_ID = [
        '18041923', '18046981', '18048665', '18042915', '18042233',
        '18041501', '18042405', '18042839'
    ]
    role_id = "35236565"
    works = []
    museum = Venue.query.filter_by(
        name="Cooper Hewitt, Smithsonian Design Museum").first()
    for pid in PERSON_ID:
        pid_role_id = pid + ":" + role_id
        print(pid_role_id)
        parameters = {
            "access_token": COOPER_KEY,
            'has_image': 1,
            'person_role_id': pid_role_id,
        }
        r = requests.get(
            'https://api.collection.cooperhewitt.org/rest/?method=cooperhewitt.people.getObjects',
            params=parameters)
        # Guard: a response without "objects" previously raised TypeError.
        for work in r.json().get("objects") or []:
            title = work.get("title")
            # Titles look like "<type>, <title>"; split on the first comma.
            # NOTE(review): a title with no comma yields e == -1 and
            # mangles both halves — confirm the API guarantees the comma.
            e = title.find(",")
            art_type = title[:e]
            title = title[e + 2:]
            if len(work.get("images")) > 0:
                try:
                    url = work.get("images")[0].get("x").get("url")
                except AttributeError:
                    url = work.get("images")[0].get("b").get("url")
                medium = work.get("medium")
                date = work.get("date")
                artist = ""
                for p in work.get("participants"):
                    if p.get("role_name") == "Artist":
                        artist = p.get("person_name")
                artist = db.session().query(Artist).filter_by(
                    name=artist).first()
                # Add art type if its not in table.
                query = ArtType.query.filter_by(name=art_type).first()
                if query:
                    art_type = query
                elif len(art_type) < 128:
                    art_type = ArtType(name=art_type)
                    db.session.add(art_type)
                else:
                    art_type = None
                if medium:
                    query = Medium.query.filter_by(name=medium).first()
                    if query:
                        medium = query
                    elif len(medium) < 128:
                        medium = Medium(name=medium)
                        db.session.add(medium)
                    else:
                        medium = None
                if (medium and art_type and url and len(title) < 128):
                    w = Work(name=title, artist=artist, art_type=art_type,
                             medium=medium, date=date, venue=museum,
                             image_url=url)
                    # Previously missing: without this the Work was
                    # constructed and thrown away.
                    db.session.add(w)
def result_to_work(r):
    """Map a raw result mapping to a Work.

    Uses ``dict.get`` with defaults instead of the hand-rolled
    ``x if key in r else default`` membership tests (identical
    behavior, one lookup per key).

    :param r: mapping with 'work_id' (required) and optional
        'work_type' and 'titles'.
    :return: a Work built from those fields.
    """
    return Work(r["work_id"], r.get("work_type"), r.get("titles", []))
def POST(self, name):
    """Create a work.

    Expects JSON with 'type', 'title' (string or list), 'URI'/'uri'
    (list of {'URI': ..., 'canonical': ...} entries), and optional
    'parent'/'child' UUID lists. Returns the new work as a
    single-element list of its attribute dict.

    Raises:
        Error(BADPARAMS): on missing fields, an invalid URI, an unknown
            work type or URI scheme, or invalid parent/child UUIDs.
    """
    # Lazy %-args instead of eager formatting in the debug call.
    logger.debug("Data: %s", web.data())

    data = json.loads(web.data())
    wtype = data.get('type')
    title = data.get('title')
    uri = data.get('URI') or data.get('uri')
    parent = data.get('parent')
    child = data.get('child')

    titles = strtolist(title)
    uris = strtolist(uri)
    # Explicit validation instead of `assert`, which is stripped under -O.
    if not (wtype and titles and uris):
        raise Error(BADPARAMS, msg="You must provide a (work) type"
                                   ", a title, and at least one URI")

    try:
        type_known = WorkType(wtype).exists()
    except Exception:  # narrowed from bare `except:`
        type_known = False
    if not type_known:
        raise Error(BADPARAMS, msg="Unknown work type '%s'" % (wtype))

    for i in uris:
        # Pre-bind so the error message below can never hit an unbound
        # name (previously a KeyError on 'URI' turned the intended 400
        # into a NameError on `ident`).
        ident = None
        try:
            ident = i['URI'] or i['uri']
            scheme, value = Identifier.split_uri(ident)
            # Missing/unknown 'canonical' coerces to False, as before.
            i['canonical'] = i.get('canonical') in (True, "true", "True")
        except Exception:
            raise Error(BADPARAMS, msg="Invalid URI '%s'" % (ident))

        # check whether the URI scheme exists in the database
        try:
            scheme_known = UriScheme(scheme).exists()
        except Exception:
            scheme_known = False
        if not scheme_known:
            raise Error(BADPARAMS, msg="Unknown URI scheme '%s'" % (scheme))

    uuid = Work.generate_uuid()
    work = Work(uuid, wtype, titles, uris)

    if parent:
        parents = strtolist(parent)
        for p in parents:
            if not (Work.is_uuid(p) and Work.uuid_exists(p)):
                raise Error(BADPARAMS, msg="Invalid parent UUID provided.")
        work.set_parents(parents)

    if child:
        children = strtolist(child)
        for c in children:
            if not (Work.is_uuid(c) and Work.uuid_exists(c)):
                raise Error(BADPARAMS, msg="Invalid child UUID provided.")
        work.set_children(children)

    work.save()
    return [work.__dict__]
def get_data(x):
    """Import artists (and optionally their works) from the Finnish
    National Gallery API.

    :param x: truthy -> only stage the Artist rows on the session;
        falsy -> also resolve each related work record and stage
        Work/ArtType/Medium rows.
    :return: None; commits the session at the end.
    """
    BASE_URL = "http://kokoelmat.fng.fi/api/v2"
    params = {
        "apikey": FINNISH_KEY,
        "format": "dc-json",
        "q": "artist-search:1970"
    }
    museum = Venue.query.filter_by(name="Finnish National Gallery").first()
    r = requests.get(BASE_URL, params)
    for item in r.json().get("descriptionSet"):
        # if item.get("type")[0].get("type") == "artist":
        #     print("SHEESH")
        name = item.get("title")[0].get("title")
        dates = item.get("date")
        # First date entry: birth record when typed, else a plain mapping.
        if len(dates[0]) > 1 and dates[0].get("type") == "birth":
            birth = dates[0].get("value")
            birthplace = dates[0].get("loc")
        else:
            birth = dates[0].get("birth")
            birthplace = "unknown"
        # Second date entry (if any): death record, same two shapes.
        if len(dates) > 1:
            if len(dates[1]) > 1 and dates[1].get("type") == "death":
                death = dates[1].get("value")
                deathplace = dates[1].get("loc")
            else:
                death = dates[1].get("death")
                deathplace = "unknown"
        else:
            death = None
            deathplace = "N/A"
        # Last relation entry carries the group/culture label.
        culture = item.get("relation")
        culture = culture[len(culture) - 1].get("group")
        # print(name)
        # print(birth)
        # print(death)
        # print(birthplace)
        # print(deathplace)
        # print(culture)
        # Keep only the year part of ranged dates like "1920-05".
        # NOTE(review): `"-" in birth` assumes birth is never None here —
        # confirm against the API; a missing birth would raise TypeError.
        if "-" in birth:
            birth = birth[:birth.find("-")]
        if death and "-" in death:
            death = death[:death.find("-")]
        artist = Artist(name=name, birth=birth, death=death,
                        birthplace=birthplace, deathplace=deathplace,
                        culture=culture, image_url="")
        if (x):
            db.session.add(artist)
        # attempt to add in everything else thats not the artist (works, medium, art_type)
        else:
            for work in item.get("relation"):
                # Throttle per-work lookups against the API.
                time.sleep(1)
                params["q"] = work.get("id")
                r = requests.get(BASE_URL, params)
                if r and r.json() and r.json().get("descriptionSet"):
                    result = r.json().get("descriptionSet")[0]
                    if result.get("title") and result.get("title")[0]:
                        title = next(iter(result.get("title")[0].values()))
                        # art_type / medium both come from artwork-class.
                        art_type = None
                        media = None
                        for d in result.get("type"):
                            if d.get("artwork-class"):
                                art_type = d.get("artwork-class")
                                media = d.get("artwork-class")
                        # Reuse existing ArtType rows, create otherwise.
                        ty = ArtType.query.filter_by(name=art_type).first()
                        if not ty:
                            art_type = ArtType(name=art_type)
                            db.session.add(art_type)
                        else:
                            art_type = ty
                        # Same dedup dance for Medium.
                        me = Medium.query.filter_by(name=media).first()
                        if not me:
                            media = Medium(name=media)
                            db.session.add(media)
                        else:
                            media = me
                        # Creation date -> bare integer year, or None.
                        date = None
                        if result.get("date"):
                            for dic in result.get("date"):
                                if dic.get("creation"):
                                    date = dic.get("creation")
                        if date and (" " in date or "-" in date):
                            date = date[:4]
                        try:
                            date = int(date)
                        except:
                            date = None
                        url = ""
                        if result.get("relation") and result.get("relation")[
                                0] and result.get("relation")[0].get("image"):
                            url = "http://kokoelmat.fng.fi/app?action=image&profile=topicartworkbignew&iid=" + result.get(
                                "relation")[0].get("image")
                        # Only persist complete records; the API returns a
                        # literal rate-limit string as a title when throttled.
                        if title and title != "Access frequency exceeded for user." and date and art_type and media and url:
                            work = Work(name=title, artist=artist,
                                        art_type=art_type, medium=media,
                                        date=date, venue=museum,
                                        image_url=url)
                            db.session.add(work)
                            print(title, artist, art_type, media, date,
                                  museum, url)
    db.session.commit()