def GET(self, name):
    """List a work if UUID provided otherwise list all works"""
    logger.debug("Query: %s" % (web.input()))

    work_id = web.input().get('uuid') or web.input().get('UUID')

    if work_id:
        results = Work.get_from_work_id(work_id)
        sort = ""
    else:
        filters = web.input().get('filter')
        sort = web.input().get('sort')
        order = web.input().get('order', 'asc')
        clause, params = build_parms(filters)
        try:
            if sort:
                assert sort in ["title"]
                assert order in ["asc", "desc"]
        except:
            raise Error(BADFILTERS,
                        msg="Unknown sort '%s' '%s'" % (sort, order))
        results = Work.get_all(clause, params)

    if not results:
        raise Error(NORESULT)

    data = results_to_works(results)

    if sort:
        reverse = order == "desc"
        # we sort by each work's (first) title, ignoring special chars
        return sorted(
            data,
            key=lambda x: re.sub('[^A-Za-z0-9]+', '', x[sort][0]),
            reverse=reverse)
    return data
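# A minimal client-side sketch of how the GET handler above might be queried.
# The base URL and the /works path are assumptions for illustration only; the
# snippet does not show where the handler is mounted.
import requests

API_BASE = "http://localhost:8080"  # hypothetical deployment

# list every work, sorted by title in descending order
all_works = requests.get(API_BASE + "/works",
                         params={"sort": "title", "order": "desc"}).json()

# fetch a single work by its UUID (placeholder value)
one_work = requests.get(API_BASE + "/works",
                        params={"uuid": "00000000-0000-0000-0000-000000000000"}).json()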
def POST(self, name):
    """Add titles to an existing work"""
    logger.debug("Data: %s" % (web.data()))

    data = json.loads(web.data())
    title = data.get('title')
    work_id = data.get('UUID') or data.get('uuid')

    try:
        titles = strtolist(title)
        assert titles and work_id
    except AssertionError as error:
        logger.debug(error)
        raise Error(BADPARAMS, msg="You must provide a (work) UUID"
                    + " and at least a title")

    try:
        work = Work(work_id, titles=titles)
        assert work.exists()
    except:
        raise Error(BADPARAMS, msg="Unknown work '%s'" % (work_id))

    work.save()
    work.load_titles()
    work.load_identifiers()

    return [work.__dict__]
def work(request, work_hash=None):
    status = 200
    work = None
    if work_hash:
        id = base62.to_decimal(work_hash)
        work = Work.get_by_id(id - BASE_ID)
        if not work:
            raise Http404

    # create work
    if request.method == "POST":
        w = Work()
        w.put()
        data = json.dumps(
            {'id': base62.from_decimal(BASE_ID + w.unique_id())})
    # update
    elif request.method == "PUT":
        if work:
            work.json = request.raw_post_data
            work.put()
        data = request.raw_post_data
        pass
    # remove
    elif request.method == "DELETE":
        work.delete()
        status = 204
        data = ''
        pass
    # get
    else:
        if not work:
            data = '{"error": "does not exist"}'
            status = 404
        else:
            data = work.json
        pass

    return HttpResponse(data, status=status, mimetype='application/json')
def new_work():
    """Add a new publication"""
    form = WorkForm(request.form)

    if request.method == 'POST' and form.validate():
        # save the work
        work = Work()
        save_changes(work, form, new=True)
        flash('Work published successfully.')
        return redirect('/')

    return render_template('new_work.html', form=form)
def merge_work(dbsession, person, title, source):
    work = dbsession.query(Work).filter(
        and_(Work.person_id == person.id, Work.title == title)).first()
    if not work:
        work = Work(person=person, title=title, status='unconfirmed')
        dbsession.add(work)
        dbsession.commit()
    else:
        work_source = dbsession.query(WorkSource).join(Source).filter(
            and_(WorkSource.work == work,
                 Source.url == source['url'])).first()
        if not work_source:
            db_source = dbsession.query(Source).filter(
                Source.url == source['url']).first()
            if not db_source:
                db_source = Source(label=source['label'], url=source['url'])
                dbsession.add(db_source)
            work_source = WorkSource(work=work, source=db_source,
                                     timestamp=source['timestamp'])
            dbsession.add(work_source)
        else:
            work_source.timestamp = source['timestamp']
            dbsession.add(work_source)
        dbsession.commit()
    return work
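# A hedged usage sketch for merge_work above. The shape of the `source` dict
# (keys 'label', 'url', 'timestamp') mirrors exactly how the function reads it;
# `dbsession` and `person` are assumed to come from the surrounding SQLAlchemy
# application and are only illustrative, not defined here.
from datetime import datetime

source = {
    "label": "Example catalogue",           # hypothetical source label
    "url": "https://example.org/entry/1",   # hypothetical source URL
    "timestamp": datetime.utcnow(),
}
work = merge_work(dbsession, person, "A Sample Title", source)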
def get_work_entry(item):
    work = Work()

    b_year = item.get('DateBeginYear')
    e_year = item.get('DateEndYear')
    b_year = str(b_year) if b_year else '?'
    e_year = str(e_year) if e_year else '?'

    description = item.get('Description')
    image_url = item.get('PrimaryImage').get('Raw')

    work.name = item.get('Title')[:128]
    work.date = '{}-{}'.format(b_year, e_year)
    work.description = description[:1024] if description else None
    work.image_url = image_url

    return work
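# Illustrative input for get_work_entry above. The keys mirror what the
# function actually reads (Title, DateBeginYear, DateEndYear, Description,
# PrimaryImage.Raw); the values are made up, and the Work model is assumed to
# be importable from the surrounding module.
sample_item = {
    "Title": "Untitled Study",
    "DateBeginYear": 1890,
    "DateEndYear": None,            # missing end year becomes '?' in work.date
    "Description": "A short catalogue description.",
    "PrimaryImage": {"Raw": "https://example.org/image.jpg"},
}
work = get_work_entry(sample_item)  # work.date == '1890-?'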
def POST(self, name):
    """Add identifiers to an existing work"""
    logger.debug("Data: %s" % (web.data()))

    data = json.loads(web.data())
    uri = data.get('URI') or data.get('uri')
    canonical = data.get('canonical') in (True, "true", "True")
    work_id = data.get('UUID') or data.get('uuid')

    try:
        assert uri and work_id
    except AssertionError as error:
        logger.debug(error)
        raise Error(BADPARAMS, msg="You must provide a (work) UUID"
                    + " and a URI")

    try:
        scheme, value = Identifier.split_uri(uri)
        uris = [{'URI': uri, 'canonical': canonical}]
    except:
        raise Error(BADPARAMS, msg="Invalid URI '%s'" % (uri))

    try:
        assert UriScheme(scheme).exists()
    except:
        raise Error(BADPARAMS, msg="Unknown URI scheme '%s'" % (scheme))

    try:
        work = Work(work_id, uris=uris)
        assert work.exists()
    except:
        raise Error(BADPARAMS, msg="Unknown work '%s'" % (work_id))

    work.save()
    work.load_identifiers()

    return [work.__dict__]
def work(request, work_hash=None):
    status = 200
    work = None
    if work_hash:
        id = base62.to_decimal(work_hash)
        work = Work.get_by_id(id - BASE_ID)
        if not work:
            raise Http404

    # create work
    if request.method == "POST":
        w = Work()
        w.put()
        data = json.dumps(
            {'id': base62.from_decimal(BASE_ID + w.unique_id())})
    # update
    elif request.method == "PUT":
        if work:
            work.json = request.raw_post_data
            work.put()
        data = request.raw_post_data
        pass
    # remove
    elif request.method == "DELETE":
        work.delete()
        status = 204
        data = ''
        pass
    # get
    else:
        if not work:
            data = '{"error": "does not exist"}'
            status = 404
        else:
            data = work.json
        pass

    return HttpResponse(data, status=status, mimetype='application/json')
async def put(id: int, work: WorkBaseIn):
    await Work.filter(id=id).update(**work.dict(exclude_unset=True))
    return await WorkBaseOut.from_queryset_single(Work.get(id=id))
async def get(id: int):
    return await WorkBaseOut.from_queryset_single(Work.get(id=id))
async def read():
    return await WorkBaseOut.from_queryset(Work.all())
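# A sketch of how the three coroutines above could be wired up, assuming they
# are FastAPI endpoints backed by Tortoise ORM (Work) and pydantic schemas
# (WorkBaseIn / WorkBaseOut). The route paths and prefix are assumptions; the
# snippets themselves do not show the router.
from fastapi import APIRouter

router = APIRouter(prefix="/works", tags=["works"])
router.add_api_route("/", read, methods=["GET"])
router.add_api_route("/{id}", get, methods=["GET"])
router.add_api_route("/{id}", put, methods=["PUT"])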
def import_work(self, works_page, publishers_page):
    """Imports a Work: gets the work name and heading; gets the order of
    impressions; walks through the work directory and creates an impression
    object for each PDF file."""
    work = Work()
    work.code = self._work_code

    if work.code.find('Opus') >= 0:
        work.has_opus = True
        try:
            opus_n = int(work.code.split()[1].strip())
        except ValueError:
            opus_n = 66
        work.is_posthumous = (opus_n >= settings.POSTHUMOUS_WORKS_WITH_OPUS)
        work.sort_order = opus_n
    else:
        work.has_opus = False
        work.is_posthumous = (
            work.code in settings.POSTHUMOUS_WORKS_WITHOUT_OPUS)
        work.sort_order = settings.ALL_WORKS_WITHOUT_OPUS.index(work.code) + 74

    self.logger.debug('Work sort order: {}'.format(work.sort_order))

    # Heading filename.
    try:
        heading_filename = glob.glob(
            os.path.join(self._work_path, '*.heading*.pdf'))[0]
    except IndexError:
        self.logger.error(
            'No heading file found; skipping work {0}'.format(work.code))
        return

    work.heading = self._import_heading(heading_filename)
    work.title = work.heading.split(' ')[0].strip()
    work.slug = safe_slugify(work.title, Work)

    # Create a Work PDF Document.
    document = Document(title=work.title)
    with open(heading_filename, 'rb') as fh:
        pdf_file = File(fh)
        document.file.save(os.path.basename(heading_filename), pdf_file)
    document.tags.add('work')
    work.pdf = document

    # gets the order of impressions
    self._order_of_impressions = self._import_order_of_impressions()
    self.logger.debug(self._order_of_impressions)

    works_page.add_child(instance=work)
    self._import_impressions(work, publishers_page)
def post(self, work_id):
    new_text = self.request.get("some_text")
    work = Work.get_by_id(int(work_id))
    work.work_text = new_text
    work.put()
def edit(user):
    person = fluid_filter('fluiddb/users/username="******"' % user)
    if not person:
        abort(404)
    form = ResumeForm()
    person = Person(person[0], user)

    jobs = fluid_filter('has %s/employer' % user)
    jobs = [Work(uid, user) for uid in jobs]
    for job in jobs:
        form.jobs.append_entry()
        # join functions list to paragraph
        job.functions = r"\n".join(job.functions).strip()

    schools = fluid_filter('has %s/attended' % user)
    schools = [Education(uid, user) for uid in schools]
    for school in schools:
        form.schools.append_entry()

    if form.validate_on_submit():
        denied = False
        logging.debug('Valid form for %s' % user)
        password = form.person.password.data
        if not person.update_from_form(form.person):
            denied = True

        for job_form in form.jobs:
            # it seems that we get 1 or more phantom jobs here...
            if not job_form.url.data:
                continue
            # this seems kinda weird, but I'm not sure of a better way to
            # find the correct work object yet
            found_job = False
            logging.info("working on job: %s" % job_form.url.data)
            for job in jobs:
                if job.about == job_form.url.data:
                    if not job.update_from_form(job_form, password):
                        denied = True
                    found_job = True
            if not found_job:
                job = Work.create(job_form.url.data, user)
                if not job.update_from_form(job_form, password):
                    denied = True

        for school_form in form.schools:
            # it seems that we get 1 or more phantom schools here...
            if not school_form.url.data:
                continue
            # this seems kinda weird, but I'm not sure of a better way to
            # find the correct work object yet
            found_school = False
            logging.info("working on school: %s" % school_form.url.data)
            for school in schools:
                if school.about == school_form.url.data:
                    if not school.update_from_form(school_form, password):
                        denied = True
                    found_school = True
            if not found_school:
                school = Education.create(school_form.url.data, user)
                if not school.update_from_form(school_form, password):
                    denied = True

        if form.skills.data:
            OReillySkill.update_from_form(form.skills, user, password)

        if denied:
            flash("Permission Denied!", category='error')
        else:
            flash("Success!", category='info')

        # gotta reload the attributes to show changes
        person.reload_tags()
        for job in jobs:
            job.reload_tags()
        for school in schools:
            school.reload_tags()

    skills = fluid_filter('has %s/skill' % user)
    skills = [OReillySkill(uid) for uid in skills]
    skill_list = json.dumps([{'id': skill.uid, 'name': skill.title}
                             for skill in skills])

    return render_template('edit.html', person=person, jobs=jobs,
                           schools=schools, form=form, user=user,
                           skill_list=skill_list)
def result_to_work(r):
    work = Work(r["work_id"],
                r["work_type"] if "work_type" in r else None,
                r["titles"] if "titles" in r else [])
    return work
def import_work(self, works_page, publishers_page):
    """Imports a Work: gets the work name and heading; gets the order of
    impressions; walks through the work directory and creates an impression
    object for each PDF file."""
    work = Work()
    work.code = self._work_code

    if work.code.find('Opus') >= 0:
        work.has_opus = True
        try:
            opus_n = int(work.code.split()[1].strip())
        except ValueError:
            opus_n = 66
        work.is_posthumous = (opus_n >= settings.POSTHUMOUS_WORKS_WITH_OPUS)
        work.sort_order = opus_n
    else:
        work.has_opus = False
        work.is_posthumous = (
            work.code in settings.POSTHUMOUS_WORKS_WITHOUT_OPUS)
        work.sort_order = settings.ALL_WORKS_WITHOUT_OPUS.index(
            work.code) + 74

    self.logger.debug('Work sort order: {}'.format(work.sort_order))

    # Heading filename.
    try:
        heading_filename = glob.glob(
            os.path.join(self._work_path, '*.heading*.pdf'))[0]
    except IndexError:
        self.logger.error(
            'No heading file found; skipping work {0}'.format(work.code))
        return

    work.heading = self._import_heading(heading_filename)
    work.title = work.heading.split(' ')[0].strip()
    work.slug = safe_slugify(work.title, Work)

    # Create a Work PDF Document.
    document = Document(title=work.title)
    with open(heading_filename, 'rb') as fh:
        pdf_file = File(fh)
        document.file.save(os.path.basename(heading_filename), pdf_file)
    document.tags.add('work')
    work.pdf = document

    # gets the order of impressions
    self._order_of_impressions = self._import_order_of_impressions()
    self.logger.debug(self._order_of_impressions)

    works_page.add_child(instance=work)
    self._import_impressions(work, publishers_page)
def POST(self, name):
    """Create a work"""
    logger.debug("Data: %s" % (web.data()))

    data = json.loads(web.data())
    wtype = data.get('type')
    title = data.get('title')
    uri = data.get('URI') or data.get('uri')
    parent = data.get('parent')
    child = data.get('child')

    try:
        titles = strtolist(title)
        uris = strtolist(uri)
        assert wtype and titles and uris
    except AssertionError as error:
        logger.debug(error)
        raise Error(BADPARAMS, msg="You must provide a (work) type"
                    + ", a title, and at least one URI")

    try:
        assert WorkType(wtype).exists()
    except:
        raise Error(BADPARAMS, msg="Unknown work type '%s'" % (wtype))

    for i in uris:
        # attempt to get scheme from URI
        try:
            ident = i['URI'] or i['uri']
            scheme, value = Identifier.split_uri(ident)
            try:
                i['canonical'] = i['canonical'] in (True, "true", "True")
            except:
                i['canonical'] = False
        except:
            raise Error(BADPARAMS, msg="Invalid URI '%s'" % (ident))
        # check whether the URI scheme exists in the database
        try:
            assert UriScheme(scheme).exists()
        except:
            raise Error(BADPARAMS, msg="Unknown URI scheme '%s'" % (scheme))

    uuid = Work.generate_uuid()
    work = Work(uuid, wtype, titles, uris)

    if parent:
        parents = strtolist(parent)
        for p in parents:
            try:
                assert Work.is_uuid(p)
                assert Work.uuid_exists(p)
            except AssertionError as error:
                logger.debug(error)
                raise Error(BADPARAMS, msg="Invalid parent UUID provided.")
        work.set_parents(parents)

    if child:
        children = strtolist(child)
        for c in children:
            try:
                assert Work.is_uuid(c)
                assert Work.uuid_exists(c)
            except AssertionError as error:
                logger.debug(error)
                raise Error(BADPARAMS, msg="Invalid child UUID provided.")
        work.set_children(children)

    work.save()

    return [work.__dict__]
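# An illustrative request body for the create-work handler above, assembled
# from the fields it actually reads (type, title, URI, parent, child). The
# concrete values, URI scheme, and UUIDs are placeholders, not real records,
# and the type/scheme must already exist in the handler's database.
import json

payload = json.dumps({
    "type": "monograph",                       # hypothetical WorkType
    "title": ["An Example Work"],              # one or more titles
    "URI": [
        {"URI": "urn:isbn:9780000000000", "canonical": True},
        {"URI": "https://example.org/book", "canonical": False},
    ],
    # "parent": ["<existing work UUID>"],      # optional relations
    # "child": ["<existing work UUID>"],
})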
def parse_minutegram(msheet, csheet, sw, user):
    work = Work()

    if msheet.cell(0, 7).value == '':
        e = IntegrityError()
        e.__cause__ = "El trabajo no tiene numero"
        raise e
    else:
        work.number = msheet.cell(0, 7).value

    if column_value_search(1, 'DESCRIPCION TP:', msheet):
        drow = column_value_search(1, 'DESCRIPCION TP:', msheet)
    else:
        e = IntegrityError()
        e.__cause__ = "El documento no tiene seccion DESCRIPCION TP"
        raise e

    if column_value_search(1, 'JUSTIFICACION: ', msheet):
        jrow = column_value_search(1, 'JUSTIFICACION: ', msheet)
    else:
        e = IntegrityError()
        e.__cause__ = "El documento no tiene seccion JUSTIFICACION"
        raise e

    if column_value_search(1, 'OBSERVACIONES:', msheet):
        orow = column_value_search(1, 'OBSERVACIONES:', msheet)
    else:
        e = IntegrityError()
        e.__cause__ = "El documento no tiene seccion OBSERVACIONES"
        raise e

    if column_value_search(1, 'PLAN DE TRABAJO (MINUTOGRAMA):', msheet):
        wprow = column_value_search(1, 'PLAN DE TRABAJO (MINUTOGRAMA):', msheet)
    else:
        e = IntegrityError()
        e.__cause__ = "El documento no tiene seccion PLAN DE TRABAJO"
        raise e

    if column_value_search(1, 'PLAN DE CONTINGENCIA / ROLLBACK:', msheet):
        cprow = column_value_search(1, 'PLAN DE CONTINGENCIA / ROLLBACK:', msheet)
    else:
        e = IntegrityError()
        e.__cause__ = "El documento no tiene seccion PLAN DE CONTINGENCIA / ROLLBACK"
        raise e

    # this block copies the data extracted from the form onto the new work
    work.ticketArea = sw.ticketArea
    work.department = sw.department
    work.municipality = sw.municipality
    work.impact = sw.impact
    work.ticketCause = sw.ticketCause
    work.initialDate = sw.initialDate
    work.finalDate = sw.finalDate
    work.outboundDate = sw.outboundDate
    work.createdDate = datetime.date.today()
    work.affectTime = sw.affectTime
    work.rollbackTime = sw.rollbackTime

    now = timezone.make_aware(datetime.datetime.now(),
                              timezone.get_default_timezone())

    # If the time allowed for the cause is given in hours, the work must be
    # assigned to internal areas and never to external ones
    if sw.ticketCause.timeLapseType == Cause.HOURS and sw.ticketArea.type == Area.INTERN:
        if now + datetime.timedelta(days=1, hours=sw.ticketCause.internTimeLapse) <= sw.initialDate:
            work.limitResponseDate = now + datetime.timedelta(
                days=1, hours=sw.ticketCause.internTimeLapse)
        else:
            e = IntegrityError()
            e.__cause__ = "El tiempo maximo de respuesta de los clientes es mas tarde que la fecha de inicio del trabajo"
            raise e
    elif sw.ticketCause.timeLapseType == Cause.HOURS and sw.ticketArea.type == Area.EXTERN:
        e = IntegrityError()
        e.__cause__ = "La Causa del ticket no puede asignarse a un area externa"
        raise e
    elif sw.ticketCause.timeLapseType == Cause.DAYS and sw.ticketArea.type == Area.INTERN:
        if now + datetime.timedelta(days=1 + sw.ticketCause.internTimeLapse) <= sw.initialDate:
            work.limitResponseDate = now + datetime.timedelta(
                days=1 + sw.ticketCause.internTimeLapse)
        else:
            e = IntegrityError()
            e.__cause__ = "El tiempo maximo de respuesta de los clientes es mas tarde que la fecha de inicio del trabajo"
            raise e
    elif sw.ticketCause.timeLapseType == Cause.DAYS and sw.ticketArea.type == Area.EXTERN:
        if now + datetime.timedelta(days=1 + sw.ticketCause.externTimeLapse) <= sw.initialDate:
            work.limitResponseDate = now + datetime.timedelta(
                days=1 + sw.ticketCause.externTimeLapse)
        else:
            e = IntegrityError()
            e.__cause__ = "El tiempo maximo de respuesta de los clientes es mas tarde que la fecha de inicio del trabajo"
            raise e

    # assign the logged-in user to the work
    if user:
        work.userCreator = user
    # -------------------------------------------------------------------------------

    work.description = msheet.cell(drow + 1, 1).value
    work.justification = msheet.cell(jrow + 1, 1).value
    work.observations = msheet.cell(orow + 1, 1).value

    try:
        group = WorkGroup.objects.get(number=work.number)
        for w in group.work_set.all():
            w.state = Work.CANCELED
            for acc in w.acceptance_set.all():
                acc.valid = False
                acc.save()
            w.save()
        work.group = group
        work.programmed = Work.REPROGRAMMED
    except:
        group = WorkGroup()
        group.number = work.number
        group.save()
        work.group = group

    work.save()

    # loads work plans
    for i in range(wprow + 2, cprow):
        if check_line(i, 2, 6, msheet):
            wp = WorkPlan()
            wp.work = work
            wp.initialDate = xldate_as_datetime(msheet.cell(i, 2).value, 0)
            wp.finalDate = xldate_as_datetime(msheet.cell(i, 3).value, 0)
            wp.affectation = datetime.time(
                *(xldate_as_tuple(msheet.cell(i, 4).value, 0))[3:])
            wp.activity = msheet.cell(i, 5).value
            wp.save()
        else:
            e = IntegrityError()
            e.__cause__ = "Alguno de los planes de trabajo tiene un campo vacio"
            raise e

    # loads contingency plans
    for i in range(cprow + 2, drow - 1):
        if check_line(i, 2, 6, msheet):
            cp = ContingencyPlan()
            cp.work = work
            cp.initialDate = xldate_as_datetime(msheet.cell(i, 2).value, 0)
            cp.finalDate = xldate_as_datetime(msheet.cell(i, 3).value, 0)
            cp.affectation = datetime.time(
                *(xldate_as_tuple(msheet.cell(i, 4).value, 0))[3:])
            cp.activity = msheet.cell(i, 5).value
            cp.save()
        else:
            e = IntegrityError()
            e.__cause__ = "Alguno de los planes de contingencia tiene un campo vacio"
            raise e

    parse_corp_clients(csheet, work)
def scrape_works():
    PERSON_ID = [
        '18041923', '18046981', '18048665', '18042915', '18042233',
        '18041501', '18042405', '18042839'
    ]
    role_id = "35236565"
    works = []
    museum = Venue.query.filter_by(
        name="Cooper Hewitt, Smithsonian Design Museum").first()

    for pid in PERSON_ID:
        pid_role_id = pid + ":" + role_id
        print(pid_role_id)
        parameters = {
            "access_token": COOPER_KEY,
            'has_image': 1,
            'person_role_id': pid_role_id,
        }
        r = requests.get(
            'https://api.collection.cooperhewitt.org/rest/?method=cooperhewitt.people.getObjects',
            params=parameters)

        for work in r.json().get("objects"):
            title = work.get("title")
            e = title.find(",")
            art_type = title[:e]
            title = title[e + 2:]

            if len(work.get("images")) > 0:
                try:
                    url = work.get("images")[0].get("x").get("url")
                except AttributeError:
                    url = work.get("images")[0].get("b").get("url")

            medium = work.get("medium")
            date = work.get("date")

            artist = ""
            for p in work.get("participants"):
                if p.get("role_name") == "Artist":
                    artist = p.get("person_name")
            artist = db.session().query(Artist).filter_by(name=artist).first()

            # Add art type if its not in table.
            query = ArtType.query.filter_by(name=art_type).first()
            if query:
                art_type = query
            elif len(art_type) < 128:
                art_type = ArtType(name=art_type)
                db.session.add(art_type)
            else:
                art_type = None

            if medium:
                query = Medium.query.filter_by(name=medium).first()
                if query:
                    medium = query
                elif len(medium) < 128:
                    medium = Medium(name=medium)
                    db.session.add(medium)
                else:
                    medium = None

            if (medium and art_type and url and len(title) < 128):
                # print(title)
                # print(artist)
                # print(art_type)
                # print(medium)
                # print(date)
                # print(museum)
                # print(url)
                w = Work(name=title, artist=artist, art_type=art_type,
                         medium=medium, date=date, venue=museum,
                         image_url=url)
def cv(user):
    # quick check to make sure the user actually exists
    resp = fluid.get('/users/%s' % user)
    if resp.status_code == 404:
        abort(404)

    person_rpc = Person.filter('fluiddb/users/username="******"', user,
                               async=True)

    # check for person's picture
    #logging.debug('Checking for %s/picture at %s' % (user, person.picture))
    #h = httplib2.Http()
    #head, cont = h.request(person.picture, 'HEAD')
    #if head['status'] == '200':
    #    person.has_pic = True
    #    logging.debug('%s/picture exists' % user)
    #else:
    #    person.has_pic = False
    #    logging.debug('%s/picture does not exist. Returned %s status' % (
    #        user, head['status']))

    # find user's jobs
    work_rpc = Work.filter('has %s/employer', user, async=True)

    # find user's schools
    school_rpc = Education.filter('has %s/attended', user, async=True)

    # find user's publications
    #publications = fluid_filter('has %s/publication' % user)
    #publications = [Publication(uid, user) for uid in publications]
    publications = []

    # find user's skills associated with o'reilly books
    oskill_rpc = OReillySkill.filter(
        'has %s/skill and has %%s/title' % user, 'oreilly.com', async=True)

    resp = fluid.result(person_rpc)
    logging.info('Person filter for %s returned %d' % (user, resp.status_code))
    if resp.status_code == 200 and resp.content['results']['id']:
        [(uid, tags)] = resp.content['results']['id'].items()
        person = Person(uid, user, tags)
    else:
        abort(404)

    resp = fluid.result(work_rpc)
    logging.info('Work filter for %s returned %d' % (user, resp.status_code))
    if resp.status_code == 200 and resp.content['results']['id']:
        resp = resp.content['results']['id']
        jobs = Work.from_response(user, resp)
    else:
        #FIXME need better error handling
        jobs = []

    resp = fluid.result(school_rpc)
    logging.info('School filter for %s returned %d' % (user, resp.status_code))
    if resp.status_code == 200 and resp.content['results']['id']:
        resp = resp.content['results']['id']
        schools = Education.from_response(user, resp)
    else:
        schools = []

    resp = fluid.result(oskill_rpc)
    logging.info('Skill filter for %s returned %d' % (user, resp.status_code))
    if resp.status_code == 200 and resp.content['results']['id']:
        resp = resp.content['results']['id']
        oskills = OReillySkill.from_response(resp)
    else:
        oskills = []

    return render_template('cv.html', person=person, jobs=jobs,
                           schools=schools, publications=publications,
                           oskills=oskills,
                           current_date=datetime.now().date())
def get(self, work_id):
    work = Work.get_by_id(int(work_id))
    params = {"work": work}
    return self.render_template("work_edit.html", params=params)
def handler(request):
    """
    wechat backend handler
    :param request:
    :return:
    """
    if request.method == "GET":
        # wechat server signature
        signature = request.GET.get('signature', '')
        timestamp = request.GET.get('timestamp', '')
        nonce = request.GET.get('nonce', '')
        echostr = request.GET.get('echostr', '')
        wechat = WechatBasic(token=appToken)
        if wechat.check_signature(signature=signature, timestamp=timestamp,
                                  nonce=nonce):
            return HttpResponse(echostr)
        else:
            return HttpResponse('INVALID')

    # text from user
    body_text = request.body
    wechat = WechatBasic(token=appToken)
    wechat.parse_data(body_text)

    # get wechat message
    message = wechat.get_message()

    # check message type
    if message.type != 'text':
        return HttpResponse(wechat.response_text(u'说人话'))

    content = message.content

    # if search
    if content.startswith(u"搜:"):
        content = get_magnet_from_keyword(content[2:])

    # check if magnet
    if content.startswith("magnet:?xt=urn:btih:"):
        if Work.objects.filter(magnet=content):
            return HttpResponse(wechat.response_text(u'已经添加过这个链接了'))
        work = Work(magnet=content, operate=Operator.DOWNLOAD)
        work.save()
        return HttpResponse(
            wechat.response_text(u'链接已添加!回复【%s】显示详情。' % keyword_check))

    # user check
    if content == keyword_check:
        works = Work.objects.filter(is_removed=False).order_by('-create_time')
        work_list = u'任务详情:\n\n'
        for index, work in enumerate(works):
            name = work.name if work.name else u'名字解析中'
            speed = work.down_speed
            progress = work.progress
            operate = work.get_operate_name()
            work_list += "%d. %s [%s] [%s] [%s]\n" % (index + 1, name, speed,
                                                      progress, operate)
        work_list += u'\n回复【%s】下载,【%s】暂停,【%s】删除,后跟相应数字' % (
            keyword_download, keyword_pause, keyword_remove)
        return HttpResponse(wechat.response_text(work_list))

    return HttpResponse(wechat.response_text(u'待开发'))
def get_data(x):
    BASE_URL = "http://kokoelmat.fng.fi/api/v2"
    params = {
        "apikey": FINNISH_KEY,
        "format": "dc-json",
        "q": "artist-search:1970"
    }
    museum = Venue.query.filter_by(name="Finnish National Gallery").first()
    r = requests.get(BASE_URL, params)

    for item in r.json().get("descriptionSet"):
        # if item.get("type")[0].get("type") == "artist":
        #     print("SHEESH")
        name = item.get("title")[0].get("title")
        dates = item.get("date")

        if len(dates[0]) > 1 and dates[0].get("type") == "birth":
            birth = dates[0].get("value")
            birthplace = dates[0].get("loc")
        else:
            birth = dates[0].get("birth")
            birthplace = "unknown"

        if len(dates) > 1:
            if len(dates[1]) > 1 and dates[1].get("type") == "death":
                death = dates[1].get("value")
                deathplace = dates[1].get("loc")
            else:
                death = dates[1].get("death")
                deathplace = "unknown"
        else:
            death = None
            deathplace = "N/A"

        culture = item.get("relation")
        culture = culture[len(culture) - 1].get("group")

        # print(name)
        # print(birth)
        # print(death)
        # print(birthplace)
        # print(deathplace)
        # print(culture)

        if "-" in birth:
            birth = birth[:birth.find("-")]
        if death and "-" in death:
            death = death[:death.find("-")]

        artist = Artist(name=name, birth=birth, death=death,
                        birthplace=birthplace, deathplace=deathplace,
                        culture=culture, image_url="")

        if (x):
            db.session.add(artist)
        # attempt to add in everything else thats not the artist
        # (works, medium, art_type)
        else:
            for work in item.get("relation"):
                time.sleep(1)
                params["q"] = work.get("id")
                r = requests.get(BASE_URL, params)
                if r and r.json() and r.json().get("descriptionSet"):
                    result = r.json().get("descriptionSet")[0]
                    if result.get("title") and result.get("title")[0]:
                        title = next(iter(result.get("title")[0].values()))

                        # art_type::
                        art_type = None
                        media = None
                        for d in result.get("type"):
                            if d.get("artwork-class"):
                                art_type = d.get("artwork-class")
                                media = d.get("artwork-class")
                        ty = ArtType.query.filter_by(name=art_type).first()
                        if not ty:
                            art_type = ArtType(name=art_type)
                            db.session.add(art_type)
                        else:
                            art_type = ty
                        me = Medium.query.filter_by(name=media).first()
                        if not me:
                            media = Medium(name=media)
                            db.session.add(media)
                        else:
                            media = me

                        date = None
                        if result.get("date"):
                            for dic in result.get("date"):
                                if dic.get("creation"):
                                    date = dic.get("creation")
                        if date and (" " in date or "-" in date):
                            date = date[:4]
                        try:
                            date = int(date)
                        except:
                            date = None

                        url = ""
                        if result.get("relation") and result.get("relation")[0] and result.get("relation")[0].get("image"):
                            url = ("http://kokoelmat.fng.fi/app?action=image&profile=topicartworkbignew&iid=" +
                                   result.get("relation")[0].get("image"))

                        if title and title != "Access frequency exceeded for user." and date and art_type and media and url:
                            work = Work(name=title, artist=artist,
                                        art_type=art_type, medium=media,
                                        date=date, venue=museum,
                                        image_url=url)
                            db.session.add(work)
                            print(title, artist, art_type, media, date,
                                  museum, url)

    db.session.commit()