def normalize(cls, data: dict):
    """Build a model.Document from the camelCase multi-markup format.

    Document-wide settings come straight from top-level keys; each entry
    in data['markups'] becomes one model.Annotation.
    """
    doc = model.Document()
    doc.current_id = data['currentId']
    doc.reference_view = data['referenceView']
    doc.ontology = data['ontology']
    doc.stepSize = data['stepSize']
    doc.camera_position = tuple(data['cameraPosition'])
    doc.camera_view_up = tuple(data['cameraViewUp'])
    for index, entry in enumerate(data['markups'], start=1):
        markup = entry['markup']
        annotation = model.Annotation()
        # Annotations are named sequentially, starting at 1.
        annotation.name = f'Annotation {index}'
        annotation.orientation = entry['orientation']
        annotation.representation_type = entry['representationType']
        annotation.thickness = entry['thickness']
        annotation.markup_type = markup['type']
        annotation.coordinate_system = markup['coordinateSystem']
        # coordinateUnits is optional in this format.
        try:
            annotation.coordinate_units = markup['coordinateUnits']
        except KeyError:
            pass
        for control_point in markup['controlPoints']:
            annotation.points.append(tuple(control_point['position']))
        doc.annotations.append(annotation)
    return doc
def get_file_tree1(path):
    """Register every regular file directly under *path* as a Document.

    Non-recursive; directories (and symlinks to them) are skipped.
    NOTE(review): appends to a module-level `document_list` — presumably
    defined at module scope; confirm against the rest of the file.
    """
    for entry in os.scandir(path):
        if entry.is_dir(follow_symlinks=False):
            continue
        record = model.Document()
        record.zip_path = entry.path
        record.zip_file_name = entry.name
        document_list.append(record)
def normalize(cls, data: dict):
    """Build a model.Document from the PascalCase single-markup format."""
    doc = model.Document()
    # This format only supports one "current" markup: the first entry with
    # its 'Selected' flag set supplies the document-wide settings.
    doc.current_id = 0
    for index, markup in enumerate(data['Markups']):
        if not markup['Selected']:
            continue
        doc.current_id = index
        doc.reference_view = markup['ReferenceView']
        doc.ontology = markup['Ontology']
        doc.stepSize = markup['StepSize']
        doc.camera_position = tuple(markup['CameraPosition'])
        doc.camera_view_up = tuple(markup['CameraViewUp'])
        break
    # Every markup becomes a closed-curve annotation.
    for markup in data['Markups']:
        annotation = model.Annotation()
        annotation.name = markup['Label']
        annotation.markup_type = 'ClosedCurve'
        annotation.coordinate_system = 'LPS'
        # Source points are RAS; negate x and y to convert to LPS.
        annotation.points = [(-pt['x'], -pt['y'], pt['z'])
                             for pt in markup['Points']]
        annotation.thickness = markup['Thickness']
        annotation.orientation = markup['SplineOrientation']
        annotation.representation_type = markup['RepresentationType']
        doc.annotations.append(annotation)
    return doc
def post(self):
    """Create a new document record from a JSON request body.

    Required JSON fields:
        name:    display name for the document
        user_id: id of the uploading user

    Returns the created document marshalled with `doc_fields`.
    """
    parser = reqparse.RequestParser()
    parser.add_argument('name', type=str, location='json',
                        required=True, help='name required')
    parser.add_argument('user_id', type=str, location='json',
                        required=True, help='user_id required')
    args = parser.parse_args()
    name = args['name']
    user_id = args['user_id']
    # TODO Check if the user exists
    # Renamed from `id` to avoid shadowing the builtin.
    doc_id = generate_uuid_for(request.dbs, model.Document)
    d = model.Document(id=doc_id,
                       user_id=user_id,
                       name=name,
                       date_time=datetime.datetime.now())
    request.dbs.add(d)
    # BUG FIX: format args were transposed — the message reads
    # "<who> uploaded raw file <which>", so user_id must come first.
    app.logger.info('{} uploaded raw file {}'.format(user_id, doc_id))
    return marshal(d.to_dict(), doc_fields)
def add_document(db, user_id, project_id, document_type, name, parent_id, predecessor_id):
    """Create a new document in a project, splicing it into the
    parent/predecessor sibling chain.

    :param db: database session (SQLAlchemy-style query interface)
    :param user_id: acting user; must exist and have write access
    :param project_id: project to add the document to
    :param document_type: type tag for the new document (e.g. 'document')
    :param name: display name for the new document
    :param parent_id: id of the parent node, or -1 for the project root
    :param predecessor_id: id of the preceding sibling, or -1 to insert first
    :returns: the newly created model.Document (committed)
    :raises QueryException: on auth failure, invalid parent, or invalid
        predecessor
    """
    user = db.query(model.User).filter(model.User.id == user_id).first()
    if user is None:
        raise QueryException("Authentication failed")
    # raises if the user lacks write access to the project
    project, access = get_project_access(db, project_id, user_id, ACCESS_WRITE)
    # initially use project renderer
    renderer = project.renderer
    if parent_id == -1:
        # add to root
        parent_id = None
    else:
        parent = db.query(model.Document).filter(
            (model.Document.id == parent_id) &
            (model.Document.project == project)).first()
        # if you specify an id, it should point to something
        if parent is None:
            raise QueryException("Invalid parent")
        # new document can't be a child of another document
        if parent.document_type == 'document':
            parent_id = parent.parent_id  # point to its parent
        # if a parent, use its renderer
        renderer = parent.renderer
    if predecessor_id == -1:
        # no predecessor (should be first)
        predecessor_id = None
        # if there is an existing empty predecessor, make it point to this guy
        first = db.query(model.Document).filter(
            (model.Document.project_id == project_id) &
            (model.Document.parent_id == parent_id) &
            (model.Document.predecessor_id == None)).first()
    else:
        first = None
        predecessor = db.query(model.Document).filter(
            (model.Document.id == predecessor_id) &
            (model.Document.project == project)).first()
        if predecessor is None:
            raise QueryException("Invalid predecessor")
    # create the new document
    document = model.Document(project=project, name=name,
                              parent_id=parent_id,
                              predecessor_id=predecessor_id,
                              document_type=document_type,
                              renderer=renderer)
    db.add(document)
    if first is not None:
        # the former chain head now follows the newly inserted document
        db.flush()  # get document id
        first.predecessor_id = document.id
    db.commit()
    return document
def run_all(self, ipt, skiplist, cpsob):
    """
    Calls L{_run_one} for each item given as input, yielding the response,
    parsed annotations, document object and file name.

    NOTE: Python 2 code (print statements, old except syntax).

    @param ipt: full path to input to run (or text-string to run)
    @param skiplist: filenames (one per line) to skip, if any
    @param cpsob: corpus-like helper used to normalize entity categories
    """
    uts = ut.Utils(self.cfg)
    cat_ind = uts.load_entity_category_indicators()
    # input: mapping of file name -> text content
    fn2txt = self.rd.read(ipt)
    # truncated form of the input used only for display in the banner below
    try:
        dispipt = ipt[0:100]
    except IndexError:
        dispipt = ipt
    # a missing/unreadable skiplist simply means "skip nothing"
    try:
        skips = [ x.strip() for x in codecs.open(skiplist, "r", "utf8").readlines() ]
    except IOError:
        skips = []
    # run calls
    print "-- [{}] RUNNING COLLECTION: {}, {}".format(
        self.cl.name, dispipt, time.asctime(time.localtime()))
    dones = 0
    # self.cfg.limit caps the number of files processed
    todo = self.cfg.limit
    for fn in sorted(fn2txt):
        if fn in skips:
            print "Skipping {}".format(repr(fn))
            continue
        # create doc objs
        dob = md.Document(fn, text=fn2txt[fn])
        dob.find_sentence_positions()
        # annots: a failed run degrades to empty results rather than aborting
        try:
            res, anns = self._run_one(fn, ut.Utils.norm_text(fn2txt[fn]), cpsob)
        except ValueError, msg:
            print "\n! Error with file: {}".format(fn)
            print "\n" + msg.message
            res, anns = {}, {}
        uts.add_sentence_number_to_annots(anns, dob)
        for link in [an.enti.link for posi, an in anns.items()]:
            cpsob.normalize_entity_categories(link, cat_ind)
        dones += 1
        yield res, anns, dob, fn
        if dones == todo:
            break
def normalize(cls, data: dict):
    """Build a model.Document from the PascalCase markups format."""
    doc = model.Document()
    # Require these keys even though their values are unused: a missing key
    # raises KeyError so that format inference rejects non-matching input.
    for required_key in ("DefaultCameraPosition", "DefaultCameraViewUp",
                         "DefaultOntology", "DefaultReferenceView",
                         "DefaultRepresentationType",
                         "DefaultSplineOrientation", "DefaultStepSize",
                         "DefaultThickness"):
        _ = data[required_key]
    # Document-wide settings come from the first markup flagged 'Selected'.
    doc.current_id = 0
    selected = next(((i, m) for i, m in enumerate(data['Markups'])
                     if m['Selected']), None)
    if selected is not None:
        index, markup = selected
        doc.current_id = index
        doc.reference_view = markup['ReferenceView']
        doc.ontology = markup['Ontology']
        doc.stepSize = markup['StepSize']
        doc.camera_position = tuple(markup['CameraPosition'])
        doc.camera_view_up = tuple(markup['CameraViewUp'])
    # copy markup-specific values
    for markup in data['Markups']:
        annotation = model.Annotation()
        annotation.name = markup['Label']
        annotation.markup_type = 'ClosedCurve'
        annotation.coordinate_system = 'LPS'
        # Source points are RAS; negate x and y to convert to LPS.
        annotation.points = [(-pt['x'], -pt['y'], pt['z'])
                             for pt in markup['Points']]
        annotation.thickness = markup['Thickness']
        annotation.orientation = markup['SplineOrientation']
        annotation.representation_type = markup['RepresentationType']
        doc.annotations.append(annotation)
    return doc
def testBlipFields(self):
    """Blip accessors must mirror the raw blip data fields."""
    data = self.test_blip_data
    blip = model.Blip(data, model.Document(data))
    self.assertEquals(data.child_blip_ids, blip.GetChildBlipIds())
    self.assertEquals(data.contributors, blip.GetContributors())
    self.assertEquals(data.creator, blip.GetCreator())
    self.assertEquals(data.content, blip.GetDocument().GetText())
    self.assertEquals(data.blip_id, blip.GetId())
    self.assertEquals(data.last_modified_time, blip.GetLastModifiedTime())
    self.assertEquals(data.parent_blip_id, blip.GetParentBlipId())
    self.assertEquals(data.wave_id, blip.GetWaveId())
    self.assertEquals(data.wavelet_id, blip.GetWaveletId())
    self.assertEquals(True, blip.IsRoot())
def import_project(db, user_id, name, data, attachment_data):
    """Import a previously exported project, remapping all document ids.

    Documents are processed in topological order (a document is only
    created once its parent and predecessor have been created), using a
    work queue that requeues not-yet-ready items. Attachment payloads are
    read from *attachment_data* (a zip-like object) and written to the
    user's assets directory under freshly generated ids.

    :param db: database session
    :param user_id: importing user; must exist
    :param name: name for the newly created project
    :param data: exported project dict with a 'documents' list
    :param attachment_data: archive providing open(id, 'r') for payloads
    :returns: dict with counts of imported 'documents' and 'attachments'
    :raises QueryException: on auth failure or a circular parent/
        predecessor definition
    """
    user = db.query(model.User).filter(model.User.id == user_id).first()
    if user is None:
        raise QueryException("Authentication failed")
    # for attachments
    root = os.path.abspath(os.path.join(config.ASSETS, user_id, "attachments"))
    if not os.path.exists(root):
        os.makedirs(root)
    project = model.Project(name=name, renderer='Markdown', owner=user)  # renderer
    db.add(project)
    # old document id -> newly generated id
    id_map = {}
    # old ids whose documents have already been created
    ids_added = set()
    queue = [x for x in data['documents']]
    # consecutive requeues since the last successful insert; used to
    # detect a cycle in parent/predecessor references
    last_added = 0
    stats = {'documents': 0, 'attachments': 0}
    while len(queue) > 0:
        item = queue.pop(0)
        if item['id'] not in id_map:
            id_map[item['id']] = model.generate_id()
        # only create the document once its parent and predecessor exist
        if (item['parent_id'] is None or item['parent_id'] in ids_added) and \
                (item['predecessor_id'] is None or
                 item['predecessor_id'] in ids_added):
            document = model.Document(project=project,
                                      id=id_map.get(item['id']),
                                      name=item['name'],
                                      parent_id=id_map.get(item['parent_id']),
                                      predecessor_id=id_map.get(
                                          item['predecessor_id']),
                                      document_type=item['document_type'],
                                      renderer=item['renderer'],
                                      content=item['content'],
                                      updated=item['updated'],
                                      rating=item['rating'])
            db.add(document)
            stats['documents'] += 1
            ids_added.add(item['id'])
            last_added = 0
            for attachment in item['attachments']:
                new_attachment_id = model.generate_id()
                # extract and save zipped file to new id
                target_filename = os.path.join(root, new_attachment_id)
                file_data = attachment_data.open(attachment['id'], 'r').read()
                with open(target_filename, "wb") as fh:
                    fh.write(file_data)
                new_attachment = model.Attachment(id=new_attachment_id,
                                                  project=project,
                                                  document=document,
                                                  name=attachment["name"],
                                                  size=len(file_data),
                                                  location="server")
                db.add(new_attachment)
                stats['attachments'] += 1
        else:
            # dependencies not ready yet; retry later
            queue.append(item)
            last_added += 1
            if last_added > 2 * len(queue):
                raise QueryException('Circular document definition')
    db.commit()
    return stats
def testDocument(self):
    """A Document built from a Blip exposes the blip content as its text."""
    blip = model.Blip(self.test_blip_data)
    document = model.Document(blip)
    self.assertEquals(blip.content, document.GetText())
def testBlipIsRoot(self):
    """A blip that has a parent blip id must not be reported as root."""
    self.test_blip_data.parent_blip_id = 'blip-parent'
    blip = model.Blip(self.test_blip_data,
                      model.Document(self.test_blip_data))
    self.assertEquals(False, blip.IsRoot())
def store(self):
    """Persist this document's metadata under a freshly generated id."""
    self.doc_id = self.gen_doc_id()
    record = model.Document(self.doc_id, self.fs_name, self.friendly_name)
    db.session.add(record)
    db.session.commit()