def setup_class(self):
    """Create the unittest database"""

    makeunitdb.createTestDB(p.token, channel_list=p.channels, public=True, readonly=0)

    cutout1 = "0/2,5/1,3/0,2"
    cutout2 = "0/1,3/4,6/2,5"
    cutout3 = "0/4,6/2,5/5,7"
    cutout4 = "0/6,8/5,9/2,4"

    syn_segments1 = [[7, 3], ]
    syn_segments2 = [[7, 4], ]
    syn_segments3 = [[3, 9], ]
    syn_segments4 = [[5, 4], ]

    f1 = createSpecificSynapse(1, syn_segments1, cutout1)
    putid = putAnnotation(p, f1)
    f2 = createSpecificSynapse(2, syn_segments2, cutout2)
    putid = putAnnotation(p, f2)
    f3 = createSpecificSynapse(3, syn_segments3, cutout3)
    putid = putAnnotation(p, f3)
    f4 = createSpecificSynapse(4, syn_segments4, cutout4)
    putid = putAnnotation(p, f4)
def test_anno_minmal(self):
    """Upload a minimal and maximal annotation. Verify fields."""

    # Create an HDF5 file backed by a temporary file
    tmpfile = tempfile.NamedTemporaryFile()
    h5fh = h5py.File(tmpfile.name)

    # Create the top level annotation id namespace
    idgrp = h5fh.create_group(str(0))

    h5fh.flush()
    tmpfile.seek(0)

    p.annoid = putAnnotation(p, tmpfile)
    h5ret = getAnnotation(p)

    idgrpret = h5ret.get(str(p.annoid))
    assert idgrpret
    assert (idgrpret['ANNOTATION_TYPE'][0] == 1)
    assert not idgrpret.get('RESOLUTION')
    assert not idgrpret.get('XYZOFFSET')
    assert not idgrpret.get('VOXELS')
    assert not idgrpret.get('CUTOUT')
    mdgrpret = idgrpret['METADATA']
    assert mdgrpret
    assert (mdgrpret['CONFIDENCE'][0] == 0.0)
    assert (mdgrpret['STATUS'][0] == 0)
    assert (mdgrpret['KVPAIRS'][:] == '')
    assert (mdgrpret['AUTHOR'][:] == 'unknown')
def setup_class(self):
    """Create the unittest database"""

    makeunitdb.createTestDB(p.token, channel_list=p.channels, public=True,
                            readonly=0, ximagesize=100, yimagesize=100,
                            zimagesize=100)

    cutout1 = "0/2,5/1,3/1,3"
    cutout2 = "0/1,3/4,6/2,5"
    cutout3 = "0/4,6/2,5/5,7"
    cutout4 = "0/6,8/5,9/2,4"

    syn_segments1 = [7, 3]
    syn_segments2 = [7, 4]
    syn_segments3 = [3, 9]
    syn_segments4 = [5, 4]

    # syn_presegments1 = [7, 3, 5]
    # syn_presegments2 = [7, 4, 2, 8]
    # syn_presegments3 = [3, 9]
    # syn_presegments4 = [5]

    # syn_postsegments1 = [7, 3, 5]
    # syn_postsegments2 = [7, 4, 2, 8]
    # syn_postsegments3 = [3, 9]
    # syn_postsegments4 = [5]

    # RB COMMENT Old RAMON schema list of lists.
    # syn_segments1 = [[7, 3],]
    # syn_segments2 = [[7, 4],]
    # syn_segments3 = [[3, 9],]
    # syn_segments4 = [[5, 4],]

    f1 = createSpecificSynapse(1, syn_segments1, cutout1)
    putid = putAnnotation(p, f1)
    f2 = createSpecificSynapse(2, syn_segments2, cutout2)
    putid = putAnnotation(p, f2)
    f3 = createSpecificSynapse(3, syn_segments3, cutout3)
    putid = putAnnotation(p, f3)
    f4 = createSpecificSynapse(4, syn_segments4, cutout4)
    putid = putAnnotation(p, f4)
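# The cutout strings above appear to follow a "resolution/x-range/y-range/z-range"
# argument format (an assumption based on how they are built here, not confirmed
# elsewhere in this file). A minimal, hypothetical helper that decodes such a
# string into integers, for illustration only:
def parse_cutout_arg(cutout):
    """Split a 'res/x1,x2/y1,y2/z1,z2' string into (res, (x1, x2), (y1, y2), (z1, z2))."""
    res, xs, ys, zs = cutout.split('/')
    torange = lambda s: tuple(int(v) for v in s.split(','))
    return int(res), torange(xs), torange(ys), torange(zs)

# Example: parse_cutout_arg("0/2,5/1,3/1,3") -> (0, (2, 5), (1, 3), (1, 3))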
def test_anno_upload(self):
    """Upload all different kinds of annotations and retrieve"""

    for anntype in [1, 2, 3, 4, 5, 6]:

        annoid = 0
        f = H5AnnotationFile(anntype, annoid)

        putid1 = putAnnotation(p, f)

        # this assumes that +1 is available (which it is)
        newid = putid1 + 1
        f = H5AnnotationFile(anntype, newid)
        putid2 = putAnnotation(p, f)

        # retrieve both annotations
        p.annoid = putid1
        assert (putid1 == getId(p))

        p.annoid = putid2
        assert (putid2 == getId(p))
def test_anno_update(self):
    """Upload an updated file with new data"""

    tmpfile = tempfile.NamedTemporaryFile()
    h5fh = h5py.File(tmpfile.name)

    # Create the top level annotation id namespace
    idgrp = h5fh.create_group(str(p.annoid))

    # Create a metadata group
    mdgrp = idgrp.create_group("METADATA")

    # now let's add a bunch of random values for the specific annotation type
    ann_status = random.randint(0, 4)
    ann_confidence = random.random()
    ann_author = 'unittest_author2'

    # Annotation type
    idgrp.create_dataset("ANNOTATION_TYPE", (1,), np.uint32, data=1)

    # Set Annotation specific metadata
    mdgrp.create_dataset("STATUS", (1,), np.uint32, data=ann_status)
    mdgrp.create_dataset("CONFIDENCE", (1,), np.float, data=ann_confidence)
    mdgrp.create_dataset("AUTHOR", (1,), dtype=h5py.special_dtype(vlen=str), data=ann_author)

    h5fh.flush()
    tmpfile.seek(0)

    p.field = 'update'
    p.annoid = putAnnotation(p, tmpfile)
    p.field = None

    h5ret = getAnnotation(p)

    idgrpret = h5ret.get(str(p.annoid))
    assert idgrpret
    assert (idgrpret['ANNOTATION_TYPE'][0] == 1)
    assert not idgrpret.get('RESOLUTION')
    assert not idgrpret.get('XYZOFFSET')
    assert not idgrpret.get('VOXELS')
    assert not idgrpret.get('CUTOUT')
    mdgrpret = idgrpret['METADATA']
    assert mdgrpret
    assert (abs(mdgrpret['CONFIDENCE'][0] - ann_confidence) < 0.0001)
    # assert (mdgrpret['CONFIDENCE'][0] == ann_confidence)
    assert (mdgrpret['STATUS'][0] == ann_status)
    assert (mdgrpret['AUTHOR'][:] == ann_author)
def test_bigint_json(self):
    """Test the annotation (RAMON) JSON interface with a large ID"""

    # create hdf5 file and post it
    tmpfile = tempfile.NamedTemporaryFile()
    h5fh = h5py.File(tmpfile.name)

    ann_status = random.randint(0, 4)
    ann_confidence = random.random()
    ann_author = 'unittest_author'
    ann_annoid = 10025

    # create annotation id namespace
    idgrp = h5fh.create_group(str(ann_annoid))

    # annotation type
    idgrp.create_dataset("ANNOTATION_TYPE", (1,), np.uint32, data=1)

    mdgrp = idgrp.create_group("METADATA")

    # set annotation metadata
    mdgrp.create_dataset("STATUS", (1,), np.uint32, data=ann_status)
    mdgrp.create_dataset("CONFIDENCE", (1,), np.float, data=ann_confidence)
    mdgrp.create_dataset("AUTHOR", (1,), dtype=h5py.special_dtype(vlen=str), data=ann_author)

    h5fh.flush()
    tmpfile.seek(0)

    p.annoid = putAnnotation(p, tmpfile)

    # fetching the JSON info
    f = getURL("https://{}/ca/{}/{}/{}/json/".format(SITE_HOST, p.token, p.channels[0], ann_annoid))

    # read the JSON file
    ann_info = json.loads(f.content)

    assert (ann_info.keys()[0] == str(ann_annoid))
    assert (str(ann_info[str(ann_annoid)]['ann_status']) == str(ann_status))
    assert (str(ann_info[str(ann_annoid)]['ann_confidence'])[:5] == str(ann_confidence)[:5])
    assert (ann_info[str(ann_annoid)]['ann_author'] == str(ann_author))
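# Judging from the assertions above, the /json/ endpoint appears to map each
# annotation id (as a string) to its metadata fields. An illustrative sketch of
# the payload for the upload in test_bigint_json (status and confidence values
# are made up here, since the test randomizes them):
#
# {
#   "10025": {
#     "ann_status": 2,
#     "ann_confidence": 0.7315,
#     "ann_author": "unittest_author"
#   }
# }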
def makeAnno(p, anntype):
    """Helper to make an annotation"""

    # Create an annotation
    tmpfile = tempfile.NamedTemporaryFile()
    h5fh = h5py.File(tmpfile.name)

    # Create the top level annotation id namespace
    idgrp = h5fh.create_group(str(0))
    mdgrp = idgrp.create_group("METADATA")

    ann_author = 'Unit Test'
    mdgrp.create_dataset("AUTHOR", (1,), dtype=h5py.special_dtype(vlen=str), data=ann_author)

    idgrp.create_dataset("ANNOTATION_TYPE", (1,), np.uint32, data=anntype)

    h5fh.flush()
    tmpfile.seek(0)

    p.annoid = putAnnotation(p, tmpfile)

    tmpfile.close()
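# A brief usage sketch of the helper above (hypothetical calling code, not part
# of the original suite): makeAnno uploads a new annotation of the requested
# type and records the id assigned by the service in p.annoid, so a caller can
# immediately read it back, mirroring the pattern used in the tests above.
#
#   makeAnno(p, 1)
#   h5ret = getAnnotation(p)
#   assert h5ret.get(str(p.annoid))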
def test_multiple_json(self):
    """Test the annotation (RAMON) JSON interface with multiple objects"""

    number_of_annotations = 3  # Note: these are 1-indexed
    anno_objs = {}

    for i in range(number_of_annotations):
        # create hdf5 file and post it
        tmpfile = tempfile.NamedTemporaryFile()
        h5fh = h5py.File(tmpfile.name)

        ann_status = random.randint(0, 4)
        ann_confidence = random.random()
        ann_author = 'unittest_author'
        ann_annoid = (i + 1) * 10  # we multiply by 10 to avoid conflicts with above test

        anno_objs[str(ann_annoid)] = {
            'ann_status': ann_status,
            'ann_confidence': ann_confidence,
            'ann_author': ann_author
        }

        # create annotation id namespace
        idgrp = h5fh.create_group(str(ann_annoid))

        # annotation type
        idgrp.create_dataset("ANNOTATION_TYPE", (1,), np.uint32, data=1)

        mdgrp = idgrp.create_group("METADATA")

        # set annotation metadata
        mdgrp.create_dataset("STATUS", (1,), np.uint32, data=ann_status)
        mdgrp.create_dataset("CONFIDENCE", (1,), np.float, data=ann_confidence)
        mdgrp.create_dataset("AUTHOR", (1,), dtype=h5py.special_dtype(vlen=str), data=ann_author)

        h5fh.flush()
        tmpfile.seek(0)

        p.annoid = putAnnotation(p, tmpfile)

    # fetching the JSON info for all annotations in one request
    ann_id_str = ','.join(str((i + 1) * 10) for i in range(number_of_annotations))

    f = getURL("https://{}/ca/{}/{}/{}/json/".format(SITE_HOST, p.token, p.channels[0], ann_id_str))

    # read the JSON file
    ann_info = json.loads(f.content)

    for i in range(number_of_annotations):
        # make sure we have all the relevant annotation objects
        assert (str((i + 1) * 10) in ann_info.keys())

        chosen_id = (i + 1) * 10
        chosen_obj = anno_objs[str(chosen_id)]
        assert (str(ann_info[str(chosen_id)]['ann_status']) == str(chosen_obj['ann_status']))
        assert (str(ann_info[str(chosen_id)]['ann_confidence'])[:5] == str(chosen_obj['ann_confidence'])[:5])
        assert (ann_info[str(chosen_id)]['ann_author'] == str(chosen_obj['ann_author']))

    # pick an annotation object at random and check its properties
    chosen_id = random.randint(1, number_of_annotations) * 10
    chosen_obj = anno_objs[str(chosen_id)]
    assert (str(ann_info[str(chosen_id)]['ann_status']) == str(chosen_obj['ann_status']))
    assert (str(ann_info[str(chosen_id)]['ann_confidence'])[:5] == str(chosen_obj['ann_confidence'])[:5])
    assert (ann_info[str(chosen_id)]['ann_author'] == str(chosen_obj['ann_author']))