Example #1
    def testInt(self):
        num = int(2562010)
        self.assertEqual(int(ujson.decode(ujson.encode(num))), num)

        num = np.int8(127)
        self.assertEqual(np.int8(ujson.decode(ujson.encode(num))), num)

        num = np.int16(2562010)
        self.assertEqual(np.int16(ujson.decode(ujson.encode(num))), num)

        num = np.int32(2562010)
        self.assertEqual(np.int32(ujson.decode(ujson.encode(num))), num)

        num = np.int64(2562010)
        self.assertEqual(np.int64(ujson.decode(ujson.encode(num))), num)

        num = np.uint8(255)
        self.assertEqual(np.uint8(ujson.decode(ujson.encode(num))), num)

        num = np.uint16(2562010)
        self.assertEqual(np.uint16(ujson.decode(ujson.encode(num))), num)

        num = np.uint32(2562010)
        self.assertEqual(np.uint32(ujson.decode(ujson.encode(num))), num)

        num = np.uint64(2562010)
        self.assertEqual(np.uint64(ujson.decode(ujson.encode(num))), num)
Example #2
    def testIntMax(self):
        num = int(np.iinfo(int).max)
        self.assertEqual(int(ujson.decode(ujson.encode(num))), num)

        num = np.int8(np.iinfo(np.int8).max)
        self.assertEqual(np.int8(ujson.decode(ujson.encode(num))), num)

        num = np.int16(np.iinfo(np.int16).max)
        self.assertEqual(np.int16(ujson.decode(ujson.encode(num))), num)

        num = np.int32(np.iinfo(np.int32).max)
        self.assertEqual(np.int32(ujson.decode(ujson.encode(num))), num)

        num = np.uint8(np.iinfo(np.uint8).max)
        self.assertEqual(np.uint8(ujson.decode(ujson.encode(num))), num)

        num = np.uint16(np.iinfo(np.uint16).max)
        self.assertEqual(np.uint16(ujson.decode(ujson.encode(num))), num)

        num = np.uint32(np.iinfo(np.uint32).max)
        self.assertEqual(np.uint32(ujson.decode(ujson.encode(num))), num)

        if platform.architecture()[0] != '32bit':
            num = np.int64(np.iinfo(np.int64).max)
            self.assertEqual(np.int64(ujson.decode(ujson.encode(num))), num)

            # uint64 max will always overflow as it's encoded to signed
            num = np.uint64(np.iinfo(np.int64).max)
            self.assertEqual(np.uint64(ujson.decode(ujson.encode(num))), num)
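A one-line arithmetic check of why the test clamps the uint64 case to the int64 maximum: an encoder that emits signed 64-bit integers carries one bit less of positive range, so the true uint64 maximum can never round-trip.

import numpy as np

# uint64 max needs exactly one more bit than a signed 64-bit encoder provides
assert np.iinfo(np.uint64).max == 2 * np.iinfo(np.int64).max + 1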
Example #3
def getMarkupTab(image):
    #fallback margin size approximation so we can exit
    left = 0
    top  = 0

    height, width = image.shape[:2]


    print("{width} x {height}".format(width=width, height=height))

    # Iterate over the width, then the height, until we find a pixel red enough to make a decision
    for x in range(width):
        for y in range(height):

            # Check individual pixel values
            pixel = image[y,x]

            b, g, r = pixel

            temp_gb = np.uint16(g) + np.uint16(b)

            # Check the red and combined green+blue thresholds
            if r > 200 and temp_gb < 320:
                return image[top:height, left:x]
    return None
Example #4
def wire_value(token, solved_wires):
        if token.isdigit():
            return uint16(token)
        elif token in solved_wires:
            return uint16(solved_wires[token])
        else:
            return None
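A minimal usage sketch (the wire table is hypothetical; it assumes `uint16` was imported from numpy, as the snippet's bare calls suggest):

from numpy import uint16

solved = {"x": uint16(123)}          # hypothetical already-solved wires
print(wire_value("456", solved))     # 456 -- numeric token
print(wire_value("x", solved))       # 123 -- known wire
print(wire_value("y", solved))       # None -- cannot be resolved yet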
Example #5
    def test_valid(self):
        prop = bcpp.Int()

        assert prop.is_valid(None)

        assert prop.is_valid(0)
        assert prop.is_valid(1)

        assert prop.is_valid(np.int8(0))
        assert prop.is_valid(np.int8(1))
        assert prop.is_valid(np.int16(0))
        assert prop.is_valid(np.int16(1))
        assert prop.is_valid(np.int32(0))
        assert prop.is_valid(np.int32(1))
        assert prop.is_valid(np.int64(0))
        assert prop.is_valid(np.int64(1))
        assert prop.is_valid(np.uint8(0))
        assert prop.is_valid(np.uint8(1))
        assert prop.is_valid(np.uint16(0))
        assert prop.is_valid(np.uint16(1))
        assert prop.is_valid(np.uint32(0))
        assert prop.is_valid(np.uint32(1))
        assert prop.is_valid(np.uint64(0))
        assert prop.is_valid(np.uint64(1))

        # TODO (bev) should fail
        assert prop.is_valid(False)
        assert prop.is_valid(True)
Example #6
 def save(self, fname):
     with open(fname, "wb") as f:  # binary mode: tofile() writes raw bytes
         np.uint16(self.location).tofile(f)
         np.uint16(len(self.stack)).tofile(f)
         np.array(self.stack, dtype=np.uint16).tofile(f)
         self.register.tofile(f)
         self.memory.tofile(f)
Example #7
def _combine_bytes(msb, lsb):
    msb = np.uint16(msb)
    lsb = np.uint16(lsb)

    value = (msb << 8) | lsb

    return np.int16(value) / 900
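A worked example of the two's-complement combine; the 1/900 scale factor is taken from the snippet and presumably matches the sensor's LSB-per-unit rating:

import numpy as np

# 0xFF38 reinterpreted as int16 is -200, so the scaled reading is -200/900
print(_combine_bytes(0xFF, 0x38))   # approximately -0.2222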
Example #8
def encode_color(grey_left, grey_right):
    """
    encodes the two grey values from two corresponding pixels in
    one color value.

    grey_left and grey_right must be integers between 0 and 1023, or castable to such.

    returns a tuple of (r, g, b) integers between 0 and 255.

    >>> encode_color(0, 0)
    (0, 0, 0)
    >>> encode_color(1020, 0)
    (0, 255, 0)
    """
    # store most significant bits of grey_left in green
    green = np.uint16(grey_left/4)
    # store most significant bits of grey_right in red
    red = np.uint16(grey_right/4)

    # store least significant two bits in correct blue bits
    # for left it has to be in the 5th and 6th bit
    # for right it has to be in the 1st and 2nd bit
    blue = np.uint16((grey_left % 4)*16 + (grey_right % 4)*1)

    return (red, green, blue)  # (r, g, b) order, matching the docstring
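A hypothetical inverse (not part of the original) that makes the bit layout easy to round-trip check:

def decode_color(r, g, b):
    """Recover (grey_left, grey_right) from a pixel produced by encode_color."""
    grey_left = int(g) * 4 + ((int(b) >> 4) & 3)   # high 8 bits from green, low 2 from blue bits 5-6
    grey_right = int(r) * 4 + (int(b) & 3)         # high 8 bits from red, low 2 from blue bits 1-2
    return grey_left, grey_right

assert decode_color(*encode_color(1020, 7)) == (1020, 7)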
Example #9
def write_sequence_file(awgData, fileName, miniLLRepeat=1):
	'''
	Main function to pack channel LLs into an APS h5 file.
	'''
	#Preprocess the sequence data to handle APS restrictions
	LLs12, repeat12, wfLib12 = preprocess(awgData['ch12']['linkList'],
		                                      awgData['ch12']['wfLib'],
		                                      awgData['ch12']['correctionT'])
	LLs34, repeat34, wfLib34 = preprocess(awgData['ch34']['linkList'],
		                                      awgData['ch34']['wfLib'],
		                                      awgData['ch34']['correctionT'])
	assert repeat12 == repeat34, 'Failed to unroll sequence'
	if repeat12 != 0:
		miniLLRepeat *= repeat12

	#Merge the marker data into the IQ link lists
	merge_APS_markerData(LLs12, awgData['ch1m1']['linkList'], 1)
	merge_APS_markerData(LLs12, awgData['ch2m1']['linkList'], 2)
	merge_APS_markerData(LLs34, awgData['ch3m1']['linkList'], 1)
	merge_APS_markerData(LLs34, awgData['ch4m1']['linkList'], 2)

	#Open the HDF5 file
	if os.path.isfile(fileName):
		os.remove(fileName)
	with h5py.File(fileName, 'w') as FID:

		#List of which channels we have data for
		#TODO: actually handle incomplete channel data
		channelDataFor = [1,2] if LLs12 else []
		channelDataFor += [3,4] if LLs34 else []
		FID['/'].attrs['Version'] = 2.1
		FID['/'].attrs['channelDataFor'] = np.uint16(channelDataFor)
		FID['/'].attrs['miniLLRepeat'] = np.uint16(miniLLRepeat - 1)

		#Create the waveform vectors
		wfInfo = []
		for wfLib in (wfLib12, wfLib34):
			wfInfo.append(create_wf_vector({key:wf.real for key,wf in wfLib.items()}))
			wfInfo.append(create_wf_vector({key:wf.imag for key,wf in wfLib.items()}))

		LLData = [LLs12, LLs34]
		repeats = [0, 0]
		#Create the groups and datasets
		for chanct in range(4):
			chanStr = '/chan_{0}'.format(chanct+1)
			chanGroup = FID.create_group(chanStr)
			chanGroup.attrs['isIQMode'] = np.uint8(1)
			#Write the waveformLib to file
			FID.create_dataset('{0}/waveformLib'.format(chanStr), data=wfInfo[chanct][0])

			#For A channels (1 & 3) we write link list data if we actually have any
			if (np.mod(chanct,2) == 0) and LLData[chanct//2]:
				groupStr = chanStr+'/linkListData'
				LLGroup = FID.create_group(groupStr)
				LLDataVecs, numEntries = create_LL_data(LLData[chanct//2], wfInfo[chanct][1], os.path.basename(fileName))
				LLGroup.attrs['length'] = numEntries
				for key,dataVec in LLDataVecs.items():
					FID.create_dataset(groupStr+'/' + key, data=dataVec)
			else:
				chanGroup.attrs['isLinkListData'] = np.uint8(0)
Example #10
 def read_calibration_data(self):
   # Read calibration data
   self.AC1 = numpy.int16(self.read_word(self.BMP183_REG['CAL_AC1']))
   self.AC2 = numpy.int16(self.read_word(self.BMP183_REG['CAL_AC2']))
   self.AC3 = numpy.int16(self.read_word(self.BMP183_REG['CAL_AC3']))
   self.AC4 = numpy.uint16(self.read_word(self.BMP183_REG['CAL_AC4']))
   self.AC5 = numpy.uint16(self.read_word(self.BMP183_REG['CAL_AC5']))
   self.AC6 = numpy.uint16(self.read_word(self.BMP183_REG['CAL_AC6']))
   self.B1 = numpy.int16(self.read_word(self.BMP183_REG['CAL_B1']))
   self.B2 = numpy.int16(self.read_word(self.BMP183_REG['CAL_B2']))
   self.MB = numpy.int16(self.read_word(self.BMP183_REG['CAL_MB']))
   self.MC = numpy.int16(self.read_word(self.BMP183_REG['CAL_MC']))
   self.MD = numpy.int16(self.read_word(self.BMP183_REG['CAL_MD']))
   self.ID = numpy.int16(self.read_byte(self.BMP183_REG['ID']))
   print("CALIBRATION DATA")
   print("AC1: {0}".format(self.AC1))
   print("AC2: {0}".format(self.AC2))
   print("AC3: {0}".format(self.AC3))
   print("AC4: {0}".format(self.AC4))
   print("AC5: {0}".format(self.AC5))
   print("AC6: {0}".format(self.AC6))
   print("B1:  {0}".format(self.B1))
   print("B2:  {0}".format(self.B2))
   print("MB:  {0}".format(self.MB))
   print("MC:  {0}".format(self.MC))
   print("MD:  {0}".format(self.MD))
   print("ID:  {0}".format(self.ID))
   print("")
Example #11
def oldmovie(src, dst):
    graph = Image.open(src)
    mask = Image.open('PhotoManager/Library/mask.png')
    size = graph.size
    width = size[0]
    height = size[1]

    mask = mask.resize((width, height))
    mask = ny.uint16(ny.array(mask))
    result = ny.uint16(ny.array(graph))

    b = 10
    g = 130
    r = 200
    gray = 0
    for row in range(height):
        for col in range(width):
            gray = (result[row, col, 0] + result[row, col, 1] + result[row, col, 2]) / 3
            b = mode(gray, b)
            g = mode(gray, g)
            r = mode(gray, r)
            result[row, col, 0] = mode(b, mask[row, col, 0])
            result[row, col, 1] = mode(g, mask[row, col, 1])
            result[row, col, 2] = mode(r, mask[row, col, 2])

    result = Image.fromarray(ny.uint8(result)).convert('RGB')
    result.save(dst)
    return 0
Example #12
    def identify(self):
        # get the current byte at pc
        rom_instruction = True
        self.instruction_byte = self._get_memory_owner(self.pc_reg).get(self.pc_reg)
        if type(self.instruction_byte) is not bytes:
            rom_instruction = False
            self.instruction_byte = bytes([self.instruction_byte])

        # turn the byte into an Instruction
        self.instruction = self.instructions.get(self.instruction_byte, None)  # type: Instruction
        if self.instruction is None:
            raise Exception('Instruction not found: {}'.format(self.instruction_byte.hex()))

        # get the data bytes
        if rom_instruction:
            self.data_bytes = self.rom.get(self.pc_reg + np.uint16(1), self.instruction.data_length)
        else:
            if self.instruction.data_length > 0:
                self.data_bytes = bytes([self.get_memory(self.pc_reg + np.uint16(1), self.instruction.data_length)])
            else:
                self.data_bytes = bytes()

        # print out diagnostic information
        # example: C000  4C F5 C5  JMP $C5F5                       A:00 X:00 Y:00 P:24 SP:FD CYC:  0
        print('{}, {}, {}, A:{}, X:{}, Y:{}, P:{}, SP:{}'.format(hex(self.pc_reg),
                                                                 (self.instruction_byte + self.data_bytes).hex(),
                                                                 self.instruction.__name__, hex(self.a_reg),
                                                                 hex(self.x_reg), hex(self.y_reg),
                                                                 hex(self.status_reg.to_int()), hex(self.sp_reg)))
Example #13
 def save(self, file_name):
   '''
     Save data to a binary file and flush output buffer
     :param file_name:
   '''
   # Start of writing
   try:
     fid = open(file_name, 'wb')
   except IOError:
      print("Can't create file: {0}".format(file_name))
     return
   # Write options to binary file
    fid.write(b'fc')
   fid.write(numpy.uint8([1, 0]))
   fid.write(numpy.uint16(self.option.width))
   fid.write(numpy.uint16(self.option.height))
   fid.write(numpy.uint16(self.option.rank_size))
   fid.write(numpy.uint16(self.option.min_rank_size))
   fid.write(numpy.uint8(self.option.dom_step))
   # Write main data to binary file
   data_ln = numpy.uint32(numpy.ceil(self.cur_bit / 8.0))
   fid.write(data_ln)
    fid.write(self.out_buf[: (data_ln // 2)])
   # End of writing and clear output buffer
   fid.close()
   del self.out_buf
Example #14
def quantization(img, channels, r_bits=8, g_bits=8, b_bits=8):
    img_out = None

    if r_bits == 8 and r_bits == g_bits and r_bits == b_bits:
        return img

    if r_bits <= 16 and g_bits <= 16 and b_bits <= 16:
        if r_bits < 8 or g_bits < 8 or b_bits < 8:
            img_out = mediancut_algorithm(np_to_pil(img), 2 ** (r_bits+ g_bits + b_bits), np.array((r_bits, g_bits, b_bits)), channels)

        else:
            height = np.size(img,0)
            width = np.size(img,1)
            img_out = np.zeros(shape=img.shape, dtype=np.uint16)

            for i in range (height):
                for j in range (width):
                    if channels == 1:
                        aux = int(img[i][j]) / 255
                        img_out[i][j] = aux * ( 2 ** r_bits )

                    elif channels == 3:
                        r_aux = float(img[i][j][0]) / 255
                        g_aux = float(img[i][j][1]) / 255
                        b_aux = float(img[i][j][2]) / 255

                        new_r = np.uint16(r_aux * ( 2 ** r_bits ))
                        new_g = np.uint16(g_aux * ( 2 ** g_bits ))
                        new_b = np.uint16(b_aux * ( 2 ** b_bits ))

                        img_out[i, j] = np.array((new_r, new_g, new_b))
        return img_out
Example #15
    def initialize(data, mode):
        # mapping index to user name and book isbn
        n = len(data)
        p = len(user_list) + len(book_list)
        x = ss.lil_matrix((n, p), dtype=np.uint16)
        y = np.zeros((n, 1), dtype=float)

        for row, entry in enumerate(data):
            user = entry["user"]
            isbn = entry["isbn"]
            user_i = id_i[user]
            isbn_i = id_i[isbn]
            x[row, user_i] = np.uint16(1)
            x[row, isbn_i] = np.uint16(1)

            if mode == "train":
                rating = entry["rating"]
                y[row] = float(rating)

        if mode == "train":
            y -= train_mean
            x = ss.csc_matrix(x)
            # xtx = x.transpose().dot(x)
            # xty = x.transpose().dot(y)
            return x, y
        else:
            return x
Example #16
def initialize(data):
    # mapping index to user name and book isbn   
    n = len(data); p = len(user_list) + len(book_list)
    x = ss.lil_matrix((n,p), dtype = np.uint16)
    y = np.zeros((n,1), dtype=float)
    
    row = 0
    counter = 0
    for entry in data:
        counter += 1
        if counter % 10000 == 0:
            print(int(counter / n * 100))
        user = entry['user']; isbn = entry['isbn']; rating = entry['rating']
        user_i = id_i[user]; isbn_i = id_i[isbn]
    
        x[row, user_i] = np.uint16(1)
        x[row, isbn_i] = np.uint16(1)
        y[row] = float(rating)
        row += 1
    

    y -= global_mean    
    x = ss.csc_matrix(x)
    #xtx = x.transpose().dot(x)
    #xty = x.transpose().dot(y)
    
    return x, y
Example #17
    def galois_lfsr(seed, taps):
        seed = np.uint16(seed)
        taps = np.uint16(taps)
        start_state = np.uint16(seed)      # Any nonzero start state will work
        arg = "start_state \t\t%s" % bin(start_state)
        print(arg)
        lf_state_register = np.uint16(start_state)
        period = 0
        bit_sequence = []
        while True:
            lsb = lf_state_register & 1             # /* Get LSB (i.e., the output bit). */
            bit_sequence.append(lsb)
            lf_state_register >>= 1                 # /* Shift register */
            lf_state_register ^= (-lsb) & taps    # /* If the output bit is 1, apply toggle mask.
                                                    # * The value has 1 at bits corresponding
                                                    # * to taps, 0 elsewhere. */
            period += 1
            # arg = "lf_state_register \t%s" % bin(lf_state_register)
            # print(arg)
            if lf_state_register == start_state:
                break

        # arg = "bit_sequence[0] %s" % bit_sequence[0]
        # print(arg)
        # arg = "bit_sequence[1] %s" % bit_sequence[1]
        # print(arg)
        return bit_sequence
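A quick sanity check, assuming the function is callable at module scope; 0xB400 is the classic maximal-length tap mask for a 16-bit Galois LFSR, so the period should cover every nonzero state:

seq = galois_lfsr(0xACE1, 0xB400)   # any nonzero seed works
print(len(seq))                     # 65535 == 2**16 - 1 for maximal-length taps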
Example #18
def pack_waveform(analog, marker1, marker2):
    '''
    Helper function to convert a floating-point analog channel and two logical marker channels into a sequence of 16-bit integers.
    AWG 5000 series binary data format:
    m2 m1 d14 d13 d12 d11 d10 d9 d8 d7 d6 d5 d4 d3 d2 d1
    16-bit format with the markers occupying the top two bits, followed by the 14-bit
    analog channel value.
    '''

    #Convert floating-point values on [-1,1] to integer codes on [0, 2^14-1 (16383)]
    #The AWG actually makes 11,1111,1111,1110 the 100% output, and
    # 11,1111,1111,1111 is one step larger than 100% output, so we
    # ignore the one extra positive code and scale onto [0,16382]
    analog[analog>1] = 1.0
    analog[analog<-1] = -1.0

    maxLength = max(analog.size, marker1.size, marker2.size)

    if marker1.size < maxLength:
        marker1 = np.append(marker1, np.zeros(maxLength-marker1.size, dtype=bool))
    if marker2.size < maxLength:
        marker2 = np.append(marker2, np.zeros(maxLength-marker2.size, dtype=bool))
    if analog.size < maxLength:
        analog = np.append(analog, np.zeros(maxLength-analog.size, dtype=np.float64))

    binData = np.uint16(MAX_WAVEFORM_VALUE*analog + MAX_WAVEFORM_VALUE)
    binData += 2**14*np.uint16(marker1) + 2**15*np.uint16(marker2)
    
    return binData
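A hypothetical inverse for checking the packing. It assumes MAX_WAVEFORM_VALUE == 8191 (2**13 - 1), which is consistent with the [0,16382] scaling described in the comments above:

import numpy as np

MAX_WAVEFORM_VALUE = 8191  # assumed: 2*8191 == 16382 matches the comment above

def unpack_waveform(binData):
    marker2 = (binData >> 15) & 1
    marker1 = (binData >> 14) & 1
    analog = (binData & 0x3FFF).astype(np.float64)/MAX_WAVEFORM_VALUE - 1.0
    return analog, marker1, marker2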
Example #19
 def get_bits(self, b):
   nb = numpy.uint8(b) # bit8
   #
   num_word = numpy.uint32(self.cur_bit) # bit32
   nbit_on_word = numpy.uint8(num_word - ((num_word >> 4) << 4)) # bit8
   num_word = num_word >> 4
   nbit_on_sword = numpy.uint8(0) # bit8
   #
   rtail = 16 - numpy.int8(nbit_on_word + nb)
   if rtail < 0:
     nbit_on_sword = numpy.uint8(abs(rtail))
     rtail = 0
   #
   if num_word < len(self.out_buf):
     left_p = numpy.uint16((self.out_buf[num_word] << nbit_on_word) >> (nbit_on_word + rtail)) # bit16
   else:
     left_p = numpy.uint16(0)
   if (num_word + 1) < len(self.out_buf):
     right_p = numpy.uint16(self.out_buf[num_word + 1] >> (16 - nbit_on_sword)) # bit16
   else:
     right_p = numpy.uint16(0)
   #
   left_p <<= nbit_on_sword
   #
   self.cur_bit += b
   return left_p + right_p
Example #20
def mpl_Hough(csv_head, img, gray, png_x, png_y, hot_r, png_ruler, lowThreshold, higThreshold):
    
    min_Dist, sml_Radius, big_Radius = hot_R_Hough(hot_r, png_ruler)
    
    circles1 = cv2.HoughCircles(gray,cv2.HOUGH_GRADIENT ,1,
                                minDist=min_Dist,param1=lowThreshold,param2=higThreshold,
                                minRadius=sml_Radius,maxRadius=big_Radius)
    img0 = np.array(img)
    h, w = gray.shape  # shape is (rows, cols)
    line_x = np.uint16(round(png_x))
    line_y = np.uint16(round(png_y))
    cv2.line(img0,(line_x,0),(line_x,h),(255,0,0),2)
    cv2.line(img0,(0,line_y),(w,line_y),(255,0,0),2)
                            
    if circles1 is None:
        cv2.imshow('mpl_Hough',img0)
        return 0,0,0,0
    else:
        circles = circles1[0,:,:]  # take the 2-D slice of the 3-D result; bug-prone, since HoughCircles returns either None or an array
        png_x, png_y, png_r = select_Hot_Spot(circles,png_x,png_y)
        
        circles = np.uint16(np.around(circles))  # round to the nearest integer
        for i in circles[:]:
            cv2.circle(img0,(i[0],i[1]),i[2],(0,0,255),2)  # draw the circle
            cv2.circle(img0,(i[0],i[1]),2,(0,0,255),2)  # draw the circle center
        box_x0, box_y0, box_x1, box_y1 = lock_Box_Draw(png_x, png_y, png_r)
        cv2.rectangle(img0,(box_x0,box_y0),(box_x1,box_y1),(0,255,0),2) 
        
        pzt_x, pzt_y = pngXY_to_pztXY(csv_head, gray, png_x, png_y)
        font = cv2.FONT_HERSHEY_SIMPLEX
        cv2.putText(img0,"({:.2f}, {:.2f})".format(pzt_x, pzt_y),(10,30), font, 0.8,(255,255,255),2)
        cv2.imshow('mpl_Hough',img0) 
        return 1, png_x, png_y, png_r     # floating-point pixel coordinates
Example #21
def load_lyrics(lyrics_file, tids):
    lyrics = {}

    # Enable fast lookup (hash based) of tids
    tids = set(tids)
    
    # Read in the list of terms at the beginning of the file
    with open(lyrics_file, 'r') as f:    
        for line in f:
            if line == '' or line.strip() == '':
                continue
            if line[0] == '%':
                lyrics['terms'] = line.strip()[1:].split(',')
                break
                                
    # Figure out the unstemmed versions of all the terms (note that this will
    # not necessarily recover the originals, since multiple words can be converted
    # to the same stem)
    stemmed_to_unstemmed = pd.read_csv('musicXmatch/mxm_reverse_mapping.txt', sep='<SEP>', header=None, index_col=0, names=['unstemmed'])
    lyrics['unstemmed_terms'] = stemmed_to_unstemmed.loc[lyrics['terms']].unstemmed.values
                    
#    tdm = pd.read_csv(lyrics_file, skiprows=18, nrows=1000)


    tdm = np.zeros([len(tids),len(lyrics['terms'])], dtype=np.uint32)
    lyrics['track_id'] = []
    start = time.time()
    with open(lyrics_file, 'r') as f:
        cnt_lines = 0
        for line in f:
            if line == '' or line.strip() == '':
                continue
            if line[0] in ('#', '%'):
                continue
            
            lineparts = re.split(r"[:,]",line)
            tid = lineparts[0]
            if not (tid in tids):
                  continue

            indices = np.uint16(lineparts[2::2])
            counts = np.uint16(lineparts[3::2])
            tdm[cnt_lines,indices-1] = counts

            lyrics['track_id'].append(lineparts[0])
            # equivalent per-word loop:
            # for wordcnt in lineparts[2:]:
            #     wordid, cnt = wordcnt.split(':')
            #     tdm[cnt_lines, int(wordid)-1] = int(cnt)

            cnt_lines += 1
            if cnt_lines % 1000 == 0:
                print((time.time() - start) / 60.)
                print('Done with %d tracks.' % cnt_lines)
    lyrics['tdm'] = scipy.sparse.csr_matrix(tdm)
    print((time.time() - start) / 60.)
    return lyrics
Example #22
def whiteDensity(image,ellipse):
  ((x,y),(h,w),a) = ellipse
  xi,xf = np.uint16(x - w/2), np.uint16(x + w/2)
  yi,yf = np.uint16(y - h/2), np.uint16(y + h/2)

  m = np.asarray(image[yi:yf,xi:xf])
  m = m.astype(bool)
  return np.divide(np.sum(m), (w * h))
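A small smoke test with a synthetic image; note the ellipse is unpacked as ((x, y), (h, w), angle) above:

import numpy as np

img = np.zeros((100, 100), dtype=np.uint8)
img[40:60, 40:60] = 255                             # a 20x20 white square
print(whiteDensity(img, ((50, 50), (20, 20), 0)))   # 1.0: the sampled box is entirely white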
Example #23
def convertStringToHex(hexString, modifier):
	immediate = hexString.replace(" ", "")
	if modifier == "n":
		return np.uint32(int(immediate, 16))
	elif modifier == 'u':
		return np.uint32(np.uint16(int(immediate, 16)))
	elif modifier == 'h':
		# widen to uint32 before shifting so the value isn't lost to uint16 overflow
		return np.uint32(np.uint16(int(immediate, 16))) << 16
Example #24
 def crc_xmodem(self, crc, data):
     crc = np.uint16(crc ^ (data << 8))
     for i in range(0, 8):
         if crc & 0x8000:
             crc = np.uint16((crc << 1) ^ 0x1021)
         else:
             crc = np.uint16(crc << 1)
     return crc
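A usage sketch against the standard CRC-16/XMODEM check value, 0x31C3 for the ASCII bytes "123456789"; `dev` stands in for whatever instance owns the method:

import numpy as np

crc = np.uint16(0)
for byte in b"123456789":
    crc = dev.crc_xmodem(crc, byte)   # 'dev' is a hypothetical instance
assert crc == 0x31C3                  # published CRC-16/XMODEM check value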
Example #25
def rotate_image( a, resize = 1.5, angle = 180., interpolation = "linear", blocks = (16,16,1)  ):
    """
    Rotates the array. The new array has the new size and centers the
    picture in the middle.

    a             - array (2-dim)
    resize        - ratio of new image width to old image width
    angle         - degrees to rotate the image
    interpolation - "linear" or None
    blocks        - given to the kernel when run


    returns: a new array with dtype=uint8 containing the rotated image
    """
    angle = angle/180. *pi

    # Convert this image to float. Unsigned int texture gave
    # strange results for me. This conversion is slow though :(
    a = a.astype("float32")

    # Calculate the dimensions of the new image
    calc_x = lambda p: (p[0]*a.shape[1]/2.*cos(angle)-p[1]*a.shape[0]/2.*sin(angle))
    calc_y = lambda p: (p[0]*a.shape[1]/2.*sin(angle)+p[1]*a.shape[0]/2.*cos(angle))

    xs = [ calc_x(p) for p in [ (-1.,-1.),(1.,-1.),(1.,1.),(-1.,1.) ] ]
    ys = [ calc_y(p) for p in [ (-1.,-1.),(1.,-1.),(1.,1.),(-1.,1.) ] ]

    new_image_dim = (
        int(numpy.ceil(max(ys)-min(ys))*resize),
        int(numpy.ceil(max(xs)-min(xs))*resize),
    )

    # Now generate the cuda texture
    cuda.matrix_to_texref(a, texref, order="C")

    # We could set the next if we wanted to address the image
    # in normalized coordinates ( 0 <= coordinate < 1.)
    # texref.set_flags(cuda.TRSF_NORMALIZED_COORDINATES)
    if interpolation == "linear":
        texref.set_filter_mode(cuda.filter_mode.LINEAR)

    # Calculate the gridsize. This is entirely given by the size of our image.
    gridx = new_image_dim[0]//blocks[0] if \
            new_image_dim[0]%blocks[0]==0 else new_image_dim[0]//blocks[0] +1
    gridy = new_image_dim[1]//blocks[1] if \
            new_image_dim[1]%blocks[1]==0 else new_image_dim[1]//blocks[1] +1

    # Get the output image
    output = numpy.zeros(new_image_dim,dtype="uint8")

    # Call the kernel
    copy_texture_func(
        numpy.float32(resize), numpy.float32(angle),
        numpy.uint16(a.shape[1]), numpy.uint16(a.shape[0]),
        numpy.uint16(new_image_dim[1]), numpy.uint16(new_image_dim[0]),
            cuda.Out(output),texrefs=[texref],block=blocks,grid=(gridx,gridy))

    return output
Example #26
    def saveTiff(self, drappingMain):
        
        rasterPath = QFileDialog.getSaveFileName(drappingMain,"save file dialog" ,"/ortho.tiff","Images (*.tiff)")
        
        maskedOrtho = self.ortho
        pointRaster = self.pointRaster
        im = self.image
        
        if rasterPath:
                    
            cols = pointRaster.RasterXSize
            rows = pointRaster.RasterYSize
            
            geoTrans = pointRaster.GetGeoTransform()
            geoTrans = list(geoTrans)
            x_min = geoTrans[0]
            pixelWidth = geoTrans[1]
            y_min = geoTrans[3]
            pixelHeight = geoTrans[5]
            
            nDim = np.ndim(im)
            if nDim == 3:
                nBand = im.shape[2]
                
                driver = gdal.GetDriverByName('GTiff')
                outRaster = driver.Create(rasterPath, cols, rows, nBand, gdal.GDT_UInt16)
                outRaster.SetGeoTransform((x_min, pixelWidth, 0, y_min, 0, pixelHeight))
                
                for i in range(nBand):
                    outband = outRaster.GetRasterBand(i+1)
                    outband.WriteArray(np.uint16(maskedOrtho[:,:,i]))
                    outband.FlushCache()
                outRasterSRS = osr.SpatialReference()
                outRasterSRS.ImportFromEPSG(self.epsg)
                outRaster.SetProjection(outRasterSRS.ExportToWkt())
                outRaster = None
                
#            driver = gdal.GetDriverByName('GTiff')
#            outRaster = driver.Create(rasterSaveName, cols, rows, 1, gdal.GDT_UInt16)
#            outRaster.SetGeoTransform((originX, pixelWidth, 0, originY, 0, pixelHeight))
#            outband = outRaster.GetRasterBand(1)
#            outband.WriteArray(boolMat)
#            outRasterSRS = osr.SpatialReference()
#            outRasterSRS.ImportFromEPSG(self.crs.srsid ())#2056)
#            outRaster.SetProjection(outRasterSRS.ExportToWkt())
                
            else:
                driver = gdal.GetDriverByName('GTiff')
                outRaster = driver.Create(rasterPath, cols, rows, 1, gdal.GDT_UInt16)
                outRaster.SetGeoTransform((x_min, pixelWidth, 0, y_min, 0, pixelHeight))
                outband = outRaster.GetRasterBand(1)
                outband.WriteArray(np.uint16(maskedOrtho))
                outRasterSRS = osr.SpatialReference()
                outRasterSRS.ImportFromEPSG(self.epsg)
                outRaster.SetProjection(outRasterSRS.ExportToWkt())
                outband.FlushCache()
                outRaster = None
Example #27
  def RGBAChannel ( self ):
    """Convert the uint64 data back into four 16-bit channels"""

    zdim, ydim, xdim = self.data.shape
    newcube = np.zeros( (4, zdim, ydim, xdim), dtype=np.uint16 )
    newcube[0,:,:,:] = np.bitwise_and(self.data, 0xffff, dtype=np.uint16)
    newcube[1,:,:,:] = np.uint16 ( np.right_shift( self.data, 16) & 0xffff )
    newcube[2,:,:,:] = np.uint16 ( np.right_shift( self.data, 32) & 0xffff )
    newcube[3,:,:,:] = np.uint16 ( np.right_shift (self.data, 48) )
    self.data = newcube
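A standalone check of the same bit layout on a hypothetical scalar, rather than the class's self.data volume:

import numpy as np

packed = np.uint64((4 << 48) | (3 << 32) | (2 << 16) | 1)
for shift in (0, 16, 32, 48):
    print(np.uint16(np.right_shift(packed, shift) & np.uint64(0xffff)))   # 1, 2, 3, 4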
Example #28
def check_exists(word, connect_dict, values_dict, num_edges_dict):
    if word not in connect_dict:
        connect_dict[word] = dict()
    if word not in values_dict:
        if word.isdigit():
            values_dict[word] = uint16(int(word))
        else:
            values_dict[word] = uint16(0)
    if word not in num_edges_dict:
        num_edges_dict[word] = 0
Example #29
 def _DoCRC(crc, onech):
     assert(type(crc) is uint16 and type(onech) is uint8)
     ans = uint32(crc ^ (onech << uint16(8)))  # parenthesized for clarity; << binds tighter than ^
     for ind in range(8):
         if ans & uint32(0x8000):
           ans <<= uint32(1)
           ans = ans ^ uint32(4129)
         else:
           ans <<= uint32(1)
     return uint16(ans)
Example #30
    def channelLastValue(self, channelNum, value):
        if len(self.data) <= 0:
            raise ValueError('No Channel Data. Try using the delayAndWidth method.')

        if value == True or value > 0:
            binChannel = numpy.uint16(2**channelNum)
            self.data[-1] = self.data[-1] | binChannel
        else:
            binChannel = ~numpy.uint16(2**channelNum)
            self.data[-1] = self.data[-1] & binChannel
        return self.data
Example #31
    seconds = int(cap.get(cv2.CAP_PROP_POS_FRAMES) / cap.get(cv2.CAP_PROP_FPS))
    videoTime = secs_to_HMS(seconds)

    #Detect circles in the image and mask them out in black: sewage pipes are usually joined by rings, which are not defects.
    circles = cv2.HoughCircles(maskedImage,
                               cv2.HOUGH_GRADIENT,
                               1,
                               20,
                               param1=200,
                               param2=20,
                               minRadius=int(maskRadius / 2),
                               maxRadius=int(maskRadius))

    #If detection is circle and apply detection is ON, block it by a circle mask.
    if circles is not None and applyDetection:
        circles = np.uint16(np.around(circles))
        for i in circles[0, :]:
            #Draw the outer circle
            cv2.circle(maskedImage, (i[0], i[1]), i[2], (0, 0, 0), 20)
            #Draw the maskCenter of the circle
            arcDenoisedImage = detectArcs(dat, maskedImage, 2, circles)
            contouredImage = highlightContours(arcDenoisedImage)

    #If detection is not a circle and apply detection is ON, measure the detection size
    elif circles is None and applyDetection:
        contouredImage = highlightContours(maskedImage)
        for contour in contouredImage:
            #Get the contours measurements
            [x, y, w, h] = cv2.boundingRect(contour)
            new_blockage_area = w * h
            new_blockage_y = y
Example #32
        w = 510
        img = img[y:y + h, x:x + w]
        """

        # invert the image
        # if OVERRIDE_MAXVALUE != -1:
        #   img = OVERRIDE_MAXVALUE-img
        """
        # Normalize
        img = img/OVERRIDE_MAXVALUE
        """

        if not os.path.exists(dest_path):
            os.makedirs(dest_path)
        cv2.imwrite(os.path.join(dest_path,
                                 str(num_lamb) + ".png"), np.uint16(img))

        # save the label
        label = [[1, 0] if j[key] == "bad" else [0, 1]][0]
        label_list.append(label)

        num_lamb += 1

        i2 += 1
        printProgressBar(i2,
                         num_images,
                         prefix='Estructurando JSON ' + str(json_number) + ':',
                         suffix='Completado',
                         length=100,
                         color=118)
Example #33
def index():
    if request.method == 'POST':
        # check if the post request has the file part
        if 'file' not in request.files:
            print('No file part')
            return redirect(request.url)
        file = request.files['file']
        # if the user does not select a file, the browser
        # submits an empty part without a filename
        if file.filename == '':
            flash('No selected file')
            return redirect(request.url)
        if file and allowed_file(file.filename):
            filename = secure_filename(file.filename)
            lokasi_file = os.path.join(app.config['UPLOAD_FOLDER'], filename)
            file.save(lokasi_file)
            # utama(filename)
            filenamee = filename

    iterate_name = 1
    # loop through the input images
    for file in glob.glob(input_path + "/*.bmp"):
        try:
            # the Hough transform happens here
            img_gray = cv2.imread(file, 0)
            img_gray = cv2.medianBlur(img_gray, 5)
            img_biner = cv2.cvtColor(img_gray, cv2.COLOR_GRAY2BGR)
            # parameters for classes 1-7
            circles = cv2.HoughCircles(img_gray,
                                       cv2.HOUGH_GRADIENT,
                                       1,
                                       20,
                                       param1=350,
                                       param2=55,
                                       minRadius=0,
                                       maxRadius=0)
            if circles is None:
                circles = cv2.HoughCircles(img_gray,
                                           cv2.HOUGH_GRADIENT,
                                           1,
                                           20,
                                           param1=320,
                                           param2=37,
                                           minRadius=0,
                                           maxRadius=0)
                if circles is None:
                    continue
            circles = np.uint16(np.around(circles))

            # cv2.imshow("hasil", circles)
            for i in circles[0, :]:
                cv2.circle(img_biner, (i[0], i[1]), i[2], (0, 255, 255), 2)
                cv2.circle(img_biner, (i[0], i[1]), 2, (0, 0, 255), 112)

            flag = 1
            row, col, ch = img_biner.shape
            graykanvas = np.zeros((row, col, 1), np.uint8)
            for i in range(0, row):
                for j in range(0, col):
                    b, g, r = img_biner[i, j]
                    if b == 255 and g == 0 and r == 0:  # use 'and'; bitwise '&' binds before '==' and silently changes the test
                        graykanvas.itemset((i, j, 0), 255)
                        if (flag == 1):
                            x = i
                            y = j
                            flag = 100
                    else:
                        graykanvas.itemset((i, j, 0), 0)

            img_hasil = cv2.subtract(graykanvas, img_gray)

            namafile = file.split("\\")[-1]

            hasil_crop = img_hasil[x:x + 112, y - 56:y + 56]  # note: array indexing is [row, col]
            cv2.imwrite(os.path.join(hasil_path,
                                     str(iterate_name) + '.jpg'), hasil_crop)
            iterate_name += 1
            print("crop")
            print(hasil_crop)
            print(type(hasil_crop))
        except OSError as e:
            print("Something happened:", e)

    # for file in glob.glob(hasil_path + "/*.jpg"):

    for file in glob.glob(hasil_path + "/*.jpg"):
        try:
            # read the Hough transform result image
            print("glob")
            print(file)
            print(type(file))
            image = cv2.imread(file)

            # resize the image
            image = cv2.resize(image, fixed_size)

            fv_histogram = fd_histogram(image)
            global_feature = np.hstack([fv_histogram])

            # predict label of test image
            modelrf = pickle.load(open("model.sav", 'rb'))
            prediction = modelrf.predict(global_feature.reshape(1, -1))[0]

            print("kelas:", train_labels[prediction])
            # show predicted label on image
            cv2.putText(image, train_labels[prediction], (20, 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 255), 1)

            namafile = "hasil"
            cv2.imwrite(os.path.join(input_path, namafile + "_hasil" + ".jpg"),
                        image)
            print(train_labels[prediction])
            return render_template('hasil.html',
                                   hasil=train_labels[prediction],
                                   nama=namafile + "_hasil" + ".jpg")
        except OSError as e:
            print("Something happened:", e)

    return render_template('hasil.html')
Example #34
#circles_s = cv2.HoughCircles(s_part,cv2.HOUGH_GRADIENT,1,minDist=500,
#param1=200,param2=13,minRadius=10,maxRadius=40)
circles_s = cv2.HoughCircles(s_part,
                             cv2.HOUGH_GRADIENT,
                             dbValue_s,
                             minDist=minDist_s,
                             param1=param1_s,
                             param2=param2_s,
                             minRadius=minRadius_s,
                             maxRadius=maxRadius_s)
#For basement pictures in TestDrone640 use minRadius=10,maxRadius=30)
#For  pica 30 use: param1=500,param2=9,minRadius=5,maxRadius=15)
#pica 33: circles_s = cv2.HoughCircles(s_part,cv2.HOUGH_GRADIENT,1,minDist=500,
#param1=200,param2=13,minRadius=10,maxRadius=40)

circles_s = np.uint16(np.around(circles_s))
for i in circles_s[0, :]:
    # draw the outer circle
    cv2.circle(img_s, (i[0], i[1]), i[2], (0, 255, 0), 12)
    # draw the center of the circle
    cv2.circle(img_s, (i[0], i[1]), 2, (0, 0, 255), 8)
#cv2.imshow('detected circles_h',cimg)

#cv2.waitKey(0)
#cv2.destroyAllWindows()

#cv2.imshow('s part',img_s)
cimg_s = cv2.cvtColor(img_s, cv2.COLOR_BGR2RGB)
plt.figure('s part')
imgplot = plt.imshow(cimg_s)
Example #35
                                    maxRadius=200)
    circlesBlue = cv2.HoughCircles(greyBlue,
                                   cv2.HOUGH_GRADIENT,
                                   1,
                                   50,
                                   param1=50,
                                   param2=30,
                                   minRadius=10,
                                   maxRadius=1000)

    #Handle the case where no circles were found in the frame (should not happen)
    if circlesRed is None or circlesGreen is None or circlesBlue is None:
        print("Nessuna circonferenza trovata")
        continue
    #Draw the circles (DEBUG)
    circlesBlue = np.uint16(np.around(circlesBlue))
    for i in circlesBlue[0, :]:
        cv2.circle(foto, (i[0], i[1]), i[2], (255, 0, 0),
                   1)  # draw the outer circle
        cv2.circle(foto, (i[0], i[1]), 2, (255, 0, 0),
                   3)  # draw the center of the circle
        cv2.imwrite('processed/0processed.png', foto)


#return angles in radians
def inverseCinematic(x, y, z, l1, l2, l3):
    fi = 30 * math.pi / 180  #Angle defining the gripper orientation relative to the x axis.
    xp = x - l3 * math.cos(fi)
    zp = z - l3 * math.sin(fi)

    teta2 = math.acos((xp * xp + zp * zp - l1 * l1 - l2 * l2) / (2 * l1 * l2))  # law of cosines; the denominator must be parenthesized
Example #36
 def test_numpy_numeric_type_uint16(self):
     self.assertTrue(isnumeric(np.uint16(1)),
                     "Unsigned integer (0 to 65535)")
Example #37
            # crop the image
            x = 38
            y = 102
            h = 230
            w = 510
            img = img[y:y + h, x:x + w]

            # invert the image
            # if OVERRIDE_MAXVALUE != -1:
            #   img = OVERRIDE_MAXVALUE-img

            if not os.path.exists(dest_path):
                os.makedirs(dest_path)
            cv2.imwrite(os.path.join(dest_path,
                                     str(num_ret_img) + ".png"),
                        np.uint16(img))

            # save the label
            label = j[key]["label"]
            if label == "lamb":
                num_lamb = num_lamb + 1
                label = [1, 0, 0]
            elif label == "empty":
                num_empty = num_empty + 1
                label = [0, 1, 0]
            elif label == "wrong":
                num_wrong = num_wrong + 1
                label = [0, 0, 1]
            else:
                print("hay alguna etiqueta mala: " + str(j[key]["label"]))
Example #38
def detect2(img_read, pt1, pt2, w):

        try:

            t1 = time.time()
            
            crop_img  = img_read[pt1[1]:pt2[1], pt1[0]:pt2[0]]
            
            distance = conf["distance"]
            
            #skp, tkp = cvlib.findKeyPoints(crop_img , target, distance)
 
            crop_img = cv2.medianBlur(crop_img, conf["blur_level"])
            
            gray = cv2.cvtColor(crop_img, cv2.COLOR_BGR2GRAY)

            circles = cv2.HoughCircles(gray, cv2.cv.CV_HOUGH_GRADIENT,
                          conf["dp"],
                          conf["minDist"],
                          param1=conf["param1"],
                          param2=conf["param2"],
                          minRadius=conf["minRadius"],
                          maxRadius=conf["maxRadius"])  # keyword args so each value lands on the intended parameter

            j = 0
            
            if DEBUG == "true": 
                cv2.rectangle(img_read, pt1, pt2, (0,255,0))

            """
            if DEBUG == "false":
                
                if circles is None:
        
                    #print "None."
                    save(0,0,0)
                    return 0
                    
                else:
                    
                    circles = np.uint16(np.around(circles))
                
                    save(circles)
                
                    return 1                
            """ 
            #print circles
            #print type(circles)
            if circles is None:
                #print "none"
                if DEBUG == "true": 
                    cv2.imshow("camera", img_read)
                save(0,0,0)

            else:                
                
                circles = np.uint16(np.around(circles))
                
                #x = circles[0][0][0]
                #y = circles[0][0][1]
                #r = circles[0][0][2]
                #print "else"
                for i in circles[0,:]:
                    
                    if supress(i, w):
                        
                        j = j + 1
                        
                        save(1,1,1)
                        #print i[2], i
                        if DEBUG == "true": 
                            cv2.circle(img_read,(pt1[0]+i[0],pt1[1]+i[1]),i[2],(0,255,0),2)
                            cv2.circle(img_read,(pt1[0]+i[0],pt1[1]+i[1]),2,(0,0,255),3)                       
                        
                        #cp = [ i[0], i[1] ]
                        
                        #print cp
                
                if DEBUG == "true":        
                    cv2.imshow("camera", img_read)
            
        except Exception as ex:
            #print(ex)
            #print(traceback.format_exc())
            logger.debug(ex)
Example #39
method - currently only cv2.HOUGH_GRADIENT is available
dp - inverse ratio of accumulator resolution
minDist - the minimum distance between the centers of detected circles
param1 - gradient value used in the edge detection
param2 - accumulator threshold for the HOUGH_GRADIENT method (lower allows more circles
to be detected, including false positives)
minRadius - limits the smallest circle to this size (via radius)
maxRadius - similarly sets the limit for the largest circles
"""

image = cv2.imread(os.path.dirname(__file__) + '/../images/bottlecaps.jpg')
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

blur = cv2.medianBlur(gray, 5)

circles = cv2.HoughCircles(blur, cv2.cv.CV_HOUGH_GRADIENT, 1.5, 10)
#circles = cv2.HoughCircles(gray, cv.CV_HOUGH_GRADIENT, 1, 10)

circles = np.uint16(np.around(
    circles))  # np.around 'Evenly round to the given number of decimals'

for i in circles[0, :]:
    # draw the outer circle
    cv2.circle(image, (i[0], i[1]), i[2], (255, 0, 0), 2)

    # draw the center of the circle
    cv2.circle(image, (i[0], i[1]), 2, (0, 255, 0), 5)

cv2.imshow('detected circles', image)
cv2.waitKey(0)
cv2.destroyAllWindows()
Example #40
 def set_population(self,punit_buf,grid_buf,simTime=1,sample_rate=1000,Density=150,youngs=50*1e3,roi=[]):
     self.E_ym=youngs
     self.T=float (simTime) 
     self.dt = float (1/sample_rate)   #Timestep
     self.t= np.linspace(0,self.T,int( self.T/self.dt))  
     self.Rn=1
     self.Rm=len(grid_buf)
     self.stp=int(self.Rm/2)
     self.Density=Density
     self.V1=np.random.uniform(-0.000,0.000,(self.Rm*self.Rn,self.t.size))
     self.V2=np.random.uniform(-0.000,0.000,(self.Rm*self.Rn,self.t.size))
     self.G=np.mat(np.zeros((self.Rm*self.Rn,self.Rm*self.Rn)))
     self.Gi=np.mat(np.zeros((self.Rm*self.Rn,self.Rm*self.Rn))) 
     self.Va=np.random.uniform(-70,-70,(self.Rm*self.Rn,self.t.size))
     self.Vg=np.random.uniform(-0.000,0.000,(self.Rm*self.Rn,self.t.size))
     self.Is=np.random.uniform(-0.000,0.000,(self.Rm*self.Rn,self.t.size))
     self.Vs=np.zeros([self.Rm*self.Rn,self.t.size])
     self.Vnf=np.random.uniform(-0.000,0.000,(self.Rm*self.Rn,self.t.size))
     self.Vf=np.random.uniform(-0.000,0.000,(self.Rm*self.Rn,self.t.size))
     self.X0=np.zeros((self.Rm*self.Rn,self.t.size))
     self.VN=np.random.uniform(-10,10,(self.Rm*self.Rn,self.t.size)) 
     self.SN=np.random.uniform(-0,0,(self.Rm*self.Rn,self.t.size)) 
     #self.Vsa=np.array(np.zeros((self.Rm*self.Rn,self.t.size)));  
     self.Uc=np.array(np.zeros((self.Rm*self.Rn,self.t.size)));   
     #self.Ve=np.array(np.zeros((self.Rm*self.Rn,self.t.size))); 
     #self.Vi=np.array(np.zeros((self.Rm*self.Rn,self.t.size)));
     #self.area_ei=np.zeros((2,self.t.size))
     #self.U=np.array(np.zeros((self.Rm*self.Rn,self.t.size))); 
     self.Dt=np.random.uniform(-0.000,0.000,(self.Rm*self.Rn,self.t.size))
     self.spike_trains=[]
     
     #-----Locations of receptors-----#
     self.r_pos=np.array(punit_buf);  
     tmp=grid_buf+100
     entry_buf=np.int16(tmp[:,0]+tmp[:,1]*(100*2))
     self.v_pos=entry_buf;
     for i in range(self.Rm):
         [w,v]=grid_buf[i,:]
         
         tmp=grid_buf==[w-1,v]
         sel=tmp[:,0]&tmp[:,1]
         if(np.sum(sel)):
             #loc=sel.nonzero()[0][0]
             self.G[sel,i]=-1
     
         tmp=grid_buf==[w+1,v]
         sel=tmp[:,0]&tmp[:,1]
         if(np.sum(sel)):
             #loc=sel.nonzero()[0][0]
             self.G[sel,i]=-1
             
         tmp=grid_buf==[w,v-1]
         sel=tmp[:,0]&tmp[:,1]
         if(np.sum(sel)):
             #loc=sel.nonzero()[0][0]
             self.G[sel,i]=-1/self.Rwv
             
         tmp=grid_buf==[w,v+1]
         sel=tmp[:,0]&tmp[:,1]
         if(np.sum(sel)):
             #loc=sel.nonzero()[0][0]
             self.G[sel,i]=-1/self.Rwv
         self.G[i,i]=2*(1+self.Rwv)/(self.Rwv)/self.Cd
     self.Gi=self.Rc*self.G.I
     self.GiFit=self.G.I  # inverse G for fitting
     
     '---Presetup---' 
     #size of the skin-contact scanning image
     self.rentrys=[]
     Wc=roi[:,0].max()-roi[:,0].min()
     Hc=roi[:,1].max()-roi[:,1].min()
     self.Nc=int(Wc/Dbp)
     self.Nr=int(Hc/Dbp)
     OEs=np.hstack([np.uint16((self.r_pos[:,1:2]-roi[:,1].min())/Hc*self.Nr),
                    np.uint16((self.r_pos[:,0:1]-roi[:,0].min())/Wc*self.Nc)])
     self.OEs=OEs
     
     Esc=np.meshgrid(np.arange(0,self.Nr,1),np.arange(0,self.Nc,1))
     Esc=np.hstack([Esc[0].reshape(Esc[0].size,1),Esc[1].reshape(Esc[0].size,1)])
     
     #Dx=roi[:,0].max()-roi[:,0].min()
     #Dy=roi[:,1].max()-roi[:,1].min()
     locs=np.hstack([Esc[:,1:2]*Wc/self.Nc+roi[:,0].min(),Esc[:,0:1]*Hc/self.Nr+roi[:,1].min()])
     
     sroi=np.vstack([roi,roi[0,:]])
     
     sel=isPoisWithinPoly(sroi,locs)
     #sel=alt.isin_multipolygon(locs,roi, contain_boundary=True)
     pdots=Esc[sel,:]  #Pixels in skin area
     self.pdots=pdots
     Esbuf=[]
     for i in range(self.Rm):
         Esbuf.append([]) 
     # select the closest pixels in the SC image attributed to each tactile unit
     for i in range(len(pdots)):
         tmp=np.float32(pdots[i,:])-np.float32(OEs)
         tmp=tmp[:,0]**2+tmp[:,1]**2
         sel=int(np.where(tmp==tmp.min())[0][0])
         Esbuf[sel].append(pdots[i,:])
         
     Nbuf=np.zeros(self.Rm)   
     for i in range(self.Rm):
         Nbuf[i]=len(Esbuf[i])
     Nrcp=int(np.max(Nbuf))+1    
     rows=np.zeros([self.Rm,Nrcp])
     cols=np.zeros([self.Rm,Nrcp])
     
     for i in range(self.Rm):
         'pad each tactile unit up to Nrcp pixels, using its center pixel'
         num=len(Esbuf[i])
         if(num>0):
             eadots=np.array(Esbuf[i]).T
             supplys=np.ones([2,Nrcp-num])  
         else: 
             eadots=OEs[i,:].reshape(2,1)
             supplys=np.ones([2,Nrcp-1]) 
     
         supplys[0,:]=supplys[0,:]*OEs[i,0]
         supplys[1,:]=supplys[1,:]*OEs[i,1]
         rows[i,:]=np.hstack([eadots,supplys])[0,:]
         cols[i,:]=np.hstack([eadots,supplys])[1,:]
     self.Es=[np.uint16(rows),np.uint16(cols)]    
Example #41
def _export_data_to_h5(first_time_id, last_time_id, bf, tp, pre_data):
    tp.timestamp = time()
    tp.message = 'reading time values from SQL'
    tp.save()

    first_time = RecordedTime.objects.get(id=first_time_id)
    time_id_min = BackgroundTask.objects.filter(
        label='data acquision daemon', start__lte=first_time.timestamp).last()
    if time_id_min:
        time_id_min = RecordedTime.objects.filter(
            timestamp__lte=time_id_min.start).last()
        if time_id_min:
            time_id_min = time_id_min.id
            log.debug(("time_id_min %d to first_time_id %d") %
                      (time_id_min, first_time_id))
        else:
            time_id_min = 1
    else:
        time_id_min = 1

    timevalues = [
        timestamp_unix_to_matlab(element)
        for element in RecordedTime.objects.filter(
            id__range=(first_time_id,
                       last_time_id)).values_list('timestamp', flat=True)
    ]
    time_ids = list(
        RecordedTime.objects.filter(id__range=(first_time_id,
                                               last_time_id)).values_list(
                                                   'id', flat=True))

    tp.timestamp = time()
    tp.message = 'writing time values to file'
    tp.save()

    bf.write_data('time', float64(timevalues))
    bf.reopen()

    data = {}
    active_vars = list(
        Variable.objects.filter(active=1, record=1,
                                client__active=1).values_list('pk', flat=True))
    tp.timestamp = time()
    tp.message = 'reading float data values from SQL'
    tp.save()

    raw_data = list(
        RecordedDataFloat.objects.filter(
            time_id__range=(first_time_id, last_time_id),
            variable_id__in=active_vars).values_list('variable_id', 'time_id',
                                                     'value'))

    tp.timestamp = time()
    tp.message = 'prepare raw float data'
    tp.save()

    for item in raw_data:
        if item[0] not in data:
            data[item[0]] = []
        data[item[0]].append([item[1], item[2]])

    tp.timestamp = time()
    tp.message = 'reading int data values from SQL'
    tp.save()

    raw_data = []
    raw_data = list(
        RecordedDataInt.objects.filter(
            time_id__range=(first_time_id, last_time_id),
            variable_id__in=active_vars).values_list('variable_id', 'time_id',
                                                     'value'))

    tp.timestamp = time()
    tp.message = 'prepare raw int data'
    tp.save()

    for item in raw_data:
        if item[0] not in data:
            data[item[0]] = []
        data[item[0]].append([item[1], item[2]])

    tp.timestamp = time()
    tp.message = 'reading bool data values from SQL'
    tp.save()

    raw_data = []
    raw_data = list(
        RecordedDataBoolean.objects.filter(
            time_id__range=(first_time_id, last_time_id),
            variable_id__in=active_vars).values_list('variable_id', 'time_id',
                                                     'value'))

    tp.timestamp = time()
    tp.message = 'prepare raw bool data'
    tp.save()

    for item in raw_data:
        if item[0] not in data:
            data[item[0]] = []
        data[item[0]].append([item[1], item[2]])

    raw_data = []

    tp.timestamp = time()
    tp.message = 'writing data to file'
    tp.save()

    pre_data = {}
    for var in Variable.objects.filter(active=1, record=1,
                                       client__active=1).order_by('pk'):
        tp.timestamp = time()
        tp.message = 'processing variable_id %d' % var.pk
        tp.save()

        var_id = var.pk
        variable_class = var.value_class
        first_record = False
        if var_id in data:
            records = data[var_id]
            if records[0][0] == first_time_id:
                first_record = True

        else:
            records = []

        if not first_record:
            if var_id in pre_data:
                records.insert(0, pre_data[var_id])
                first_record = True
            else:
                first_record = _last_matching_record(variable_class,
                                                     first_time_id, var_id,
                                                     time_id_min)
                if first_record:
                    records.insert(0, first_record)

        if not first_record and not records:
            tmp = [0] * len(time_ids)
            if variable_class.upper() in ['FLOAT', 'FLOAT64', 'DOUBLE']:
                tmp = float64(tmp)
            elif variable_class.upper() in ['FLOAT32', 'SINGLE', 'REAL']:
                tmp = float32(tmp)
            elif variable_class.upper() in ['INT32']:
                tmp = int32(tmp)
            elif variable_class.upper() in ['WORD', 'UINT', 'UINT16']:
                tmp = uint16(tmp)
            elif variable_class.upper() in ['INT16', 'INT']:
                tmp = int16(tmp)
            elif variable_class.upper() in ['BOOL']:
                tmp = uint8(tmp)
            else:
                tmp = float64(tmp)

            bf.write_data(var.name, tmp)
            bf.reopen()
            continue

        #blow up data ##########################################################

        tmp = [0] * len(time_ids)
        t_idx = 0
        v_idx = 0
        nb_v_idx = len(records) - 1
        for id in time_ids:
            if nb_v_idx < v_idx:
                if t_idx > 0:
                    tmp[t_idx] = tmp[t_idx - 1]
            else:
                if records[v_idx][0] == id:
                    tmp[t_idx] = records[v_idx][1]
                    laid = id
                    v_idx += 1
                elif t_idx > 0:
                    tmp[t_idx] = tmp[t_idx - 1]
                elif records[v_idx][0] <= id:
                    tmp[t_idx] = records[v_idx][1]
                    laid = id
                    v_idx += 1

                if nb_v_idx > v_idx:
                    logged = False
                    while records[v_idx][0] <= id and v_idx <= nb_v_idx:
                        if not logged:
                            log.debug(
                                ("double id %d in var %d") % (id, var_id))
                            logged = True
                        v_idx += 1
            t_idx += 1
        pre_data[var_id] = tmp[-1]
        if variable_class.upper() in ['FLOAT', 'FLOAT64', 'DOUBLE']:
            tmp = float64(tmp)
        elif variable_class.upper() in ['FLOAT32', 'SINGLE', 'REAL']:
            tmp = float32(tmp)
        elif variable_class.upper() in ['INT32']:
            tmp = int32(tmp)
        elif variable_class.upper() in ['WORD', 'UINT', 'UINT16']:
            tmp = uint16(tmp)
        elif variable_class.upper() in ['INT16', 'INT']:
            tmp = int16(tmp)
        elif variable_class.upper() in ['BOOL']:
            tmp = uint8(tmp)
        else:
            tmp = float64(tmp)

        bf.write_data(var.name, tmp)
        bf.reopen()
    return pre_data
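The two if/elif chains above map a variable_class string to a numpy dtype. A minimal sketch of the same dispatch as a lookup table (the helper names are illustrative, not part of the original module):

import numpy as np

# hypothetical helper mirroring the if/elif chains above;
# float64 remains the fallback for unknown classes
DTYPE_BY_CLASS = {
    'FLOAT': np.float64, 'FLOAT64': np.float64, 'DOUBLE': np.float64,
    'FLOAT32': np.float32, 'SINGLE': np.float32, 'REAL': np.float32,
    'INT32': np.int32,
    'WORD': np.uint16, 'UINT': np.uint16, 'UINT16': np.uint16,
    'INT16': np.int16, 'INT': np.int16,
    'BOOL': np.uint8,
}

def cast_values(variable_class, values):
    dtype = DTYPE_BY_CLASS.get(variable_class.upper(), np.float64)
    return np.asarray(values, dtype=dtype)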
    """
Example #42
0
def extract_tiles(asset_path, offsets, height_target, normal_target):

	tile_indices = [[0, 1, 2, 3]] if smallMap else [[0, 1, 2, 3], [0, 1, 2, 3], [0, 1, 2, 3], [0, 1, 2, 3]]
	num_indices = len(numpy.array(tile_indices).flatten())

	offset_scale = 2 if smallMap else 4

	sequence_index = 0
	for i, indices in enumerate(tile_indices):
		for tile_index in indices:

			if smallMap:
				tile_path = asset_path % (offsets[0], offsets[1], tile_index)
			else:
				tile_path = asset_path % (offsets[0], offsets[1], i, tile_index)
	
			tile = Image.open(tile_path)
			tile_r, tile_g, tile_b, tile_a = tile.split()

			# extract normal	
			
			channel_r = numpy.asarray(tile_b).flatten()
			channel_g = numpy.asarray(tile_a).flatten()

			channel_b = numpy.resize(numpy.uint8(), tile_size)
			channel_b.fill(255)

			rgb = numpy.dstack((channel_r, channel_g, channel_b)).flatten()

			# # extract elevation

			channel_l = numpy.left_shift(numpy.asarray(tile_r, numpy.uint16()).flatten(), 8)
			channel_l = channel_l + numpy.asarray(tile_g).flatten()
		
			# refine stitching sequence

			x = offsets[0] * offset_scale + int(i % 2) * 2 + int(tile_index % 2)
			y = offsets[1] * offset_scale + int(i / 2) * 2 + int(tile_index / 2)

			ordered_index = y * 16 + x

			# paste data

			target_x = (ordered_index % 16) * tile_width
			target_y = math.floor(ordered_index / 16) * tile_height

			# write normal tile
			
			normal_tile = Image.frombytes('RGB', (tile_width, tile_height), rgb)
			normal_target.paste(normal_tile, (target_x, target_y))

			# write height tile

			height_tile = Image.frombytes('I;16', (tile_width, tile_height), numpy.asarray(channel_l, order = 'C'))
			height_target.paste(height_tile, (target_x, target_y))

			# display progress

			progress = (offsets[0] * 4 + offsets[1]) * num_indices + sequence_index + 1
			
			print('processing', str(progress).rjust(2 if smallMap else 3, '0'), 'of', numTiles,
				flush = True, end = ('\r' if progress < numTiles else '\n'))

			sequence_index += 1
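extract_tiles above reassembles a 16-bit elevation value from two 8-bit PNG channels: red carries the high byte, green the low byte. A standalone sketch of that packing and its inverse, assuming plain uint8 arrays:

import numpy as np

r = np.array([1, 2], dtype=np.uint8)    # high byte
g = np.array([44, 0], dtype=np.uint8)   # low byte

# pack: elevation = (r << 8) + g, computed in uint16 to avoid overflow
elev = np.left_shift(r.astype(np.uint16), 8) + g   # -> [300, 512]

# unpack back into the two bytes
hi, lo = elev >> 8, elev & 0xFF
assert np.array_equal(hi, r) and np.array_equal(lo, g)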
Example #43
0
 def get_address(cls, cpu, data_bytes: bytes) -> Optional[int]:
     return np.uint16(int.from_bytes(data_bytes, byteorder='little') + cls.get_offset(cpu))
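get_address above relies on np.uint16 wrap-around to model a 16-bit address space. A quick illustration of the wrapping; note that recent NumPy versions raise OverflowError when np.uint16 is constructed directly from an out-of-range Python int, so the wrap is done here in uint16 arithmetic:

import numpy as np

base = np.uint16(0xFFFE)
addr = base + np.uint16(3)      # 16-bit arithmetic wraps modulo 65536
assert addr == np.uint16(1)     # 0xFFFE + 3 -> 0x0001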
Example #44
0
 def get_address(cls, cpu: 'c.CPU', data_bytes: bytes):
     return np.uint16(super().get_address(cpu, data_bytes) + cpu.y_reg)
Example #45
0
    def find_in_circle(self, frame: np.ndarray, erode=False):

        g_frame = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
        ys, ye, xs, xe = self.get_roi(frame)
        g_frame = self.__eliminate_non_roi(g_frame, ys, ye, xs, xe)

        _filter = self._filter

        if _filter == 'threshold':
            blur = cv.GaussianBlur(g_frame, tuple(self.blur), 0)
            _, mask = cv.threshold(blur, self.threshold, 255,
                                   cv.THRESH_BINARY_INV)
        elif _filter == 'adaptative':
            mask = cv.adaptiveThreshold(g_frame, 255,
                                        cv.ADAPTIVE_THRESH_MEAN_C,
                                        cv.THRESH_BINARY, 5, 2)
        else:
            cut_frame = frame.copy()
            cut_frame = self.__eliminate_non_roi(cut_frame, ys, ye, xs, xe)
            if _filter == 'hsv':
                cvt_frame = cv.cvtColor(cut_frame, cv.COLOR_BGR2HSV)
                lower = np.array((self.table_hsv[0][0], self.table_hsv[0][1],
                                  self.table_hsv[0][2]), np.uint8)
                higher = np.array((self.table_hsv[1][0], self.table_hsv[1][1],
                                   self.table_hsv[1][2]), np.uint8)
            else:
                cvt_frame = cv.cvtColor(cut_frame, cv.COLOR_RGB2LAB)
                lower = np.array((self.table_hsv[0][0], self.table_hsv[0][1],
                                  self.table_hsv[0][2]), np.uint8)
                higher = np.array((self.table_hsv[1][0], self.table_hsv[1][1],
                                   self.table_hsv[1][2]), np.uint8)
            mask = cv.inRange(cvt_frame, lower, higher)

        if erode:
            mask = cv.erode(mask, None, iterations=2)
            mask = cv.dilate(mask, None, iterations=2)

        if self.debug_tank:
            cv.imshow('debug_tank', mask)

        self.circles = cv.HoughCircles(mask,
                                       cv.HOUGH_GRADIENT,
                                       param1=self.params[0],
                                       param2=self.params[1],
                                       minDist=self.min_dist,
                                       dp=self.radius[0],
                                       minRadius=self.radius[1],
                                       maxRadius=self.radius[2])
        self.found = False
        self.x, self.y, self.w, self.h = 0, 0, 0, 0
        if self.circles is not None:
            circles = np.uint16(np.around(self.circles))
            for x, y, r in circles[0, :]:
                calc_x = int(x -
                             r) if (x - r) > 0 and x < frame.shape[1] else 0
                calc_y = int(y -
                             r) if (y - r) > 0 and y < frame.shape[0] else 0
                if (calc_x > 0 and calc_x < frame.shape[1]) and (
                        calc_y > 0 and calc_y < frame.shape[0]):
                    self.w, self.h = 2 * abs(r), 2 * abs(r)
                    self.x, self.y = calc_x, calc_y
                    self.found = True
                    self.image = frame[self.y:self.y + self.h,
                                       self.x:self.x + self.w]
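HoughCircles returns circle parameters as floats; the np.uint16(np.around(...)) step above rounds them to integer pixel coordinates before slicing. A tiny sketch with a made-up detection result:

import numpy as np

# hypothetical HoughCircles output: one (x, y, radius) triple, float32
circles = np.array([[[40.6, 25.2, 10.8]]], dtype=np.float32)

rounded = np.uint16(np.around(circles))
for x, y, r in rounded[0, :]:
    print(x, y, r)   # 41 25 11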
Example #46
0
        'ply_output_dir')  # e.g. /kpcn/data/shapenetV1/train/partial
    parser.add_argument('num_scans', type=int)
    args = parser.parse_args()

    render_output_dir = os.path.join(
        os.path.dirname(os.path.realpath(__file__)), 'render_out')

    intrinsics = np.loadtxt(os.path.join(render_output_dir, 'intrinsics.txt'))
    width = int(intrinsics[0, 2] * 2)
    height = int(intrinsics[1, 2] * 2)

    depth_dir = os.path.join(render_output_dir, 'depth', args.cat_model_id)
    ply_dir = os.path.join(args.ply_output_dir, args.cat_model_id)
    os.makedirs(depth_dir, exist_ok=True)
    os.makedirs(ply_dir, exist_ok=True)
    for i in range(args.num_scans):
        exr_path = os.path.join(render_output_dir, 'exr', args.cat_model_id,
                                '%d.exr' % i)
        pose_path = os.path.join(render_output_dir, 'pose', args.cat_model_id,
                                 '%d.txt' % i)

        depth = read_exr(exr_path, height, width)
        depth_img = o3d.geometry.Image(np.uint16(depth * 1000))
        o3d.io.write_image(os.path.join(depth_dir, '%d.png' % i), depth_img)

        pose = np.loadtxt(pose_path)
        points = depth2pcd(depth, intrinsics, pose)
        cloud = o3d.geometry.PointCloud()
        cloud.points = o3d.utility.Vector3dVector(points)
        o3d.io.write_point_cloud(os.path.join(ply_dir, '%d.ply' % i), cloud)
Example #47
0
def read_uint16(field: str) -> np.uint16:
    """Read a uint16."""
    return np.uint16(field) if field != "" else np.uint16(0)
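A short usage sketch for read_uint16, showing the empty-field fallback (the function is repeated only to keep the snippet self-contained):

import numpy as np

def read_uint16(field: str) -> np.uint16:
    """Read a uint16."""
    return np.uint16(field) if field != "" else np.uint16(0)

assert read_uint16("42") == 42   # numeric strings parse directly
assert read_uint16("") == 0      # empty fields fall back to zero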
Example #48
0
td_64 / dt_64  # E: No overload
td_64 % 1  # E: Unsupported operand types
td_64 % dt_64  # E: Unsupported operand types


class A:
    def __float__(self):
        return 1.0


np.int8(A())  # E: incompatible type
np.int16(A())  # E: incompatible type
np.int32(A())  # E: incompatible type
np.int64(A())  # E: incompatible type
np.uint8(A())  # E: incompatible type
np.uint16(A())  # E: incompatible type
np.uint32(A())  # E: incompatible type
np.uint64(A())  # E: incompatible type

np.void("test")  # E: incompatible type

np.generic(1)  # E: Cannot instantiate abstract class
np.number(1)  # E: Cannot instantiate abstract class
np.integer(1)  # E: Cannot instantiate abstract class
np.signedinteger(1)  # E: Cannot instantiate abstract class
np.unsignedinteger(1)  # E: Cannot instantiate abstract class
np.inexact(1)  # E: Cannot instantiate abstract class
np.floating(1)  # E: Cannot instantiate abstract class
np.complexfloating(1)  # E: Cannot instantiate abstract class
np.character("test")  # E: Cannot instantiate abstract class
np.flexible(b"test")  # E: Cannot instantiate abstract class
Example #49
0
def evaluate(opt):
    """Evaluates a pretrained model using a specified test set
    """
    MIN_DEPTH = 1e-3
    MAX_DEPTH = 80

    assert sum((opt.eval_mono, opt.eval_stereo)) == 1, \
        "Please choose mono or stereo evaluation by setting either --eval_mono or --eval_stereo"

    if opt.ext_disp_to_eval is None:

        opt.load_weights_folder = os.path.expanduser(opt.load_weights_folder)

        assert os.path.isdir(opt.load_weights_folder), \
            "Cannot find a folder at {}".format(opt.load_weights_folder)

        print("-> Loading weights from {}".format(opt.load_weights_folder))

        filenames = readlines(
            os.path.join(splits_dir, opt.eval_split, "test_files.txt"))
        encoder_path = os.path.join(opt.load_weights_folder, "encoder.pth")
        decoder_path = os.path.join(opt.load_weights_folder, "depth.pth")

        encoder_dict = torch.load(encoder_path)

        dataset = datasets.KITTIRAWDataset(opt.data_path,
                                           filenames,
                                           encoder_dict['height'],
                                           encoder_dict['width'], [0],
                                           4,
                                           is_train=False)
        dataloader = DataLoader(dataset,
                                16,
                                shuffle=False,
                                num_workers=opt.num_workers,
                                pin_memory=True,
                                drop_last=False)

        encoder = networks.ResnetEncoder(opt.num_layers, False)
        depth_decoder = networks.DepthDecoder(encoder.num_ch_enc)

        model_dict = encoder.state_dict()
        encoder.load_state_dict(
            {k: v
             for k, v in encoder_dict.items() if k in model_dict})
        depth_decoder.load_state_dict(torch.load(decoder_path))

        encoder.cuda()
        encoder.eval()
        depth_decoder.cuda()
        depth_decoder.eval()

        pred_disps = []

        print("-> Computing predictions with size {}x{}".format(
            encoder_dict['width'], encoder_dict['height']))

        with torch.no_grad():
            for data in dataloader:
                input_color = data[("color", 0, 0)].cuda()

                if opt.post_process:
                    # Post-processed results require each image to have two forward passes
                    input_color = torch.cat(
                        (input_color, torch.flip(input_color, [3])), 0)

                output = depth_decoder(encoder(input_color))

                pred_disp, _ = disp_to_depth(output[("disp", 0)],
                                             opt.min_depth, opt.max_depth)
                pred_disp = pred_disp.cpu()[:, 0].numpy()

                if opt.post_process:
                    N = pred_disp.shape[0] // 2
                    pred_disp = batch_post_process_disparity(
                        pred_disp[:N], pred_disp[N:, :, ::-1])

                pred_disps.append(pred_disp)

        pred_disps = np.concatenate(pred_disps)

    else:
        # Load predictions from file
        print("-> Loading predictions from {}".format(opt.ext_disp_to_eval))
        pred_disps = np.load(opt.ext_disp_to_eval)

        if opt.eval_eigen_to_benchmark:
            eigen_to_benchmark_ids = np.load(
                os.path.join(splits_dir, "benchmark",
                             "eigen_to_benchmark_ids.npy"))

            pred_disps = pred_disps[eigen_to_benchmark_ids]

    if opt.save_pred_disps:
        output_path = os.path.join(opt.load_weights_folder,
                                   "disps_{}_split.npy".format(opt.eval_split))
        print("-> Saving predicted disparities to ", output_path)
        np.save(output_path, pred_disps)

    if opt.no_eval:
        print("-> Evaluation disabled. Done.")
        quit()

    elif opt.eval_split == 'benchmark':
        save_dir = os.path.join(opt.load_weights_folder,
                                "benchmark_predictions")
        print("-> Saving out benchmark predictions to {}".format(save_dir))
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)

        for idx in range(len(pred_disps)):
            disp_resized = cv2.resize(pred_disps[idx], (1216, 352))
            depth = STEREO_SCALE_FACTOR / disp_resized
            depth = np.clip(depth, 0, 80)
            depth = np.uint16(depth * 256)
            save_path = os.path.join(save_dir, "{:010d}.png".format(idx))
            cv2.imwrite(save_path, depth)

        print(
            "-> No ground truth is available for the KITTI benchmark, so not evaluating. Done."
        )
        quit()

    gt_path = os.path.join(splits_dir, opt.eval_split, "gt_depths.npz")
    gt_depths = np.load(gt_path, fix_imports=True, encoding='latin1')["data"]

    print("-> Evaluating")

    if opt.eval_stereo:
        print("   Stereo evaluation - "
              "disabling median scaling, scaling by {}".format(
                  STEREO_SCALE_FACTOR))
        opt.disable_median_scaling = True
        opt.pred_depth_scale_factor = STEREO_SCALE_FACTOR
    else:
        print("   Mono evaluation - using median scaling")

    errors = []
    ratios = []

    for i in range(pred_disps.shape[0]):

        # get the ground-truth dimensions
        gt_depth = gt_depths[i]
        gt_height, gt_width = gt_depth.shape[:2]

        # resize the predicted depth map
        pred_disp = pred_disps[i]
        pred_disp = cv2.resize(pred_disp, (gt_width, gt_height))
        pred_depth = 1 / pred_disp

        # build the valid-depth mask
        if opt.eval_split == "eigen":
            # logical AND keeps gt values within the agreed depth range
            mask = np.logical_and(gt_depth > MIN_DEPTH, gt_depth < MAX_DEPTH)

            # crop to the valid evaluation region
            crop = np.array([
                0.40810811 * gt_height, 0.99189189 * gt_height,
                0.03594771 * gt_width, 0.96405229 * gt_width
            ]).astype(np.int32)
            crop_mask = np.zeros(mask.shape)
            crop_mask[crop[0]:crop[1], crop[2]:crop[3]] = 1
            mask = np.logical_and(mask, crop_mask)

        else:
            mask = gt_depth > 0

        pred_depth = pred_depth[mask]
        gt_depth = gt_depth[mask]

        pred_depth *= opt.pred_depth_scale_factor
        if not opt.disable_median_scaling:
            ratio = np.median(gt_depth) / np.median(pred_depth)
            ratios.append(ratio)
            pred_depth *= ratio

        pred_depth[pred_depth < MIN_DEPTH] = MIN_DEPTH
        pred_depth[pred_depth > MAX_DEPTH] = MAX_DEPTH

        errors.append(compute_errors(gt_depth, pred_depth))

    if not opt.disable_median_scaling:
        ratios = np.array(ratios)
        med = np.median(ratios)
        print(" Scaling ratios | med: {:0.3f} | std: {:0.3f}".format(
            med, np.std(ratios / med)))

    mean_errors = np.array(errors).mean(0)

    print("\n  " +
          ("{:>8} | " * 7
           ).format("abs_rel", "sq_rel", "rmse", "rmse_log", "a1", "a2", "a3"))
    print(("&{: 8.3f}  " * 7).format(*mean_errors.tolist()) + "\\\\")
    print("\n-> Done!")
Example #50
0
def model_test(use_existing):

    cm.mkdir(cm.workingPath.testingResults_path)

    # Loading test data:

    filename = cm.filename
    modelname = cm.modellist[0]

    # Single CT:
    originFile_list = sorted(
        glob(cm.workingPath.originTestingSet_path + filename))
    maskAortaFile_list = sorted(
        glob(cm.workingPath.aortaTestingSet_path + filename))
    maskPulFile_list = sorted(
        glob(cm.workingPath.pulTestingSet_path + filename))

    # Zahra CTs:
    # originFile_list = sorted(glob(cm.workingPath.originTestingSet_path + "vol*.dcm"))
    # maskAortaFile_list = sorted(glob(cm.workingPath.aortaTestingSet_path + "vol*.dcm"))
    # maskPulFile_list = sorted(glob(cm.workingPath.pulTestingSet_path + "vol*.dcm"))

    # Lidia CTs:
    # originFile_list = sorted(glob(cm.workingPath.originLidiaTestingSet_path + "vol*.dcm"))[61:]
    # maskAortaFile_list = sorted(glob(cm.workingPath.originLidiaTestingSet_path + "vol*.dcm"))[61:]
    # maskPulFile_list = sorted(glob(cm.workingPath.originLidiaTestingSet_path + "vol*.dcm"))[61:]

    # Abnormal CTs:
    # originFile_list = sorted(glob(cm.workingPath.originAbnormalTestingSet_path + "vol126*.dcm"))
    # maskAortaFile_list = sorted(glob(cm.workingPath.originAbnormalTestingSet_path + "vol126*.dcm"))
    # maskPulFile_list = sorted(glob(cm.workingPath.originAbnormalTestingSet_path + "vol126*.dcm"))

    for i in range(len(originFile_list)):

        # Show runtime:
        starttime = datetime.datetime.now()

        vol_slices = []
        out_test_images = []
        out_test_masks = []

        current_file = originFile_list[i].split('/')[-1]
        current_dir = cm.workingPath.testingResults_path + str(
            current_file[:-17])
        cm.mkdir(current_dir)
        cm.mkdir(current_dir + '/Plots/')
        cm.mkdir(current_dir + '/Surface_Distance/')
        cm.mkdir(current_dir + '/DICOM/')
        cm.mkdir(current_dir + '/mhd/')

        stdout_backup = sys.stdout
        log_file = open(current_dir + "/logs.txt", "w")
        sys.stdout = log_file

        print('-' * 30)
        print('Loading test data %04d/%04d...' % (i + 1, len(originFile_list)))

        originVol, originVol_num, originVolwidth, originVolheight = dp.loadFile(
            originFile_list[i])
        maskAortaVol, maskAortaVol_num, maskAortaVolwidth, maskAortaVolheight = dp.loadFile(
            maskAortaFile_list[i])
        maskPulVol, maskPulVol_num, maskPulVolwidth, maskPulVolheight = dp.loadFile(
            maskPulFile_list[i])
        maskVol = maskAortaVol

        for j in range(len(maskAortaVol)):
            maskAortaVol[j] = np.where(maskAortaVol[j] != 0, 1, 0)
        for j in range(len(maskPulVol)):
            maskPulVol[j] = np.where(maskPulVol[j] != 0, 2, 0)

        maskVol = maskVol + maskPulVol

        for j in range(len(maskVol)):
            maskVol[j] = np.where(maskVol[j] > 2, 0, maskVol[j])
            # maskVol[j] = np.where(maskVol[j] != 0, 0, maskVol[j])

        # Make the Vessel class
        for j in range(len(maskVol)):
            maskVol[j] = np.where(maskVol[j] != 0, 1, 0)

        for i in range(originVol.shape[0]):
            img = originVol[i, :, :]

            out_test_images.append(img)
        for i in range(maskVol.shape[0]):
            img = maskVol[i, :, :]

            out_test_masks.append(img)

        vol_slices.append(originVol.shape[0])

        maskAortaVol = None
        maskPulVol = None
        maskVol = None
        originVol = None
        nb_class = 2
        outmasks_onehot = to_categorical(out_test_masks, num_classes=nb_class)
        final_test_images = np.ndarray([sum(vol_slices), 512, 512, 1],
                                       dtype=np.int16)
        final_test_masks = np.ndarray([sum(vol_slices), 512, 512, nb_class],
                                      dtype=np.int8)

        for i in range(len(out_test_images)):
            final_test_images[i, :, :, 0] = out_test_images[i]
            final_test_masks[i, :, :, :] = outmasks_onehot[i]

        outmasks_onehot = None
        out_test_masks = None
        out_test_images = None

        row = cm.img_rows_3d
        col = cm.img_cols_3d
        row_1 = int((512 - row) / 2)
        row_2 = int(512 - (512 - row) / 2)
        col_1 = int((512 - col) / 2)
        col_2 = int(512 - (512 - col) / 2)
        slices = cm.slices_3d
        gaps = cm.gaps_3d

        final_images_crop = final_test_images[:, row_1:row_2, col_1:col_2, :]
        final_masks_crop = final_test_masks[:, row_1:row_2, col_1:col_2, :]

        sitk.WriteImage(
            sitk.GetImageFromArray(np.uint16(final_test_masks[:, :, :, 1])),
            current_dir + '/DICOM/masksAortaGroundTruth.dcm')
        # sitk.WriteImage(sitk.GetImageFromArray(np.uint16(final_test_masks[:, :, :, 2])), current_dir + '/DICOM/masksPulGroundTruth.dcm')

        sitk.WriteImage(
            sitk.GetImageFromArray(np.uint16(final_test_masks[:, :, :, 1])),
            current_dir + '/mhd/masksAortaGroundTruth.mhd')
        # sitk.WriteImage(sitk.GetImageFromArray(np.uint16(final_test_masks[:, :, :, 2])), current_dir + '/mhd/masksPulGroundTruth.mhd')

        # clear the masks for the final step:
        final_test_masks = np.where(final_test_masks == 0, 0, 0)

        num_patches = int((sum(vol_slices) - slices) / gaps)

        test_image = np.ndarray([1, slices, row, col, 1], dtype=np.int16)

        predicted_mask_volume = np.ndarray(
            [sum(vol_slices), row, col, nb_class], dtype=np.float32)

        # model = DenseUNet_3D.get_3d_denseunet()
        # model = UNet_3D.get_3d_unet_bn()
        # model = RSUNet_3D.get_3d_rsunet(opti)
        # model = UNet_3D.get_3d_wnet(opti)
        model = UNet_3D.get_3d_unet()
        # model = RSUNet_3D_Gerda.get_3d_rsunet_Gerdafeature(opti)

        using_start_end = 1
        start_slice = cm.start_slice
        end_slice = -1

        if use_existing:
            model.load_weights(modelname)

        for i in range(num_patches):
            count1 = i * gaps
            count2 = i * gaps + slices
            test_image[0] = final_images_crop[count1:count2]

            predicted_mask = model.predict(test_image)

            if i == int(num_patches * 0.63):
                vs.visualize_activation_in_layer_one_plot(
                    model, test_image, current_dir)
            else:
                pass

            predicted_mask_volume[count1:count2] += predicted_mask[
                0, :, :, :, :]

        t = len(predicted_mask_volume)
        for i in range(0, slices, gaps):
            predicted_mask_volume[i:(
                i +
                gaps)] = predicted_mask_volume[i:(i + gaps)] / (i / gaps + 1)

        for i in range(0, slices, gaps):
            predicted_mask_volume[(t - i -
                                   gaps):(t - i)] = predicted_mask_volume[
                                       (t - i - gaps):(t - i)] / (i / gaps + 1)

        for i in range(slices, (len(predicted_mask_volume) - slices)):
            predicted_mask_volume[i] = predicted_mask_volume[i] / (slices /
                                                                   gaps)

        np.save(cm.workingPath.testingSet_path + 'testImages.npy',
                final_images_crop)
        np.save(cm.workingPath.testingSet_path + 'testMasks.npy',
                final_masks_crop)
        np.save(cm.workingPath.testingSet_path + 'masksTestPredicted.npy',
                predicted_mask_volume)

        final_images_crop = None
        final_masks_crop = None
        predicted_mask_volume = None

        imgs_origin = np.load(cm.workingPath.testingSet_path +
                              'testImages.npy').astype(np.int16)
        imgs_true = np.load(cm.workingPath.testingSet_path +
                            'testMasks.npy').astype(np.int8)
        imgs_predict = np.load(cm.workingPath.testingSet_path +
                               'masksTestPredicted.npy').astype(np.float32)
        imgs_predict_threshold = np.load(cm.workingPath.testingSet_path +
                                         'masksTestPredicted.npy').astype(
                                             np.float32)

        ########## ROC curve aorta

        actual = imgs_true[:, :, :, 1].reshape(-1)
        predictions = imgs_predict[:, :, :, 1].reshape(-1)
        # predictions = np.where(predictions < (0.7), 0, 1)

        false_positive_rate_aorta, true_positive_rate_aorta, thresholds_aorta = roc_curve(
            actual, predictions, pos_label=1)
        roc_auc_aorta = auc(false_positive_rate_aorta,
                            true_positive_rate_aorta)
        plt.figure(1, figsize=(6, 6))
        plt.figure(1)
        plt.title('ROC of Aorta')
        plt.plot(false_positive_rate_aorta, true_positive_rate_aorta, 'b')
        label = 'AUC = %0.2f' % roc_auc_aorta
        plt.legend(loc='lower right')
        plt.plot([0, 1], [0, 1], 'r--')
        plt.xlim([-0.0, 1.0])
        plt.ylim([-0.0, 1.0])
        plt.xlabel('False Positive Rate')
        plt.ylabel('True Positive Rate')
        # plt.show()
        saveName = '/Plots/ROC_Aorta_curve.png'
        plt.savefig(current_dir + saveName)
        plt.close()
        ########## ROC curve pul

        # actual = imgs_true[:, :, :, 2].reshape(-1)
        # predictions = imgs_predict[:, :, :, 2].reshape(-1)
        #
        # false_positive_rate_pul, true_positive_rate_pul, thresholds_pul = roc_curve(actual, predictions, pos_label=1)
        # roc_auc_pul = auc(false_positive_rate_pul, true_positive_rate_pul)
        # plt.figure(2, figsize=(6, 6))
        # plt.figure(2)
        # plt.title('ROC of pul')
        # plt.plot(false_positive_rate_pul, true_positive_rate_pul, 'b')
        # label = 'AUC = %0.2f' % roc_auc_pul
        # plt.legend(loc='lower right')
        # plt.plot([0, 1], [0, 1], 'r--')
        # plt.xlim([-0.0, 1.0])
        # plt.ylim([-0.0, 1.0])
        # plt.xlabel('False Positive Rate')
        # plt.ylabel('True Positive Rate')
        # # plt.show()
        # saveName = '/Plots/ROC_Pul_curve.png'
        # plt.savefig(current_dir + saveName)
        # plt.close()

        false_positive_rate_aorta = None
        true_positive_rate_aorta = None
        false_positive_rate_pul = None
        true_positive_rate_pul = None

        imgs_predict_threshold = np.where(imgs_predict_threshold < (0.5), 0, 1)

        if using_start_end == 1:
            aortaMean = lf.dice_coef_np(
                imgs_predict_threshold[start_slice:end_slice, :, :, 1],
                imgs_true[start_slice:end_slice, :, :, 1])
            # pulMean = lf.dice_coef_np(imgs_predict_threshold[start_slice:end_slice, :, :, 2],
            #                           imgs_true[start_slice:end_slice, :, :, 2])
        else:
            aortaMean = lf.dice_coef_np(imgs_predict_threshold[:, :, :, 1],
                                        imgs_true[:, :, :, 1])
            # pulMean = lf.dice_coef_np(imgs_predict_threshold[:, :, :, 2], imgs_true[:, :, :, 2])

        np.savetxt(current_dir + '/Plots/AortaDicemean.txt',
                   np.array(aortaMean).reshape(1, ),
                   fmt='%.5f')
        # np.savetxt(current_dir + '/Plots/PulDicemean.txt', np.array(pulMean).reshape(1, ), fmt='%.5f')

        print('Model file:', modelname)
        print('-' * 30)
        print('Aorta Dice Coeff', aortaMean)
        # print('Pul Dice Coeff', pulMean)
        print('-' * 30)

        # Draw the subplots of figures:

        color1 = 'gray'  # ***
        color2 = 'viridis'  # ******
        # color = 'plasma'  # **
        # color = 'magma'  # ***
        # color2 = 'RdPu'  # ***
        # color = 'gray'  # ***
        # color = 'gray'  # ***

        transparent1 = 1.0
        transparent2 = 0.5

        # Slice parameters:

        #################################### Aorta
        # Automatically:

        steps = 40
        slice = range(0, len(imgs_origin), steps)
        plt_row = 3
        plt_col = int(len(imgs_origin) / steps)

        plt.figure(3, figsize=(25, 12))
        plt.figure(3)

        for i in slice:
            if i == 0:
                plt_num = int(i / steps) + 1
            else:
                plt_num = int(i / steps)

            if plt_num <= plt_col:

                ax1 = plt.subplot(plt_row, plt_col, plt_num)
                title = 'slice=' + str(i)
                plt.title(title)
                ax1.imshow(imgs_origin[i, :, :, 0],
                           cmap=color1,
                           alpha=transparent1)
                ax1.imshow(imgs_true[i, :, :, 1],
                           cmap=color2,
                           alpha=transparent2)

                ax2 = plt.subplot(plt_row, plt_col, plt_num + plt_col)
                title = 'slice=' + str(i)
                plt.title(title)
                ax2.imshow(imgs_origin[i, :, :, 0],
                           cmap=color1,
                           alpha=transparent1)
                ax2.imshow(imgs_predict[i, :, :, 1],
                           cmap=color2,
                           alpha=transparent2)

                ax3 = plt.subplot(plt_row, plt_col, plt_num + 2 * plt_col)
                title = 'slice=' + str(i)
                plt.title(title)
                ax3.imshow(imgs_origin[i, :, :, 0],
                           cmap=color1,
                           alpha=transparent1)
                ax3.imshow(imgs_predict_threshold[i, :, :, 1],
                           cmap=color2,
                           alpha=transparent2)
            else:
                pass

        modelname = cm.modellist[0]

        imageName = re.findall(r'\d+\.?\d*', modelname)
        epoch_num = int(imageName[0]) + 1
        accuracy = float(
            np.loadtxt(current_dir + '/Plots/AortaDicemean.txt', float))

        # saveName = 'epoch_' + str(epoch_num) + '_dice_' +str(accuracy) + '.png'
        saveName = '/Plots/epoch_Aorta_%02d_dice_%.3f.png' % (epoch_num - 1,
                                                              accuracy)

        plt.subplots_adjust(left=0.0,
                            bottom=0.05,
                            right=1.0,
                            top=0.95,
                            hspace=0.3,
                            wspace=0.3)
        plt.savefig(current_dir + saveName)
        plt.close()
        # plt.show()

        ################################ Pulmonary
        # steps = 40
        # slice = range(0, len(imgs_origin), steps)
        # plt_row = 3
        # plt_col = int(len(imgs_origin) / steps)
        #
        # plt.figure(4, figsize=(25, 12))
        # plt.figure(4)
        # for i in slice:
        #   if i == 0:
        #     plt_num = int(i / steps) + 1
        #   else:
        #     plt_num = int(i / steps)
        #
        #   if plt_num <= plt_col:
        #
        #     ax1 = plt.subplot(plt_row, plt_col, plt_num)
        #     title = 'slice=' + str(i)
        #     plt.title(title)
        #     ax1.imshow(imgs_origin[i, :, :, 0], cmap=color1, alpha=transparent1)
        #     ax1.imshow(imgs_true[i, :, :, 2], cmap=color2, alpha=transparent2)
        #
        #     ax2 = plt.subplot(plt_row, plt_col, plt_num + plt_col)
        #     title = 'slice=' + str(i)
        #     plt.title(title)
        #     ax2.imshow(imgs_origin[i, :, :, 0], cmap=color1, alpha=transparent1)
        #     ax2.imshow(imgs_predict[i, :, :, 2], cmap=color2, alpha=transparent2)
        #
        #     ax3 = plt.subplot(plt_row, plt_col, plt_num + 2 * plt_col)
        #     title = 'slice=' + str(i)
        #     plt.title(title)
        #     ax3.imshow(imgs_origin[i, :, :, 0], cmap=color1, alpha=transparent1)
        #     ax3.imshow(imgs_predict_threshold[i, :, :, 2], cmap=color2, alpha=transparent2)
        #   else:
        #     pass
        #
        # modelname = cm.modellist[0]
        #
        # imageName = re.findall(r'\d+\.?\d*', modelname)
        # epoch_num = int(imageName[0]) + 1
        # accuracy = float(np.loadtxt(current_dir + '/Plots/PulDicemean.txt', float))
        #
        # # saveName = 'epoch_' + str(epoch_num) + '_dice_' +str(accuracy) + '.png'
        # saveName = '/Plots/epoch_Pul_%02d_dice_%.3f.png' % (epoch_num - 1, accuracy)
        #
        # plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95, hspace=0.3, wspace=0.3)
        # plt.savefig(current_dir + saveName)
        # plt.close()
        # # plt.show()

        print('Images saved')
        # Save npy as dcm files:

        final_test_aorta_predicted_threshold = final_test_masks[:, :, :, 1]
        # final_test_pul_predicted_threshold = final_test_masks[:, :, :, 2]

        final_test_aorta_predicted_threshold[:, row_1:row_2, col_1:
                                             col_2] = imgs_predict_threshold[:, :, :,
                                                                             1]
        # final_test_pul_predicted_threshold[:, row_1:row_2, col_1:col_2] = imgs_predict_threshold[:, :, :, 2]

        new_imgs_dcm = sitk.GetImageFromArray(
            np.uint16(final_test_images + 4000))
        new_imgs_aorta_predict_dcm = sitk.GetImageFromArray(
            np.uint16(final_test_aorta_predicted_threshold))
        # new_imgs_pul_predict_dcm = sitk.GetImageFromArray(np.uint16(final_test_pul_predicted_threshold))

        sitk.WriteImage(new_imgs_dcm,
                        current_dir + '/DICOM/imagesPredicted.dcm')
        sitk.WriteImage(new_imgs_aorta_predict_dcm,
                        current_dir + '/DICOM/masksAortaPredicted.dcm')
        # sitk.WriteImage(new_imgs_pul_predict_dcm, current_dir + '/DICOM/masksPulPredicted.dcm')

        sitk.WriteImage(new_imgs_dcm, current_dir + '/mhd/imagesPredicted.mhd')
        sitk.WriteImage(new_imgs_aorta_predict_dcm,
                        current_dir + '/mhd/masksAortaPredicted.mhd')
        # sitk.WriteImage(new_imgs_pul_predict_dcm, current_dir + '/mhd/masksPulPredicted.mhd')

        mt.SegmentDist(current_dir + '/mhd/masksAortaPredicted.mhd',
                       current_dir + '/mhd/masksAortaGroundTruth.mhd',
                       current_dir + '/Surface_Distance/Aorta')
        # mt.SegmentDist(current_dir + '/mhd/masksPulPredicted.mhd',current_dir + '/mhd/masksPulGroundTruth.mhd', current_dir + '/Surface_Distance/Pul')

        # ds1 = dicom.read_file(maskAortaFile_list[0])
        # ds2 = dicom.read_file(cm.workingPath.testingSet_path + 'masksAortaPredicted.dcm')
        # ds1.PixelData = ds2.PixelData
        # ds1.pop('pixel_array')
        # ds1["pixel_array"] = ds2["pixel_array"]
        # ds1.save_as(cm.workingPath.testingSet_path + 'masksAortaPredicted.dcm')
        # ds1.save_as(cm.workingPath.testingSet_path + 'masksAortaPredicted.mhd')
        #
        # ds1 = dicom.read_file(maskPulFile_list[0])
        # ds2 = dicom.read_file(cm.workingPath.testingSet_path + 'masksPulPredicted.dcm')
        # ds1.PixelData = ds2.PixelData
        # ds1["pixelarray"] = ds2["pixelarray"]
        # ds1.save_as(cm.workingPath.testingSet_path + 'masksPulPredicted.dcm')
        # ds1.save_as(cm.workingPath.testingSet_path + 'masksPulPredicted.mhd')
        print('DICOM saved')

        # Clear memory for the next testing sample:

        final_test_aorta_predicted_threshold = None
        final_test_pul_predicted_threshold = None
        imgs_predict_threshold = None
        new_imgs_dcm = None
        new_imgs_aorta_predict_dcm = None
        new_imgs_pul_predict_dcm = None
        final_test_images = None
        final_test_masks = None
        imgs_origin = None
        imgs_predict = None
        imgs_true = None
        predicted_mask = None
        predictions = None

        endtime = datetime.datetime.now()
        print('-' * 30)
        print('running time:', endtime - starttime)

        log_file.close()
        sys.stdout = stdout_backup
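model_test above averages overlapping sliding-window predictions by dividing each region by the number of patches that covered it, with separate handling at the borders. A minimal 1-D sketch of the same accumulate-then-normalize idea, using an explicit coverage counter instead of the hand-derived border arithmetic (sizes here are illustrative):

import numpy as np

length, window, step = 10, 4, 2
acc = np.zeros(length)
cnt = np.zeros(length)

for start in range(0, length - window + 1, step):
    pred = np.ones(window)              # stand-in for model.predict(...)
    acc[start:start + window] += pred
    cnt[start:start + window] += 1      # how many windows hit each slice

averaged = acc / cnt                    # per-slice normalization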
Example #51
0
def write_APS_file(awgData, fileName, miniLLRepeat=1):
    '''
    Main function to pack channel LLs into an APS h5 file.
    '''

    #Preprocess the LL data to handle APS restrictions
    LLs12 = [
        preprocess_APS(miniLL, awgData['ch12']['wfLib'])
        for miniLL in awgData['ch12']['linkList']
    ]
    LLs34 = [
        preprocess_APS(miniLL, awgData['ch34']['wfLib'])
        for miniLL in awgData['ch34']['linkList']
    ]

    #Merge the the marker data into the IQ linklists
    merge_APS_markerData(LLs12, awgData['ch1m1']['linkList'], 1)
    merge_APS_markerData(LLs12, awgData['ch2m1']['linkList'], 2)
    merge_APS_markerData(LLs34, awgData['ch3m1']['linkList'], 1)
    merge_APS_markerData(LLs34, awgData['ch4m1']['linkList'], 2)

    #Open the HDF5 file
    if os.path.isfile(fileName):
        os.remove(fileName)
    with h5py.File(fileName, 'w') as FID:

        #List of which channels we have data for
        #TODO: actually handle incomplete channel data
        channelDataFor = [1, 2, 3, 4]
        FID['/'].attrs['Version'] = 2.0
        FID['/'].attrs['channelDataFor'] = np.uint16(channelDataFor)
        FID['/'].attrs['miniLLRepeat'] = np.uint16(miniLLRepeat - 1)

        #Create the waveform vectors
        wfInfo = []
        for wfLib in (awgData['ch12']['wfLib'], awgData['ch34']['wfLib']):
            wfInfo.append(
                create_wf_vector({key: wf.real
                                  for key, wf in wfLib.items()}))
            wfInfo.append(
                create_wf_vector({key: wf.imag
                                  for key, wf in wfLib.items()}))

        LLData = [LLs12, LLs34]
        #Create the groups and datasets
        for chanct in range(4):
            chanStr = '/chan_{0}'.format(chanct + 1)
            chanGroup = FID.create_group(chanStr)
            chanGroup.attrs['isIQMode'] = np.uint8(1)
            #Write the waveformLib to file
            FID.create_dataset('{0}/waveformLib'.format(chanStr),
                               data=wfInfo[chanct][0])

            #For A channels (1 & 3) we write link list data
            if np.mod(chanct, 2) == 0:
                chanGroup.attrs['isLinkListData'] = np.uint8(1)
                groupStr = chanStr + '/linkListData'
                LLGroup = FID.create_group(groupStr)
                LLDataVecs, numEntries = create_LL_data(
                    LLData[chanct // 2], wfInfo[chanct][1],
                    os.path.basename(fileName))
                LLGroup.attrs['length'] = numEntries
                for key, dataVec in LLDataVecs.items():
                    FID.create_dataset(groupStr + '/' + key, data=dataVec)
            else:
                chanGroup.attrs['isLinkListData'] = np.uint8(0)
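write_APS_file above stores channel metadata as HDF5 attributes with explicit fixed-width numpy types. A minimal sketch of that pattern (the file name is hypothetical):

import h5py
import numpy as np

with h5py.File('example.h5', 'w') as FID:
    FID['/'].attrs['Version'] = 2.0
    FID['/'].attrs['channelDataFor'] = np.uint16([1, 2, 3, 4])
    FID['/'].attrs['miniLLRepeat'] = np.uint16(0)
    chanGroup = FID.create_group('/chan_1')
    chanGroup.attrs['isIQMode'] = np.uint8(1)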
Example #52
0
def single2uint16(img):
    return np.uint16((img.clip(0, 1) * 65535.).round())
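A quick check of single2uint16 at the endpoints and midpoint; values outside [0, 1] are clipped before scaling:

import numpy as np

def single2uint16(img):
    return np.uint16((img.clip(0, 1) * 65535.).round())

img = np.array([0.0, 0.5, 1.0, 1.2])
print(single2uint16(img))   # [    0 32768 65535 65535]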
Example #53
0
def run():
    if 'screenshots' not in os.listdir('./'):
        os.mkdir('./screenshots')
    #load Matlab data


#    import scipy.io
#    mri = scipy.io.loadmat('brainweb_128.mat')
#    activity = scipy.io.loadmat('L.mat')
#    v1 = uint16( activity['L']*(2**16*(1.0/dynrange)/activity['L'].max()) )
#    v2 = uint16( mri['t1_128']*(2**16/mri['t1_128'].max()) )

#load Nifty data
    from nifti import NiftiImage
    v1 = NiftiImage('./activity_128.nii').data
    v2 = 0 * v1

    #create volume renderer and initialize it
    V = VolumeRender((N, N, N), (512, 512))
    V.show()
    V.set_volume1(v1)
    V.set_volume2(v2)

    #set visualization parameters
    V.set_density1(0.05)
    V.set_brightness1(7.1)
    V.set_transferOffset1(-0.04)
    V.set_transferScale1(1.1)
    V.set_density2(0.05)
    V.set_brightness2(0.3)
    V.set_transferOffset2(0.05)
    V.set_transferScale2(1)

    #visualize a dynamic scene and save screenshots
    N_frames = N * image_steps
    d_rotation_x = 0.0
    d_rotation_y = 0.5
    d_zoom = 0.004

    im_frame_prev = 0
    frame = 0
    t_frame = 0
    while 1:
        frame += 1
        if frame < 1460:
            t_frame += 1
            if frame < 150 + 250:
                V.rotate(d_rotation_x, d_rotation_y)
            if (frame > 150 + 150 and frame < 150 + 500):
                V.zoom(d_zoom)
            if frame > 150 + 250:
                V.rotate(d_rotation_x / 4, d_rotation_y / 4)
            im_frame = t_frame / image_steps
            if not im_frame == im_frame_prev:
                im_frame_prev = im_frame
                if (im_frame < (N - thickness)):
                    v1b = double(v1)
                    v1b[:, :,
                        im_frame:im_frame + thickness] = dynrange * double(
                            v1[:, :, im_frame:im_frame + thickness])
                    #v1b = v1b*(2**16/v1b.max())
                    V.set_volume1(uint16(v1b))
        if frame > 1500:
            t_frame -= 1
            im_frame = t_frame / image_steps
            if not im_frame == im_frame_prev:
                im_frame_prev = im_frame
                if (im_frame < (N - thickness)):
                    v1b = double(v1)
                    v1b[:, :,
                        im_frame:im_frame + thickness] = dynrange * double(
                            v1[:, :, im_frame:im_frame + thickness])
                    #v1b = v1b*(2**16/v1b.max())
                    V.set_volume1(uint16(v1b))
        if (frame > 2550 + 100):
            break
        elif (frame > 2550 + 75):
            V.zoom(-d_zoom * 3.5)
        elif frame > 2550:
            V.rotate(d_rotation_x, d_rotation_y)
            V.zoom(-d_zoom * 3.5)
        V.dump_screenshot("./screenshots/%d.png" % frame)
        sleep(0.003)

    while 1:
        pass
Example #54
0
 def generate_advanced_indices(self, N, many=True):
     choices = [np.int16([0, N - 1, -2])]
     if many:
         choices += [np.uint16([0, 1, N - 1]), np.bool_([0, 1, 1, 0])]
     return choices
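Example #55
0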
        process_pdf(rsrcmgr, device, pdf)
        device.close()
        content = retstr.getvalue()
        retstr.close()
        # get all lines
        lines = str(content).split("\n")
        return lines

    #%%
    my_pdf = open(mypdf[0], "rb")
    out = read_pdf(my_pdf)
    my_pdf.close()

    theta_s = np.float64(out[40])
    date = out[36]
    year = np.uint16(date.split('年')[0])
    tmp1 = date.split('月')
    month = np.uint8(tmp1[0].split('年')[-1])
    day = np.uint8(tmp1[1].split('日')[0])

    for i in range(len(out)):
        each = out[i]
        if '增益参数' in each:
            #        print(i)
            gain = out[i + 3:i + 3 + 32]
        if '偏移参数' in each:
            #        print(i)
            bias = out[i + 3:i + 3 + 32]

    gain = np.float32(gain)
    bias = np.float32(bias)
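The snippet above pulls the acquisition date out of a Chinese-formatted string of the form 'YYYY年MM月DD日'. A standalone sketch of that parsing with a hypothetical date value:

import numpy as np

date = '2019年5月7日'   # hypothetical value of out[36]

year = np.uint16(date.split('年')[0])
tmp1 = date.split('月')
month = np.uint8(tmp1[0].split('年')[-1])
day = np.uint8(tmp1[1].split('日')[0])

assert (year, month, day) == (2019, 5, 7)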
Example #56
0
def read_str_u16(f):
    return np.uint16(read_str_int(f, 'u16'))
Example #57
0
def unpack(file):
    """Unpacks PulsOn 440 radar data from input file"""
    with open(file, 'rb') as f:

        # Read in configuration data
        config_message = f.read(44)
        config = dict()
        config['type'] = hex(
            np.frombuffer(config_message[0:2], dtype='>u2')[0])
        config['id'] = np.frombuffer(config_message[2:4], dtype='>u2')[0]
        config['node_id'] = np.frombuffer(config_message[4:8], dtype='>u4')[0]
        config['scan_start'] = np.frombuffer(config_message[8:12],
                                             dtype='>i4')[0]
        config['scan_end'] = np.frombuffer(config_message[12:16],
                                           dtype='>i4')[0]
        config['scan_res'] = np.frombuffer(config_message[16:18],
                                           dtype='>u2')[0]
        config['pii'] = np.frombuffer(config_message[18:20], dtype='>u2')[0]
        config['ant_mode'] = np.uint16(config_message[32])
        config['tx_gain'] = np.uint16(config_message[33])
        config['code_chan'] = np.uint16(config_message[34])
        config['persist_flag'] = np.uint16(config_message[35])
        config['time_stamp'] = np.frombuffer(config_message[36:40],
                                             dtype='>u4')[0]
        config['status'] = np.frombuffer(config_message[40:44], dtype='>u4')[0]

        # Compute number of range bins in data
        dTmin = 1 / (512 * 1.024)
        Tbin = 32 * dTmin
        dNbin = 96
        dT0 = 10
        scan_start_time = float(config['scan_start'])
        scan_end_time = float(config['scan_end'])
        num_range_bins = dNbin * math.ceil(
            (scan_end_time - scan_start_time) / (Tbin * 1000 * dNbin))
        num_packets_per_scan = math.ceil(num_range_bins / 350)
        start_range = SPEED_OF_LIGHT * (
            (scan_start_time * 1e-12) - dT0 * 1e-9) / 2
        drange_bins = SPEED_OF_LIGHT * Tbin * 1e-9 / 2
        range_bins = start_range + drange_bins * np.arange(
            0, num_range_bins, 1)

        # Read data
        data = dict()
        data = {
            'scan_data': [],
            'time_stamp': [],
            'packet_ind': [],
            'packet_pulse_ind': [],
            'range_bins': range_bins
        }
        single_scan_data = []
        packet_count = 0
        pulse_count = 0

        while True:

            # Read a single data packet and break loop if not a complete packet
            # (in terms of size)
            packet = f.read(1452)
            if len(packet) < 1452:
                break
            packet_count += 1

            # Packet index
            data['packet_ind'].append(np.frombuffer(packet[48:50], dtype='u2'))

            # Extract radar data samples from current packet; process last
            # packet within a scan  seperately to get all data
            if packet_count % num_packets_per_scan == 0:
                num_samples = num_range_bins % 350
                packet_data = np.frombuffer(packet[52:(52 + 4 * num_samples)],
                                            dtype='>i4')
                single_scan_data.append(packet_data)
                data['scan_data'].append(np.concatenate(single_scan_data))
                data['time_stamp'].append(
                    np.frombuffer(packet[8:12], dtype='>u4'))
                single_scan_data = []
                pulse_count += 1
            else:
                num_samples = 350
                packet_data = np.frombuffer(packet[52:(52 + 4 * num_samples)],
                                            dtype='>i4')
                single_scan_data.append(packet_data)

        # Add last partial scan if present
        if single_scan_data:
            single_scan_data = np.concatenate(single_scan_data)
            num_pad = data['scan_data'][0].size - single_scan_data.size
            single_scan_data = np.pad(single_scan_data, (0, num_pad),
                                      'constant',
                                      constant_values=0)
            data['scan_data'].append(single_scan_data)

        # Stack scan data into 2-D array
        # (rows -> pulses, columns -> range bins)
        data['scan_data'] = np.stack(data['scan_data'])

        # Finalize remaining entries in data
        data['time_stamp'] = np.concatenate(data['time_stamp'])

        return data
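unpack above decodes the binary configuration with np.frombuffer and explicit big-endian dtypes ('>u2', '>i4', '>u4'). A minimal sketch of that decoding on a made-up six-byte message:

import numpy as np

msg = b'\x01\x02\x00\x00\x00\x2a'

msg_type = np.frombuffer(msg[0:2], dtype='>u2')[0]   # 0x0102 -> 258
value = np.frombuffer(msg[2:6], dtype='>u4')[0]      # 0x0000002a -> 42

assert msg_type == 258 and value == 42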
Example #58
0
    def bilateral_filter(self, edge):
        # bilateral filter based upon the work of
        # Jiawen Chen, Sylvain Paris, and Fredo Durand, 2007 work

        # note: if edge data is not provided, image is served as edge
        # this is called normal bilateral filter
        # if edge data is provided, then it is called cross or joint
        # bilateral filter

        # get width and height of the image
        width, height = helpers(self.data).get_width_height()

        # sigma_spatial
        sigma_spatial = min(height, width) / 16.

        # calculate edge_delta
        edge_min = np.min(edge)
        edge_max = np.max(edge)
        edge_delta = edge_max - edge_min

        # sigma_range and sampling_range
        sigma_range = 0.1 * edge_delta
        sampling_range = sigma_range
        sampling_spatial = sigma_spatial

        # derived_sigma_spatial and derived_sigma_range
        derived_sigma_spatial = sigma_spatial / sampling_spatial
        derived_sigma_range = sigma_range / sampling_range

        # paddings
        padding_xy = np.floor(2. * derived_sigma_spatial) + 1.
        padding_z = np.floor(2. * derived_sigma_range) + 1.

        # downsamples
        downsample_width = np.uint16(
            np.floor((width - 1.) / sampling_spatial) + 1. + 2. * padding_xy)
        downsample_height = np.uint16(
            np.floor((height - 1.) / sampling_spatial) + 1. + 2. * padding_xy)
        downsample_depth = np.uint16(
            np.floor(edge_delta / sampling_range) + 1. + 2. * padding_z)

        grid_data = np.zeros(
            (downsample_height, downsample_width, downsample_depth))
        grid_weight = np.zeros(
            (downsample_height, downsample_width, downsample_depth))

        jj, ii = np.meshgrid(np.arange(0, width, 1),
                             np.arange(0, height, 1))

        di = np.uint16(np.round(ii / sampling_spatial) + padding_xy + 1.)
        dj = np.uint16(np.round(jj / sampling_spatial) + padding_xy + 1.)
        dz = np.uint16(np.round((edge - edge_min) /
                                sampling_range) + padding_z + 1.)

        for i in range(0, height):
            for j in range(0, width):

                data_z = self.data[i, j]
                if not np.isnan(data_z):
                    dik = di[i, j]
                    djk = dj[i, j]
                    dzk = dz[i, j]

                    grid_data[dik, djk, dzk] = grid_data[dik,
                                                         djk, dzk] + data_z
                    grid_weight[dik, djk,
                                dzk] = grid_weight[dik, djk, dzk] + 1.

        kernel_width = 2. * derived_sigma_spatial + 1.
        kernel_height = kernel_width
        kernel_depth = 2. * derived_sigma_range + 1.

        half_kernel_width = np.floor(kernel_width / 2.)
        half_kernel_height = np.floor(kernel_height / 2.)
        half_kernel_depth = np.floor(kernel_depth / 2.)

        grid_x, grid_y, grid_z = np.meshgrid(np.arange(0, kernel_width, 1),
                                             np.arange(0, kernel_height, 1),
                                             np.arange(0, kernel_depth, 1))

        grid_x = grid_x - half_kernel_width
        grid_y = grid_y - half_kernel_height
        grid_z = grid_z - half_kernel_depth

        grid_r_squared = ((np.multiply(grid_x, grid_x) +
                           np.multiply(grid_y, grid_y)) / np.multiply(derived_sigma_spatial, derived_sigma_spatial)) + \
                         (np.multiply(grid_z, grid_z) /
                          np.multiply(derived_sigma_range, derived_sigma_range))

        kernel = np.exp(-0.5 * grid_r_squared)
        blurred_grid_data = ndimage.convolve(grid_data, kernel, mode='reflect')
        blurred_grid_weight = ndimage.convolve(
            grid_weight, kernel, mode='reflect')

        # divide
        blurred_grid_weight = np.asarray(blurred_grid_weight)
        mask = blurred_grid_weight == 0
        blurred_grid_weight[mask] = -2.
        normalized_blurred_grid = np.divide(
            blurred_grid_data, blurred_grid_weight)
        mask = blurred_grid_weight < -1
        normalized_blurred_grid[mask] = 0.
        blurred_grid_weight[mask] = 0.

        # upsample
        jj, ii = np.meshgrid(np.arange(0, width, 1),
                             np.arange(0, height, 1))

        di = (ii / sampling_spatial) + padding_xy + 1.
        dj = (jj / sampling_spatial) + padding_xy + 1.
        dz = (edge - edge_min) / sampling_range + padding_z + 1.

        # arrange the input points
        n_i, n_j, n_z = np.shape(normalized_blurred_grid)
        points = (np.arange(0, n_i, 1), np.arange(
            0, n_j, 1), np.arange(0, n_z, 1))

        # query points
        xi = (di, dj, dz)

        # multidimensional interpolation
        output = interpolate.interpn(
            points, normalized_blurred_grid, xi, method='linear')

        return output
Example #59
0
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# Blur using 3 * 3 kernel.
gray_blurred = cv2.blur(gray, (3, 3))

# Apply Hough transform on the blurred image.
detected_circles = cv2.HoughCircles(gray_blurred,
                                    cv2.HOUGH_GRADIENT,
                                    1,
                                    20,
                                    param1=50,
                                    param2=30,
                                    minRadius=1,
                                    maxRadius=40)

# Draw circles that are detected.
if detected_circles is not None:

    # Convert the circle parameters a, b and r to integers.
    detected_circles = np.uint16(np.around(detected_circles))

    for pt in detected_circles[0, :]:
        a, b, r = pt[0], pt[1], pt[2]

        # Draw the circumference of the circle.
        cv2.circle(img, (a, b), r, (0, 255, 0), 2)

        # Draw a small circle (of radius 1) to show the center.
        cv2.circle(img, (a, b), 1, (0, 0, 255), 3)
        cv2.imshow("Detected Circle", img)
        cv2.waitKey(0)
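Example #60
0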
        track[0, :, w, u] = track_initials[w, :, u]

        Diffusion_selection = npy.random.multinomial(1, weights)
        Diffusion_selection_elem = npy.nonzero(Diffusion_selection)
        Diffuse_state = npy.sum(Diffusion_selection_elem)
        track_D[w, 0, u] = Diffuse_state

for j in range(num_bacteria):
    yst_image = cv2.circle(image, (int(bac_cells[j, 0]), int(bac_cells[j, 1])),
                           int(bac_cells[j, 2]), (0, 255, 0), -1, 8)

yst_image_final = cv2.cvtColor(yst_image, cv2.COLOR_BGR2GRAY)
#  Check if the circles are overlapping
yst_image_16_reform = yst_image_final.reshape(pixels, resolution, pixels,
                                              resolution).sum(3).sum(1)
yst_image_16 = npy.uint16(yst_image_16_reform)
non_zer_find = npy.nonzero(yst_image_16)
yst_image_16[yst_image_16 != 0] = 1
filename_bin = filename.replace(".tif", "_binary.tif")
#filename_bin.replace(".tif", "binary.tif")
#print(filename_bin)
cv2.imwrite(filename_bin, yst_image_16)
print('Initialization Complete')
with tifffile.TiffWriter(filename, bigtiff=True) as tif:

    yst_array = npy.array(yst_image_final)
    max_cell_value = npy.amax(yst_array)
    index_max = npy.argwhere(yst_array == max_cell_value)
    #index_max_background = npy.argwhere(rt4 != max_cell_value)
    #rt4[index_max] = cell_background
    for m in range(len(index_max)):