Exemple #1
0
def score_distribution(file, peptide, charge, modi_list=None, mass_list=None):
    """Build a score matrix for *peptide* over the given modification masses.

    For each mass in *mass_list*, ion hits are fetched via ``id.identify``
    and two rows are produced: the b-ion amplitudes and the y-ion
    amplitudes, with 0 wherever the matched sequence shows a gap ('-').

    Parameters:
        file: spectrum file passed through to ``id.identify``.
        peptide: peptide sequence string; one matrix column per residue.
        charge: charge state passed through to ``id.identify``.
        modi_list: modification types (default ['cc']).
        mass_list: modification masses to evaluate (default [0]).

    Returns:
        np.matrix with 2 * len(mass_list) rows (b row then y row per mass).
    """
    # Avoid mutable default arguments; reproduce the original defaults here.
    if modi_list is None:
        modi_list = ['cc']
    if mass_list is None:
        mass_list = [0]

    peptide_scores = []
    for mass in mass_list:
        # fetch matching hits for this modification mass
        y_seq, y_mz, y_amp, b_seq, b_mz, b_amp = id.identify(
            file, peptide, charge, modi_list, mass)

        # amplitude where the ion matched, 0 where localise() left a gap
        y_scores = [y_amp[i] if y_seq[i] != '-' else 0
                    for i in range(len(peptide))]
        b_scores = [b_amp[i] if b_seq[i] != '-' else 0
                    for i in range(len(peptide))]

        # b row first, then y row — same order as the original implementation
        peptide_scores.append(b_scores)
        peptide_scores.append(y_scores)

    # cast the list of score rows together as a matrix
    A = np.matrix(peptide_scores)
    return (A)
Exemple #2
0
def main():
    """Read a C declaration from stdin (or from a .txt file) and identify it.

    Prompts the user for a declaration; if the user answers 'f', prompts
    for a file name and reads the declaration from that file instead.
    Returns 0 when the file cannot be opened.
    """
    print('Type in the C declaration (or \'f\' to input a .txt file):',
          end=' ')
    response = input()

    if response == 'f':
        print('.txt file name:', end=' ')
        filename = input()

        try:
            # context manager guarantees the handle is closed (the original
            # opened the file and never closed it)
            with open(filename, 'r') as f:
                response = f.read()
        except IOError:
            print('The file \'' + filename + '\' could not be found.')
            return 0

    identify(response.splitlines())
Exemple #3
0
 def predict(self):
     """Identify the image at ``self.image_dir`` and show the result in the UI.

     If the path does not exist, pops up an error message box instead.
     """
     if os.path.exists(self.image_dir):
         # NOTE(review): scipy.misc.imread is removed in modern SciPy — confirm version
         pic = misc.imread(self.image_dir)
         result = identify(pic)
         self.result.setText(result)
     else:
         # warn the user that the configured image path does not exist
         reply = QMessageBox.information(self,
                                         "Error",
                                         "FileNotFoundError",
                                         QMessageBox.Yes | QMessageBox.No)
Exemple #4
0
 def run(self):
     """Thread body: identify the image at ``self.image_dir`` and emit the result.

     Emits the identification result through ``self.signal``; shows an error
     message box when the image path does not exist.
     """
     global result_name
     # perform the identification task
     if os.path.exists(self.image_dir):
         pic = misc.imread(self.image_dir)
         result = identify(pic)
         self.signal.emit(result)
     else:
         # report the missing file to the user
         reply = QMessageBox.information(self, "Error", "FileNotFoundError",
                                         QMessageBox.Yes | QMessageBox.No)
Exemple #5
0
def peptide_score(file, peptide_list,max_charge,mass_list, command=''):
    
    #item holder for output:
    score_list=[]
    # loop over every peptide, charge status, modification type and mass patter
    for peptide in peptide_list:
        peptide_score=0
        for mass in mass_list:
            
            # fetch matching hits
            y_seq,y_mz,y_amp, b_seq,b_mz,b_amp = id.identify(file,peptide,max_charge,['cc'],mass)
    
            # counters
            score=0
            consecutive_hits=0
            number_of_hits=0
           # raise the score through summing the matches for y series, scaled over their amplitude
            for i in np.arange(0,len(peptide)):
                if y_seq[i]!= '-':
                    number_of_hits+=1
                    score +=y_amp[i]
                    # if adjacent series, raise counter
                    if i >0 and y_seq[i-1]!= '-':
                        consecutive_hits +=1
                if b_seq[i]!= '-':
                    number_of_hits+=1
                    score +=b_amp[i] 
                    if i >0 and b_seq[i-1]!= '-':
                        consecutive_hits +=1
                        
            # compute final score: sum up intensities of hits (peptide_score), multiply with number of hits,
        # add bonus for consecutive hits and divide through number of possible hits (len(peptide)= #ions)            
            peptide_score += (score*number_of_hits*(1+consecutive_hits))/len(peptide)
            #print peptide+' at '+str(int(mass))+': '+str(int(peptide_score))+ ' and consecutive hits:'+str(consecutive_hits)    
        
        score_list.append((peptide_score))  
        
        #output single computation results 
        if command=='-o':     
            print 'peptide:',peptide, ' -> score:', int(peptide_score)
     #find max value in scores
    if len(peptide_list)>1:   
        max_score = max(score_list)  
        max_score_peptide= peptide_list[score_list.index(max_score)]
        best_score=[max_score_peptide,max_score]
    else:
        best_score=[peptide_list,score_list]
      
    #report best score  
    if command=='-o' or command=='-b':
        print 'best score: ', best_score
        
    return_list=zip(peptide_list,score_list) 
        
    return(best_score,return_list)  
def picture_identify(name):
    """Locate a license-plate-like region in the image file *name* and OCR it.

    Applies blur, edge detection and morphology to isolate wide rectangular
    blobs, crops each candidate from the original image and runs
    ``identify`` on it.

    Returns:
        The recognised character list when a plausible plate is found
        (the 300x100 crop is also saved as ``pai.jpg``), else ``[0]``.
    """
    original = cv2.imread(name)
    rawImage = cv2.imread(name)
    # Gaussian blur
    image = cv2.GaussianBlur(rawImage, (3, 3), 0)
    # convert to grayscale
    image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    # Canny edge detection
    image = cv2.Canny(image, 100, 300)
    # Sobel operator (x direction)
    Sobel_x = cv2.Sobel(image, cv2.CV_16S, 1, 0)
    absX = cv2.convertScaleAbs(Sobel_x)
    image = absX

    # binarize with Otsu thresholding
    ret, image = cv2.threshold(image, 0, 255, cv2.THRESH_OTSU)

    # morphological closing merges plate characters into one blob
    kernelX = cv2.getStructuringElement(cv2.MORPH_RECT, (30, 10))
    image = cv2.morphologyEx(image, cv2.MORPH_CLOSE, kernelX, iterations=1)

    # dilation/erosion to strip thin noise in both directions
    kernelX = cv2.getStructuringElement(cv2.MORPH_RECT, (19, 1))
    kernelY = cv2.getStructuringElement(cv2.MORPH_RECT, (1, 19))

    image = cv2.dilate(image, kernelX)
    image = cv2.erode(image, kernelX)

    image = cv2.erode(image, kernelY)
    image = cv2.dilate(image, kernelY)

    # median filtering
    image = cv2.medianBlur(image, 15)

    # find contours of the remaining blobs
    contours, hierarchy = cv2.findContours(image, cv2.RETR_TREE,
                                           cv2.CHAIN_APPROX_SIMPLE)

    for item in contours:
        x, y, weight, height = cv2.boundingRect(item)
        # plate candidates: wide, reasonably large rectangles
        if weight > (height * 2) and weight > 50 and height > 25:
            # crop the candidate region
            cv2.rectangle(image, (x, y), (x + weight, y + height), (0, 255, 0),
                          2)
            chepai = original[y:y + height, x:x + weight]
            # renamed from `list` so the builtin is not shadowed
            chars = identify(chepai)
            print(chars)
            if len(chars) > 3:
                # BUGFIX: cv2.resize returns the resized image; the original
                # discarded the result and saved the unresized crop
                chepai = cv2.resize(chepai, (300, 100))
                cv2.imwrite("pai.jpg", chepai)
                return chars
    return [0]
Exemple #7
0
def movienamer(movie):
    directory = '/'.join(movie.split('/')[:-1])
    filename, extension = os.path.splitext(os.path.basename(movie))

    results = identify(filename, directory)
    if len(results) == 0:
        print 'No results found. Skipping movie file\n'
        return False

    action = confirm(results, filename, extension)

    if action == 'SKIP':
        print 'Skipping movie file\n'
        return False
    elif action == 'QUIT':
        print 'Exiting movienamer'
        sys.exit()
    else:
        i = int(action)
        result = results[i-1]

        if directory == '':
            directory = '.'

        dest = (directory + '/' +
                result['title'] +
                ' [' + result['year'] + ']' +
                extension)

        if os.path.isfile(dest):
            print 'File already exists: ' + dest
            print 'Overwrite?'
            final_confirmation = raw_input('([y]/n/q)'.encode('utf-8')).lower()
            if final_confirmation == '':
                final_confirmation = 'y'

            if final_confirmation not in ['y', 'n', 'q']:
                final_confirmation = raw_input(
                    '([y]/n/q)'.encode('utf-8')).lower()
                if final_confirmation == '':
                    final_confirmation = 'y'

            if final_confirmation == 'n':
                print 'Skipping movie file\n'
                return False
            elif final_confirmation == 'q':
                print 'Exiting movienamer'
                sys.exit()

        return movie, dest
Exemple #8
0
def movienamer(movie):
    directory = '/'.join(movie.split('/')[:-1])
    filename, extension = os.path.splitext(os.path.basename(movie))

    results = identify(filename, directory)
    if len(results) == 0:
        print movie + ': No results found. Skipping movie file\n'
        return False

    action = confirm(results, filename, extension)

    if action == 'SKIP':
        print 'Skipping movie file\n'
        return False
    elif action == 'QUIT':
        print 'Exiting movienamer'
        sys.exit()
    else:
        i = int(action)
        result = results[i - 1]

        if directory == '':
            directory = '.'

        title = makeValidFilename(result['title'], False, True, "", "_")

        dest = (directory + '/' + title + ' (' + result['year'] + ')' +
                extension)

        if os.path.isfile(dest):
            print 'File already exists: ' + dest
            print 'Overwrite?'
            final_confirmation = raw_input('([y]/n/q)'.encode('utf-8')).lower()
            if final_confirmation == '':
                final_confirmation = 'y'

            if final_confirmation not in ['y', 'n', 'q']:
                final_confirmation = raw_input(
                    '([y]/n/q)'.encode('utf-8')).lower()
                if final_confirmation == '':
                    final_confirmation = 'y'

            if final_confirmation == 'n':
                print 'Skipping movie file\n'
                return False
            elif final_confirmation == 'q':
                print 'Exiting movienamer'
                sys.exit()

        return movie, dest
Exemple #9
0
def binstatus1(items):
    """Wait for an item (or the door closing), identify the item, update *items*.

    Polls the item sensor and the door: blocks while no item is present and
    the door is still open. When an item is detected it is rotated,
    photographed and identified, and the result is stored in ``items[1]``.

    Returns the (possibly updated) *items* list.
    """
    # locals renamed so the builtins `object` and `open` are not shadowed;
    # the explicit == True / == False comparisons are kept on purpose in
    # case the sensors return non-boolean values
    item_present = itemsensor()
    door_open = door.door()
    while item_present == False and door_open == True:
        item_present = itemsensor()
        door_open = door.door()
    if item_present == True:
        time.sleep(2)
        rotate.rotate(
        )  #rotate objects in the top compartment 90 degrees and update items
        photo.photo()  #capture image of the object and assign it to variable
        a = identify.identify()  #identify object in the image captured
        time.sleep(7)  #wait 7 seconds for object identification
        items[1] = a  #assign object type to compartments array
        print(items)
        return (items)
    if item_present == False and door_open == False:
        return (items)
Exemple #10
0
def xcorr(file,peptide,max_charge, modi_list=['cc'], mass=0,command='-o',error=0.5):
    """Correlate theoretical vs. matched m/z values for the y and b ion series.

    Builds theoretical y/b fragment spectra for *peptide*, fetches the
    matched experimental m/z values via ``id.identify``, and returns the
    Pearson correlation coefficient of each pair, rounded to 3 decimals.

    Returns:
        (xcorr_y, xcorr_b) tuple of floats.
    """
    #read file
    # NOTE(review): mz/params/amplitudes are never used below — confirm
    # whether this read is needed (id.identify reads the file itself)
    mz,params,amplitudes= id.read_mgf(file)
    #create theoretical spectra of peptide
    theo_y= list(id.fragments_y(peptide,maxcharge=max_charge))
    theo_b= list(id.fragments_b(peptide,maxcharge=max_charge))
    #fetch data
    y_seq,y_mz,y_amp, b_seq,b_mz,b_amp = id.identify(file,peptide,max_charge,modi_list,mass,'',error)
  
 
    # np.corrcoef returns the 2x2 matrix [[XX,XY],[YX,YY]]; we need the
    # cross term, so take element [0,1] only
    xcorr_y = np.round(np.corrcoef(theo_y,y_mz)[0,1],3)
    
    xcorr_b= np.round(np.corrcoef(theo_b,b_mz)[0,1],3)
    
    if command=='-o':
        print 'seq: ',peptide, '==> xcorr y:', xcorr_y,', xcorr b:', xcorr_b  
    return(xcorr_y,xcorr_b)
Exemple #11
0
	def identify(self, upload):
		"""Cluster the files of *upload* and persist the clusters.

		Builds a plain-dict view of each uploaded file, hands the list to
		``identify.identify`` and stores the resulting clusters plus their
		event connections in the database session.
		"""
		files = [
			{
				"obj": fileobj,
				"name": fileobj.name,
				"mtime": fileobj.mtime.timestamp(),
				"size": fileobj.size,
			}
			for fileobj in self.getuploadfiles(upload)
		]
		clusters = identify.identify(files)
		for cluster in clusters:
			cluster_obj = Cluster()
			self.session.add(cluster_obj)
			# link every clustered file back to its cluster row
			for entry in cluster["files"]:
				entry["obj"].cluster = cluster_obj
			# connect the cluster to each associated lecture event
			for event in cluster["events"]:
				self.session.add(Eventconnector(cluster=cluster_obj, event=event["lecture_id"]))
Exemple #12
0
def binstatus1(items):
    """Wait for an item (or the door closing), identify the item, update *items*.

    Polls the item sensor and the door: blocks while no item is present and
    the door is still open. When an item is detected the plate is rotated,
    the object photographed and identified, and the result stored in
    ``items[1]``.

    Returns the (possibly updated) *items* list.
    """
    # locals renamed so the builtins `object` and `open` are not shadowed;
    # the explicit == True / == False comparisons are kept on purpose in
    # case the sensors return non-boolean values
    print("Checking item")
    item_present = itemsensor.itemsensor()
    print("Checking door")
    door_open = door.door()
    while item_present == False and door_open == True:
        print("Checking item2")
        item_present = itemsensor.itemsensor()
        print("Checking door2")
        door_open = door.door()
    if item_present == True:
        print("rotating the plate")
        centralmotor.anticlock(
        )  #rotate objects in the top compartment 90 degrees and update items
        photo.photo()  #capture image of the object and assign it to variable
        a = identify.identify()  #identify object in the image captured
        time.sleep(7)  #wait 7 seconds for object identification
        items[1] = a  #assign object type to compartments array
        print(items)
        return (items)
    if item_present == False and door_open == False:
        return (items)
Exemple #13
0
def scan_person():
	"""Capture webcam frames, identify the person, and respond by voice.

	Tries up to five frames to find a face. A known person is greeted with
	their stored info; an unknown face can be enrolled interactively or its
	encoding saved for later classification.
	"""
	video = cv2.VideoCapture(0)

	for x in range(5):
		_, frame = video.read()
		cv2.imwrite('capture.jpg', frame)
		rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2BGRA) #TODO is this necessary? I forgot what it does. NOTE(review): `rgb` is never used below — confirm
		person, encoding = identify.identify(frame)
		# identify() returns the sentinels 'no faces' / 'unknown' or a person id
		if person != 'no faces':						#maybe save unknown images where there was a face so if last one doesn't find a face we can still do it. 
			break										#maybe don't check 5 times as it's like p-hacking. Will have to see how accuracy is affected when done.
		else:
			print('No face found; error code: ' + person)

	video.release()
	cv2.destroyAllWindows()

	if person == 'no faces':
		say('No face found.')
	elif person == 'unknown':
		say('Unknown')
		clicks = input('Clicks: ') #this would happen right after conversation
		if clicks == 'x':
			# create a new directory entry keyed by the next free line number
			say('Who is it?')
			name = listen(3)
			with open('assets/directory.txt') as file:
				filename = str(len(file.readlines()))
			with open('assets/directory.txt', 'a') as file:
				file.write(filename + ',' + name + ',"random data yay"\n') #TODO this needs to be cleaned up to look something like the style in the readme
			# save the face encoding to the knowns file
			np.save((os.path.join('assets', 'knowns', filename) + '.npy'), encoding) #TODO learn more about os.path.join. Do I want my standard to be "/" or that?
		else: #if you double click
			# UTC timestamp avoids overwriting pictures when switching timezones
			unix_time = str(int(datetime.datetime.utcnow().timestamp()))
			np.save((os.path.join('assets', 'unknowns', unix_time) + '.npy'), encoding) #TODO change this to store face encodings if they are saved for later to speed it up when this is classified
			return
	else:
		say(get_info(int(person)))
Exemple #14
0
def color_identify(name):
    """Find a blue license-plate region in the image file *name* and OCR it.

    Extracts blue pixels in HSV space, binarises the mask, and scans the
    resulting contours for wide rectangles; each candidate crop is run
    through ``identify``.

    Returns:
        The recognised character list when a plausible plate is found
        (the 300x100 crop is also saved as ``pai.jpg``), else ``[0]``.
    """
    # HSV range for license-plate blue
    lower = np.array([70, 110, 110])
    upper = np.array([130, 255, 255])

    raw = cv2.imread(name)
    original = cv2.imread(name)

    # median blur, then convert to HSV
    hsv_one = cv2.medianBlur(raw, 5)
    hsv = cv2.cvtColor(hsv_one, cv2.COLOR_BGR2HSV)

    # extract blue regions and binarise with Otsu thresholding
    mask_blue = cv2.inRange(hsv, lower, upper)
    res = cv2.bitwise_and(raw, raw, mask=mask_blue)
    res = cv2.cvtColor(res, cv2.COLOR_BGR2GRAY)
    ret, iIMAGE = cv2.threshold(res, 0, 255, cv2.THRESH_OTSU)

    # find contours of the blue blobs
    con, hierarchy = cv2.findContours(iIMAGE, cv2.RETR_TREE,
                                      cv2.CHAIN_APPROX_SIMPLE)

    img = cv2.imread(name)

    for item in con:
        x, y, w, h = cv2.boundingRect(item)
        if w > (h * 2) and w > 100 and h > 100:
            # BUGFIX: the original drew on an undefined name `image`
            # (NameError at runtime); draw on the freshly-read `img` instead
            cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
            # crop the candidate region
            chepai = original[y:y + h, x:x + w]
            # renamed from `list` so the builtin is not shadowed
            chars = identify(chepai)
            print(chars)
            if len(chars) > 3:
                # BUGFIX: cv2.resize returns the resized image; the original
                # discarded the result and saved the unresized crop
                chepai = cv2.resize(chepai, (300, 100))
                cv2.imwrite("pai.jpg", chepai)
                return chars
    return [0]
Exemple #15
0
def respond(voice_data):
    """Dispatch a recognised voice command to the matching assistant action.

    *voice_data* is a transcript string; each ``if`` checks for a trigger
    phrase, so several actions may fire for a single utterance. Terminates
    the process on 'exit' / 'good bye' / 'ok bye'.
    """
    if 'your name' in voice_data:
        speak.jarvis_speak(
            'My name is Jarvis, just a rather very intelligent system, except a lobotomized version'
        )

    if 'chemistry question' in voice_data:
        chem_element = inputs.record_audio(
            'I know the periodic table, what element do you want to know about?'
        )
        chem_qs.el_lookup(chem_element)

    if 'time is it' in voice_data:
        speak.jarvis_speak(ctime())

    if 'search' in voice_data or 'can you find' in voice_data:
        search = inputs.record_audio('What do you want to search for?')
        url = 'https://google.com/search?q=' + search
        webbrowser.get().open(url)
        speak.jarvis_speak('Here is what i found for ' + search)

    if 'find location' in voice_data:
        location = inputs.record_audio('What is the location?')
        url = 'https://google.nl/maps/place/' + location + '/&'
        webbrowser.get().open(url)
        speak.jarvis_speak('Here is the location of ' + location)

    if 'exit' in voice_data or "good bye" in voice_data or "ok bye" in voice_data:
        speak.jarvis_speak('Always happy to help')
        exit()

    if 'weather' in voice_data:
        city_name = inputs.record_audio('whats the city name')
        report = weather.weather_report(city_name)
        speak.jarvis_speak(report)

    if 'quick question' in voice_data:
        question = inputs.record_audio('whats the question')
        answer = quick_question.ask(question)
        speak.jarvis_speak(answer)

    if 'say hello' in voice_data or 'introduce yourself' in voice_data:
        # greet every recognised face with a time-of-day appropriate greeting
        names = identify.identify(identify.peek())
        hour = datetime.datetime.now().hour
        for name in names:
            if hour >= 0 and hour < 12:
                speak.jarvis_speak('Hello, Good Morning ' + name)
            elif hour >= 12 and hour < 18:
                speak.jarvis_speak('Hello, Good Afternoon ' + name)
            else:
                speak.jarvis_speak('Hello, Good Evening ' + name)

    if 'open my email' in voice_data:
        url = 'https://mail.google.com/mail/u/0/#inbox'
        webbrowser.get().open(url)
        speak.jarvis_speak('Opening email')

    if 'play some music' in voice_data:
        url = 'https://www.youtube.com/playlist?list=PLBuxhzrxSigRFEI1WIMgsLvSUAMXr3-xX'
        webbrowser.get().open(url)
        speak.jarvis_speak('Dropping needle')
Exemple #16
0
def identify_plot(file, peptide, charge, modi_mass=None, zoom=1):
    """Plot the experimental spectrum of *file* with annotated y/b ion matches.

    Draws the raw spectrum (relative abundance), overlays either the
    theoretical fragment spectra (when no modification masses are given) or
    the matched mono/modified ion peaks from ``id.identify``, and marks the
    known reporter ions.

    Parameters:
        file: spectrum file (mgf) to read and plot.
        peptide: peptide sequence used for theoretical fragments and labels.
        charge: charge state for the fragment series.
        modi_mass: modification masses to overlay (default: none, plot
            theoretical spectra instead).
        zoom: y-axis zoom factor; must lie in (0, 1].

    Raises:
        ValueError: if zoom is outside (0, 1].
    """
    # avoid a mutable default argument
    if modi_mass is None:
        modi_mass = []

    #read file
    values, params, amplitude = id.read_mgf(file)
    #procentual amplitudes for relative abundance
    ampl = (amplitude / max(amplitude)) * 100
    # BUGFIX: the original only set offset/y_lim_scale/one_per inside this
    # range check, causing a NameError further down for any other zoom;
    # reject invalid zoom values explicitly instead
    if not 0 < zoom <= 1:
        raise ValueError('zoom must be in the interval (0, 1]')
    #offset and scales for zoom into the spectra
    offset = (max(ampl) * zoom) / 4
    y_lim_scale = max(ampl) * zoom
    one_per = y_lim_scale / 100
    # create theoretical fragmentation of given peptide
    theo_y = list(id.fragments_y(peptide, maxcharge=charge))
    theo_b = list(id.fragments_b(peptide, maxcharge=charge))

    # create plot title
    headline = 'File: ' + file + ', Peptide: ' + peptide + ' (charge ' + str(
        charge) + ')'

    #create figure
    py.figure()
    py.xlabel('m/z')
    py.ylabel('Intensity, rel. units %')
    py.title(headline)

    # plot experimental spectra in grey
    py.bar(values,
           ampl,
           width=2.5,
           linewidth=0.1,
           color='grey',
           alpha=0.3,
           label='data')

    # some label and axis configs
    py.xlim(xmin=0)
    py.ylim(ymax=y_lim_scale)
    py.xticks(np.arange(0, max(values) + 1, 50))
    py.tick_params(axis='both', which='major', labelsize=8)
    py.tick_params(axis='both', which='minor', labelsize=5)

    # --- 0. benchmarks
    #0.1 plot theoretical spectras if no modi given
    if modi_mass == []:
        py.bar(theo_y,
               np.ones(len(theo_y)) * offset,
               linewidth=0.1,
               width=2.5,
               alpha=0.3,
               color='red',
               label='theo y')
        py.bar(theo_b,
               np.ones(len(theo_y)) * offset,
               linewidth=0.1,
               width=3.5,
               alpha=0.3,
               color='lightgreen',
               label='theo b')

        # 0.2 annotate peaks
        #0.2.1 theo y ions
        for i in np.arange(0, len(peptide)):
            py.annotate(peptide[i],
                        xy=(theo_y[i], offset),
                        xytext=(theo_y[i], offset + 2 * one_per),
                        bbox=dict(boxstyle='round,pad=0.2',
                                  fc='red',
                                  alpha=0.5))
            # 0.2.1 theo b ion
            py.annotate(peptide[i],
                        xy=(theo_b[i], offset),
                        xytext=(theo_b[i], offset + 6 * one_per),
                        bbox=dict(boxstyle='round,pad=0.2',
                                  fc='lightgreen',
                                  alpha=0.5))

    alpha = 0.75
    for mass in modi_mass:
        # --- 1. mono matches
        if mass == 0:
            # 1.0 use def identify to find matching aa sequences
            y_seq, y_mz, y_amp, b_seq, b_mz, b_amp = id.identify(
                file, peptide, charge, ['cc'], 0)

            # 1.1 plot peaks
            py.bar(y_mz,
                   y_amp,
                   width=5,
                   linewidth=0.1,
                   color='red',
                   label='y [M+1H]+' + str(charge))
            py.bar(b_mz,
                   b_amp,
                   width=5,
                   linewidth=0.1,
                   color='lightgreen',
                   label='b [M+1H]+' + str(charge))

            # 1.2  annotate labels to peak hits
            # 1.2.1 y ion label
            for i in np.arange(0, len(peptide)):
                if y_seq[i] == '-':
                    py.annotate(y_seq[i],
                                xy=(theo_y[i], 0),
                                xytext=(theo_y[i], offset + 4 * one_per))
                else:
                    py.annotate(y_seq[i],
                                xy=(y_mz[i], offset),
                                xytext=(y_mz[i], offset + 4 * one_per),
                                bbox=dict(boxstyle='round,pad=0.2', fc='red'))

            # 1.2.2 b ion label
                if b_seq[i] == '-':
                    py.annotate(b_seq[i],
                                xy=(theo_b[i], 0),
                                xytext=(theo_b[i], offset + 8 * one_per))
                else:
                    py.annotate(b_seq[i],
                                xy=(b_mz[i], offset),
                                xytext=(b_mz[i], offset + 8 * one_per),
                                bbox=dict(boxstyle='round,pad=0.2',
                                          fc='lightgreen'))

    # --- 2. cc matches
        else:
            #compute sequence
            y_seq, y_mz, y_amp, b_seq, b_mz, b_amp = id.identify(
                file, peptide, charge, ['cc'], mass)
            # 2.1. plot peaks
            py.bar(y_mz,
                   y_amp,
                   width=5,
                   linewidth=0.1,
                   color='orange',
                   label='y + ' + str(int(mass)) + '[M+1H]+' + str(charge),
                   alpha=alpha)
            py.bar(b_mz,
                   b_amp,
                   width=5,
                   linewidth=0.1,
                   color='blue',
                   label='b + ' + str(int(mass)) + '[M+1H]+' + str(charge),
                   alpha=alpha)

            # 2.2.annotate labels to peak
            # 2.2.1 y ions
            offset = offset + 12 * one_per
            for i in np.arange(0, len(peptide)):
                if y_seq[i] == '-':
                    py.annotate(y_seq[i],
                                xy=(theo_y[i] + (mass / charge), 0),
                                xytext=(theo_y[i] + (mass / charge), offset))
                else:
                    py.annotate(y_seq[i],
                                xy=(y_mz[i], offset),
                                xytext=(y_mz[i], offset),
                                bbox=dict(boxstyle='round,pad=0.2',
                                          fc='orange',
                                          alpha=alpha))

        # 2.2.2 b ions
                if b_seq[i] == '-':
                    py.annotate(b_seq[i],
                                xy=(theo_b[i] + (mass / charge), 0),
                                xytext=(theo_b[i] + (mass / charge),
                                        offset + 4 * one_per))
                else:
                    py.annotate(b_seq[i],
                                xy=(b_mz[i], offset),
                                xytext=(b_mz[i], offset + 4 * one_per),
                                bbox=dict(boxstyle='round,pad=0.2',
                                          fc='blue',
                                          alpha=alpha))
            # fade successive modification overlays
            alpha = alpha - 0.25

    #--- 3. reporter matches
    reporter = [284, 447]
    r_val = []
    r_amp = []
    # 3.1 check for occurence: nearest measured m/z that rounds to the
    # reporter mass counts as a hit
    for i in reporter:
        closest = min(values, key=lambda x: abs(x - i))
        if round(closest) == i:
            r_val.append(closest)
            r_amp.append(ampl[values.tolist().index(closest)])
    # 3.2 plot occurence
    if r_val != []:
        py.bar(r_val, r_amp, width=5, color='yellow', label='reporter')
        # 3.2.1 annotate occurence
        for i in np.arange(0, len(r_val)):
            py.annotate(int(round(r_val[i])),
                        xy=(r_val[i], r_amp[i]),
                        ha='center',
                        xytext=(r_val[i], r_amp[i] + 2 * one_per),
                        bbox=dict(boxstyle='round,pad=0.2', fc='yellow'))

    py.legend(loc=2, prop={'size': 11})
    py.show()
Exemple #17
0
 def identify_recording(self):
     """Identify the language of the current recording and display it."""
     language = identify()
     self.status.configure(text=language)
     # flush pending Tk tasks so the status label repaints immediately
     self.root.update_idletasks()
Exemple #18
0
def getSong(username, token, sp):  #record, identify, return id
    record()
    return identify()