Example #1
  def crawlAnalyze(self):

    try:
      self.Analyzer = Analyze.UnionFile('Comment.csv', 'Likelist.csv', 'ClubMember.csv')
      self.Analyzer.InitSet()
      self.Analyzer.Union()
      self.Analyzer.Diff()
      self.Analyzer.PrintSet()
      self.Analyzer.WriteFile()
    except Exception:
      try:
        tmpfile3fd = open('ClubMember.csv', 'r')
        tmpfile3fd.close()
      except IOError:
        #print('Missing club member list; start crawling club members')
        self.crawlClubList()

      try:
        tmpfile1fd = open('Comment.csv', 'r')
        tmpfile2fd = open('Likelist.csv', 'r')
        tmpfile1fd.close()
        tmpfile2fd.close()
      except IOError:
        #print('Missing posts; start crawling comments and likes...')
        self.crawlPost()

      
      #print('Restart analyzing the lurker list...')
      self.Analyzer = Analyze.UnionFile('Comment.csv', 'Likelist.csv', 'ClubMember.csv')
      self.Analyzer.InitSet()
      self.Analyzer.Union()
      self.Analyzer.Diff()
      self.Analyzer.PrintSet()
      self.Analyzer.WriteFile()
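
The snippet above relies on Analyze.UnionFile, whose internals are not shown here. A minimal sketch of the set logic it presumably performs, assuming each CSV holds one member name per row: union the commenters and likers, then diff against the member list to find the inactive members.

import csv

def read_names(path):
    # Assumption: one member name in the first column of each row.
    with open(path, newline='', encoding='utf-8') as f:
        return {row[0].strip() for row in csv.reader(f) if row}

def find_lurkers(comment_csv, like_csv, member_csv):
    active = read_names(comment_csv) | read_names(like_csv)  # Union()
    return read_names(member_csv) - active                   # Diff()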
Example #2
def insert_data_markov(sentence_id,sentence, channel_id):
    # Create the database instance
    psgr = Psgr()
    try:
        # Begin the transaction
        psgr.begin()

        # Morphological analysis of the message
        analyze = Analyze()
        # Get the sentence split into words (wakati),
        # e.g. 吾輩 は 猫 で ある
        parse_data = analyze.parse_wakati_sentence(sentence)

        w1 = ''
        w2 = ''
        for data in parse_data:
            if data[0] == '':
                continue
            # Data to register:
            word = data[0]
            if w1 and w2:
                values = (sentence_id, w1, w2, word, channel_id)
                sqlcom = "INSERT INTO markov_chain (sentence_id, word1, word2, word3, channel_id) VALUES (%s,%s,%s,%s,%s)"
                psgr.execute(sqlcom,values)
            w1, w2 = w2, word

        psgr.commit()
        del analyze
    except Exception as e:
        print(e)
        psgr.rollback()

    del psgr
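
The loop above slides a two-word window over the parsed sentence and stores (word1, word2, word3) rows. The same window in isolation, without the database layer (the token list stands in for the wakati output):

def trigrams(tokens):
    w1 = w2 = ''
    rows = []
    for word in tokens:
        if not word:
            continue
        if w1 and w2:
            rows.append((w1, w2, word))
        w1, w2 = w2, word
    return rows

print(trigrams(['吾輩', 'は', '猫', 'で', 'ある']))
# [('吾輩', 'は', '猫'), ('は', '猫', 'で'), ('猫', 'で', 'ある')]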
Example #3
def execute_command(user_input, argv):
    """
    dispatcher function to activate subroutines
    :param user_input: the function to activate
    :param argv: CLI-style argv arguments, if applicable
    """
    if user_input == 'Q' or user_input == 'QUIT':
        exit(1)
    elif user_input == '!HELP':
        print('Supported functions:\n'
              '    -QUOTE\n'
              '    -WATCH\n'
              '    -ANALYZE\n')
    elif user_input == 'QUOTE':
        import quote
        quote.__main__(argv)

    elif user_input == 'WATCH':
        import watch
        watch.__main__(argv)

    elif user_input == 'ANALYZE':
        import Analyze
        Analyze.__main__(argv)

    else:
        print("Invalid command")
Example #4
def suggestauthors(topic,num):
#suggest up to num authors whose research keywords best match the given topic
  authordict = getauthordict('authordict.csv')
  ranking = []
  similars = []

  author = Analyze.idauthor(authordict,topic)
  if author == '':
    stops=nltk.corpus.stopwords.words('english') #stopwords to weed out
    stops = stops + ['we',',','.','(',')','using','new','propose','investigate']
    stops = stops + ['-','show','infer','novel','method']

    tokens1 = nltk.word_tokenize(topic)
    pairs1 = nltk.bigrams(tokens1)
    tokens1 = tokens1+[bg for bg in pairs1 if bg[0] not in stops and bg[1] not in stops]
    for auth in authordict.keys():
      keyw2 = authordict[auth]['Keywords']
      tokens2 = list(filter(None,re.split(r',',keyw2)+re.split(r'[ ,]',keyw2)))

      score = -sum(1 for token in tokens1 if token in tokens2)
      heappush(ranking,[score,auth])
    while len(similars)<num:
      authscore = heappop(ranking)
      similars.append([authscore[1],authordict[authscore[1]]['Keywords']])
  else:
    for auth in authordict:
      score = Analyze.similarauthors(authordict[author],authordict[auth])
      heappush(ranking,[score,auth])
    while len(similars)<num:
      authscore = heappop(ranking)
      similars.append([authscore[1],authordict[authscore[1]]['Keywords']])
    print(authordict[author]['Keywords'])
  return similars
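
Both ranking loops above push negated scores because heapq is a min-heap: negating makes heappop return the best-scoring author first. The trick in isolation, with made-up overlap counts:

from heapq import heappush, heappop

ranking = []
for author, overlap in [('Lee', 3), ('Kim', 7), ('Cho', 5)]:
    heappush(ranking, [-overlap, author])  # negate so the best pops first

print([heappop(ranking)[1] for _ in range(2)])  # ['Kim', 'Cho']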
Example #5
def idauthor(filename,rawlabel):
#read file and find the profs closest match to rawlabel and confirm/return actual label
  f1 = open(filename,'r+',encoding='utf-8')
  rawdata=csv.reader(f1)
  authordict = Analyze.dictauthor(rawdata)
  f1.close()

  return Analyze.idauthor(authordict,rawlabel)
Example #6
def runTests(debug):

	analyzeTest = Analyze()
	analyzeTest.setDebug(debug)

	print ("\nRunning Analyze Tests")
	stanDevTestSet = [50, 60, 45, 35, 28, 30, 58, 70, 32, 80,
						90, 85, 75, 70, 50, 60, 70, 80, 35, 45]
	stanDevAns = 19.140010
	stanDevResult = analyzeTest.calculateSD(stanDevTestSet)
	print ("Running Standard Deviation Test. Answer should be %f, received %f" % (stanDevAns, stanDevResult))
Example #7
def analyze_message(sentence, sentence_id,channel_id):
    psgr = Psgr()
    cur = psgr.getCur()

    # Parse the message
    try:
        psgr.begin()

        analyze = Analyze()
        parse_data = analyze.parse_sentence(sentence)
        for data in parse_data:
            detail_array = ['*'] * 9
            detail_array[:len(data[1].split(','))] = data[1].split(',')

            # Data to register:
            # word = data[0]  # the surface form is unavailable, so use the original form
            word = detail_array[6]
            part_of_speech = detail_array[0]
            if part_of_speech == "BOS/EOS":
                continue

            part_of_speech_detail1 = detail_array[1]
            part_of_speech_detail2 = detail_array[2]
            part_of_speech_detail3 = detail_array[3]
            conjugate1 = detail_array[4]
            conjugate2 = detail_array[5]
            original = detail_array[6]
            pronunciation1 = detail_array[7]
            pronunciation2 = detail_array[8]

            # Register the word
            values = (word,part_of_speech,part_of_speech_detail1,part_of_speech_detail2,part_of_speech_detail3,conjugate1,conjugate2,original,pronunciation1,pronunciation2)
            sqlcom = "INSERT INTO words (word,part_of_speech,part_of_speech_detail1,part_of_speech_detail2,part_of_speech_detail3,conjugate1,conjugate2,original,pronunciation1,pronunciation2) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s) RETURNING word_id;"
            psgr.execute(sqlcom,values)
            word_id = psgr.lastrowid()

            # Link the word to the sentence
            values = (sentence_id,word_id,channel_id)
            sqlcom = "INSERT INTO sentence_word (sentence_id, word_id, channel_id) VALUES (%s,%s,%s) RETURNING sentence_word_id;"
            psgr.execute(sqlcom,values)

    except Exception as err:
        print(err)
        psgr.rollback()

    # Commit
    psgr.dbCommit()
    del psgr
Example #8
def rank(filename,criterion,num):
#read data and rank authors based on criteria, and output first num authors
  criteria = ['Citations','NumPapers','Co-Authors','Citation Rate']
  if criterion not in criteria:
      print('Error: criterion can only be one of the following strings: Citations/NumPapers/Co-Authors/Citation Rate')
      return []
  f1 = open(filename,'r+',encoding='utf-8')
  rawdata=csv.reader(f1)
  authordict = Analyze.dictauthor(rawdata)
  f1.close()

  ranking = []
  for author in authordict.keys():
      if criterion == 'Co-Authors':
          heappush(ranking, [-len(authordict[author][criterion]), author])
      elif criterion == 'Citation Rate':
          heappush(ranking, [-(authordict[author]['Citations']/authordict[author]['NumPapers']), author])
      else:
          heappush(ranking, [-authordict[author][criterion], author])

  bestofcriterion = []
  while len(bestofcriterion)<num:
      pick = heappop(ranking)
      bestofcriterion.append([-pick[0],pick[1]])
  return bestofcriterion
Example #9
def results(request):
    # Handle file upload
    arr = score = convo = None  # avoid a NameError on GET or an invalid form
    if request.method == 'POST':
        print "Results being called"
        form = DocumentForm(request.POST, request.FILES)
        if form.is_valid():
            newdoc = Document(docfile = request.FILES['docfile'])
            newdoc.save()
            parser = TextParsing.TextParsing(request.FILES['docfile'].name)
            analyzer = Analyze.Analyze()
            arr = analyzer.plotlyEmotion(parser, parser.speakerDict.keys(), "trust")
            score = analyzer.getAverageConversationScores(parser)
            convo = analyzer.getConversationScore(parser)["trust"]
            # labels = ['compound', 'neg', 'neu', 'pos']
            # ind = np.arange(4)
            # width = .5
            # plt.bar(ind, score, width)
            # plt.ylabel('Normalized Score')
            # plt.xticks(ind,labels)
            # fig, ax = plt.subplots()
            # plot = ax.bar(ind, score, width)
            # plt.savefig('ConversationAnalysis/media/graph.png')
            # Redirect to the document list after POST
            # return HttpResponseRedirect('MainPage.views.main')
    else:
        form = DocumentForm() # An empty, unbound form

    return render(request, 'appTemps/results.html', {'arr': arr, 'score':score, 'convo':convo})
Example #10
def results(request):
    # Handle file upload
    global arr, score, emoarr, cwords, arr2, p, cmpd
    if request.method == 'POST':
        # print "Results being called"
        form = DocumentForm(request.POST, request.FILES)
        if form.is_valid():
            newdoc = Document(docfile=request.FILES['docfile'])
            newdoc.save()
            parser = TextParsing.TextParsing(request.FILES['docfile'].name)
            analyzer = Analyze.Analyze()
            analyzer.popDialogEmotion(parser)
            analyzer.setDialogSentiment(parser)
            p = json.dumps(analyzer.getPersonData(parser),
                           separators=(',', ':'))
            arr2 = parser.plotlyBarFreqDist("everyone")
            cmpd = analyzer.plotlyCompoundSenti(parser)
            score = analyzer.getAverageConversationScores(parser)
            emo1 = analyzer.getConversationScore(parser)["anger"]
            emo2 = analyzer.getConversationScore(parser)["anticipation"]
            emo3 = analyzer.getConversationScore(parser)["disgust"]
            emo4 = analyzer.getConversationScore(parser)["fear"]
            emo5 = analyzer.getConversationScore(parser)["joy"]
            emo6 = analyzer.getConversationScore(parser)["sadness"]
            emo7 = analyzer.getConversationScore(parser)["surprise"]
            emo8 = analyzer.getConversationScore(parser)["trust"]
            emoarr = [emo1, emo2, emo3, emo4, emo5, emo6, emo7, emo8]
            cwords = parser.getNCommonWords(50)
            # print freqdist
            anger = analyzer.plotlyEmotion(parser, parser.speakerDict.keys(),
                                           "anger")
            anticipation = analyzer.plotlyEmotion(parser,
                                                  parser.speakerDict.keys(),
                                                  "anticipation")
            disgust = analyzer.plotlyEmotion(parser, parser.speakerDict.keys(),
                                             "disgust")
            fear = analyzer.plotlyEmotion(parser, parser.speakerDict.keys(),
                                          "fear")
            joy = analyzer.plotlyEmotion(parser, parser.speakerDict.keys(),
                                         "joy")
            sadness = analyzer.plotlyEmotion(parser, parser.speakerDict.keys(),
                                             "sadness")
            trust = analyzer.plotlyEmotion(parser, parser.speakerDict.keys(),
                                           "trust")
            arr = [anger, anticipation, disgust, fear, joy, sadness, trust]

    else:
        form = DocumentForm()  # An empty, unbound form

    return render(
        request, 'appTemps/results.html', {
            'arr': arr,
            'score': score,
            'emoarr': emoarr,
            'cwords': cwords,
            'arr2': arr2,
            "person": p,
            "form": form,
            "documents": documents
        })
Example #11
	def search_music(self, btn, store, flag):
		search_key = self.search_entry.get_text().strip()
		if search_key == '':
			return None
		  
		xml_url="http://www.google.cn/music/search?cat=song&q=%s&start=%d&num=%d&cad=player&output=xml" % (search_key, self.count * 20, ONE_PAGE_SIZE)
		song_list = Analyze.getSongList(xml_url, False)
		if not song_list or len(song_list) == 0:
			msgBox = gtk.MessageDialog(parent=self.dialog, flags=gtk.DIALOG_MODAL, type=gtk.MESSAGE_ERROR, 
					buttons=gtk.BUTTONS_CLOSE, message_format=_("No more song!"))
			msgBox.connect("response", lambda a, b: msgBox.hide())
			msgBox.run()
			return None
		  
		if flag:
			store.clear()
		iter = None
		#'', 'Title', 'Album', 'Artist', 'Time', 'ID'
		for song in song_list:
			title = song['name']
			album = song['album']
			artist = song['artist']
			time = song['duration']
			id = song['id']
			iter = store.append([False, title, album, artist, time, id])
		
		if not flag:
			self.search_tree.scroll_to_cell(store.get_path(iter))
		self.count += 1
		if len(song_list) < ONE_PAGE_SIZE:
			self.has_next = False
		else:
			self.has_next = True
Example #12
	def run(self):
		if not self.songs:
			self.listType, self.listId, self.album = self.preference.get_prefs()
			
			#url="http://www.google.cn/music/chartlisting?cat=song&q=chinese_songs_cn&output=xml"
			url="http://www.google.cn/music/album?id=B679efdab97c7afc7&output=xml"
			if self.listType is not None and self.listId is not None:
				if self.listType == 'song':
					url="http://www.google.cn/music/chartlisting?cat=song&q=%s&output=xml" % self.listId
				elif self.listType == 'topic':
					url="http://www.google.cn/music/topiclisting?cat=song&q=%s&output=xml" % self.listId
		  
			print 'Load list from %s' % url
			self.songs = Analyze.getSongList(url, True)
			if not self.songs:
				return None
			for row in self.source.props.query_model:
				entry = row[0]
				self.db.entry_delete(entry)
		
		self.is_run = True
		load_count = 0
		load_all = len(self.songs)
		gobject.idle_add(self.source.notify_progress, True, load_count, load_all)
		for song in self.songs:
			if not self.set_stop:
				time.sleep(0.2) 
				self.add_song(song)
				load_count += 1
				gobject.idle_add(self.source.notify_progress, True, load_count, load_all)
				self.db.commit()
		self.is_run = False
		gobject.idle_add(self.source.notify_progress, False, 1, 1)
Example #13
def query_score():
    username = request.form.get('username')
    password = request.form.get('password')
    term = request.form.get('term')
    # print username
    # print term
    json_score = Analyze.get_score(username, password, term)
    return json_score
Example #14
def kMeans(data_set, score_funcs, k):
    assert (k <= len(data_set))
    results_list = []
    old_centroids = _initializeCentroids(k, data_set)
    clusters = _group_points(data_set, old_centroids)

    results_list.append(Analyze.analyze_clusters(clusters, score_funcs))

    new_centroids = _findMeanVectors(clusters, data_set)
    while not _hasConverged(old_centroids, new_centroids):
        old_centroids = new_centroids
        clusters = _group_points(data_set, new_centroids)
        # keep recording the data
        results_list.append(Analyze.analyze_clusters(clusters, score_funcs))
        new_centroids = _findMeanVectors(clusters, data_set)
    # last item is a repeat:
    return results_list[:-1]
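
kMeans above records a score after every reassignment and stops once the centroids stop moving. A compact self-contained version of that loop on 1-D points; the helpers here are stand-ins, not the module's _group_points/_findMeanVectors:

def tiny_kmeans(points, centroids):
    while True:
        # assign each point to its nearest centroid
        clusters = {c: [] for c in centroids}
        for p in points:
            clusters[min(centroids, key=lambda c: abs(c - p))].append(p)
        # recompute the means; converged when nothing moved
        new = tuple(sorted(sum(m) / len(m) for m in clusters.values() if m))
        if new == centroids:
            return clusters
        centroids = new

print(tiny_kmeans([1.0, 2.0, 9.0, 11.0], (0.0, 10.0)))
# {1.5: [1.0, 2.0], 10.0: [9.0, 11.0]}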
Example #15
def Startsimulation():
	global status
	global Analyze
	Analyze = importlib.reload(Analyze)
	result = Analyze.__main__()
	if not(simType == "Sweep"):
		displayonwindow("Simulation Results", result)
	status = "Simulation Done"
Example #16
 def analyze(self):
     global rawData
     global rawTime
     global pic
     global scan
     rawData, rawTime, pic, scan = Analyze.Analyze(scan, protein, nd2_file,
                                                   Roi_Data_file,
                                                   status_file, time_file,
                                                   n)
     self.analyze_btn.setText('Analyze scan:%d' % scan)
     scan += 1
Example #17
def Render(lastStages, logFilePath=None):
    try:
        if (not isinstance(lastStages,list)):
            lastStages = [lastStages]
        
        Analyze.Initialize(logFilePath)
        
        out = []
        for stage in lastStages:
            stage.Reset()
            out.append(stage.GetOutput())
        
        Analyze.PrintResults()
        return out
    except Exception, e:
#            exc_type, exc_value, exc_traceback = sys.exc_info()
        Analyze.WriteStatus(traceback.format_exc())            
#            traceback.print_exception(exc_type, exc_value, exc_traceback, file=Analyze.GetLog())
#            traceback.print_exception(exc_type, exc_value, exc_traceback, file=sys.stderr)
#            sys.stderr.write("\n"*2)
        raise e
Example #18
    def run(self):
        # First the selected dataset needs to be loaded
        dataset_name = self.data_selection.get()
        if dataset_name == "Iris":
            print("Selecting Iris!")
            data = load_data.load_iris()
        elif dataset_name == "Seeds":
            data = load_data.load_seeds()
        elif dataset_name == "Glass":
            data = load_data.load_glass()
        elif dataset_name == "Banknote":
            data = load_data.load_banknote()
        elif dataset_name == "Customers":
            data = load_data.load_cust_data()

        # Now run the selected clustering algorithm
        score_list = [score_funcs.cluster_sse]
        if self.alg_selection.get() == "K-Means":
            Analyze.analyze(
                data, dataset_name, 10,
                self.build_kMeans_func(*kMeans_params[dataset_name]),
                score_list)
        elif self.alg_selection.get() == "DBSCAN":
            Analyze.analyze(
                data, dataset_name, 10,
                self.build_dbscan_func(*dbscan_params[dataset_name]),
                score_list)
        elif self.alg_selection.get() == "Competitive Learning":
            Analyze.analyze(data, dataset_name, 10,
                            self.build_cl_func(*cl_params[dataset_name]),
                            score_list)
        elif self.alg_selection.get() == "PSO":
            Analyze.analyze(data, dataset_name, 10,
                            self.build_pso_function(*pso_params[dataset_name]),
                            score_list)
        elif self.alg_selection.get() == "ACO":
            Analyze.analyze(data, dataset_name, 10,
                            self.build_aco_func(*aco_params[dataset_name]),
                            score_list)
Example #19
def dbscan(data_pts, radius, minpts, score_funcs=None):
    # labels is a dictionary with points as keys and values as the cluster label
    labels = _cluster(data_pts, radius, minpts)

    # Assign points to cluster making labels keys and values list of points belonging to individual clusters
    clusters = defaultdict(list)
    for key, label in labels.items():
        clusters[label].append(key)

    # Calculate cluster sse
    result = [Analyze.analyze_clusters(clusters, score_funcs)]

    return result
Example #20
def run_optimal():
    # Returns the current state of the agent
    current_state = Monitor.monitor(None, agent)
    # See what moves are available from the current positions
    adj_states = Analyze.analyze(current_state)
    # Get the next action
    desired_action = Planning.optimalPolicy(adj_states, knowledge)
    # Apply Environmental uncertainty
    actual_action = knowledge.action_func(desired_action)
    # Execute onto the environment
    next_state = Execution.execute(adj_states + [current_state], actual_action)

    agent.update(next_state, knowledge.state_value_dict[next_state])
Example #21
def ACO(dataset, iterations, num_clusters, num_ants, beta, prob_cutoff,
        num_elite_ants, decay_rate, q, score_funcs):
    ''' The main function for the ACO algorithm. Takes in the dataset to be clustered, 
        maximum number of iterations, the number of ants to be included, and the score
        functions to be used. Creates individual ants, tracks the pheromone matrix,
        and updates the best clustering found so far. '''

    pheromone_matrix = _initialize_pheromones(dataset, num_clusters)
    ants = _initialize_ants(dataset, num_ants, num_clusters, beta, prob_cutoff,
                            pheromone_matrix)
    best_score = iteration_best_score = float("inf")
    best_clustering = None
    results = []

    #_print_ant_info(ants)

    for iteration in range(iterations):

        #Loop through all data points and have all ants cluster each data point
        for point_number in range(len(dataset)):

            for i, ant in enumerate(ants):

                ant.update_beliefs()

        #After all data points have been assigned to a cluster for all ants, rank the ants by objective function
        rank_info = _rank_ants(ants)
        ants = [ranked_ant[0] for ranked_ant in rank_info.ants_and_scores]

        #Let the elite (best scoring) ants update the pheromone matrix, then update ants' matrices
        pheromone_matrix = _update_pheromones(pheromone_matrix,
                                              ants[0:num_elite_ants],
                                              decay_rate, q)
        _update_ants_pheromones(pheromone_matrix, ants)

        iteration_best_score = rank_info.best_score
        iteration_best_clustering = rank_info.best_clustering

        #If we found a better clustering this iteration, update the global best
        if iteration_best_score < best_score:
            best_score = iteration_best_score
            best_clustering = iteration_best_clustering

        #Reset the ants' memory lists
        _reset_ants(ants)

        #Score the best cluster, and append it to the list of values to be returned
        result = Analyze.analyze_clusters(best_clustering, score_funcs)
        results.append(result)

    return results
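
_update_pheromones is not shown in the source; a plausible evaporate-then-deposit step following the standard ant-system rule, offered only as an assumption about its shape:

def update_pheromones(pheromones, elite_ants, decay_rate, q):
    # evaporate everywhere
    for key in pheromones:
        pheromones[key] *= (1 - decay_rate)
    # deposit along each elite ant's (point, cluster) assignments;
    # a lower (better) score earns a larger deposit
    for assignment, score in elite_ants:
        for point, cluster in assignment.items():
            pheromones[(point, cluster)] += q / score
    return pheromones

tau = update_pheromones({(0, 0): 1.0, (0, 1): 1.0}, [({0: 0}, 2.0)], 0.1, 1.0)
print(tau)  # {(0, 0): 1.4, (0, 1): 0.9}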
Example #22
def setUp():
    global related
    global pathNames
    global all
    Notes.openFiles()
    pathNames = Notes.getPathNames()
    for x in range(len(pathNames)):
        A.organizeHutchNERData(Notes.readXmlFile(x))
        A.isGIrelated()
        related = A.getGIrelated()
        all = A.getAll()
        mapToTxtFileRelated(x)
        # mapToCsv(x)
        A.clear()
        Notes.clear()
Example #23
    def open_plot(self, widget):
        if not self.project: return

        marker = self.project.timeline.ruler.get_playback_marker()
        if not marker: return

        start, duration = marker

        frq, power = self.project.appsinkpipeline.get_spectrum(
            start, start + duration)

        w = Analyze.Analyze()
        w.show_all()
        w.plot_spectrum(frq, power)
Example #24
    def vis_plot_evolution(self, viscontrol, semitone):
        if not self.project: return

        playback_marker = self.project.timeline.ruler.get_playback_marker()
        if not playback_marker: return

        import scipy.interpolate

        start, duration = playback_marker

        ##########
        dialog = gtk.Dialog(
            title="interval",
            flags=gtk.DIALOG_DESTROY_WITH_PARENT | gtk.DIALOG_MODAL,
            buttons=(gtk.STOCK_CANCEL, gtk.RESPONSE_REJECT, gtk.STOCK_OK,
                     gtk.RESPONSE_ACCEPT))
        entry = gtk.Entry()
        entry.set_text("0.1")
        dialog.vbox.add(entry)
        dialog.show_all()
        dialog.run()
        interval = float(entry.get_text())
        dialog.destroy()
        #############

        #		interval = 0.1
        delta = interval / 2.
        steps = 1.0 * duration / delta

        x = []
        y = []
        for step in xrange(int(steps)):
            pos = start + step * delta
            frq, power = self.project.appsinkpipeline.get_spectrum(
                pos - interval / 2., pos + interval / 2.)
            #			frq, power = self.project.appsinkpipeline.get_spectrum(pos,pos+interval)

            spline = scipy.interpolate.InterpolatedUnivariateSpline(
                frq, power, None, [None, None], 1)

            lower = Math.semitone_to_frequency(semitone - 0.5)
            upper = Math.semitone_to_frequency(semitone + 0.5)
            total = spline.integral(lower, upper)

            x.append(pos)
            y.append(total)

        w = Analyze.Analyze()
        w.show_all()
        w.simple_plot(x, y)
Example #25
 def measure_start(self):
     if self.adjust == 1:  #Adjust gain?
         self.meas_adjust()
     else:
         self.date = ephem.now().tuple()  #Date for FITS-file
         self.receiver.start()
         self.sig_time = 0
         self.ref_time = 0
         self.totpowTime = 0
         self.config.set('CTRL', 'abort', '0')
         self.config.set('CTRL', 'state', 'integrating')
         with open(self.configfil, 'wb') as configfile:
             self.config.write(configfile)
         index = 0
         start = time.time()
         while index < self.loops:
             self.set_index(index)
             if self.switched == 1:
                 self.measure_switch_in()
             else:
                 self.loops = 1
                 self.measure_tot_pow()
                 self.counter = 0
                 self.sigCount = 0
                 self.refCount = 0
             if int(self.config.get('CTRL', 'abort')) != 1:
                 tc = Analyze(self.sigCount, self.refCount, index,
                              self.fftSize, self.c_freq, self.samp_rate,
                              self.switched, self.user)
             index += 1
         stop = time.time()
         print "Total time: "
         print stop - start
         edit = 0
         if int(self.config.get('CTRL', 'abort')) != 1:
             td = Finalize(index, self.fftSize, self.c_freq, self.samp_rate,
                           edit, self.sig_time, self.ref_time,
                           self.switched, self.totpowTime, self.user,
                           self.date)
         self.receiver.stop()
         self.receiver.wait()
         files = glob.glob('/tmp/ramdisk/*')
         for f in files:
             if f.endswith(self.index):
                 os.remove(f)
             else:
                 continue
         self.config.set('CTRL', 'state', 'ready')
         with open(self.configfil, 'wb') as configfile:
             self.config.write(configfile)
Example #26
def mapToCsv(num):
    index = A.getListOfGITRelatedTerms()
    # for x in range(len(related)):
    temp = {
        "probability": pd.Series([""], index=index),
        "context": pd.Series([""], index=index),
    }
    df = pd.DataFrame(temp)
    for token in related:
        for prob in index:
            if token[0] == prob:
                df.set_value(prob, 'context', token[2])
                df.set_value(prob, 'probability', token[1])
        temp = os.path.join(csv, pathNames[num]) + ".csv"
        df.to_csv(temp)
Example #27
	def get_playback_uri(self, entry):
		if not entry:return None
		url = self.db.entry_get (entry, rhythmdb.PROP_LOCATION)
	
		if not url.endswith('.mp3'):
			print 'Load the google music:%s' % url
			url = Analyze.getSongByURL(url)['url']
			
			if url is not None:
				self.db.set(entry, rhythmdb.PROP_LOCATION, url)
				print 'Get google song url: %s' % url
				return url
			else:
				return None
		else:
			return url
Example #28
    def load_lyrics(self, title, artist, location, lyrics_url):
        start_time = time.time() * 1000

        self.lyrics_is_loaded = False
        self.lyrics_is_txt = True
        self.liststore.clear()
        self.liststore.append([0, _("Loading lyric ......"), "00:00.00"])
        genreRe = re.compile("^S[a-zA-Z0-9]{16}")
        # result = genreRe.match(genre)

        def loader_cb(data):
            if data is None:
                self.load_default(title, artist, location)
            else:
                self.loadData(data)
                self.lyrics_is_loaded = True
                end_time = time.time() * 1000
                print "Loading lyrics took %f ms" % (end_time - start_time)

        if lyrics_url is not None:
            print "Load lyrics from google: %s" % lyrics_url

            # lyrics = Analyze.getLyrics(lyrics_url)
            l = rb.Loader()
            l.get_url(lyrics_url, loader_cb)

            return

        filePath = self.find_file(title, artist, location)
        if filePath is not None:
            print "Load lyrics from file: %s" % filePath
            lyrics = Analyze.getLyricsByFile(filePath)
            if lyrics is None:
                self.load_default(title, artist, location)
            else:
                self.loadData(lyrics)
                self.lyrics_is_loaded = True
            end_time = time.time() * 1000
            print "Loading lyrics took %f ms" % (end_time - start_time)
            return

        self.load_default(title, artist, location)
        # It doesn't seem really necessary to set this:
        # self.lyrics_is_loaded = True

        end_time = time.time() * 1000
        print "Loading lyrics took %f ms" % (end_time - start_time)
Example #29
def strategy_bollinger(d_data, ldt_timestamps):
    ls_symbols = list(d_data['close'].keys())
    df_index_bollinger = BollingerBands.bollinger_bands(d_data['close'], ldt_timestamps,
                                                        ls_symbols=ls_symbols)
    mkt_benchmark = Analyze.get_mktBenchmark(ldt_timestamps)
    mkt_bollinger = BollingerBands.bollinger_bands(mkt_benchmark, ldt_timestamps)

    df_events = copy.deepcopy(df_index_bollinger) * np.NAN
    for i in range(1,len(ldt_timestamps)):
        boll_today = df_index_bollinger.ix[ldt_timestamps[i]]
        boll_yest = df_index_bollinger.ix[ldt_timestamps[i-1]]
        for c_sym in ls_symbols:
            if boll_today[c_sym] <= -2.0 and boll_yest[c_sym] > -2.0 and \
                      mkt_bollinger['$SPX'].ix[ldt_timestamps[i]] >= 1.0:
                df_events[c_sym].ix[ldt_timestamps[i]] = 1

    return df_events
Example #30
		def loader_cb(data):
			topic_list = Analyze.getTopicList(data)
			if topic_list is not None:
				store.clear()
				for topic in topic_list:
					chartName = topic['name'].encode("utf-8")
					chartId = topic['id']
					chartType = topic['type']
					chartPicURL = topic['picURL']
					chartDesc = topic['desc'].encode("utf-8")
					store.append([chartName, chartId, chartType, chartPicURL, chartDesc])
				self.load_label.set_label(_("Loaded"))
			else:
				msgBox = gtk.MessageDialog(parent=self.dialog, flags=gtk.DIALOG_MODAL, type=gtk.MESSAGE_ERROR, 
							buttons=gtk.BUTTONS_CLOSE, message_format=_("No more topic list!"))
				msgBox.connect("response", lambda a, b: msgBox.hide())
				msgBox.run()
				self.load_label.set_label(_("Loaded"))
Example #31
def suggestpapers(myfilename,filename,num):
##read data and find papers similar to those in the given file
  f2 = open(myfilename,'r+',encoding='utf-8')
  mydata = csv.reader(f2)
  mypapers=[]
  for paper in mydata:
    mypapers.append([paper[0],paper[1],paper[2],paper[3],paper[4]])
  f2.close()
  del mypapers[0]
    
  similars = []
  for paper in mypapers:
    f1 = open(filename,'r+',encoding='utf-8')
    rawdata = csv.reader(f1)
    similars.append([paper,Analyze.similarpapers(paper,rawdata,num)])
    f1.close()
  
  return similars
Example #32
	def add_song(self,song):
		try:
			aSong = Analyze.getSongByURL(song['url'])
			if not aSong: 
				return
			entry = self.db.entry_new(self.entry_type, aSong['url'])
			print song
			print aSong
			self.db.set(entry, rhythmdb.PROP_TITLE, song['name'])
			self.db.set(entry, rhythmdb.PROP_ARTIST, song['artist'])
			self.db.set(entry, rhythmdb.PROP_ALBUM, song['album'])
			self.db.set(entry, rhythmdb.PROP_DURATION, song['duration'])
			#self.db.set(entry, rhythmdb.PROP_TRACK_NUMBER, song['track_number'])
			self.db.set(entry, rhythmdb.PROP_GENRE, self.n2str(aSong['genre']))
			self.db.set(entry, rhythmdb.PROP_COMMENT, 
				'id=%s\nimage=%s\nlyrics=%s' % 
				(song['id'], self.n2str(aSong['image']), self.n2str(aSong['lyrics'])))
			#self.db.set(entry, rhythmdb.PROP_IMAGE, aSong['image'])
		except Exception, e:
			traceback.print_exc()
Example #33
def pltCitCollab(filename):
####read data from csv file, plot the collaboration-citation data
  f1 = open(filename,'r+',encoding='utf-8')
  rawdata = csv.reader(f1)
  count = sum(1 for row in rawdata)
  f1.close()

  print(str(count)+' papers analyzed.')

  f1 = open(filename,'r+',encoding='utf-8')
  rawdata=csv.reader(f1)
  data = Analyze.citacollab(rawdata,count)
  f1.close()

  pyp.plot(data[1],data[0],'ro')
  pyp.xlabel('Citations')
  pyp.ylabel('Number of co-authors')
  pyp.show()
  pyp.close()
  return data
Example #34
def pltCitClarity(filename):
##read data from csv file, plot the clarity-citations data
  f1 = open(filename,'r+',encoding='utf-8')
  rawdata = csv.reader(f1)
  count = sum(1 for row in rawdata)
  f1.close()

  print(str(count)+' papers analyzed.')
  
  f1 = open(filename,'r+',encoding='utf-8')
  rawdata=csv.reader(f1)
  data = Analyze.citaclarity(rawdata,count)
  f1.close()
  
  pyp.plot(data[0],data[1],'ro')
  pyp.xlabel('Abstract length')
  pyp.ylabel('Citations')
  pyp.show()
  pyp.close()
  return data
Example #35
def pltCitRate(filename):
##read data from csv file, plot average citations per paper against papers published
  f1 = open(filename,'r+',encoding='utf-8')
  rawdata=csv.reader(f1)
  authordict = Analyze.dictauthor(rawdata)
  f1.close()

  data = numpy.zeros((2,len(authordict)))
  i=0
  for author in authordict.keys():
      data[0,i] = authordict[author]['NumPapers']
      data[1,i] = authordict[author]['Citations']/authordict[author]['NumPapers']
      i += 1
      
  pyp.plot(data[0],data[1],'ro')
  pyp.xlabel('Number of papers published')
  pyp.ylabel('Average citations per paper')
  pyp.show()
  pyp.close()
  return data
Example #36
def competitive_learning(data_set, eta, num_clusters, iterations, score_funcs):
    ''' The main competitive learning algorithm. Creates a two layer network,
        then trains the weights of the network by updating the weights of the
        node with the strongest output for each training example '''

    #Initialize variables
    num_inputs = len(
        data_set[0])  # Number of inputs is equal to the number of features
    weight_layer = Layer.Layer(num_inputs, num_clusters, eta)
    results = []

    for iteration in range(iterations):

        #Train the network, score the resulting clustering, append the score
        #to the list of scores, and move on to next iteration
        weight_layer = _train_network(data_set, weight_layer, num_clusters)
        clustering = _cluster(data_set, weight_layer)
        result = Analyze.analyze_clusters(clustering, score_funcs)
        results.append(result)

    return results
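
The Layer update itself is not shown; competitive learning's winner-take-all rule moves only the unit closest to the input, by eta * (x - w). A tiny stand-in sketch:

def train_step(weights, x, eta):
    # pick the winning unit (smallest squared distance to x)
    winner = min(range(len(weights)),
                 key=lambda i: sum((wi - xi) ** 2 for wi, xi in zip(weights[i], x)))
    # move only the winner toward the input
    weights[winner] = [wi + eta * (xi - wi) for wi, xi in zip(weights[winner], x)]
    return winner

weights = [[0.0, 0.0], [1.0, 1.0]]
print(train_step(weights, [0.9, 0.8], eta=0.5), weights)
# 1 [[0.0, 0.0], [0.95, 0.9]]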
Example #37
def gen_report_individual(individual,dic_notas,dic_horarios):
    dataframe=an.create_dataframe(individual)
    print(dataframe)
    score=schdl_value(individual,dic_notas,dic_horarios)
    print("VALUE: "+str(score))
    genotype=Crossover.create_genotype(individual)
    materias=[]
    for gen in genotype:
        info=gen.split("-")
        materia=info[0]
        if materia not in materias:
            paralelo=info[2]
            profesor=dic_horarios[materia][str(paralelo)]["Profesor"]
            rank=get_rank(profesor,materia,dic_notas)
            nota=np.mean(np.array(dic_notas[materia][profesor]))
            print("COURSE: "+materia)
            print("PROFESSOR: "+profesor)
            print("RANK: "+str(rank))
            print("GRADE: "+str(nota))
            print("▀"*30)    
            materias.append(materia)           
Example #38
def runTraining(folderName):
    trainingFiles = []  # list of training files
    trainingData = []  # data under study
    pos = 0
    neg = 0

    for dir in os.walk(folderName):
        dir = dir[1]  #get directory list
        for i, f in enumerate(dir):
            actualFolder = os.path.join(folderName, f)
            for files in os.walk(
                    actualFolder):  # walk each folder under training
                trainingFiles.append(files[2])
                for j, tf in enumerate(
                        files[2]):  # walk the files in each *star folder
                    tempP, tempN = (an.analyzeFile(
                        os.path.join(actualFolder, tf)))
                    pos = (pos + tempP) / 2
                    neg = (neg + tempN) / 2
                trainingData.append([pos, neg])
    return trainingData
Example #39
def main():
  if len(sys.argv)==1:
    print '\n/-----\\\n|ERROR|===>>> Try inputs of the form python Analyze.py foo.p where foo.p is a pickle... not literally.\n\-----/\n'
    sys.exit(0)
  picklename=sys.argv[1]
  SET=[]
  x=np.array([0.0000000020128,0.000000002108307,0.000000002205033,0.000000002304373,0.000000002404483,0.000000002506767,0.000000002609444,0.000000002714023,0.000000002819564,0.000000002925106,0.000000003032267,0.000000003139197,0.000000003247665,0.000000003356696,0.000000003465344,0.00000000357534,0.000000003684857,0.000000003795656,0.000000003905897,0.000000004017367,0.000000004129093,0.000000004240215,0.000000004352506,0.000000004464105])
  x=[i*10**9 for i in x]
  basename='./20130506_06/A'
  filenumbers=range(101)[40:71]#[6:20]
  filenumbers=[('%03d'%filenumber) for filenumber in filenumbers]
  filenames=[basename+filenumber+'.dat' for filenumber in filenumbers]
  #GET RID OF THIS NEXT!
  #filenames=filenames[12:15]
  for filename in filenames: print filename
  print ''
  for filename in filenames:
    
    success=False
    while success==False:
      TR=Analyze.Analyze(filename)
      succ=False
      while succ==False:
        IN=raw_input("y to accept, or 'skip' or 'repeat': ")
        if IN=='y':
          succ=True
          SET.append(TR)
          success=True
        elif IN=='skip':
          succ=True
          success=True
        elif IN=='repeat':
          succ=True
        elif IN!='':
          print "What!?"
        
  pickle.dump(SET,open( picklename, 'w'))
  
  pb.show()
Example #40
def doubleresults(request):
    global arr, score, emoarr, cwords, arr2, p, cmpd
    form = DocumentForm(request.POST, request.FILES)
    if form.is_valid():
        newdoc = Document(docfile=request.FILES['docfile'])
        newdoc.save()
        parser = TextParsing.TextParsing(request.FILES['docfile'].name)
        analyzer = Analyze.Analyze()
        analyzer.popDialogEmotion(parser)
        analyzer.setDialogSentiment(parser)
        anger = analyzer.plotlyEmotion(parser, parser.speakerDict.keys(),
                                       "anger")
        anticipation = analyzer.plotlyEmotion(parser,
                                              parser.speakerDict.keys(),
                                              "anticipation")
        disgust = analyzer.plotlyEmotion(parser, parser.speakerDict.keys(),
                                         "disgust")
        fear = analyzer.plotlyEmotion(parser, parser.speakerDict.keys(),
                                      "fear")
        joy = analyzer.plotlyEmotion(parser, parser.speakerDict.keys(), "joy")
        sadness = analyzer.plotlyEmotion(parser, parser.speakerDict.keys(),
                                         "sadness")
        trust = analyzer.plotlyEmotion(parser, parser.speakerDict.keys(),
                                       "trust")
        arr1 = [anger, anticipation, disgust, fear, joy, sadness, trust]
        cmpd1 = analyzer.plotlyCompoundSenti(parser)
    return render(
        request, 'appTemps/doubleresults.html', {
            'arr': arr,
            'score': score,
            'emoarr': emoarr,
            'cwords': cwords,
            'arr2': arr2,
            "person": p,
            'arr1': arr1,
            'cmpd1': cmpd1,
            'cmpd': cmpd
        })
Example #41
def alternativeAbundance(matchf, pds, topn = 50):
    MASTER = '/home/anthill/fzheng/home/scripts/termanal_updating'

    # generate the original structures of topn hits
    # pds = General.changeExt(pdb, 'pds')
    # cmd = [MASTER + '/createPDS', '--type', 'query', '--pdb', pdb, '--pds', pds]
    # cmd = ' '.join(cmd)
    # os.system(cmd)
    cmd = [MASTER + '/master', '--query', pds, '--matchIn', matchf, '--structOut', General.getBase(pds) + 'tmp', '--outType', 'match', '--bbRMSD', '--topN', str(topn)]
    cmd = ' '.join(cmd)
    os.system(cmd)

    # for these N structures, calculate RMSD between any two. should be O(N^2)
    odir = os.getcwd()
    ndir = General.getBase(pds) + 'tmp'
    os.chdir(ndir)
    mpdbs = glob.glob('*.pdb')
    mpdbs.sort()
    RMSDs = []
    print 'calculating pairwise RMSD'
    for i in range(len(mpdbs)-1):
        for j in range(i+1, len(mpdbs)):
            mol1, mol2 = parsePDB(mpdbs[i]), parsePDB(mpdbs[j])
            bbAtoms1, bbAtoms2 = mol1.select('backbone').copy(), mol2.select('backbone').copy()
            trans = calcTransformation(bbAtoms2, bbAtoms1)
            bbAtoms2_t = applyTransformation(trans, bbAtoms2)
            rmsd = calcRMSD(bbAtoms1, bbAtoms2_t)
            RMSDs.append(round(rmsd, 3))
    print 'finish calculating RMSD'
    os.chdir(odir)
    # now calculate the average Z-score of all the rmsds of the query
    RMSDs = np.array(RMSDs)
    qRMSD = Analyze.readColumn(matchf, 0, top = topn)
    qRMSD = np.array([float(x) for x in qRMSD])
    meanRMSD, stdRMSD = np.mean(RMSDs), np.std(RMSDs)
    Z_qRMSD = (qRMSD - meanRMSD) / stdRMSD
    return round(np.median(Z_qRMSD), 3)
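
The final step above scores how tight the query's match RMSDs are relative to the pairwise RMSDs among the top hits. That Z-score step in isolation, with made-up values:

import numpy as np

RMSDs = np.array([0.8, 1.1, 0.9, 1.3, 1.0])  # pairwise, hit vs. hit
qRMSD = np.array([0.4, 0.5, 0.6])            # query vs. each hit
Z = (qRMSD - np.mean(RMSDs)) / np.std(RMSDs)
print(round(float(np.median(Z)), 3))         # negative => tighter than average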
Example #42
if args.conres != None:
	tempfile2 = General.changeExt(args.m, 'seqcontext.fasta2')
	tempfh2 = open(ldir + '/' + tempfile2, 'w')

# output file names
nr_matchf = args.outh + '_' + args.m
nr_seqf = General.changeExt(nr_matchf, 'seq')
nr_env = None
oenv = General.changeExt(args.m, args.env)
if os.path.isfile(oenv):
	nr_env = General.changeExt(nr_matchf, args.env)


# write a custom .fasta file
for match in matches:
	match_region_indices = Analyze.index_from_match(match)
	central_index = match_region_indices[args.cres - 1]

	match_id = General.getBase( General.removePath( match.split()[1] ) )
	fullsequence = database[match_id]
	if central_index - args.wd < 1:
		seqcontext = fullsequence[0:(2 * args.wd + 1)]
	elif central_index + args.wd > len(fullsequence):
		seqcontext = fullsequence[-(2 * args.wd + 1):]
	else:
		seqcontext = fullsequence[(central_index - args.wd - 1):(central_index + args.wd)]
	tempfh.write('>match:'+str(matchind)+'\n'+seqcontext+'\n')

	if args.conres != None:
		con_index = match_region_indices[args.conres -1]
		if con_index - args.wd < 1:
Example #43
        if homol.split()[0] == sid:
            exclude = homol.split()[1:]
            break
            
# do it on local directories
odir = os.getcwd()
ldir = General.createLocalSpace()
# copy .match and .seq
sub.call(['cp', matchf, seqf, ldir])
os.chdir(ldir) 

tmphead = args.head

if args.rmsd != None:
    if noHomoNow == 0:
        tmphead = Analyze.trimByRMSD(matchf, -1, tmphead, 'th1', args.rmsd, sorted = False, homo = exclude, homo_log = True)
        noHomoNow = 1
    else:
        tmphead = Analyze.trimByRMSD(matchf, -1, tmphead, 'th1', args.rmsd, sorted = False)
if args.smart:
    if len(segments) == 1:
        smartcut = mustpress.rmsdEff(segments, 20, 0.9)
    else:
        if max(resnums) - min(resnums) < 8: # this is considered close, even there is gap between two segments
            smartcut = mustpress.rmsdEff(segments, 20, 0.9) - 0.2 # this is about 0.5
        else:
            smartcut = mustpress.rmsdEff(segments, 15, 1.1) # this is about 0.9
    if noHomoNow == 0:
        tmphead = Analyze.trimByRMSD(matchf.replace(args.head, tmphead), 0, tmphead, 'ts', smartcut, homo = exclude, homo_log = True)
        noHomoNow = 1
    else:
Example #44
 def StartProcess(self):
     name = self.__class__.__name__
     if (len(self._properties)>0):
         sargs = ["%s = %s" %(str(x),self._properties[x]) for x in sorted(self._properties.keys())]
         name = "%s [%s]" % (name, string.join(sargs,", "))
     Analyze.StartProcess(name)
Example #45
for seqf in seqfs:
	pdbf = General.changeExt( seqf.replace(args.head + '_', ''), 'pdb')
	if not os.path.isfile(pdbf):
		print(pdbf + ' doesn\'t exist!')
		continue

	outf = General.changeExt(pdbf, args.o)

	if args.wgap != None: # specific to gap
		assert args.conR == False, 'wgap and conR cannot be specified simultaneously'
		dirname = General.getBase(pdbf)
		pdbf = args.wgap + '/' + dirname + '/'+ pdbf

	index = PDB.findPositionInPDB(pdbf, resnum, cid)
	aacol = Analyze.readColumn(seqf, index, top = args.uplimit)

	if args.conR: # should contacting residue be constrained?
		conid = General.getBase(seqf).split('_')[-1]
		ccid, cresnum = conid[0], conid[1:]
		cindex = PDB.findPositionInPDB(pdbf, cresnum, ccid)
		cres = PDB.getResByInd(pdbf, ccid, cresnum).getResname()
		cres = PDB.t2s(cres)
		caacol = Analyze.readColumn(seqf, cindex, top = args.uplimit)

	if args.env != None: # environment corrected counts
		envf = General.getBase(seqf.replace(args.head, args.envhead)) + '.' + args.env
		if not os.path.isfile(envf):
			print(envf + ' doesn\'t exist!')
			continue
Example #46
import glob
import Analyze

dscdat = glob.glob('*.dsc50.TR*.dat')
dscdat.sort()

oldpath = '/home/anthill/fzheng/home/designScore/allfeatures_individual/'
for d in dscdat:
	modelname = d.split('.')[-2]
	oldfile = oldpath + 'allfeatures.' + modelname + '.dat'
	newdsc = Analyze.readColumn(d, -1)
	ad = d.replace('dsc', 'abd')
	sd = d.replace('dsc', 'ssc')
	newabd = Analyze.readColumn(ad, -1)
	newssc = Analyze.readColumn(sd, -1)
	newdsc.insert(0, 'new_designscore')
	newabd.insert(0, 'new_abundance')
	newssc.insert(0, 'new_structurescore')
	newfile = modelname + '.dat'
	newfh = open(newfile, 'w')
	array = open(oldfile).readlines()
	for i in range(len(array)):
		outstr = '\t'.join(array[i].split()[0:3] + [newdsc[i], newabd[i], newssc[i]])
#		outstr = '\t'.join(array[i].split()[0:3] + [newdsc[i]])
		newfh.write(outstr + '\n')
Example #47
def run():
    try:
        while True:
            # checks if there are any new messages
            if mess_util.list_after(
                    Main.NEWEST_MESSAGE_ANALYZED_ID) is not None:
                current_message = mess_util.list()[0]  # newest message
                text = current_message.text.lower()
                name = current_message.name
                at_bot = '@' + Main.BOT.name
                # checks if the bot is mentioned
                if at_bot.lower() in text:
                    # print(text)
                    Analyze.analyze_message(text, name)  # analyzes the message
                Main.NEWEST_MESSAGE_ANALYZED_ID = current_message.id  # sets the new analyzed id
            # if it is time to check for the day's events (8:30 by default) or
            # if the bot just started
            if not Main.checked_events and datetime.datetime.now(
            ) > CHECK_TIME and datetime.datetime.now() < CHECK_TIME_END:
                Functions.check_date()  # checks if any events have passed
    # enters terminal command mode
    except KeyboardInterrupt:
        Log.log_debug(str(datetime.datetime.now()) + " >> KeyboardInterrupt")
        command = str(input("What would you like to do?\n")).lower()
        # reads the chats that have not been read yet
        if command == 'read':
            Log.log_debug(str(datetime.datetime.now()) + " >> Messages Read")
            if mess_util.list_after(Main.NEWEST_MESSAGE_READ_ID) is not None:
                for m in mess_util.list_after(Main.NEWEST_MESSAGE_READ_ID):
                    print(m.name + ': ' + m.text)
                Main.NEWEST_MESSAGE_READ_ID = mess_util.list()[0].id
        # post into the group as the bot
        elif command == 'post':
            message = str(
                input(
                    'What would you like to say? (type "cancel" to cancel message)\n'
                ))
            if message.lower() == 'cancel':
                run()
            Log.log_debug(
                str(datetime.datetime.now()) + " >> Manual Posting: " +
                message)
            Functions.post_message(message)
        # create a new event
        elif command == 'create event':
            date = str(input('Enter the date of the event\n')).split('/')
            name = str(input('Enter the name of the event\n'))
            desc = str(input('Enter the description of the event\n'))
            if len(date[2]) != 4:
                date[2] = '20' + date[2]
            response = Functions.create_event(name, int(date[2]), int(date[1]),
                                              int(date[0]), desc)
            print(response)
        elif command == 'list events':
            response = Functions.list_events()
            print(response)
        elif command == 'delete event':
            name = str(input('Enter the name of the event\n'))
            Functions.delete_event(name)
            print("Event Deleted")
        elif command == 'create reminder':
            day = str(input('Enter the day for the reminder\n'))
            id = str(input('Enter the name of the reminder\n'))
            desc = str(input('Enter the description of the event\n'))
            response = Functions.create_reminder(day, id, desc)
            print(response)
        elif command == 'list reminders':
            response = Functions.list_reminders()
            print(response)
        elif command == 'delete reminder':
            name = str(input('Enter the name of the reminder\n'))
            Functions.delete_reminder(name)
        elif command == 'help':
            Log.log_debug(str(datetime.datetime.now()) + " >> Help")
            print("Possible Commands:")
            for command in console_commands:
                print("\t" + command)
        elif command == 'info':
            Log.log_debug(str(datetime.datetime.now()) + " >> Info")
            command = str(input("What command would you like info about?\n"))
            print(Info.get_info(command))
        # shutdown the system
        elif command == 'shutdown':
            Log.log_debug(str(datetime.datetime.now()) + " >> System Shutdown")
            exit()
        # cancel command mode
        elif command == 'cancel':
            pass
        Log.log_debug(
            str(datetime.datetime.now()) + " >> Continuing operation")
        run()
Example #48
par.add_argument('--o', required = True, help = 'name of output file')
par.add_argument('--uplimit', type = float, help = 'top n sequences to calculate')
args = par.parse_args()

database_path = '/home/anthill/fzheng/home/searchDB/support_bc-30-sc-correct-20141022/others'
odir = os.getcwd()
ldir = General.createLocalSpace()
outfh = open(ldir + '/' + args.o, 'w')

uplimit = args.uplimit
nseq = 0
for match_line in open(args.m):
    if (uplimit != None) and (nseq == uplimit):
        break
    match_line = match_line.strip()
    indices = Analyze.index_from_match(match_line)
    index1, index2 = indices[args.n[0]], indices[args.n[1]]

    target_pds = match_line.split()[1]
    targetid = General.getBase( General.removePath(match_line.split()[1]) )
    env_dict = database_path + '/' + targetid[1:3] + '/' + targetid + '.freedom.db'
    db = shelve.open(env_dict, 'r')

    # extract post-processed pdb files from target_pds
    resfile = database_path + '/' + targetid[1:3] + '/' + General.changeExt( General.removePath(target_pds), 'post.res')
    allres = open(resfile).read().splitlines()
    resid1, resid2 = allres[index1], allres[index2]
    resid1, resid2 = resid1[0] + ',' + resid1[1:], resid2[0] + ',' + resid2[1:]
    fields = ['sumcond', 'crwdnes', 'freedom', 'phi', 'psi', 'aa']
    outfh.write(targetid + '\t')
    if not resid1 in db:
Example #49
def getauthor(filename,rawlabel):
#read file and find the details of the author
  authordict = getauthordict('authordict.csv')
  author = Analyze.idauthor(authordict,rawlabel)

  return author,authordict[author]
Example #50
def cleanfile(infilename,outfilename):
#read file and find the research keywords of the given author
  f1 = open(infilename,'r+',encoding='utf-8')
  rawdata=csv.reader(f1)
  Analyze.cleanfile(rawdata,outfilename)
  f1.close()
Example #51
    def test(self, widget):
        if not self.project: return

        marker = self.project.timeline.ruler.get_playback_marker()
        if not marker: return

        start, duration = marker
        power = numpy.array(
            self.project.appsinkpipeline.get_data(start,
                                                  start + duration))**2.0
        rate = self.project.appsinkpipeline.caps[0]["rate"]

        delta_t = self.builder.get_object("delta_t").get_value()
        decay = self.builder.get_object("decay").get_value()
        separation = self.builder.get_object("beat_separation").get_value()
        #		delta_t = 0.01
        #		decay = 0.5 # time needed to get to 1/e
        # k*decay = 1
        # power(t) = exp(-1)*power(t-decay)
        # power(t) = exp(-k*decay)*power(t-decay)
        # power(t) = exp(-k*delta_t)*power(t-delta_t)
        decay_per_chunk = numpy.exp(-delta_t / decay)
        samples = int(rate * delta_t)

        limit = numpy.average(power[0:samples])
        t = []
        level = []
        lim = []
        tp1 = []

        w = Analyze.Analyze()

        for i in xrange(1, int(len(power) / samples)):
            limit *= decay_per_chunk
            chunk = power[samples * i:samples * (i + 1)]
            avg_power = numpy.average(chunk)
            power_spectrum = Math.windowed_fft(chunk)
            bands = len(power_spectrum)
            frqs = 0.5 * (numpy.arange(bands) + 0.5) * rate / bands
            time = delta_t * i + start
            min_frq_idx = numpy.min(numpy.nonzero(frqs > 80.))
            max_frq_idx = numpy.max(numpy.nonzero(frqs < 1000.))
            min_frq = frqs[min_frq_idx]
            max_frq = frqs[max_frq_idx]
            print frqs[0], min_frq, max_frq, frqs[-1]
            total_power1 = numpy.trapz(power_spectrum[min_frq_idx:max_frq_idx],
                                       frqs[min_frq_idx:max_frq_idx])
            tp1.append(total_power1)

            #			if avg_power>=limit*(1.0+separation):
            #			if avg_power>=limit+separation:
            #				w.add_line(time, color="g")
            if avg_power >= limit:
                limit = avg_power

            t.append(time)
            level.append(avg_power)
            lim.append(limit)

        w.show_all()
        w.simple_plot(numpy.array(t), numpy.array(level), color="r")
        w.simple_plot(numpy.array(t), numpy.array(tp1), color="b")
        w.simple_plot(numpy.array(t), numpy.array(lim), color="g")

        # markers
        for tap in self.project.timeline.rhythm.taps:
            time = tap.get_time()
            if not (start < time < start + duration): continue

            if type(tap.weight) == float: pass
            else:
                w.add_line(time, color="r")
Example #52
# run confind for all structures
for pdb in pdbs:
    pdbf = args.i + '/' + pdb + '.pdb'
    assert os.path.isfile(
        pdbf), 'the pdb file ' + pdbf + ' does not exist; quit...'
    confind_out = changeExt(pdbf, 'conf')
    if os.path.isfile(confind_out):
        continue
    else:  # run confind
        Master.confind(p=pdbf, o=confind_out, rLib=PATH_rotLib)

# find homologs
Homo = []
if args.homof != None:
    Homo = Analyze.findHomo(args.homof)

# contact identification
pos2cons = {}
pos2pdb = {}

odir = os.getcwd()
for pos in positions:
    os.chdir(odir)
    if os.path.isdir(pos):
        os.system('rm -rf %s/*' % pos)
    else:
        os.mkdir(pos)
    os.chdir(pos)
    pid, ipos = pos.split('_')
    icid, iresnum = ipos[0], ''.join(ipos[1:])
Example #53
        if y < 8:
            A.append(round(a / gamesA, 4))
            B.append(round(b / gamesB, 4))
            Change.append(round((b / gamesB) - (a / gamesA), 4))
        else:
            A.append(round(a, 4))
            B.append(round(b, 4))
            Change.append(round(c1, 4))

    PlayerData.append(A)
    PlayerData.append(Change)
    PlayerData.append(B)
    DataList.append(PlayerData)

# Access the SignificanceArray by calling function from Analyze.py
SignificanceArray = Analyze.getSignificance()

success = []

# Calculate the projected success for each player
for player in DataList:
    playerSuccess = 0

    # Iterate through each statistic for each year
    for year in range(0, 3):
        for stat in range(0, 12):

            # Project player success by multiplying player statistic by the calculated significance of that statistic
            playerSuccess += player[year +
                                    2][stat] * SignificanceArray[year * 12 +
                                                                 stat][2]
Example #54
    print "   d : display internal data structures for debugging"
    print
    print "Entering interactive mode. (Press Enter to quit.)"
    interactive = True

    import os
    cwd = os.getcwd()  # get current working dir
    if os.path.basename(cwd) == "Checker" : # is cwd the folder where this pgm lives?
        # then, move up one level; don't want to deposit junk here!
        os.chdir(os.pardir)
    print "Current path is ", os.getcwd()

    filename = raw_input("Type filename (relative to current path) to check: ")
    options = raw_input("Type options (any or none of  vaxnd): ")
    while filename != "" :
        result = Analyze.main(filename, options)
        if result and html :
            generateHtml(filename)
        print
        repeat = raw_input("Press  Enter  to repeat with this file; Press  q  to quit: ")
        if repeat == "q" : filename = ""

else :  # it's a command line startup with all info supplied
    if sys.argv[1][0] == "-" :
        filename = sys.argv[2]
        if "n" in sys.argv[1] :  html = False
        result = Analyze.main(filename, sys.argv[1])
    else :
        filename = sys.argv[1]
        result = Analyze.main(filename)