Пример #1
0
def insert_data_markov(sentence_id, sentence, channel_id):
    """Tokenize *sentence* and store its word trigrams in ``markov_chain``.

    The sentence is split into space-separated tokens (wakati-gaki, e.g.
    "吾輩 は 猫 で ある") and every run of three consecutive words is
    inserted as one Markov-chain row, tagged with the sentence and channel.
    All inserts happen in a single transaction; on any error the
    transaction is rolled back and the exception is printed.
    """
    db = Psgr()
    try:
        # Everything below runs inside one transaction.
        db.begin()

        tokenizer = Analyze()
        # Space-delimited tokenization of the sentence.
        tokens = tokenizer.parse_wakati_sentence(sentence)

        prev2, prev1 = '', ''
        for token in tokens:
            surface = token[0]
            if surface == '':
                continue
            # Only insert once a full (word1, word2, word3) window exists.
            if prev2 and prev1:
                db.execute(
                    "INSERT INTO markov_chain (sentence_id, word1, word2, word3, channel_id) VALUES (%s,%s,%s,%s,%s)",
                    (sentence_id, prev2, prev1, surface, channel_id))
            # Slide the two-word history window forward.
            prev2, prev1 = prev1, surface

        db.commit()
        del tokenizer
    except Exception as e:
        print(e)
        db.rollback()

    del db
Пример #2
0
def results(request):
    # Handle file upload
    if request.method == 'POST':
        print "Results being called"
        form = DocumentForm(request.POST, request.FILES)
        if form.is_valid():
            newdoc = Document(docfile = request.FILES['docfile'])
            newdoc.save()
            parser = TextParsing.TextParsing(request.FILES['docfile'].name)
            analyzer = Analyze.Analyze()
            arr = analyzer.plotlyEmotion(parser, parser.speakerDict.keys(), "trust")
            score = analyzer.getAverageConversationScores(parser)
            convo = analyzer.getConversationScore(parser)["trust"]
            # labels = ['compound', 'neg', 'neu', 'pos']
            # ind = np.arange(4)
            # width = .5
            # plt.bar(ind, score, width)
            # plt.ylabel('Normalized Score')
            # plt.xticks(ind,labels)
            # fig, ax = plt.subplots()
            # plot = ax.bar(ind, score, width)
            # plt.savefig('ConversationAnalysis/media/graph.png')
            # Redirect to the document list after POST
            # return HttpResponseRedirect('MainPage.views.main')
    else:
        form = DocumentForm() # A empty, unbound form

    return render(request, 'appTemps/results.html', {'arr': arr, 'score':score, 'convo':convo})
def results(request):
    """Django view: full emotion/sentiment analysis of an uploaded transcript.

    On a valid POST this parses the uploaded document and refreshes the
    module-level caches named in the ``global`` statement (per-emotion plot
    data, scores, common words, compact person JSON).  On GET it only
    builds an empty form.

    NOTE(review): on a GET or an invalid form the globals keep whatever an
    earlier POST left in them (NameError if none ever ran), and
    ``documents`` in the render context is not defined anywhere in this
    view -- confirm both against the enclosing module.
    """
    # Handle file upload
    global arr, score, emoarr, cwords, arr2, p, cmpd
    if request.method == 'POST':
        # print "Results being called"
        form = DocumentForm(request.POST, request.FILES)
        if form.is_valid():
            newdoc = Document(docfile=request.FILES['docfile'])
            newdoc.save()
            # Parse by file name, then run emotion + sentiment passes.
            parser = TextParsing.TextParsing(request.FILES['docfile'].name)
            analyzer = Analyze.Analyze()
            analyzer.popDialogEmotion(parser)
            analyzer.setDialogSentiment(parser)
            # Compact JSON (no whitespace) for embedding in the template.
            p = json.dumps(analyzer.getPersonData(parser),
                           separators=(',', ':'))
            arr2 = parser.plotlyBarFreqDist("everyone")
            cmpd = analyzer.plotlyCompoundSenti(parser)
            score = analyzer.getAverageConversationScores(parser)
            # One conversation-level score per NRC emotion (eight total).
            emo1 = analyzer.getConversationScore(parser)["anger"]
            emo2 = analyzer.getConversationScore(parser)["anticipation"]
            emo3 = analyzer.getConversationScore(parser)["disgust"]
            emo4 = analyzer.getConversationScore(parser)["fear"]
            emo5 = analyzer.getConversationScore(parser)["joy"]
            emo6 = analyzer.getConversationScore(parser)["sadness"]
            emo7 = analyzer.getConversationScore(parser)["surprise"]
            emo8 = analyzer.getConversationScore(parser)["trust"]
            emoarr = [emo1, emo2, emo3, emo4, emo5, emo6, emo7, emo8]
            cwords = parser.getNCommonWords(50)
            # print freqdist
            # Per-speaker plot data for each emotion.
            # NOTE(review): "surprise" is scored above but has no plot here,
            # so emoarr has 8 entries while arr has 7 -- confirm intended.
            anger = analyzer.plotlyEmotion(parser, parser.speakerDict.keys(),
                                           "anger")
            anticipation = analyzer.plotlyEmotion(parser,
                                                  parser.speakerDict.keys(),
                                                  "anticipation")
            disgust = analyzer.plotlyEmotion(parser, parser.speakerDict.keys(),
                                             "disgust")
            fear = analyzer.plotlyEmotion(parser, parser.speakerDict.keys(),
                                          "fear")
            joy = analyzer.plotlyEmotion(parser, parser.speakerDict.keys(),
                                         "joy")
            sadness = analyzer.plotlyEmotion(parser, parser.speakerDict.keys(),
                                             "sadness")
            trust = analyzer.plotlyEmotion(parser, parser.speakerDict.keys(),
                                           "trust")
            arr = [anger, anticipation, disgust, fear, joy, sadness, trust]

    else:
        form = DocumentForm()  # A empty, unbound form

    return render(
        request, 'appTemps/results.html', {
            'arr': arr,
            'score': score,
            'emoarr': emoarr,
            'cwords': cwords,
            'arr2': arr2,
            "person": p,
            "form": form,
            "documents": documents
        })
Пример #4
0
 def analyze(self):
     """Run Analyze on the current scan and advance to the next one.

     Stores the analysis outputs in module-level globals and updates the
     button label with the scan number just processed.

     NOTE(review): ``protein``, ``nd2_file``, ``Roi_Data_file``,
     ``status_file``, ``time_file`` and ``n`` are module globals not
     visible here -- confirm they are set before this handler fires.
     """
     global rawData
     global rawTime
     global pic
     global scan
     # Analyze the current scan; also returns the (possibly updated) scan.
     rawData, rawTime, pic, scan = Analyze.Analyze(scan, protein, nd2_file,
                                                   Roi_Data_file,
                                                   status_file, time_file,
                                                   n)
     # Show which scan was analyzed, then move on to the next one.
     self.analyze_btn.setText('Analyze scan:%d' % scan)
     scan += 1
    def open_plot(self, widget):
        """Open an Analyze window plotting the spectrum of the marked range.

        Does nothing unless a project is loaded and a playback marker is
        set on the timeline ruler.
        """
        project = self.project
        if not project:
            return

        playback_marker = project.timeline.ruler.get_playback_marker()
        if not playback_marker:
            return

        begin, length = playback_marker

        # Spectrum of the audio between the marker's start and end.
        frequencies, magnitudes = project.appsinkpipeline.get_spectrum(
            begin, begin + length)

        window = Analyze.Analyze()
        window.show_all()
        window.plot_spectrum(frequencies, magnitudes)
    def vis_plot_evolution(self, viscontrol, semitone):
        """Plot how the power of one semitone evolves over the marked range.

        Asks the user for a time interval via a modal dialog, then slides a
        window of that size (half-interval steps) across the playback-marker
        range, integrating the interpolated spectrum over the semitone's
        frequency band at each step, and plots position vs. integrated power
        in an Analyze window.  No-op without a project or playback marker.
        """
        if not self.project: return

        playback_marker = self.project.timeline.ruler.get_playback_marker()
        if not playback_marker: return

        import scipy.interpolate

        start, duration = playback_marker

        ##########
        # Modal dialog asking for the analysis interval in seconds.
        dialog = gtk.Dialog(
            title="interval",
            flags=gtk.DIALOG_DESTROY_WITH_PARENT | gtk.DIALOG_MODAL,
            buttons=(gtk.STOCK_CANCEL, gtk.RESPONSE_REJECT, gtk.STOCK_OK,
                     gtk.RESPONSE_ACCEPT))
        entry = gtk.Entry()
        entry.set_text("0.1")
        dialog.vbox.add(entry)
        dialog.show_all()
        dialog.run()
        # NOTE(review): the response code is ignored, so Cancel still uses
        # the entered text; non-numeric input raises ValueError here.
        interval = float(entry.get_text())
        dialog.destroy()
        #############

        #		interval = 0.1
        # Step by half an interval so consecutive windows overlap 50%.
        delta = interval / 2.
        steps = 1.0 * duration / delta

        x = []
        y = []
        for step in xrange(int(steps)):
            pos = start + step * delta
            # Spectrum of the window centered on pos.
            frq, power = self.project.appsinkpipeline.get_spectrum(
                pos - interval / 2., pos + interval / 2.)
            #			frq, power = self.project.appsinkpipeline.get_spectrum(pos,pos+interval)

            # Linear (k=1) spline so the spectrum can be integrated over
            # an arbitrary frequency band.
            spline = scipy.interpolate.InterpolatedUnivariateSpline(
                frq, power, None, [None, None], 1)

            # Integrate over the half-semitone band around the target note.
            lower = Math.semitone_to_frequency(semitone - 0.5)
            upper = Math.semitone_to_frequency(semitone + 0.5)
            total = spline.integral(lower, upper)

            x.append(pos)
            y.append(total)

        w = Analyze.Analyze()
        w.show_all()
        w.simple_plot(x, y)
Пример #7
0
 def measure_start(self):
     """Run a full measurement session, or just adjust gain if requested.

     Loops ``self.loops`` times, doing either switched or total-power
     measurements, analyzing each pass unless an abort was requested via
     the config file.  Afterwards it finalizes the data, stops the
     receiver, cleans the ramdisk of files for the current index, and
     marks the state 'ready' in the config file.
     """
     if self.adjust == 1:  #Adjust gain?
         self.meas_adjust()
     else:
         self.date = ephem.now().tuple()  #Date for FITS-file
         self.receiver.start()
         # Reset accumulated timing counters for this session.
         self.sig_time = 0
         self.ref_time = 0
         self.totpowTime = 0
         # Publish 'integrating' state so other processes can see it.
         self.config.set('CTRL', 'abort', '0')
         self.config.set('CTRL', 'state', 'integrating')
         with open(self.configfil, 'wb') as configfile:
             self.config.write(configfile)
         index = 0
         start = time.time()
         while index < self.loops:
             self.set_index(index)
             if self.switched == 1:
                 self.measure_switch_in()
             else:
                 # Total-power mode measures once; force a single loop.
                 self.loops = 1
                 self.measure_tot_pow()
                 self.counter = 0
                 self.sigCount = 0
                 self.refCount = 0
             # Skip analysis if an abort was requested via the config file.
             if int(self.config.get('CTRL', 'abort')) != 1:
                 tc = Analyze(self.sigCount, self.refCount, index,
                              self.fftSize, self.c_freq, self.samp_rate,
                              self.switched, self.user)
             index += 1
         stop = time.time()
         print "Total time: "
         print stop - start
         edit = 0
         if int(self.config.get('CTRL', 'abort')) != 1:
             td = Finalize(index, self.fftSize, self.c_freq, self.samp_rate,
                           edit, self.sig_time, self.ref_time,
                           self.switched, self.totpowTime, self.user,
                           self.date)
         self.receiver.stop()
         self.receiver.wait()
         # Remove ramdisk files belonging to the current index.
         # NOTE(review): compares against self.index (set by set_index?),
         # not the local loop counter -- confirm that is intended.
         files = glob.glob('/tmp/ramdisk/*')
         for f in files:
             if f.endswith(self.index):
                 os.remove(f)
             else:
                 continue
         # Publish 'ready' state for other processes.
         self.config.set('CTRL', 'state', 'ready')
         with open(self.configfil, 'wb') as configfile:
             self.config.write(configfile)
Пример #8
0
def analyze_message(sentence, sentence_id, channel_id):
    """Morphologically analyze *sentence* and persist each token.

    Each parsed token is inserted into ``words`` (part-of-speech details,
    conjugations, readings), and linked to the sentence and channel via
    ``sentence_word``.  All inserts run in one transaction: committed on
    success, rolled back (with the error printed) on any exception.

    Fix: the commit previously ran unconditionally after the try/except,
    i.e. even after an exception had already rolled the transaction back;
    it now runs only on the success path.  The unused cursor fetched via
    ``getCur()`` was dropped.
    """
    psgr = Psgr()

    # Analyze the message
    try:
        psgr.begin()

        analyze = Analyze()
        parse_data = analyze.parse_sentence(sentence)
        for data in parse_data:
            # Pad the comma-separated feature list to 9 fields with '*'
            # (MeCab-style features can be shorter for unknown words).
            detail_array = ['*'] * 9
            detail_array[:len(data[1].split(','))] = data[1].split(',')

            # Values to register:
            # word = data[0] # surface form is unavailable here, so use
            # the original (dictionary) form instead.
            word = detail_array[6]
            part_of_speech = detail_array[0]
            # Skip sentence begin/end pseudo-tokens.
            if part_of_speech == "BOS/EOS":
                continue

            part_of_speech_detail1 = detail_array[1]
            part_of_speech_detail2 = detail_array[2]
            part_of_speech_detail3 = detail_array[3]
            conjugate1 = detail_array[4]
            conjugate2 = detail_array[5]
            original = detail_array[6]
            pronunciation1 = detail_array[7]
            pronunciation2 = detail_array[8]

            # Register the word itself.
            values = (word,part_of_speech,part_of_speech_detail1,part_of_speech_detail2,part_of_speech_detail3,conjugate1,conjugate2,original,pronunciation1,pronunciation2)
            sqlcom = "INSERT INTO words (word,part_of_speech,part_of_speech_detail1,part_of_speech_detail2,part_of_speech_detail3,conjugate1,conjugate2,original,pronunciation1,pronunciation2) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s) RETURNING word_id;"
            psgr.execute(sqlcom,values)
            word_id = psgr.lastrowid()

            # Link the word to its sentence and channel.
            values = (sentence_id,word_id,channel_id)
            sqlcom = "INSERT INTO sentence_word (sentence_id, word_id, channel_id) VALUES (%s,%s,%s) RETURNING sentence_word_id;"
            psgr.execute(sqlcom,values)

        # Commit only if every insert succeeded.
        psgr.dbCommit()
    except Exception as err:
        print(err)
        psgr.rollback()

    del psgr
Пример #9
0
def main():
  """Interactively analyze a series of .dat files and pickle the results.

  Usage: ``python Analyze.py foo.p`` -- the single argument is the output
  pickle path.  For each input file the user accepts ('y'), skips, or
  repeats the analysis; accepted results are appended to SET and dumped.

  NOTE(review): ``pb`` (plotted/shown at the end), ``np`` and ``pickle``
  are module globals not visible here; ``x`` is built but unused in this
  function -- confirm against the full module.
  """
  if len(sys.argv)==1:
    print '\n/-----\\\n|ERROR|===>>> Try inputs of the form python Analyze.py foo.p where foo.p is a pickle... not literally.\n\-----/\n'
    sys.exit(0)
  picklename=sys.argv[1]
  SET=[]
  # Fixed wavelength axis (meters), converted to nanometers below.
  x=np.array([0.0000000020128,0.000000002108307,0.000000002205033,0.000000002304373,0.000000002404483,0.000000002506767,0.000000002609444,0.000000002714023,0.000000002819564,0.000000002925106,0.000000003032267,0.000000003139197,0.000000003247665,0.000000003356696,0.000000003465344,0.00000000357534,0.000000003684857,0.000000003795656,0.000000003905897,0.000000004017367,0.000000004129093,0.000000004240215,0.000000004352506,0.000000004464105])
  x=[i*10**9 for i in x]
  # Build file names A040.dat .. A070.dat under the dated directory.
  basename='./20130506_06/A'
  filenumbers=range(101)[40:71]#[6:20]
  filenumbers=[('%03d'%filenumber) for filenumber in filenumbers]
  filenames=[basename+filenumber+'.dat' for filenumber in filenumbers]
  #GET RID OF THIS NEXT!
  #filenames=filenames[12:15]
  for filename in filenames: print filename
  print ''
  for filename in filenames:
    
    # Re-run the analysis until the user accepts or skips this file.
    success=False
    while success==False:
      TR=Analyze.Analyze(filename)
      # Inner loop: keep prompting until a recognized answer is given.
      succ=False
      while succ==False:
        IN=raw_input("y to accept, or 'skip' or 'repeat': ")
        if IN=='y':
          succ=True
          SET.append(TR)
          success=True
        elif IN=='skip':
          succ=True
          success=True
        elif IN=='repeat':
          succ=True
        elif IN!='':
          print "What!?"
        
  pickle.dump(SET,open( picklename, 'w'))
  
  pb.show()
def doubleresults(request):
    """Django view: analyze a second transcript for side-by-side comparison.

    Parses the newly uploaded document into per-emotion plot data
    (``arr1``) and compound sentiment (``cmpd1``), and renders them next
    to the cached results of the previous upload (the module-level
    globals).

    Fix: ``arr1``/``cmpd1`` were only assigned when the form validated,
    so an invalid form raised ``NameError`` at the ``render`` call.  They
    are now initialized up front.

    NOTE(review): the cached globals still raise NameError if no earlier
    view populated them -- confirm the intended call order.
    """
    global arr, score, emoarr, cwords, arr2, p, cmpd
    # Defaults so render() below never hits an unbound name.
    arr1 = None
    cmpd1 = None
    form = DocumentForm(request.POST, request.FILES)
    if form.is_valid():
        newdoc = Document(docfile=request.FILES['docfile'])
        newdoc.save()
        # Parse the uploaded file by name, then run emotion + sentiment.
        parser = TextParsing.TextParsing(request.FILES['docfile'].name)
        analyzer = Analyze.Analyze()
        analyzer.popDialogEmotion(parser)
        analyzer.setDialogSentiment(parser)
        # Per-speaker plot data for each emotion of the new transcript.
        anger = analyzer.plotlyEmotion(parser, parser.speakerDict.keys(),
                                       "anger")
        anticipation = analyzer.plotlyEmotion(parser,
                                              parser.speakerDict.keys(),
                                              "anticipation")
        disgust = analyzer.plotlyEmotion(parser, parser.speakerDict.keys(),
                                         "disgust")
        fear = analyzer.plotlyEmotion(parser, parser.speakerDict.keys(),
                                      "fear")
        joy = analyzer.plotlyEmotion(parser, parser.speakerDict.keys(), "joy")
        sadness = analyzer.plotlyEmotion(parser, parser.speakerDict.keys(),
                                         "sadness")
        trust = analyzer.plotlyEmotion(parser, parser.speakerDict.keys(),
                                       "trust")
        arr1 = [anger, anticipation, disgust, fear, joy, sadness, trust]
        cmpd1 = analyzer.plotlyCompoundSenti(parser)
    return render(
        request, 'appTemps/doubleresults.html', {
            'arr': arr,
            'score': score,
            'emoarr': emoarr,
            'cwords': cwords,
            'arr2': arr2,
            "person": p,
            'arr1': arr1,
            'cmpd1': cmpd1,
            'cmpd': cmpd
        })
    def test(self, widget):
        if not self.project: return

        marker = self.project.timeline.ruler.get_playback_marker()
        if not marker: return

        start, duration = marker
        power = numpy.array(
            self.project.appsinkpipeline.get_data(start,
                                                  start + duration))**2.0
        rate = self.project.appsinkpipeline.caps[0]["rate"]

        delta_t = self.builder.get_object("delta_t").get_value()
        decay = self.builder.get_object("decay").get_value()
        separation = self.builder.get_object("beat_separation").get_value()
        #		delta_t = 0.01
        #		decay = 0.5 # time needed to get to 1/e
        # k*decay = 1
        # power(t) = exp(-1)*power(t-decay)
        # power(t) = exp(-k*decay)*power(t-decay)
        # power(t) = exp(-k*delta_t)*power(t-delta_t)
        decay_per_chunk = numpy.exp(-delta_t / decay)
        samples = int(rate * delta_t)

        limit = numpy.average(power[0:samples])
        t = []
        level = []
        lim = []
        tp1 = []

        w = Analyze.Analyze()

        for i in xrange(1, int(len(power) / samples)):
            limit *= decay_per_chunk
            chunk = power[samples * i:samples * (i + 1)]
            avg_power = numpy.average(chunk)
            power_spectrum = Math.windowed_fft(chunk)
            bands = len(power_spectrum)
            frqs = 0.5 * (numpy.arange(bands) + 0.5) * rate / bands
            time = delta_t * i + start
            min_frq_idx = numpy.min(numpy.nonzero(frqs > 80.))
            max_frq_idx = numpy.max(numpy.nonzero(frqs < 1000.))
            min_frq = frqs[min_frq_idx]
            max_frq = frqs[max_frq_idx]
            print frqs[0], min_frq, max_frq, frqs[-1]
            total_power1 = numpy.trapz(power_spectrum[min_frq_idx:max_frq_idx],
                                       frqs[min_frq_idx:max_frq_idx])
            tp1.append(total_power1)

            #			if avg_power>=limit*(1.0+separation):
            #			if avg_power>=limit+separation:
            #				w.add_line(time, color="g")
            if avg_power >= limit:
                limit = avg_power

            t.append(time)
            level.append(avg_power)
            lim.append(limit)

        w.show_all()
        w.simple_plot(numpy.array(t), numpy.array(level), color="r")
        w.simple_plot(numpy.array(t), numpy.array(tp1), color="b")
        w.simple_plot(numpy.array(t), numpy.array(lim), color="g")

        # markers
        for tap in self.project.timeline.rhythm.taps:
            time = tap.get_time()
            if not start < time and time < start + duration: continue

            if type(tap.weight) == float: pass
            else:
                w.add_line(time, color="r")
Пример #12
0
import Cut
import brightness1
import Analyze
import Action
import BrightAnallyze
from PIL import Image
from PIL import ImageStat
# --- Hardware pin / parameter constants (BCM/board numbering not shown) ---
P_SERVO = 22   # servo control pin
fPWM = 50      # PWM frequency in Hz
a = 10
b = 2
# Motor driver input pins, first bank.
IN1 = 11
IN2 = 13
IN3 = 15
# Motor driver input pins, second bank.
IN21 = 12
IN22 = 16
IN23 = 18
c = [0]
c2 = [0]

# NOTE(review): setup(), setDirection(), take_photo(), Action() and
# BrightAnallyze() are not defined in this file chunk -- presumably
# provided by the imported modules; confirm.  Also, ``Analyze`` is
# imported as a module above but called like a function here, which would
# raise TypeError unless the module is callable -- verify.
setup()

# Main loop: point the servo, wait for operator input, take a photo,
# analyze it and drive the actuators from the result.
while True:
    setDirection(80)
    a = input("please input")
    take_photo()
    X, Y, Z = Analyze(a)
    print(X, Y, Z)
    Action(X, Y)
    BrightAnallyze(Z)
Пример #13
0
def main():
    """Parse a (sample) tweet morphologically and persist it to Postgres.

    Inserts the sentence into ``sentences``, then each analyzed token into
    ``words`` and links it via ``sentence_word`` -- all in one transaction,
    rolled back (with the error printed) on any exception.  The live
    Twitter fetch is commented out; fixed sample data is used instead.
    """
    # Switch the default encoding to utf-8 (Python 2).
    set_default_encoding('utf-8')
    # (A Psgr instance is created further below.)

    # Fetching a tweet -- disabled; fixed sample data is used instead.
    # twt = Twapi()
    # api = twt.twitter_api
    # status = api.home_timeline()[0]
    #
    # sentence = status.text
    # tweet_id = status.id
    # user_id = status.user.screen_name
    # user_name = status.user.name
    sentence = "すもももももももものうち"
    tweet_id = 760430632317038592
    user_id = "snow_moment09"
    user_name = "雪村刹那"

    # Create the database instance.
    psgr = Psgr()
    try:
        # Begin the transaction.
        psgr.begin()
        values = (sentence, tweet_id, user_id, user_name)
        sqlcom = "INSERT INTO sentences (sentence,tweet_id,user_id,user_name) VALUES (%s,%s,%s,%s) RETURNING sentence_id;"
        psgr.execute(sqlcom, values)
        sentence_id = psgr.lastrowid()

        # Morphological analysis of the text.
        analyze = Analyze()
        # print analyze.get_version()
        parse_data = analyze.parse_sentence(sentence.encode('utf-8'))
        for data in parse_data:
            # print data[0],'\t',data[1].split(',')[0]
            # Pad the comma-separated feature list to 9 fields with '*'
            # (features can be shorter for unknown words).
            detail_array = ['*'] * 9
            detail_array[:len(data[1].split(','))] = data[1].split(',')

            # Values to register:
            word = data[0]
            part_of_speech = detail_array[0]
            # Skip sentence begin/end pseudo-tokens.
            if part_of_speech == "BOS/EOS":
                continue

            part_of_speech_detail1 = detail_array[1]
            part_of_speech_detail2 = detail_array[2]
            part_of_speech_detail3 = detail_array[3]
            conjugate1 = detail_array[4]
            conjugate2 = detail_array[5]
            original = detail_array[6]
            pronunciation1 = detail_array[7]
            pronunciation2 = detail_array[8]

            # Register the word itself.
            values = (word, part_of_speech, part_of_speech_detail1,
                      part_of_speech_detail2, part_of_speech_detail3,
                      conjugate1, conjugate2, original, pronunciation1,
                      pronunciation2)
            sqlcom = "INSERT INTO words (word,part_of_speech,part_of_speech_detail1,part_of_speech_detail2,part_of_speech_detail3,conjugate1,conjugate2,original,pronunciation1,pronunciation2) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s) RETURNING word_id;"
            psgr.execute(sqlcom, values)
            word_id = psgr.lastrowid()

            # Link the word to its sentence.
            values = (sentence_id, word_id)
            sqlcom = "INSERT INTO sentence_word (sentence_id,word_id) VALUES (%s,%s) RETURNING sentence_word_id;"
            psgr.execute(sqlcom, values)

        psgr.commit()
        del analyze
    except Exception as e:
        print e
        psgr.rollback()

    del psgr