Example #1
def search():

    # validate screen_name
    screen_name = request.args.get("screen_name", "")
    if not screen_name:
        return redirect(url_for("index"))

    # get screen_name's tweets
    tweets = helpers.get_user_timeline(screen_name)
    if tweets is None:
        sys.exit("Screen name doesn't exist or the user's tweets are private")

    # absolute paths to lists
    positives = os.path.join(sys.path[0], "positive-words.txt")
    negatives = os.path.join(sys.path[0], "negative-words.txt")
    analyzer = Analyzer(positives, negatives)

    positive, negative, neutral = 0, 0, 0
    for tweet in tweets:
        score = analyzer.analyze(tweet)
        if score > 0:
            positive += 1
        elif score < 0:
            negative += 1
        else:
            neutral += 1

    # generate chart
    chart = helpers.chart(positive, negative, neutral)

    # render results
    return render_template("search.html", chart=chart, screen_name=screen_name)
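
The search() examples on this page all lean on the same project scaffolding: Flask's request/redirect/url_for/render_template, plus helpers.get_user_timeline (returns a list of tweet strings, or None on failure) and helpers.chart. They also assume a CS50-style word-list Analyzer. A minimal sketch of such a class, assuming newline-delimited word lists with ';'-prefixed comment lines, might look like the following (note that some variants below instead store the result in an analyzer.score attribute, or take no constructor arguments):

import nltk

class Analyzer:
    """Scores text by counting positive and negative words."""

    def __init__(self, positives, negatives):
        # load the two newline-delimited word lists, skipping ';' comments
        self.positives, self.negatives = set(), set()
        for path, words in ((positives, self.positives), (negatives, self.negatives)):
            with open(path) as file:
                for line in file:
                    word = line.strip()
                    if word and not word.startswith(";"):
                        words.add(word.lower())

    def analyze(self, text):
        # +1 per positive token, -1 per negative token, 0 otherwise
        tokenizer = nltk.tokenize.TweetTokenizer(preserve_case=False)
        return sum((token in self.positives) - (token in self.negatives)
                   for token in tokenizer.tokenize(text))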
Example #2
def main():
  # Init colorama
  colorama.init(autoreset=True)

  # Load the messages
  messages = []
  messages += MessageParser.read('corpus/2013-09-16 tot 2014-01-24.messages')
  messages += MessageParser.read('corpus/2014-01-26 tot 2014-02-12.messages')
  messages += MessageParser.read('corpus/2014-02-07 tot 2014-03-03.messages')
  messages += MessageParser.read('corpus/2014-03-03 tot 2014-05-03.messages')
  messages += MessageParser.read('corpus/2014-05-03 tot 2014-09-20.messages')
  messages += MessageParser.read('corpus/2014-09-20 tot 2016-06-13.messages')
  messages += MessageParser.read('corpus/2016-06-13 tot 2018-01-23.messages')

  # Analyze the messages
  analyzer = Analyzer(messages)
  analyzer.analyze()

  # Use the prompt
  prompt = Prompt(messages, analyzer)
  prompt.prompt = '> '
  prompt.cmdloop('\nDone loading! List available commands with "help" or detailed help with "help cmd".')

  # De-init colorama
  colorama.deinit()
Example #3
def search():

    # validate screen_name
    screen_name = request.args.get("screen_name", "")
    if not screen_name:
        return redirect(url_for("index"))

    # get screen_name's tweets
    tweets = helpers.get_user_timeline(screen_name, count=100)
    if not tweets:
        return redirect(url_for("index"))

    # absolute paths to lists
    positives = os.path.join(sys.path[0], "positive-words.txt")
    negatives = os.path.join(sys.path[0], "negative-words.txt")
    analyzer = Analyzer(positives, negatives)

    positive_count = negative_count = neutral_count = 0
    for tweet in tweets:
        score = analyzer.analyze(tweet)
        if score > 0:
            positive_count += 1
        elif score < 0:
            negative_count += 1
        else:
            neutral_count += 1

    # convert counts to percentages
    positive = positive_count / len(tweets) * 100
    negative = negative_count / len(tweets) * 100
    neutral = neutral_count / len(tweets) * 100

    # generate chart
    chart = helpers.chart(positive, negative, neutral)

    # render results
    return render_template("search.html", chart=chart, screen_name=screen_name)
Example #4
def analyze(self):
    for i, line in enumerate(self.segment):
        if i == 0:
            self.vicar.name = line
        else:
            analyzer = Analyzer(line, self.vicar)
            analyzer.analyze()
Example #5
def search():
    positive, negative, neutral = 0, 0, 0

    # validate screen_name
    screen_name = request.args.get("screen_name", "").lstrip("@")
    if not screen_name:
        return redirect(url_for("index"))

    # get screen_name's tweets
    tweets = helpers.get_user_timeline(screen_name)
    if not tweets:
        return redirect(url_for("index"))

    # absolute paths to lists
    positives = os.path.join(sys.path[0], "positive-words.txt")
    negatives = os.path.join(sys.path[0], "negative-words.txt")
    analyzer = Analyzer(positives, negatives)

    for text in tweets:  # the helper returns a list of tweets, each as a str
        analyzer.analyze(text)  # feed the tweet to the analyzer
        if analyzer.score > 0:
            positive += 1
            print(analyzer.score, colored(text, "green"))
        elif analyzer.score < 0:
            negative += 1
            print(analyzer.score, colored(text, "red"))
        else:
            neutral += 1
            print(analyzer.score, colored(text, "yellow"))

    # generate chart
    chart = helpers.chart(positive, negative, neutral)

    # render results
    return render_template("search.html", chart=chart, screen_name=screen_name)
Example #6
def search():

    # validate screen_name
    screen_name = request.args.get("screen_name", "")
    if not screen_name:
        return redirect(url_for("index"))

    # get screen_name's tweets
    tweets = helpers.get_user_timeline(screen_name, 100)
    if not tweets:
        return redirect(url_for("index"))

    totaltweets = len(tweets)
    # absolute paths to lists
    positives = os.path.join(sys.path[0], "positive-words.txt")
    negatives = os.path.join(sys.path[0], "negative-words.txt")

    # instantiate analyzer
    analyzer = Analyzer(positives, negatives)
    positive, negative, neutral = 0.0, 0.0, 0.0
    for tweet in tweets:
        score = analyzer.analyze(tweet)
        if score > 0:
            positive += 1
        elif score < 0:
            negative += 1
        else:
            neutral += 1

    # convert counts to percentages
    positive = positive / totaltweets * 100
    negative = negative / totaltweets * 100
    neutral = neutral / totaltweets * 100

    # generate chart
    chart = helpers.chart(positive, negative, neutral)

    # render results
    return render_template("search.html", chart=chart, screen_name=screen_name)
Example #7
def search():

    # validate screen_name
    screen_name = request.args.get("screen_name", "")
    if not screen_name:
        return redirect(url_for("index"))

    # get screen_name's tweets
    tweets = helpers.get_user_timeline(screen_name, count=100)
    if not tweets:
        return redirect(url_for("index"))

    # get analyzer object and set counter to 0
    positive, negative, neutral = 0.0, 0.0, 0.0
    analyzer = Analyzer(os.path.join(sys.path[0], "positive-words.txt"),
                        os.path.join(sys.path[0], "negative-words.txt"))

    # iterate through all the tweets
    for tweet in tweets:
        # calculate the score once, then bump the matching counter
        score = analyzer.analyze(tweet)
        if score > 0.0:
            positive += 1.0
        elif score < 0.0:
            negative += 1.0
        else:
            neutral += 1.0
    # map all to percentages; compute the total first so each share
    # is taken of the original counts, not of already-converted values
    total = positive + negative + neutral
    positive = positive / total * 100.0
    negative = negative / total * 100.0
    neutral = neutral / total * 100.0

    # generate chart
    chart = helpers.chart(positive, negative, neutral)

    # render results
    return render_template("search.html", chart=chart, screen_name=screen_name)
Example #8
def search():

    # validate screen_name
    screen_name = request.args.get("screen_name", "")
    if not screen_name:
        return redirect(url_for("index"))

    # get screen_name's tweets
    tweets = helpers.get_user_timeline(screen_name)

    positive, negative, neutral = 0.0, 0.0, 0.0

    # if no tweets were returned, go back to the index
    if tweets is None:
        return redirect(url_for("index"))

    # absolute paths to lists
    positives = os.path.join(sys.path[0], "positive-words.txt")
    negatives = os.path.join(sys.path[0], "negative-words.txt")

    # initialize analyzer
    analyzer = Analyzer(positives, negatives)

    # count tweets, analyzing at most 100 of them
    # (a single loop also covers the case of exactly 100 tweets)
    n = len(tweets)
    for i in range(min(n, 100)):
        score = analyzer.analyze(tweets[i])
        if score > 0.0:
            positive += 1.0
        elif score < 0.0:
            negative += 1.0
        else:
            neutral += 1.0

    # generate chart by calling helpers.chart
    chart = helpers.chart(positive, negative, neutral)

    # render results: fill search.html with the chart and screen_name
    return render_template("search.html", chart=chart, screen_name=screen_name)
Example #9
def parse(path, f=None):
    p = Parser(path=path)
    p.parse_file()
    a = Analyzer(parser=p)
    a.analyze()
    j = Packer(analyzer=a)
    if f is None:
        return j.pack()
    else:
        j.pack(f=f)
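
A possible invocation, assuming Packer.pack() returns the packed result when no file object is given and writes to f otherwise (the file names here are made up):

packed = parse("example.input")      # get the packed result back
with open("example.out", "w") as f:
    parse("example.input", f=f)      # or stream it to a file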
Example #10
def processFile(path, f=None):
    p = Parser(path=path)
    p.parseFile()
    a = Analyzer(parser=p)
    a.analyze()
    j = Packer(analyzer=a)
    if f is None:
        return j.pack()
    else:
        j.pack(f=f)
Example #11
File: ocr.py Project: xsyann/ocr
def __trainModel(self, verbose=False, trainRatio=.5):
    if verbose:
        analyzer = Analyzer(self.__model, self.__dataset, trainRatio)
        analyzer.start()
    if self.__dataset.trainSampleCount > 0:
        self.__model.train(self.__dataset.trainSamples, self.__dataset.trainResponses)
    if verbose:
        analyzer.stop()
        analyzer.analyze()
        print(analyzer)
Example #12
def search():

    # validate screen_name
    screen_name = request.args.get("screen_name", "").lstrip("@")
    if not screen_name:  # if missing redirect to index
        return redirect(url_for("index"))

    # absolute paths to lists
    positives = os.path.join(sys.path[0], "positive-words.txt")
    negatives = os.path.join(sys.path[0], "negative-words.txt")

    # instantiate analyzer
    analyzer = Analyzer(positives, negatives)

    # get screen_name's most recent 100 tweets
    tweets = helpers.get_user_timeline(screen_name, 100)

    # redirect to index if the timeline could not be fetched
    if tweets is None:
        return redirect(url_for("index"))

    positive, negative, neutral = 0.0, 0.0, 0.0

    # tweets holds a batch of tweets: split each tweet into words,
    # run every word through the Analyzer, total the per-tweet score,
    # then count how many tweets are positive/negative/neutral
    totaltweets = 0
    for tweet in tweets:
        score = 0
        totaltweets += 1
        sentence = nltk.word_tokenize(tweet)
        for word in sentence:
            score += analyzer.analyze(word.lower())

        if score > 0.0:
            positive += 1
        elif score < 0.0:
            negative += 1
        else:
            neutral += 1

    positive = positive / totaltweets * 100
    negative = negative / totaltweets * 100
    neutral = neutral / totaltweets * 100

    # generate chart
    chart = helpers.chart(positive, negative, neutral)

    # render results
    return render_template("search.html", chart=chart, screen_name=screen_name)
Example #13
def search():

    # validate screen_name
    screen_name = request.args.get("screen_name", "").lstrip("@")
    if not screen_name:
        return redirect(url_for("index"))

    # get screen_name's tweets
    tweets = helpers.get_user_timeline(screen_name)

    # absolute paths to lists
    positives = os.path.join(sys.path[0], "positive-words.txt")
    negatives = os.path.join(sys.path[0], "negative-words.txt")

    # instantiate analyzer
    analyzer = Analyzer(positives, negatives)

    positive = 0.0
    negative = 0.0
    neutral = 0.0

    # If invalid twitter user, redirects to index page
    if tweets is None:
        return redirect(url_for("index"))

    # analyze at most 100 of the user's tweets
    # (covers users with fewer than, exactly, or more than 100 tweets)
    for i in range(min(len(tweets), 100)):
        score = analyzer.analyze(tweets[i])

        if score > 0.0:
            positive += 1.0
        elif score < 0.0:
            negative += 1.0
        else:
            neutral += 1.0

    # generate chart
    chart = helpers.chart(positive, negative, neutral)

    # render results
    return render_template("search.html", chart=chart, screen_name=screen_name)
Example #14
def processFile(path):
    try:
        p = Parser(path=path)
        p.parseFile()
        a = Analyzer(parser=p)
        a.analyze()
        j = Packer(analyzer=a)
        return j.pack()
    except Exception:
        # report the offending file before exiting
        print(path)
        exit(1)
Example #16
    def start(self, filename: str):
        self.log.debug('analyse file: ' + filename)
        astmodif = AstModifier(filename)
        # get origin AST
        originTree = astmodif.origin()
        self.log.info('origin: ' + astmodif.dump(originTree))
        # simplify the AST
        astmodif.simplify()
        self.log.info('simplified: ' + astmodif.dump(astmodif.simpast))

        # analyse
        analyzer = Analyzer()
        analyzer.analyze(astmodif.simpast)
Example #17
def main():
    """
	Main function of packet scanner when running from cmdline.
	"""
    #ps = PackageScanner()
    #packages = ps.getInstalledPackages()
    #print(packages)
    #ps.saveScanResults()

    an = Analyzer()
    an.loadFromFile(config.PKG_SCAN_DIR / config.PKG_SCAN_FILE)
    #an.loadFromPackageCont(packages)
    an.analyze()
    an.saveAnalysisResults()
Example #18
def test_gun(self, x):
    self.make_gun(x)
    generator = SFGenerator(self)
    generator.gen_af()
    generator.gen_sf7()
    core = SFCore(generator)
    core.run()
    analyzer = Analyzer(core)
    analyzer.analyze()
    # analyzer.plot_efield(False, True)
    freq = analyzer.info['f'] / self.freq - 1
    flat = analyzer.info['flat'] - 1
    y = np.append(freq, flat)
    return y
Example #19
def get_scores(tweets):
    # absolute paths to lists
    positives = os.path.join(sys.path[0], "positive-words.txt")
    negatives = os.path.join(sys.path[0], "negative-words.txt")

    # instantiate analyzer
    analyzer = Analyzer(positives, negatives)

    # initial scores
    n_pos = 0.0
    n_neg = 0.0
    n_neut = 0.0

    # iterate through tweets
    for t in tweets:
        score = analyzer.analyze(t)
        if score > 0.0:
            n_pos += 1
        elif score < 0.0:
            n_neg += 1
        else:
            n_neut += 1

    # final scores
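    # N_TWEETS is assumed to be a module-level constant (e.g. N_TWEETS = 100);
    # it is not defined in this snippet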
    n_pos /= N_TWEETS
    n_neg /= N_TWEETS
    n_neut /= N_TWEETS

    return n_pos, n_neg, n_neut
Example #20
def search():

    # validate screen_name
    screen_name = request.args.get("screen_name", "")
    if not screen_name:
        return redirect(url_for("index"))

    # get screen_name's tweets
    tweets = helpers.get_user_timeline(screen_name, 100)
    
    positives = os.path.join(sys.path[0], "positive-words.txt")
    negatives = os.path.join(sys.path[0], "negative-words.txt")

    # instantiate analyzer
    analyzer = Analyzer(positives, negatives)


    positive, negative, neutral = 0, 0, 0
    for tweet in tweets:
        score = analyzer.analyze(tweet)
        if score > 0:
            positive += 1
        elif score < 0:
            negative += 1
        else:
            neutral += 1
    

    # generate chart
    chart = helpers.chart(positive, negative, neutral)

    # render results
    return render_template("search.html", chart=chart, screen_name=screen_name)
Example #21
def search():

    # validate screen_name
    screen_name = request.args.get("screen_name", "").lstrip("@")
    if not screen_name:
        return redirect(url_for("index"))

    # get screen_name's tweets
    tweets = helpers.get_user_timeline(screen_name,50)

    if tweets is None:
        return redirect(url_for("index"))

    analyzer = Analyzer()
    tokenizer = nltk.tokenize.TweetTokenizer()
    positive, negative, neutral = 0, 0, 0
    for tweet in tweets:
        # sum the per-word scores to classify the whole tweet
        score = 0
        for word in tokenizer.tokenize(tweet):
            score += analyzer.analyze(word)
        if score > 0:
            positive += 1
        elif score < 0:
            negative += 1
        else:
            neutral += 1

    # generate chart
    chart = helpers.chart(positive, negative, neutral)

    # render results
    return render_template("search.html", chart=chart, screen_name=screen_name)
Example #22
def search():

    # validate screen_name
    # this works because the form is sent using GET, not POST
    screen_name = request.args.get("screen_name", "")
    if not screen_name:
        return redirect(url_for("index"))
    # absolute paths to lists
    positives = os.path.join(sys.path[0], "positive-words.txt")
    negatives = os.path.join(sys.path[0], "negative-words.txt")

    # create object by passing the above arguments
    analyzer = Analyzer(positives, negatives)
    # get screen_name's tweets
    tweets = helpers.get_user_timeline(screen_name)

    # initialize counters
    positive, negative, neutral = 0.0, 0.0, 0.0
    # count the number of positive, negative and neutral tweets
    for tweet in tweets:
        score = analyzer.analyze(tweet)
        if score > 0.0:
            positive += 1
        elif score < 0.0:
            negative += 1
        else:
            neutral += 1
    # generate chart
    chart = helpers.chart(positive, negative, neutral)

    # render results
    return render_template("search.html", chart=chart, screen_name=screen_name)
Example #23
def main():

    # ensure proper usage
    if len(sys.argv) != 2:
        sys.exit("Usage: ./smile @username")

    # absolute paths to lists
    positives = os.path.join(sys.path[0], "positive-words.txt")
    negatives = os.path.join(sys.path[0], "negative-words.txt")

    # instantiate analyzer
    analyzer = Analyzer(positives, negatives)

    # get latest 50 tweets of the user
    tweets = get_user_timeline(sys.argv[1].strip('@'), count=50)
    if tweets is None:
        sys.exit("User doesn't exist or is private")

    # analyze tweets
    for tweet in tweets:
        score = analyzer.analyze(tweet)
        if score > 0.0:
            print(colored(tweet, "green"))
        elif score < 0.0:
            print(colored(tweet, "red"))
        else:
            print(colored(tweet, "yellow"))
Example #24
def search():

    # validate screen_name
    screen_name = request.args.get("screen_name", "")
    if not screen_name:
        return redirect(url_for("index"))

    # get screen_name's tweets
    tweets = helpers.get_user_timeline(screen_name, 100)

    if tweets is None:
        return redirect(url_for("index"))

    # absolute paths to lists (Analyzer expects the word-list paths,
    # not the counters)
    positives = os.path.join(sys.path[0], "positive-words.txt")
    negatives = os.path.join(sys.path[0], "negative-words.txt")
    analyzer = Analyzer(positives, negatives)

    # start neutral at 100% and shift weight as tweets are classified
    positive, negative, neutral = 0.0, 0.0, 100.0
    for tweet in tweets:
        score = analyzer.analyze(tweet)
        if score > 0:
            positive += 1
            neutral -= 1
        elif score < 0:
            negative += 1
            neutral -= 1

    # generate chart
    chart = helpers.chart(positive, negative, neutral)

    # render results
    return render_template("search.html", chart=chart, screen_name=screen_name)
Example #25
def search():

    # validate screen_name
    screen_name = request.args.get("screen_name", "").lstrip("@")
    if not screen_name:
        return redirect(url_for("index"))

    # get screen_name's tweets
    tweets = helpers.get_user_timeline(screen_name)

    # absolute paths to lists for analysis
    positives = os.path.join(sys.path[0], "positive-words.txt")
    negatives = os.path.join(sys.path[0], "negative-words.txt")

    positive, negative, neutral = 0.0, 0.0, 0.0
    analyzer = Analyzer(positives, negatives)
    if tweets is not None:
        for tweet in tweets:
            score = analyzer.analyze(tweet.lower())
            if score > 0.0:
                positive += 1
            elif score < 0.0:
                negative += 1
            else:
                neutral += 1
    else:
        sys.exit("Error")

    # generate chart
    chart = helpers.chart(positive, negative, neutral)

    # render results
    return render_template("search.html", chart=chart, screen_name=screen_name)
Example #26
def classification_preprocess_all_datasets():
    """
    Preprocesses all datasets to be ready for classification task.
    This will include stemming, word correction, lower-casing, hashtag removal, special char removal.
    """
    
    for i in range(len(utils.annotated_datasets)):
        tweetlines = utils.get_dataset(utils.annotated_datasets[i])
        tweets = []
        for line in tweetlines:
            if len(line)>1:
                tweets.append(tweet.to_tweet(line))
        
#        tweets = lower_case(tweets)
        tweets = remove_hastags_and_users(tweets)
        tweets = count_emoticons(tweets)
        tweets = replace_links(tweets)
        tweets = remove_specialchars(tweets)
        tweets = correct_words(tweets)
        tweets = stem(tweets)
        tweets = tokenize(tweets)
        tweets = pos_tag(tweets)
        tweets = count_exclamations(tweets)

        analyzer = Analyzer(utils.annotated_datasets[i], tweets)
        stats = analyzer.analyze()
        print(stats)

        # store tweets in pickles...
        print("Storing pickles...")
        utils.store_pickles(tweets, utils.annotated_datasets[i][24:len(utils.annotated_datasets[i])-4])
Example #27
def get_tweets(screen_name):
    if not screen_name:
        return jsonify({'Response': '404'})
    # absolute paths to the word lists
    positives = os.path.join(sys.path[0], "positive-words.txt")
    negatives = os.path.join(sys.path[0], "negative-words.txt")

    # instantiate analyzer
    analyzer = Analyzer(positives, negatives)

    # fetch tweets and score each one to build a tweet -> score dictionary
    tweets = helpers.get_user_timeline(screen_name)
    if tweets is None:
        return jsonify({'Response': '404'})
    scores = []
    for tweet in tweets:
        scores.append(analyzer.analyze(tweet))

    tweets_with_scores = {}

    for tweet, score in zip(tweets, scores):
        tweets_with_scores[tweet] = score

    return jsonify({
        'Response': '200',
        'tweets_with_scores': tweets_with_scores
    })
Example #28
def search():

    # validate screen_name
    screen_name = request.args.get("screen_name", "").lstrip("@")
    if not screen_name:
        return redirect(url_for("index"))

    # get screen_name's tweets
    tweets = helpers.get_user_timeline(screen_name, 100)

    # absolute paths to lists
    positives = os.path.join(sys.path[0], "positive-words.txt")
    negatives = os.path.join(sys.path[0], "negative-words.txt")

    # classify tweets
    positive, negative, neutral = 0.0, 0.0, 100.0
    positive_count, negative_count, neutral_count = 0, 0, 0

    if tweets is not None and len(tweets) != 0:

        # instantiate analyzer
        analyzer = Analyzer(positives, negatives)

        # analyze tweets
        for tweet in tweets:
            score = analyzer.analyze(tweet)
            if score > 0.0:
                positive_count += 1
            elif score < 0.0:
                negative_count += 1
            else:
                neutral_count += 1

        # get percentages
        positive = positive_count / len(tweets) * 100
        negative = negative_count / len(tweets) * 100
        neutral = neutral_count / len(tweets) * 100

    # generate chart
    chart = helpers.chart(positive, negative, neutral)

    # render results
    if tweets is not None and len(tweets) != 0:
        return render_template("search.html",
                               chart=chart,
                               screen_name=screen_name,
                               tweets=str(len(tweets)),
                               tweet=tweets[0])
    elif tweets is not None:
        # tweets is an empty list
        return render_template("search.html",
                               chart=chart,
                               screen_name=screen_name,
                               tweets=0,
                               tweet="No tweets")
    else:
        return render_template("search.html",
                               chart=chart,
                               screen_name=screen_name,
                               tweets="N/A",
                               tweet="Not an account")
Example #29
def search():

    # validate screen_name; if it's missing, redirect to the start page
    screen_name = request.args.get("screen_name", "")
    if not screen_name:
        return redirect(url_for("index"))

    # get screen_name's tweets
    tweets = helpers.get_user_timeline(screen_name)
    if not tweets:
        return redirect(url_for("index"))

    positives = os.path.join(sys.path[0], "positive-words.txt")
    negatives = os.path.join(sys.path[0], "negative-words.txt")

    # instantiate analyzer
    analyzer = Analyzer(positives, negatives)
    positive, negative, neutral = 0.0, 0.0, 0.0

    # each tweet contributes an equal share to one of the percentages
    for tweet in tweets:
        score = analyzer.analyze(tweet)
        if score > 0.0:
            positive += 100 / len(tweets)
        elif score < 0.0:
            negative += 100 / len(tweets)
        else:
            neutral += 100 / len(tweets)

    # generate chart
    chart = helpers.chart(positive, negative, neutral)

    # render results
    return render_template("search.html", chart=chart, screen_name=screen_name)
Example #30
def search():

    # validate screen_name
    screen_name = request.args.get("screen_name", "").lstrip("@")
    if not screen_name:
        return redirect(url_for("index"))

    # get screen_name's tweets
    tweets = helpers.get_user_timeline(screen_name)
    if tweets is None:
        return redirect(url_for("index"))

    # absolute paths to lists
    positives = os.path.join(sys.path[0], "positive-words.txt")
    negatives = os.path.join(sys.path[0], "negative-words.txt")

    # instantiate analyzer
    analyzer = Analyzer(positives, negatives)
    positive, negative, neutral = 0.0, 0.0, 0.0

    # analyze each tweet (iterating avoids indexing past the end of the list)
    for tweet in tweets:
        score = analyzer.analyze(tweet)
        if score > 0.0:
            print(colored(str(tweet), "green"))
            positive += 1
        elif score < 0.0:
            print(colored(str(tweet), "red"))
            negative += 1
        else:
            print(colored(str(tweet), "yellow"))
            neutral += 1

    # generate chart
    chart = helpers.chart(positive, negative, neutral)

    # render results
    return render_template("search.html", chart=chart, screen_name=screen_name)
Example #31
def search():

    # validate screen_name
    screen_name = request.args.get("screen_name", "")
    if not screen_name:
        return redirect(url_for("index"))

    # get screen_name's tweets
    tweets = helpers.get_user_timeline(screen_name, 100)

    if tweets is None:
        sys.exit("screen name does not exist or is protected")

    positives = os.path.join(sys.path[0], "positive-words.txt")
    negatives = os.path.join(sys.path[0], "negative-words.txt")
    analyzer = Analyzer(positives, negatives)

    positive, negative, neutral = 0.0, 0.0, 0.0
    for tweet in tweets:
        score = analyzer.analyze(tweet)
        if score > 0.0:
            positive += 1
        elif score < 0.0:
            negative += 1
        else:
            neutral += 1

    # generate chart
    chart = helpers.chart(positive, negative, neutral)

    # render results
    return render_template("search.html", chart=chart, screen_name=screen_name)
Example #32
def search():

    # validate screen_name
    screen_name = request.args.get("screen_name", "").lstrip("@")
    if not screen_name:
        return render_template("error index.html")

    # get screen_name's tweets
    tweets = helpers.get_user_timeline(screen_name)

    if not tweets:
        return render_template("error index.html")

    # absolute paths to lists
    positives = os.path.join(sys.path[0], "positive-words.txt")
    negatives = os.path.join(sys.path[0], "negative-words.txt")

    # instantiate analyzer
    analyzer = Analyzer(positives, negatives)
    positive, negative, neutral = 0.0, 0.0, 0.0
    for tweet in tweets:
        score = analyzer.analyze(tweet)
        if score > 0:
            positive += 1
        elif score < 0:
            negative += 1
        else:
            neutral += 1
    # generate chart
    chart = helpers.chart(positive / (positive + negative + neutral),
                          negative / (positive + negative + neutral),
                          neutral / (positive + negative + neutral))

    # render results
    return render_template("search.html", chart=chart, screen_name=screen_name)
Example #33
def search():

    # validate screen_name
    screen_name = request.args.get("screen_name", "").lstrip("@")
    if not screen_name:
        return redirect(url_for("index"))

    # absolute paths to lists
    positives = os.path.join(sys.path[0], "positive-words.txt")
    negatives = os.path.join(sys.path[0], "negative-words.txt")

    # instantiate analyzer
    analyzer = Analyzer(positives, negatives)

    # get screen_name's tweets
    tweets = helpers.get_user_timeline(screen_name, 100)

    # initialise counts
    positive, negative, neutral = 0.0, 0.0, 0.0

    # classify each tweet and bump the matching count
    for tweet in tweets:
        score = analyzer.analyze(tweet)
        if score > 0:
            positive += 1
        elif score < 0:
            negative += 1
        else:
            neutral += 1

    # generate chart
    chart = helpers.chart(positive, negative, neutral)

    # render results
    return render_template("search.html", chart=chart, screen_name=screen_name)
Example #34
def search():

    # validate screen_name
    screen_name = request.args.get("screen_name", "")
    if not screen_name:
        return redirect(url_for("index"))

    # get screen_name's last 50 tweets
    tweets = helpers.get_user_timeline(screen_name, 50)
    if not tweets:
        sys.exit("sorry no tweets found")

    # set up for analyzing and counting
    positive, negative, neutral = 0.0, 0.0, 0.0
    positives = os.path.join(sys.path[0], "positive-words.txt")
    negatives = os.path.join(sys.path[0], "negative-words.txt")
    analyzer = Analyzer(positives, negatives)
    # analyze and count each tweet
    for tweet in tweets:
        curScore = analyzer.analyze(tweet)
        if curScore > 0.0:
            positive += 1
        elif curScore < 0.0:
            negative += 1
        else:
            neutral += 1

    # generate chart
    chart = helpers.chart(positive, negative, neutral)

    # render results
    return render_template("search.html", chart=chart, screen_name=screen_name)
Example #35
def search():

    # validate screen_name
    screen_name = request.args.get("screen_name", "")
    if not screen_name:
        return redirect(url_for("index"))

    # get screen_name's tweets
    count = 100
    tweets = helpers.get_user_timeline(screen_name, count)
    if tweets is None:
        return redirect(url_for("index"))

    positive, negative, neutral = 0.0, 0.0, 0.0

    # Analyzer expects the word-list paths, not the counters
    positives = os.path.join(sys.path[0], "positive-words.txt")
    negatives = os.path.join(sys.path[0], "negative-words.txt")
    analyzer = Analyzer(positives, negatives)

    for tweet in tweets:
        score = analyzer.analyze(tweet)
        if score > 0.0:
            positive += 1
        elif score < 0.0:
            negative += 1
        else:
            neutral += 1

    # generate chart
    chart = helpers.chart(positive, negative, neutral)

    # render results
    return render_template("search.html", chart=chart, screen_name=screen_name)
Example #36
def __trainModel(self, trainRatio=.5, errorsIteration=0, log=None):
    if log:
        analyzer = Analyzer(self.__model, self.__dataset, trainRatio)
        analyzer.start()
    self.__model.train(self.__dataset.trainSamples,
                       self.__dataset.trainResponses)
    samples, responses = self.__dataset.testSamples, self.__dataset.testResponses
    i = 0
    while responses.any() and i < errorsIteration:
        samples, responses = self.__injectErrors(samples, responses)
        i += 1
    self.__dataset.testSamples = samples
    self.__dataset.testResponses = responses
    if log:
        analyzer.stop()
        analyzer.analyze()
        log(str(analyzer))
Example #37
def brute(ciphertext):
    analyzer = Analyzer()
    highest_text = ""
    highest_value = 0
    # try every possible single-byte key
    for i in range(0x100):
        message = xor(ciphertext, chr(i))
        value = analyzer.analyze(message)
        if value > highest_value:
            highest_value = value
            highest_text = message
    print("[Score = %f] %s" % (highest_value, highest_text))
Example #38
def search():

    # validate screen_name
    screen_name = request.args.get("screen_name", "")
    if not screen_name:
        return redirect(url_for("index"))
    
    # get screen_name's tweets
    tweets = helpers.get_user_timeline(screen_name, 100)

    # check whether any tweets were returned
    if tweets is None:
        sys.exit("Error: No tweets were returned!")
    
    # Absolute paths to lists 
    positives = os.path.join(sys.path[0], "positive-words.txt")
    negatives = os.path.join(sys.path[0], "negative-words.txt")
    
    # Initialize an Analyze object
    analyzer = Analyzer(positives, negatives)
    
    # Initialize sentiment analysis counts for chart values
    positive, negative, neutral = 0.0, 0.0, 0.0
    
    # Iterate through tweets 
    for tweet in tweets:
        
        # Return score analysis for tweet
        score = analyzer.analyze(tweet)
        
        # Increment respective sentiment analysis counts
        if score > 0.0:
            positive += 1
        elif score < 0.0:
            negative += 1
        else:
            neutral += 1
        
    # convert sentiment counts to fractions of the total
    num_tweets = positive + negative + neutral

    positive = positive / num_tweets
    negative = negative / num_tweets
    neutral = neutral / num_tweets

    # generate chart
    chart = helpers.chart(positive, negative, neutral)

    # render results
    return render_template("search.html", chart=chart, screen_name=screen_name)
Example #39
def re_analyze():
    """
    Unpickles preprocessed tweets, re-analyzes them, then stores the stats.
    """
    labels = ["random", '"rosenborg"', '"erna solberg"']
    data = {}
    worddata = {}
    for i in range(3):
        tweets = utils.get_pickles(i)
        analyzer = Analyzer(utils.annotated_datasets[i], tweets)

        avg_list, words_list = analyzer.analyze()
        print(avg_list)
        worddata[labels[i]] = words_list
        data[labels[i]] = avg_list
    plotting.average_wordclasses(worddata, "averages")

    plotting.detailed_average_wordclasses(data, "averages2")
Example #40
def search():

    # validate screen_name
    screen_name = request.args.get("screen_name", "")
    if not screen_name:
        return redirect(url_for("index"))

    # get screen_name's tweets
    tweets = helpers.get_user_timeline(screen_name, 100)

    # handle get_user_timeline errors
    if tweets is None:
        return redirect(url_for("index"))

    # absolute paths to lists
    positives = os.path.join(sys.path[0], "positive-words.txt")
    negatives = os.path.join(sys.path[0], "negative-words.txt")

    # instantiate analyzer
    analyzer = Analyzer(positives, negatives)

    # counts for sentiment categories
    pos_count, neg_count, neut_count = 0, 0, 0

    # score and assign sentiment category to each tweet
    for tweet in tweets:
        score = analyzer.analyze(tweet)
        if score > 0.0:
            pos_count += 1
        elif score < 0.0:
            neg_count += 1
        else:
            neut_count += 1

    whole = pos_count + neg_count + neut_count
    positive, negative, neutral = (pos_count / whole), (neg_count / whole), (neut_count / whole)

    # generate chart
    chart = helpers.chart(positive, negative, neutral)

    # render results
    return render_template("search.html", chart=chart, screen_name=screen_name)
Example #41
def search():

    # validate screen_name
    screen_name = request.args.get("screen_name", "").lstrip("@")
    if not screen_name:
        return redirect(url_for("index"))

    # absolute paths to lists
    positives = os.path.join(sys.path[0], "positive-words.txt")
    negatives = os.path.join(sys.path[0], "negative-words.txt")

    # instantiate analyzer
    analyzer = Analyzer(positives, negatives)
    
    # get screen_name's most recent 100 tweets
    tweets = helpers.get_user_timeline(screen_name, 100)
    
    # return to index if screen_name doesn't exist
    if tweets is None:
        return redirect(url_for("index"))

    # create positive, negative and neutral counts
    positive, negative, neutral = 0, 0, 0

    # analyze each tweet & increase the corresponding sentiment count
    for tweet in tweets:
        score = analyzer.analyze(tweet)
        if score > 0.0:
            positive += 1
        elif score < 0.0:
            negative += 1
        else:
            neutral += 1

    # generate chart
    chart = helpers.chart(positive, negative, neutral)

    # render results
    return render_template("search.html", chart=chart, screen_name=screen_name)
Example #42
    def do_analyze(self, args_str):
        parser = self._get_arg_parser()
        parser.add_argument("-o", "--output",
                            metavar="FILE", dest="output",
                            help="specific output dir or file"),
        parser.add_argument("-t", "--threads",
                            type=int, dest="threads", default=multiprocessing.cpu_count(),
                            help="threads number to work [default equal cpu count]")
        parser.add_argument("--plot-all",
                            action="store_true", dest="plot_all", default=False,
                            help="plot all stocks, not only good ones")
        parser.add_argument('codes', nargs='*')
        options = self._parse_arg(parser, args_str)
        if not options:
            return

        schemes = []
        user_options = []
        for k, v in self.config['analyzing']['schemes'].items():
            schemes.append(v)
            user_options.append(v['desc'])
        select = util.select(user_options, 'please select a scheme used for analyzing')
        config = schemes[select]['config']
        logging.info('analyzer config:\n%s' % yaml.dump(config))

        if not self.loaded:
            self.do_load()

        stocks = {}
        if len(options.codes):
            for code in options.codes:
                if code in self.dm.stocks:
                    stocks[code] = self.dm.stocks[code]
                else:
                    logging.error('unknown stock %s', code)
        else:
            stocks = self.dm.stocks

        if not len(stocks):
            logging.error('no stocks found in local database, please run \'load\' command first')
            return

        analyzer = Analyzer(stocks, self.dm.indexes, config)
        logging.info('all %d available stocks will be analyzed' % len(analyzer.stocks))
        logging.info('-----------invoking data analyzer module-------------')
        analyzer.analyze(threads=options.threads)
        logging.info('-------------------analyze done----------------------')

        # collect good stocks into rows (avoid shadowing the built-in list)
        rows = []
        for result in analyzer.good_stocks:
            stock = result.stock
            rows.append({'code': stock.code, 'name': stock.name, 'price': stock.price,
                         'pe': stock.pe, 'nmc': stock.nmc / 10000, 'mktcap': stock.mktcap / 10000,
                         'toavgd5': '%.2f%%' % stock.get_turnover_avg(5),
                         'toavgd30': '%.2f%%' % stock.get_turnover_avg(30),
                         'area': stock.area, 'industry': stock.industry
                         })
        df = DataFrame(rows)
        if df.empty:
            logging.info('no good stocks found')
            return

        logging.info('list of good %d stocks%s:' % (len(analyzer.good_stocks),
                                                    options.output and ' and save plots to %s' % options.output or ''))
        print(df.to_string(
            columns=('code', 'name', 'price', 'pe', 'nmc', 'mktcap', 'toavgd5', 'toavgd30', 'area', 'industry')))
        logging.info('global market status: %s' % analyzer.global_status)

        if options.output:
            logging.info('generating html report...')
            os.makedirs(options.output, exist_ok=True)
            analyzer.generate_report(options.output, only_plot_good=not options.plot_all)
            logging.info('done')
Example #43
#!python
import sys
sys.path.insert(0, '../parser')

from parser import Parser
from mcrl2generator import Mcrl2Generator
from analyzer import Analyzer

print('Traffic Lights v1...')
with open("../trafficlights_v1/trafficlights.im", 'r') as f:
    trafficlights = f.read()
    parser = Parser(trafficlights)
    parser.parse()

    analyzer = Analyzer(parser.im)
    correct = analyzer.analyze()

    if correct:
        generator = Mcrl2Generator(parser.im, analyzer.symbolTable)
        mcrl2 = generator.toMcrl2()
        with open("../trafficlights_v1/trafficlights.mcrl2", 'w') as out:
            out.write(mcrl2)
            print('MCRL2 generated.')
    else:
        exit(1)

print('')
print('')

print('Traffic Lights v2...')
with open("../trafficlights_v2/trafficlights.im", 'r') as f:
Example #44
    tweetlines = utils.get_dataset("test_annotated_data/erna_dataset.tsv")
    tweets = []
    for line in tweetlines:
        if len(line) > 1:
            tweets.append(tweet.to_tweet(line))

#    tweets = lower_case(tweets)
    tweets = remove_hastags_and_users(tweets)
    tweets = count_emoticons(tweets)
    tweets = replace_links(tweets)
    tweets = remove_specialchars(tweets)
    for tweet in tweets:
        print(tweet)
    tweets = correct_words(tweets)
    tweets = stem(tweets)
    tweets = tokenize(tweets)
    for tweet in tweets:
        print(tweet.stat_str())
    tweets = pos_tag(tweets)
    tweets = count_exclamations(tweets)
    for tweet in tweets:
        print(tweet.stat_str())
    
    analyzer = Analyzer("test_annotated_data/erna_dataset.tsv", tweets)
    stats = analyzer.analyze()
    print(stats)
Example #45
class CameraProcessor:
  def __init__(self, camera, gui):
    self.analyzer = Analyzer(gui)
    self.cap = cv2.VideoCapture(camera)
    self.callibration_state = 0
    self.callibration_message = [
      "Please click the plus sign with the circle around it",
      "Please click the plus sign WITHOUT the circle around it",
      "Got it!"
    ]
    self.create_images_dir()
    self.gui = gui
    gui.subscribe('color_threshold', self)
    gui.subscribe('blob_threshold', self)
    gui.subscribe('area_threshold', self)
    
  def create_images_dir(self):
    # create the images directory if it doesn't already exist
    os.makedirs("images", exist_ok=True)
      
  def handle_callibration_click(self, event, x, y, flags, param):
    if event == cv2.EVENT_LBUTTONDOWN:
      self.analyzer.set_callibration(self.callibration_state, (x,y))
      print("Setting callibration point %d to (%d, %d)" % (self.callibration_state, x, y))
      self.callibration_state += 1
      print(self.callibration_message[self.callibration_state])
      
  def callibrate(self):
    cv2.namedWindow('callibration')
    cv2.setMouseCallback('callibration',self.handle_callibration_click)
    print(self.callibration_message[self.callibration_state])
    while self.callibration_state < 2:
      ret, frame = self.cap.read()
      resized = cv2.resize(frame, (800,600))
      cv2.imshow( "callibration" ,resized)
      cv2.waitKey(1)
    cv2.destroyWindow("callibration")
    
  def detected_dice(self):
    if len(self.analyzer.detected_dice) == 0: return None 
    return self.analyzer.detected_dice[0]
  
  def save_frame(self):
    filename = "images/%s.jpg" % str(uuid.uuid4())
    ret, frame = self.cap.read()
    print("Writing %s" % filename)
    cv2.imwrite(filename, frame)
    
  def process(self):
    ret, frame = self.cap.read()
    self.process_image(frame)
    
  def process_image(self, frame):
    resized = cv2.resize(frame, (800,600))
    self.analyzer.analyze(resized, frame)
  
  
  def set_parameter(self, name, value):
    if name == 'color_threshold':
      self.analyzer.color_threshold = value
    elif name == 'blob_threshold':
      self.analyzer.blob_threshold = value
    elif name == 'area_threshold':
      self.analyzer.set_area_threshold(value * 100)
    
  def report_blobs(self):
    self.analyzer.report()
  
  def teardown(self):
    self.cap.release()
    
  def run_test(self):
    while True:
      self.process()
      key = cv2.waitKey(1)
      if key & 0xFF == ord('q'):
        break
      if key & 0xFF == ord('r'):
        self.report_blobs()
      if key & 0xFF == ord('s'):
        self.save_frame()
    self.teardown()
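
A possible way to drive this class, assuming a webcam at index 0 and a gui object that provides the subscribe() hook used above:

processor = CameraProcessor(0, gui)  # camera index is a guess; any cv2.VideoCapture source works
processor.callibrate()               # click the two calibration marks
processor.run_test()                 # press q to quit, r to report, s to save a frame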
Example #46
    def analyze(self, v1, v2=None):
        """analyze(v1, v2=None) 
           Calculates EOF principal components. 
           Sets the following attributes: 
              'principal_components' 
              'eigenvectors'
              'percent_explained' 
        """
        g = v1.getGrid()
        if g is None:
            raise ValueError("u does not have spatial dimensions.")
        latw, longw = g.getWeights()
        latw = latw / numpy.maximum.reduce(latw)
        if self.latweight_choice() == 'none':
            latweight = 1.0 + 0.0 * latw
        elif self.latweight_choice() == 'area':
            latweight = latw 
        else:
            latweight = numpy.sqrt(latw)
        mean_choice = self.mean_choice()
        nr = self.number_of_components()
    
        lat_axis = v1.getLatitude()
        long_axis = v1.getLongitude()
        time_axis = v1.getTime()
        if time_axis is None:
            raise ValueError("v1 has no time dimension")
        nlat = len(lat_axis)
        nlong = len(long_axis)
        ntime = len(time_axis)
        nvar = nlat*nlong
        ax1 = v1(order='...x').getAxisList(omit='time')
        
        if v2 is not None: 
            time_axis_2 = v2.getTime()
            if time_axis_2 is None:
                raise ValueError('v2 has no time dimension')
            if not numpy.allclose(time_axis, time_axis_2):
                raise ValueError('v1 and v2 have incompatible time axes')
            nvar = 2 * nvar
            ax2 = v2(order='...x').getAxisList(omit='time')
        x = numpy.zeros((ntime, nvar), numpy.float64)
    
        for ilat in range(nlat):
            udata = v1.getSlice(latitude=ilat, 
                                required='time',
                                order='t...x', 
                                raw=1)
            if udata.mask is not numpy.ma.nomask:
                raise ValueError('eof cannot operate on masked data')
            if numpy.ma.rank(udata) != 2:
                raise ValueError('eof cannot handle extra dimension')
            udata = udata.filled()
            x[:, ilat*nlong: (ilat+1)*nlong] = \
                self.__adjust(udata, ntime, mean_choice) * \
                              latweight[numpy.newaxis, ilat]
        
        del udata
        if v2 is not None:
            for ilat in range(nlat):
                # slice v2 (not v1) when filling the second half of x
                udata = v2.getSlice(latitude=ilat,
                                    required='time',
                                    order='t...x',
                                    raw=1)
                if udata.mask is not numpy.ma.nomask:
                    raise ValueError('eof cannot operate on masked data')
                if numpy.ma.rank(udata) != 2:
                    raise ValueError('eof cannot handle extra dimension')
                udata = udata.filled()
                x[:, nlat*nlong + ilat*nlong: nlat*nlong + (ilat+1)*nlong] = \
                    self.__adjust(udata, ntime, mean_choice) * \
                              latweight[numpy.newaxis, ilat]
            del udata
      
        a = Analyzer()

        a.analyze(x, nr=nr)
    
        # compute weighted eigenvectors
        evs = a.evec
        pcs = numpy.matrix(x)*numpy.matrix(evs)
        number_of_components = len(a.eval)
        result = []
        for k in range(number_of_components):
            evs1 = numpy.reshape(evs[0:nlong*nlat, k], (nlat, nlong))
            evs1 = evs1 / latweight[:, numpy.newaxis]
            pc = cdms2.createVariable(evs1, copy=0, axes=ax1, 
                          id=v1.id+'_'+str(k+1),
                          attributes = v1.attributes)
            result.append(pc)
            if v2:
                evs1 = numpy.reshape(evs[nlong*nlat:2*nlong*nlat, k], 
                                       (nlat, nlong))
                evs1 = evs1 / latweight[:, numpy.newaxis]
                pc = cdms2.createVariable(evs1, copy=0, axes=ax2, 
                          id=v2.id+'_'+str(k+1),
                          attributes = v2.attributes)
                result.append(pc)

        self.principal_components = result
        self.percent_explained = a.pct
        self.eigenvectors = a.evec
Example #47
def eof(v1, v2=None, nr=4, latweight_choice=None, mean_choice=None):
    g = v1.getGrid()
    if g is None:
        raise ValueError("u does not have spatial dimensions.")
    latw, longw = g.getWeights()
    latw = latw / numpy.maximum.reduce(latw)
    if latweight_choice is None:
        latweight = 1.0 + 0.0 * latw
    elif latweight_choice == "latitude":
        latweight = latw
    else:
        latweight = numpy.sqrt(latw)

    lat_axis = v1.getLatitude()
    long_axis = v1.getLongitude()
    time_axis = v1.getTime()
    if time_axis is None:
        raise ValueError("u has no time dimension")
    nlat = len(lat_axis)
    nlong = len(long_axis)
    ntime = len(time_axis)
    nvar = nlat * nlong
    ax1 = v1.getAxisList(omit="time")
    if v2 is not None:
        nvar = 2 * nvar
        ax2 = v2.getAxisList(omit="time")
    x = numpy.zeros((ntime, nvar), numpy.float64)

    for ilat in range(nlat):
        udata = v1.getSlice(latitude=ilat)
        if udata.mask() is not None:
            raise ValueError("eof cannot operate on masked data")
        udata = udata.filled()
        x[:, ilat * nlong : (ilat + 1) * nlong] = adjust(udata, ntime, mean_choice) * latweight[numpy.newaxis, ilat]

    del udata
    if v2 is not None:
        for ilat in range(nlat):
            # slice v2 (not v1) when filling the second half of x
            udata = v2.getSlice(latitude=ilat)
            if udata.mask() is not None:
                raise ValueError("eof cannot operate on masked data")
            udata = udata.filled()
            x[:, nlat * nlong + ilat * nlong : nlat * nlong + (ilat + 1) * nlong] = (
                adjust(udata, ntime, mean_choice) * latweight[numpy.newaxis, ilat]
            )
        del udata

    a = Analyzer()

    a.analyze(x, nr=nr)

    # Compute weighted eigenvectors
    evs = a.evec
    pcs = numpy.matrix(x) * numpy.matrix(evs)
    number_of_components = len(a.eval)
    result = []
    for k in range(number_of_components):
        evs1 = numpy.reshape(evs[0 : nlong * nlat, k], (nlat, nlong))
        evs1 = evs1 / latweight[:, numpy.newaxis]
        pc = cdms2.createVariable(evs1, copy=0, axes=ax1, id=v1.id + "_" + str(k + 1), attributes=v1.attributes)
        result.append(pc)
        if v2:
            evs1 = numpy.reshape(evs[nlong * nlat : 2 * nlong * nlat, k], (nlat, nlong))
            evs1 = evs1 / latweight[:, numpy.newaxis]
            pc = cdms2.createVariable(evs1, copy=0, axes=ax2, id=v2.id + "_" + str(k + 1), attributes=v2.attributes)
            result.append(pc)
    return a, result
Example #48
from setup import Configuration
cfg = Configuration()
from observables import Observable
from ROOT import TFile

cfg.parametersSet['region'] = 'SR'
cfg.parametersSet['observable'] = 'met'
cfg.parametersSet['lumi'] = '10000' # pb^-1
#cfg.parametersSet['observable'] = Observable(variable='ZpT',formula='z_pt',labelX='Z p_{T} [GeV]')
#cfg.parametersSet['selection'] = '{"leading jet":"jets_pt[0]>120"}'

label = str(hash(frozenset(cfg.parametersSet.items())))

from analyzer import Analyzer
analyzer = Analyzer(cfg, label)
analyzer.analyze()
analyzer.format_histograms()
analyzer.draw('pippo')

histograms = analyzer.formatted_histograms

# manage output
output_file = TFile(cfg.parametersSet.get('output_name', 'plots.root'), 'recreate')
for h in histograms: histograms[h].Write()
output_file.Close()

output_file = TFile('logs/log.root', 'update')
output_file.mkdir(label)
output_file.cd(label)
for h in histograms: histograms[h].Write()