Example #1
def plot_daq_main():
    """
    main function for plot_daq command-line program
    """

    import matplotlib.pylab as pylab

    # Usage message for the option parser
    usage = """%prog [OPTION]...

    %prog is a simple script for plotting data captured by the daq_acquire
    command-line program. """

    # Set up the command-line option parser (optparse is a module-level import)
    parser = optparse.OptionParser(usage=usage)
    options, args = parser.parse_args()

    # Take the data file from the parsed positional arguments
    datafile = args[0]
    data = pylab.load(datafile)

    t = data[:, 0]
    samples = data[:, 1:]

    for i in range(samples.shape[1]):
        pylab.figure(i)
        pylab.plot(t, samples[:, i])
        pylab.xlabel('t (sec)')
        pylab.ylabel('(V)')
        pylab.title('channel %d' % (i, ))
    pylab.show()
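
Note: pylab.load and pylab.save were removed from matplotlib years ago. A minimal sketch of the same plot loop for current matplotlib, assuming the capture file is whitespace-delimited text with time in column 0, could read it with numpy.loadtxt instead:

import sys

import matplotlib.pyplot as plt
import numpy as np


def plot_daq_main():
    """Plot each channel of a daq_acquire capture; column 0 is time (s)."""
    # numpy.loadtxt covers the text-file role of the removed pylab.load.
    data = np.loadtxt(sys.argv[1])
    t = data[:, 0]
    samples = data[:, 1:]
    for i in range(samples.shape[1]):
        plt.figure(i)
        plt.plot(t, samples[:, i])
        plt.xlabel('t (sec)')
        plt.ylabel('(V)')
        plt.title('channel %d' % i)
    plt.show()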
Example #2
def findbruteforcefit(kit=None, *args, **kwargs):
    # Converted from MATLAB (findbruteforcefit.m); the pylab.save/pylab.load
    # calls stand in for the original MATLAB save/load of the 'kit' variable.
    if kit != 1:
        pylab.save('bftfile', 'kit')
    else:
        pylab.load('bftfile', 'kit')
        kit['tightnesssettings'] = copy(
            settingsfromtightness(kit['tightnesssettings']['scalartightness']))

    random(1)
    ABCexact = array([8572.0553, 3640.1063, 2790.9666])
    ABCguess = array([8572.0, 3640.0, 2790.0])

    # Perturb the initial ABC guess slightly.
    ABCguess = multiply(ABCguess, array([0.99, 1.01, 0.99]))
    kit = trimkit(kit,
                  kit['tightnesssettings']['bruteforce']['numexperimentlines'])
    flimits = array([min(kit['onedpeakfs']), max(kit['onedpeakfs'])])
    theoryset = linesforbruteforce2(
        ABCguess, flimits,
        kit['tightnesssettings']['bruteforce']['numtheorylines'],
        max(kit['onedpeakhsunassigned']))

    # Build the line list used for the brute-force search.
    linestouse = {}
    linestouse['lines'] = copy(theoryset)
    linestouse['heighttouse'] = copy('sixKweakpulsestrength')
    linestouse['fitdescriptor'] = copy('made up brute force fit')
    linestouse['ABCxxxxx'] = copy(array([ABCguess, 0, 0, 0, 0, 0]))
    # addC13swithlinelist

    ABClist = [ABCguess]
    dAdBdC = array([0.01, 0.01, 0.01])
    linestouse['ABClist'] = copy(ABClist)
    linestouse['dAdBdC'] = copy(dAdBdC)
    kit['findfitsettings'] = copy(kit['tightnesssettings']['bruteforce'])
    allfits = findfits(linestouse, kit)
    fit = pullbest(allfits, kit)
    fit['patternType'] = copy('bruteforce')
    kit = addfittokit(kit, fit)
    displaykitwithfits(kit)
    return fit
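
Note: the MATLAB-style save/load of a named workspace variable has no counterpart in current matplotlib. One way to checkpoint a kit-like dict with numpy instead is sketched below; the file name bftfile.npy and the dict contents are placeholders only:

import numpy as np

kit = {'tightnesssettings': {'scalartightness': 0.5}}  # placeholder contents

# numpy wraps the dict in a 0-d object array; .item() unwraps it again on load.
np.save('bftfile.npy', kit, allow_pickle=True)
kit = np.load('bftfile.npy', allow_pickle=True).item()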
Example #3
from datetime import datetime
import json
from matplotlib import pylab as plt
import re
import pandas

key_words = [
    "Corporate Social Responsibility", "Corporate Responsibility",
    "Sustainability", "Sustainable development", "Corporate Accountability",
    "Crating Shared Value", "Citizenship", "Social Responsibility",
    "Environmental, Social and Governance", "shared value", "social",
    "responsibility", "CSR ", "CR ", "ESG"
]

data = plt.load('company-tweets.npy', allow_pickle='TRUE').tolist()
# data = plt.load('competitor-tweets.npy', allow_pickle='TRUE').tolist()
found = {}

for key in data:
    print(key, len(data[key]))
    found[key] = []
    for tweet in data[key]:
        #print(tweet)
        for kw in key_words:
            kw = kw.lower()
            if hasattr(tweet, 'retweeted_status'):
                if kw in tweet.retweeted_status.full_text.lower():
                    text = tweet.retweeted_status.full_text.replace("\n", "  ")
                    # print(f"found{kw} in retweet {tweet.retweeted_status.full_text} of {key}")
                    found[key].append({
                        "tweet": text,
Example #4
File: baseline2.py  Project: mfitzp/toolz
    #    ySpec = cspline1d_eval(sCoef,x)
    noise = N.zeros_like(spec)
    for i in range(len(xSpec) - 1):
        noise[xSpec[i]:xSpec[i + 1]] = ySpec[i]

    return noise, minNoise
    #return xSpec, ySpec

if __name__ == '__main__':

    #    ms = P.load('J15.csv', delimiter = ',')
    #    x, ms = interpolate_spectrum(ms)
    #    ms = normalize(topHat(ms, 0.01))
    #    ms = roundLen(ms)
    #    ms = P.load('N3_Norm.txt')
    ms = P.load('exampleMS.txt')
    print(ms.max(), len(ms))
    #    x = N.arange(len(ms))
    #    print ms.shape, x.shape

    #    x = N.r_[0:10]
    #    dx = x[1]-x[0]
    #    newx = N.r_[-3:13:0.1]  # notice outside the original domain
    #
    #    y = N.sin(x)
    #    cj = cspline1d(y)
    #    newy = cspline1d_eval(cj, newx, dx=dx,x0=x[0])
    #    print x.shape, newx.shape, cj.shape
    numSegs = int(len(ms) * 0.0015)
    print "Numsegs: ", numSegs
    #    x, msSm = SplitNSmooth(ms, numSegs, 5)
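
Note: the loop above simply holds each ySpec value constant across its xSpec segment. Assuming contiguous segments starting at index 0, the same step-fill can be written with numpy directly; the xSpec/ySpec values below are made-up placeholders:

import numpy as np

# Placeholder boundary indices and per-segment noise estimates, standing in
# for the xSpec/ySpec arrays used in the excerpt above.
xSpec = np.array([0, 5, 12, 20])
ySpec = np.array([0.10, 0.30, 0.20])

# Repeat each segment's value over its index range, equivalent to the
# noise[xSpec[i]:xSpec[i + 1]] = ySpec[i] loop.
noise = np.repeat(ySpec, np.diff(xSpec))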
Example #5
            "Corporate Responsibility",
            "Sustainability",
            "Sustainable development",
            "Corporate Accountability",
            "Crating Shared Value",
            "Citizenship",
            "Social Responsibility",
            "Environmental, Social and Governance",
            "shared value",
            "social",
            "responsibility",
            "CSR ",
            "CR ",
            "ESG"]

data = plt.load('women-ceo-tweets.npy', allow_pickle='TRUE').tolist()
found = []


for dictionary in data:
    for key in dictionary:
        for tweet in dictionary[key]:
            for kw in key_words:
                kw = kw.lower()
                if hasattr(tweet, 'retweeted_status'):
                    if kw in tweet.retweeted_status.full_text.lower():
                        text = tweet.retweeted_status.full_text.replace("\n", "  ")
                        # print(f"found{kw} in retweet {tweet.retweeted_status.full_text} of {key}")
                        found.append({"tweet":text, "user":key, "date":tweet.created_at})
                else:
                    if kw in tweet.full_text.lower():
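
Note: both tweet-filtering examples branch on retweeted_status to decide which text to search. A small sketch of that selection factored into helpers, assuming the tweet objects expose the same full_text attributes used above:

def tweet_text(tweet):
    # Prefer the original tweet's full text when this is a retweet.
    if hasattr(tweet, 'retweeted_status'):
        return tweet.retweeted_status.full_text
    return tweet.full_text


def matches_any(tweet, key_words):
    # Case-insensitive substring match against every keyword.
    text = tweet_text(tweet).lower()
    return any(kw.lower() in text for kw in key_words)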