Code Example #1
# coding=utf-8
import pymysql
import urllib.request
import lxml.html
import json
import string
import sys
import time
import random
import re
import io
import os
sys.path.append('/home/hemaobin/workspace/stock')
import mysqldb
import analyzer
analyzer = analyzer.Analyzer()
basic_url = 'http://hq.sinajs.cn/list='
symbollist = [
    'sz000651', 'sz000333', 'sz300104', 'sz300415', 'sh601777', 'sz300051'
]
code_list = ['000651', '000333', '300104', '300415', '601777', '300051']
headers = {'User-Agent': 'gsi'}
stock = mysqldb.StockDatabase()
stock.connectdatabase()
cursor = stock.getcursor()
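# Main loop: build the Sina real-time quote URL for each symbol in the watch list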
while True:
    i = 0
    for symbol in symbollist:
        url = basic_url + symbol
        print(url)
        time.sleep(1)
Code Example #2
File: __init__.py  Project: Me0w1ng/SDNalytics
def init(command=None):
    args = configure_cmdline(command)
    if command is None:
        command = args.command
    single = args.single
    configure_logging()
    logging.debug("Starting sdnalyzer.")

    configuration = None
    config_file_path = "/etc/sdnalytics/sdnalytics.json"

    if not os.path.isfile(config_file_path):
        print "Run sdn-ctl setup before executing sdn-observe or sdn-analyze"
        #"Copy sdnalyzer.default.json to " + config_file_path + " and adapt the file according to your setup. Then start this program again."
        return

    with open(config_file_path) as config_file:
        configuration = json.loads(config_file.read())
        if "connectionString" not in configuration:
            raise Exception(
                "No connection string configured in sdnalyzer.json.")

    store.start(configuration["connectionString"])
    if "api" in configuration:
        if "port" in configuration["api"]:
            api_port = int(configuration["api"]["port"])
        if "username" in configuration["api"]:
            api_username = configuration["api"]["username"]
        if "password" in configuration["api"]:
            api_password = configuration["api"]["password"]

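    # "observe" and "analyze" map onto the "observer" / "analyzer" branches handled below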
    if command == "observe" or command == "analyze":
        command += "r"

    if command == "setup":
        store.init()
        print "Successfully setup the database. You can now use sdn-analyze and sdn-observe monitor your network."
    elif command == "reset":
        store.drop()
        store.init()
        print "Successfully reset the database. All previously gathered data has been discarded."
    elif command == "observer":
        program_state = start_api(command, api_username, api_password,
                                  api_port + 1)

        import observer
        poll_interval = 30
        if "pollInterval" in configuration:
            poll_interval = int(configuration["pollInterval"])

        if "controller" in configuration:
            if "host" in configuration["controller"]:
                controller_host = configuration["controller"]["host"]

            if "port" in configuration["controller"]:
                controller_port = configuration["controller"]["port"]

        program_state.instance = observer.Observer(controller_host,
                                                   controller_port)
        program_state.instance.observe(single, poll_interval, program_state)
    elif command == "analyzer":
        program_state = start_api(command, api_username, api_password,
                                  api_port + 2)
        import analyzer
        program_state.instance = analyzer.Analyzer()
        program_state.instance.analyze(single, program_state)
    elif command == "adhoc":
        import adhoc

        adhoc.run()
    else:
        logging.error("Invalid command {}.".format(command))
    logging.debug("Shut down.")
Code Example #3
File: monitor.py  Project: TakaR81/sm2020
def main():
    # File names
    data_dir = "./data/"
    json_name = data_dir + "data.json"
    csv_name = data_dir + "score_list.csv"
    tweet7_json = data_dir + "tweet7.json"
    day7_json = data_dir + "day7.json"
    week7_json = data_dir + "week7.json"
    pn_dict = data_dir + "pn_ja.dic"
    setting_name = data_dir + "setting.txt"

    # Variables
    prev_tweet_num = 10
    get_tweet_num_by_time = 3
    sum_score = 0

    # Delete the existing data files
    remove_file_names = [
        json_name, csv_name, setting_name, tweet7_json, day7_json, week7_json
    ]
    removeFiles(remove_file_names)

    # Wait for the front-end setup to complete
    account_id, goal = waitSetting(setting_name)

    # Create the various objects and do initial setup
    # Account object
    user = account.Account(account_id)

    # DataManager object
    dm = dataManager.DataManager()
    dm.setJson(json_name)
    dm.loadJson()
    dm.setCsv(csv_name)
    dm.loadCsv()

    # DataSaver object
    ds = dataSaver.DataSaver(tweet7_json, day7_json, week7_json)

    # Analyzer object
    anlzr = analyzer.Analyzer()
    anlzr.loadPnDict(pn_dict)

    # Fetch some recent tweets before monitoring starts
    for tweet_id, tweet_info in user.getTimeline(prev_tweet_num, 1).items():
        date = tweet_info["date"]
        tweet_text = tweet_info["tweet"]
        mentions = tweet_info["mentions"]

        #score = anlzr.pnDictScore(tweet_text)
        score = anlzr.gcnlScore(tweet_text)

        sum_score += score
        high_score_words = []

        json_dict = {
            tweet_id: {
                "date": date,
                "tweet": tweet_text,
                "mentions": mentions,
                "score": score,
                "sum_score": sum_score,
                "high_score_words": high_score_words
            }
        }
        dm.updateDatabase(json_dict)

    date_score_list = dm.getCsv()
    ds.updateSubTotalJsons(date_score_list)

    while True:
        print("monitoring timeline...")
        time.sleep(10)
        for tweet_id, tweet_info in user.getTimeline(get_tweet_num_by_time,
                                                     1).items():
            date = tweet_info["date"]
            tweet_text = tweet_info["tweet"]
            mentions = tweet_info["mentions"]

            if dm.hasData(tweet_id):
                print("Attayo!")
                continue

            #score = anlzr.pnDictScore(tweet_text)
            score = anlzr.gcnlScore(tweet_text)

            sum_score += score
            high_score_words = []

            json_dict = {
                tweet_id: {
                    "date": date,
                    "tweet": tweet_text,
                    "mentions": mentions,
                    "score": score,
                    "sum_score": sum_score,
                    "high_score_words": high_score_words
                }
            }
            dm.updateDatabase(json_dict)

        date_score_list = dm.getCsv()
        ds.updateSubTotalJsons(date_score_list)
Code Example #4
File: main.py  Project: paradoxysm/EMGanalysis
        except ValueError as err:
            print("ValueError:", err)
            import_type = ""
        if import_type == 'q':
            print("Goodbye!")
            sys.exit()
        else:
            try:
                import_type = int(import_type)
            except ValueError as err:
                print("Seems like you picked something wrong try again!")
                import_type = ""
            else:
                # Attempt to instantiate an Analyzer with the requested Data Import Type
                if import_type <= len(modules) and import_type > 0:
                    import_type = modules[import_type - 1]
                    try:
                        analyzerObject = analyzer.Analyzer(import_type)
                    except ImportError as err:
                        print("ImportError:", err)
                        analyzerObject = None
                        import_type = ""
                    except Exception as err:
                        print("RuntimeError:", err)
                        sys.exit()
                else:
                    print("Seems like you picked something wrong try again!")
                    import_type = ""

    # Run the analyzer
    analyzerObject.run()
Code Example #5
            from requests.auth import HTTPBasicAuth
            ch = ChainAPI.ChainAPI()
            try:
                tmp = opts.chainAPI.split(":")
                user = tmp[0]
                tmp = tmp[1].split("@")
                pwd = tmp[0]
                url = "http://"+tmp[1]
                ch.connect(url, auth=HTTPBasicAuth(user,pwd))
                callable_objects.append(ch)
            except:
                App.error(0, "Error in parsing chainAPI URL: " + opts.chainAPI)
                quit()

        ### Load ANALYZER
        analyzer = Analyzer.Analyzer(opts.nn, callable_objects=callable_objects)

        callable_objects = []
        callable_objects.append(analyzer)

        ### Load Spectrum Visualizer
        if opts.show is True:
            import analyzer_vizualizer as tv
            vizu     = tv.TidzamVizualizer()
            callable_objects.append(vizu)

        ### Load Stream Player
        if opts.stream is not None:
            import input_audiofile as ca
            connector = ca.TidzamAudiofile(opts.stream,
                callable_objects = callable_objects,
Code Example #6
File: main_window.py  Project: sept-en/Presolar
    def __init__ (self, parent=None):
        super (QtGui.QWidget, self).__init__ (parent)

        # create analyzer
        self.analyzer = analyzer.Analyzer("../datasets.json")

        self.mainGrid = QtGui.QVBoxLayout ()
        
        self.commonTopPanel = QtGui.QVBoxLayout()
        self.basicAnalysisBox = QtGui.QVBoxLayout()
        self.advancedAnalysisBox = QtGui.QVBoxLayout()

        font = QtGui.QFont()
        font.setPointSize (16)

        """
        Basic tab
        """
        # first line : select country,city comboboxes and searchButton
        # combobox country
        self.standardSelectCountry = QtGui.QComboBox()
        self.standardSelectCountry.addItems (dataset.Dataset.getCountries (self.analyzer.datasets))
        self.standardSelectCountry.currentIndexChanged.connect (self.countryIndexChanged)
        self.standardSelectCountry.setFont (font)
        self.standardSelectCountry.setMaxVisibleItems (6)
        # combobox city
        self.standardSelectCity = QtGui.QComboBox()
        self.standardSelectCity.setEnabled (False)
        self.standardSelectCity.setFont (font)
        self.standardSelectCity.setMaxVisibleItems (6)
        # search button
        searchButton = QtGui.QPushButton ("Search")
        searchButton.clicked.connect (self.searchButtonClicked)
        searchButton.setFont (font)

        # create layout for first line
        userInputPanel = QtGui.QHBoxLayout()

        logoLbl = QtGui.QLabel()
        logoLbl.setPixmap (QtGui.QPixmap ("../presolar_logo_left.png"))
        logoLbl.setScaledContents (True)
        #logoLbl.setFixedSize (100, 80)
        # font
        fontSlogan = QtGui.QFont ("Railway")
        fontSlogan.setPixelSize (30)
        # color
        #slogan = QtGui.QLabel("\"Lux et Veritas\"")
        slogan = QtGui.QLabel("            ")
        slogan.setStyleSheet ("QLabel { color: rgb(66, 149, 175); }")
        slogan.setFont (fontSlogan)
        sloganHBox = QtGui.QHBoxLayout()
        sloganHBox.setSpacing (30)
        sloganHBox.addWidget (logoLbl)
        sloganHBox.addWidget (slogan)
        #userInputPanel.addWidget (logoLbl)
        userInputPanel.addWidget (self.standardSelectCountry)
        userInputPanel.addWidget (self.standardSelectCity)
        userInputPanel.addWidget (searchButton)

        self.basicResultBox = QtGui.QTextEdit()
        self.basicResultBox.setReadOnly (True)
        self.basicResultBox.setFont (font)
        basicLayout = QtGui.QHBoxLayout()
        basicLayout.addWidget (self.basicResultBox)

        # kw/ha
        kwhLbl = QtGui.QLabel("How much $ do you spend every month on electricity?")
        self.energyOrder = QtGui.QLineEdit()
        kwhLayout = QtGui.QHBoxLayout()
        kwhLayout.addWidget(kwhLbl)
        kwhLayout.addWidget(self.energyOrder)

        self.commonTopPanel.addLayout (sloganHBox)
        self.commonTopPanel.addLayout (kwhLayout)
        self.commonTopPanel.addLayout (userInputPanel)
        self.basicAnalysisBox.addLayout (basicLayout)

        """
        Advanced tab
        """
        # Advanced cost of panel textedit and label
        self.advancedCostOfPanelEdit = QtGui.QLineEdit()
        advancedCostOfPanelLbl = QtGui.QLabel ("&Cost of panel($):")
        advancedCostOfPanelLbl.setBuddy (self.advancedCostOfPanelEdit)
        # Advanced power of panel and label
        self.advancedPowerOfPanelEdit = QtGui.QLineEdit()
        advancedPowerOfPanelLbl = QtGui.QLabel ("&Power of panel(Watts):")
        advancedPowerOfPanelLbl.setBuddy (self.advancedPowerOfPanelEdit)
        # Advanced count of panel textedit and label
        self.advancedCountOfPanelsEdit = QtGui.QLineEdit()
        advancedCountOfPanelsLbl = QtGui.QLabel ("&Quantity of panels:")
        advancedCountOfPanelsLbl.setBuddy (self.advancedCountOfPanelsEdit)
        
        # Create layout for advanced tab
        advancedCostLayout = QtGui.QVBoxLayout()
        advancedCostLayout.addWidget (advancedCostOfPanelLbl)
        advancedCostLayout.addWidget (self.advancedCostOfPanelEdit)
        advancedPowerLayout = QtGui.QVBoxLayout()
        advancedPowerLayout.addWidget (advancedPowerOfPanelLbl)
        advancedPowerLayout.addWidget (self.advancedPowerOfPanelEdit)
        advancedCountLayout = QtGui.QVBoxLayout()
        advancedCountLayout.addWidget (advancedCountOfPanelsLbl)
        advancedCountLayout.addWidget (self.advancedCountOfPanelsEdit)

        advancedTabInputLayout = QtGui.QHBoxLayout()
        advancedTabInputLayout.addLayout (advancedCostLayout)
        advancedTabInputLayout.addLayout (advancedPowerLayout)
        advancedTabInputLayout.addLayout (advancedCountLayout)
        self.advancedResultBox = QtGui.QTextEdit()
        self.advancedResultBox.setReadOnly (True)
        self.advancedResultBox.setFont (font)
        self.advancedAnalysisBox = QtGui.QVBoxLayout()
        self.advancedAnalysisBox.addLayout (advancedTabInputLayout)
        self.advancedAnalysisBox.addWidget (self.advancedResultBox)


        """
        Tab widget
        """
        # add tab widget layout
        self.tabWidget = QtGui.QTabWidget()
        basicWidget = QtGui.QWidget()
        basicWidget.setLayout (self.basicAnalysisBox)
        advancedWidget = QtGui.QWidget()
        advancedWidget.setLayout (self.advancedAnalysisBox)
        self.tabWidget.addTab (basicWidget, "Basic")
        self.tabWidget.addTab (advancedWidget, "Advanced")

        self.mainGrid.addLayout (self.commonTopPanel)
        self.mainGrid.addWidget (self.tabWidget)
        
        self.setWindowTitle ("Presolar")
        self.setFixedSize (800, 600)
        icon = QtGui.QIcon ("../presolar_logo.png")
        self.setWindowIcon (icon)
        self.setLayout (self.mainGrid)
Code Example #7
    #for beta in np.arange(1.0, 8.0, 1.0):
        #for l in range(4,Lmax,2):
        for l in [2, 4, 8, 16]:
            #l=Lmax
            print('-----------')
            print('beta =', beta)
            print('l =', l)
            sim = simulation.Simulation(beta,l,model)
            sim.run(N,4*N)
            sim.save(str(data_directory)+str(model)+'.L_'+str(l)+'beta_'+str(beta)+'.h5')
            sim.spinsToFile(str(data_directory)+str(model)+'.L_'+str(l)+'beta_'+str(beta)+'.csv')

    print('Accepted moves:', sim.accepted)
    
    
    analysis = analyzer.Analyzer(model=model)
    
    #==============================================================================
    # DATA ANALYSIS     
    #==============================================================================
    #how to calculate the Binder Ratio within Python:
    resultsDir = data_directory
    dataLocationPattern = data_directory+str(model)

    infiles=pyalps.getResultFiles(pattern=dataLocationPattern)

    data = pyalps.loadMeasurements(pyalps.getResultFiles(pattern=dataLocationPattern+'*'),['E','m^2', 'm^4'])
    m2 = pyalps.collectXY(data,x='BETA',y='m^2',foreach=['L'])
    m4 = pyalps.collectXY(data,x='BETA',y='m^4',foreach=['L'])
    E = pyalps.collectXY(data,x='BETA',y='E',foreach=['L'])
    
Code Example #8
        for file in files:
            cur_file = os.path.join(dirpath, file)
            print(f'[DEBUG]: checking file {cur_file}...')
            if file[-3:] == '.py':
                with open(cur_file, "r") as source:
                    try:
                        tree = ast.parse(source.read())
                    except SyntaxError:
                        print("[ERROR] something went wrotn!")

                    function_analyzer.visit(tree)
                    print(f'[DEBUG]: finished checking file {file}!')

    function_analyzer.report()
    print(f'[INFO] function parser has finished!\n\n')

    # check the main python script against the detected functions of interest
    # perform static analysis on the script itself and on its imports
    print(f'[INFO] Initiating script parser...')
    analyzer = analyzer.Analyzer(set(function_analyzer.stats))
    try:
        with open(script, "r") as file:
            filename = os.path.basename(script)
            print(f'[INFO]: inspecting file {script}...')
            tree = ast.parse(file.read())
            analyzer.pre_visit(tree, filename, root)
            print(f'[INFO]: finished checking file {script}!')
            analyzer.report()
    except FileNotFoundError:
        print(f'[ERROR] Could not open file {script}')
Code Example #9
#!/usr/bin/env python3

import signal
import os
import configparser
import sys

import utilities
import analyzer

def main(*args, **kwargs):
    config = configparser.ConfigParser()

    config_path = config.read(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'config.ini'),
            encoding='utf-8')

    args = utilities.parse_args(config)

    if not config_path:
        print('Config path missing or invalid')
        sys.exit(0)

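    # Build the Analyzer and run its analyses; the resulting report is not used further here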
    analyzer_ = analyzer.Analyzer(argv=True)

    report = analyzer_.run_analyses()

if __name__ == '__main__':
    main()
Code Example #10
 def test_compute_score(self):
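     # Compare compute_score output for a sample title against a pickled reference frame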
     analyz = analyzer.Analyzer()
     compare_data = pd.read_pickle("../assets/compute_score_test.pkl")
     assert_frame_equal(analyz.compute_score({"title": "Rift"}),
                        compare_data)
Code Example #11
def recursive_glob(rootdir='.', pattern='*'):
    return [
        os.path.join(rootdir, filename)
        for rootdir, dirnames, filenames in os.walk(rootdir)
        for filename in filenames if fnmatch.fnmatch(filename, pattern)
    ]


if __name__ == '__main__':
    if len(sys.argv) == 1:
        print 'usage: python computer.py folder1 folder2 ...'
        sys.exit(0)

    arg = sys.argv[1:]
    analyzer = az.Analyzer()

    # process every book in the folder
    for week in arg:
        analyzer.reset_cnt()
        notes = {}
        books = [x for x in recursive_glob(week, '*.pkl')]
        print books
        for b in books:
            l = pickle.load(open(b, 'rb'))
            bookname = os.path.basename(b).split('.')[0]
            print 'bookname: ', bookname
            notes[bookname] = defaultdict(list)
            for item in l:
                tokens, pos = item[0], item[1]
                pol_tmp = notes[bookname]['pol']
Code Example #12
 def test_get_score(self):
     analyz = analyzer.Analyzer()
     compare_data = pd.read_pickle("../assets/get_score_test.pkl")
     assert_series_equal(analyz.get_score("title", "Rift"), compare_data)
Code Example #13
 def test_load_set(self):
     analyz = analyzer.Analyzer()
     compare_data = pd.read_pickle("../assets/prepared_data_tests.pkl")
     assert_frame_equal(
         analyz.load_set("../assets/prepared_data_tests.pkl"), compare_data)
Code Example #14
def main(opt):

    # set twitter auth credentials
    auth = tweepy.OAuthHandler(opt.consumer_key, opt.consumer_secret)
    auth.set_access_token(opt.access_token_key, opt.access_token_secret)

    # get api instance
    if opt.https_proxy:
        api = tweepy.API(auth,
                         wait_on_rate_limit=True,
                         wait_on_rate_limit_notify=True,
                         proxy=opt.https_proxy)
    else:
        api = tweepy.API(auth,
                         wait_on_rate_limit=True,
                         wait_on_rate_limit_notify=True)

    # write_to_file, roll_size or roll_count
    write_to_file = True if opt.hdfs_path or opt.local_path else False

    # instantiate the analyzer
    analyzer = an.Analyzer()

    # override opt.roll_size for testing
    # opt.roll_size = 20480

    if write_to_file:
        tmp_tweet_dir = Util.TMP_DIR + '/' + Util.TWEETS
        tmp_wordcloud_dir = Util.TMP_DIR + '/' + Util.WORDCLOUD

        if not os.path.exists(tmp_tweet_dir):
            os.makedirs(tmp_tweet_dir)

        if not os.path.exists(tmp_wordcloud_dir):
            os.makedirs(tmp_wordcloud_dir)

        # create new hdfs paths
        if opt.hdfs_path:
            hadoopy.put(tmp_tweet_dir, opt.hdfs_path)
            hadoopy.put(tmp_wordcloud_dir, opt.hdfs_path)

        # create new local paths
        if opt.local_path:
            try:
                os.makedirs(opt.local_path + Util.TWEETS)
                os.makedirs(opt.local_path + Util.WORDCLOUD)
            except OSError as e:
                if e.errno != errno.EEXIST:
                    raise e
                pass

    # join our keywords as a single query
    # query = ' OR '.join(opt.keywords)
    queries = [
        ' OR '.join(opt.keywords[i:i + 10])
        for i in xrange(0, len(opt.keywords), 10)
    ]

    for query in queries:

        file_closed = True

        # Cursor params
        # since_id=tweet_id, max_id=tweet_id, lang="en"
        # include_entities=True, rpp=100, count=1000
        if opt.since_tweet_id:
            cursor = tweepy.Cursor(api.search,
                                   q=query,
                                   result_type="recent",
                                   since_id=opt.since_tweet_id,
                                   rpp=100)
        else:
            cursor = tweepy.Cursor(api.search,
                                   q=query,
                                   result_type="recent",
                                   rpp=100)

        try:

            for tweet in cursor.items():

                tweet_obj, wordarray = Tweet.tweet_wordcloud_from_json(
                    tweet._json, analyzer)

                wordcloud_list = Wordcloud.list_from_array(
                    tweet_obj.tweet_id, wordarray)

                Util.vprint(opt.verbose,
                            "Tweet_id: " + str(tweet_obj.tweet_id))
                # print "Tweet_id: " + str(tweet_obj.tweet_id)

                # determine if we are flagged to write to file
                if write_to_file:

                    # start of loop
                    # fp will either return an existing .tmp file or open a new one
                    # bytes_written will be automatically set to zero
                    # if new file created
                    if file_closed:
                        now = datetime.utcnow()
                        fp_tweets = open(
                            tmp_tweet_dir + '/' +
                            now.strftime('%Y-%m-%dT%H.%M.%SZ') + '.tmp', 'a')
                        fp_wordcloud = open(
                            tmp_wordcloud_dir + '/' +
                            now.strftime('%Y-%m-%dT%H.%M.%SZ') + '.tmp', 'a')
                        bytes_written = 0
                        print "Create new temporary file to write to: " + fp_tweets.name[
                            -24:]

                    bytes_written += Tweet.write_to_file(tweet_obj, fp_tweets)
                    Wordcloud.write_to_file(wordcloud_list, fp_wordcloud)
                    file_closed = False
                    Util.vprint(opt.verbose,
                                "bytes_written: " + str(bytes_written))

                    # close the file if reached limit
                    # rename (remove .tmp) and move to specified local / HDFS path
                    if (bytes_written >= opt.roll_size):
                        __close_tmp_mv(fp_tweets, fp_wordcloud, opt.hdfs_path,
                                       opt.local_path)
                        file_closed = True

            print "Finished searching tweets for queries: "
            print query

        except tweepy.error.TweepError as te:
            print "Tweepy throws error"
            print te.reason
            print te.response

        except (KeyboardInterrupt, SystemExit):
            if write_to_file and not file_closed:
                print "Closing temporary files"
                fp_tweets.close()
                fp_wordcloud.close()
                __cleanup_tmp_dir(tmp_tweet_dir, tmp_wordcloud_dir,
                                  opt.hdfs_path, opt.local_path)

        # post loop
        # close the file, just in case it is not closed within the loop
        finally:
            if write_to_file and not file_closed:
                print "Closing temporary files"
                fp_tweets.close()
                fp_wordcloud.close()
                __cleanup_tmp_dir(tmp_tweet_dir, tmp_wordcloud_dir,
                                  opt.hdfs_path, opt.local_path)
                file_closed = True

    if write_to_file and not file_closed:
        print "Closing temporary files"
        fp_tweets.close()
        fp_wordcloud.close()
        __cleanup_tmp_dir(tmp_tweet_dir, tmp_wordcloud_dir, opt.hdfs_path,
                          opt.local_path)

    print "Ending tweet searching"
Code Example #15
def main():
    if len(sys.argv) < 2:
        print("Path to configuration file wasn't specified. Exiting")
        exit(1)

    config = c.Configuration(sys.argv[1])

    repo = r.Repository(config.getRepo(), config)
    if repo.checkIfExists() == True:
        print("Updating repository " + repo.extractRepositoryName())
        repo.Pull()
    else:
        print("Cloning repository: " + repo.extractRepositoryName())
        repo.Clone()

    qaRepo = r.Repository(config.getQA(), config)
    if config.getRepo() != config.getQA():
        if qaRepo.checkIfExists() == True:
            print("Updating repository " + qaRepo.extractRepositoryName())
            qaRepo.Pull()
        else:
            print("Cloning repository: " + qaRepo.extractRepositoryName())
            qaRepo.Clone()
    else:
        print("Skipping QA repository: it's the same as test repo")

    if not u.CheckRepoPathExists(config, repo, config.getPath()):
        print("Configured directory " + config.getPath() +
              " wasn't found in test repository. Aborting")
        exit(21)

    if not u.CheckRepoPathExists(config, qaRepo, config.getQAPath()):
        print("Configured directory " + config.getQAPath() +
              " wasn't found in test repository. Aborting")
        exit(22)

    # Workflow starts here

    gh = 0

    try:
        gh = g.GitHub(config.getPrivateKey(), config.getAppID())
        gh.Auth()
    except ValueError as err:
        print("GitHub auth failed: " + str(err))
        exit(101)

    ghUser = ''
    ghOrg = ''
    ghType = ''
    installation_id = 0

    for user in config.getUsers():
        installation_id = gh.CheckUserInstallation(user)
        ghUser = user
        ghType = 'user'
        break

    for org in config.getOrgs():
        installation_id = gh.CheckOrgInstallation(org)
        ghOrg = org
        ghType = 'org'
        break

    ghTitle = ''
    if ghType == 'user':
        ghTitle = ghUser
    else:
        ghTitle = ghOrg

    if installation_id == 0:
        print("Couldn't get installation for " + ghType + " " + ghTitle)
        exit(210)

    installation_id = gh.CheckRepoInstallation('crioto', 'qa-org')

    print("Found installation ID for " + ghTitle + ": " + str(installation_id))
    gh.AuthInstallation(installation_id)

    print(gh.GetIssues('crioto', 'qa-org'))

    gh.CreateIssue('crioto', 'qa-org', 'Test Title', 'Test Text', '')
    print(gh.GetIssues('crioto', 'qa-org'))

    # gh = g.InitializeGithub(config.getToken())
    # user = gh.get_user()
    # print(user)

    exit(0)
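    # NOTE: the exit(0) above makes the builder/analyzer workflow below unreachable as written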

    builder = b.Builder(
        os.path.join(config.getLocalPath(), qaRepo.extractRepositoryName(),
                     config.getQAPath()))
    builder.Run()

    issues = builder.Get()
    tags = []
    for issue in issues:
        tags.append(issue.GetAbsoluteHandle())

    analyzer = a.Analyzer(
        os.path.join(config.getLocalPath(), repo.extractRepositoryName(),
                     config.getPath()), tags)
    analyzer.Run()

    covered = analyzer.GetMatches()
Code Example #16
 def setUp(self):
     self.sm_analyzer = analyzer.Analyzer("samples/sample.xml",
                                          "samples/rules_sample.xml")
Code Example #17
File: go.py  Project: ranapat/as3parser
import sys
import analyzer
import parser
import settings

if len(sys.argv) == 3:

    settings = settings.Settings()
    
    parser = parser.Parser()
    #parser.cache_from(settings.class_data)
    
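    # Wrap the parser in an Analyzer and guess based on the two command-line arguments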
    analyzer = analyzer.Analyzer(parser)
    print analyzer.guess(sys.argv[1], sys.argv[2])
else:
    print "nothing"
Code Example #18
    if stream_tools.StreamTools.stream_timer() == False:
        logging.info('Connection is closed. Day has ended')
        pass
    else:
        ws.run_forever(sslopt={"cert_reqs": ssl.CERT_NONE})


def on_error(ws, error):
    pass


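# Handle each incoming websocket message: load it into the Analyzer, run the analysis,
# then pick and run a strategy based on the market analysis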
def on_message(ws, message):

    logging.info('--------------------------------------')
    print('-------------------')
    while dp.load(message, ticker) == True:
        dp.run()
        strategy = strategy_factory.get_strategy(dp._market_analyzer())
        strategy.run()
        metrics = dp.metrics()
        break


if __name__ == '__main__':

    ticker = 'TSLA'
    socket = "wss://alpaca.socket.polygon.io/stocks"
    dp = analyzer.Analyzer()
    web_socket_start()