Example #1
 def __init__(self, pgm, logfile, data=None):
     Analysis.__init__(self, pgm, logfile)
     self.data = data
     if data is None:
         self.protos = None
     else:
         self.protos = data.protos
Example #2
def demo(code='N225',
         name='日経平均株価',
         start='2014-01-01',
         days=240,
         csvfile=os.path.join(os.path.dirname(
             os.path.abspath(__file__)),
             '..',
             'test',
             'stock_N225.csv'),
         update=False):

    # Example of handling the ti object.
    io = FileIO()
    stock_d = io.read_from_csv(code,
                               csvfile)
    ti = TechnicalIndicators(stock_d)
    ti.calc_ret_index()

    print(ti.stock['ret_index'].tail(10))
    io.save_data(io.merge_df(stock_d, ti.stock),
                 code, 'demo_')

    # Example of running the analysis code.
    analysis = Analysis(code=code,
                        name=name,
                        start=start,
                        days=days,
                        csvfile=csvfile,
                        update=True)
    return analysis.run()
Example #3
    def __init__(self, config):

        Analysis.__init__(self, config)

        self.share_specific_calculations()

        self.generate_unique_ids()

        self.pcwg_share_metrics_calc()
Example #4
def get_data():
    result = pandas.DataFrame()
    for file in Path(CLOUD).iterdir():
        location = CLOUD + '\\' + file.name
        if result.empty:
            result = Analysis.create_df(location)
        else:
            result = pandas.concat([result, Analysis.create_df(location)], ignore_index=True)
    return result
Example #5
def main():
    from optparse import OptionParser
    usage = "usage: %prog [options] arg"
    parser = OptionParser(usage)
    parser.add_option("-c", "--code", dest="stockcode",
                      help="stock code")
    parser.add_option("-n", "--name", dest="stockname",
                      help="stock name")
    parser.add_option("-s", "--stock", dest="stocktxt",
                      help="read scraping stock names from text file")
    parser.add_option("-r", "--readfile", dest="csvfile",
                      help="read stock data from csv file")
    parser.add_option("-u", "--update",
                      help="update csvfile (overwirte)",
                      action="store_true", dest="update")
    parser.add_option("-d", "--date", dest="startdate",
                      help="specify start date as '2014-10-01'")
    parser.add_option("-y", "--days", dest="days",
                      help="plot days as '240', specify 0 for all days")
    parser.add_option("-a", "--axis", dest="axis",
                      help="setting y-axis limit (1 or 2, default 2)")
    parser.add_option("-p", "--complexity", dest="complexity",
                      help="complexity of chart (1-3, default 3)")
    (options, args) = parser.parse_args()

    if len(args) != 0:
        parser.error("incorrect number of arguments")

    if options.days is None:
        options.days = 240

    if options.axis is None:
        options.axis = 2

    if options.complexity is None:
        options.complexity = 3

    if options.stocktxt:
        return read_csv(filename=options.stocktxt,
                        start=options.startdate,
                        days=int(options.days),
                        update=options.update,
                        axis=int(options.axis),
                        complexity=int(options.complexity))
    else:
        analysis = Analysis(code=options.stockcode,
                            name=options.stockname,
                            fullname=options.stockname,
                            start=options.startdate,
                            days=int(options.days),
                            csvfile=options.csvfile,
                            update=options.update,
                            axis=int(options.axis),
                            complexity=int(options.complexity))
        return analysis.run()
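For reference, a hedged sketch of what the non-stocktxt branch above boils down to when called programmatically; the literal values are placeholders, not taken from the source:

analysis = Analysis(code='N225', name='Nikkei 225', fullname='Nikkei 225',
                    start='2014-10-01', days=240, csvfile=None,
                    update=False, axis=2, complexity=3)
ti = analysis.run()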
Example #6
    def __init__(self, analysis_config):

        Analysis.__init__(self, analysis_config)

        self.calculateBase()

        self.powerCurveSensitivityResults = {}
        self.powerCurveSensitivityVariationMetrics = pd.DataFrame(columns = ['Power Curve Variation Metric'])

        self.calculate_sensitivity_analysis()
        self.calculate_scatter_metric()
Example #7
def twitter_callback(tweet):
    """Analyzes Trump tweets, makes stock trades, and sends tweet alerts."""

    # Initialize these here to create separate httplib2 instances per thread.
    analysis = Analysis(logs_to_cloud=LOGS_TO_CLOUD)
    trading = Trading(logs_to_cloud=LOGS_TO_CLOUD)

    companies = analysis.find_companies(tweet)
    logs.debug("Using companies: %s" % companies)
    if companies:
        trading.make_trades(companies)
        twitter.tweet(companies, tweet)
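The comment above about separate httplib2 instances matters because the callback is normally invoked from several stream threads at once; a minimal dispatcher sketch using only the standard library (dispatch_tweets is hypothetical, not part of the source project):

import threading

def dispatch_tweets(tweets):
    # Each tweet is handled on its own thread, so twitter_callback builds
    # fresh Analysis/Trading instances instead of sharing globals.
    for tweet in tweets:
        threading.Thread(target=twitter_callback, args=(tweet,)).start()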
Example #8
    def generate_features_pool(self):
        """ generate train and test files for classification """

        from analysis import Analysis
        from dir_processing import DirProcessing
        from file_op import FileOp

        landmarks_urls_list, features = Analysis.get_topic_proportions_for_every_image()

        subsets_dict = self.divide_persons_into_subsets()
        
        for i in range(0, len(landmarks_urls_list)):
            landmarks_url = landmarks_urls_list[i]
            label_url = DirProcessing.get_label_url_from_landmarks_url(landmarks_url)
            loc = DirProcessing.get_location_from_sequence(landmarks_url, 3)

            if label_url and loc != "MIDDLE":
                person_id, perform_id, index_id = DirProcessing.get_id_from_label_url(label_url)
                subset_id = subsets_dict[person_id]
                feature = features[i, :]

                if loc == "START":
                    label = 0
                else:
                    label = FileOp.read_label_file(label_url)

                self.features_pool[subset_id].append(feature)
                self.labels_pool[subset_id].append(label)
                self.urls_pool[subset_id].append(landmarks_url)

        print "Features pools have been generated. "
Example #9
	def set_data(self,ev,ev_id,beats,geo,seconds,unif,poly):
		event = Event(ev)
		self.event_id = ev_id
		#print "Muons: " +str(event.muons)
		self.anal = Analysis(event,beats,geo,seconds,unif,poly)
		self.data = self.anal.event
		self.unif = unif
Example #10
    def load_dataset(self, dataset_config, analysis_config):

        power_filter = Filter(True, dataset_config.power, 'Below', False, 0.0)

        dataset_config.filters.append(power_filter)

        return Analysis.load_dataset(self, dataset_config, analysis_config)
Example #11
 def analyses(self):
     if len(self.analysis_list) == 0:
         nodes = self.xml_node.xpath("analyses/analysis")
         for node in nodes:
             a = Analysis(node)
             self.analysis_list.append(a)
     return self.analysis_list
Example #12
def test_run(code='N225',
             name='日経平均株価',
             start='2014-01-01',
             days=180,
             csvfile=os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                  '..', 'test', 'stock_N225.csv'),
             update=False,
             axis=2,
             complexity=3):

    analysis = Analysis(code=code,
                        name=name,
                        start=start,
                        days=days,
                        csvfile=csvfile,
                        update=update)
    ti = analysis.run()

    eq_('N225', analysis.code)
    eq_('日経平均株価', analysis.name)
    eq_('2014-01-01', analysis.start)
    eq_(-180, analysis.minus_days)
    eq_('stock_N225.csv', os.path.basename(analysis.csvfile))
    eq_(False, analysis.update)
    eq_('clf_N225.pickle', analysis.clffile)

    expected = 18791.39
    result = round(ti.stock.ix['2015-03-20', 'sma25'], 2)
    eq_(expected, result)

    filename = 'ti_N225.csv'
    expected = False
    eq_(expected, os.path.exists(filename))
    if os.path.exists(filename):
        os.remove(filename)

    filename = 'chart_N225.png'
    expected = True
    eq_(expected, os.path.exists(filename))
    if os.path.exists(filename):
        os.remove(filename)

    stock_length = len(ti.stock)
    expected = 180
    eq_(expected, stock_length)

    return result
Example #13
def load_data(X='all', S='all'):
    from analysis import NLF, LF
    X = '[0-9]*' if X=='all' else X
    S = '[0-9]*' if S=='all' else S
    
    cw_markers = sort_markers(Analysis.list_markers('CW.{}.{}\.dat'.format(X,S)))
    ccw_markers= sort_markers(Analysis.list_markers('CCW.{}.{}\.dat'.format(X,S)))

    acw = []; accw = []
    for marker in cw_markers:
        acw.append(Analysis.load(marker, NLF['TRPSPI'], NLF['TRPRAY'], NLF['PRAY'], NLF['PSPI']))
        acw[-1].gauss = LF['COL']('{}GAUSS{}.in'.format(acw[-1].data_dir, marker[2:]))
    for marker in ccw_markers:
        accw.append(Analysis.load(marker, NLF['TRPSPI'], NLF['TRPRAY'], NLF['PRAY'], NLF['PSPI']))
    for i, a in enumerate(accw):
        a.gauss = np.flip(acw[i].gauss, 0)
    return acw, accw
Example #14
    def __init__(self, analysis_config, baseLineMode):

        self.basePower = "Base Power"
        self.baseLineMode = baseLineMode

        Status.add("Baseline Mode: %s" % self.baseLineMode)

        Analysis.__init__(self, analysis_config)

        self.calculateBase()

        self.calculateHubBenchmark()
        self.calculateREWSBenchmark()
        self.calculateTurbRenormBenchmark()
        self.calculationCombinedBenchmark()
        self.calculatePowerDeviationMatrixBenchmark()
        self.calculateProductionByHeightBenchmark()
Example #15
    def get_base_filter(self):

        if self.baseLineMode == "Hub":
            return self.dataFrame[self.inputHubWindSpeed].notnull()
        elif self.baseLineMode == "Measured":
            return Analysis.get_base_filter(self)
        else:
            raise Exception("Unrecognised baseline mode: %s" % self.baseLineMode)
Example #16
    def __init__(self, analysis_config, baseLineMode):

        self.basePower = "Base Power"
        self.baseLineMode = baseLineMode

        Status.add("Baseline Mode: %s" % self.baseLineMode)

        Analysis.__init__(self, analysis_config)

        self.calculateBase()

        self.calculateHubBenchmark()
        self.calculateREWSBenchmark()
        self.calculateTurbRenormBenchmark()
        self.calculationCombinedBenchmark()
        self.calculatePowerDeviationMatrixBenchmark()
        self.calculateProductionByHeightBenchmark()
Example #17
    def __init__(self, marker, full=True):
        TBL_TYPE = [('turn', int), ('pid', int)] + MU_TYPE
        stune_loader = lambda filename: np.loadtxt(
            filename, dtype=TBL_TYPE, skiprows=1)
        trdata_loader = lambda filename: LF['TBL'](filename)
        NLF['TRPSPI'] = ('TRPSPI', trdata_loader)
        NLF['TRPRAY'] = ('TRPRAY', trdata_loader)
        Analysis.fetch_from(SMPAnalysis.data_dir)
        loaders = [NLF['PRAY'], NLF['TRPSPI']]
        if full:
            loaders += [NLF['TRPRAY'], ('STUNEE', stune_loader)]
        obj = Analysis.load(marker, *loaders)
        super().__init__(**obj)
        self.TOF = 1e-6
        self.centroid = Centroid(self.pray['X'].mean(), self.pray['Y'].mean(),
                                 self.pray['D'].mean())

        self.__comp_pol()
Example #18
def testImagesAnalyze():
    print('Analysis started')
    for image in tqdm(testImages):
        a = Analysis(image, 'encryptedLSB/' + testImages.get(image) + '.tiff')
        lsbData[testImages.get(image)] = [
            a.mse * 10, a.correlation, a.ssim, a.fusion / 10
        ]

        a = Analysis(image,
                     'KOHONENencrypted/' + testImages.get(image) + '.tiff')
        kohonenData[testImages.get(image)] = [
            a.mse, a.correlation, a.ssim, a.fusion
        ]

    kohonenAnalysisData = a.normalize(kohonenData)
    print(lsbData)
    print(kohonenAnalysisData)
    barCharts(['baboon', 'airplane', 'peppers', 'earth'])
Example #19
    def run(self):
        base_preprocessor = Preprocessor(self.basefile)
        base_preprocessed_image_stack = base_preprocessor.preprocess_basefile()
        arc_preprocessor = Preprocessor(self.arcfile)
        arc_preprocessed_image_stack = arc_preprocessor.preprocess_arcfile()

        x, y, z = self.cell_aggragation_shape
        segmenter = Segmenter(base_preprocessed_image_stack,
                              self.create_np_ellipsoid(x, y, z),
                              "WS",
                              generate_debugging=self.enable_debugging)
        segmented_image_stack = segmenter.run_segmentation()

        analysis = Analysis(segmented_image_stack,
                            arc_preprocessed_image_stack)
        analysis.generate_report()
        colorized_image_stack = analysis.colorize_overlapping_cells()
        self.save_image_stack(colorized_image_stack)
Example #20
def main():
    settings = Settings()
    settings.Initalize_Global_Settings()

    preprocess = Preprocess(settings)
    preprocess.Load_Into_Dataframes()

    analysis = Analysis(preprocess)
    experiments = Experiments(analysis)

    data = analysis.Core(experiments)
    data_experimentals = experiments.Run_Experiments()

    models, best_fit, gals_df = analysis.Mocks_And_Models(experiments)

    plotting = Plotting(preprocess)
    plotting.Plot_Core(data, models, best_fit)
    plotting.Plot_Experiments(data, data_experimentals, models, best_fit)
Example #21
def store(data, date):
    start_date = date - datetime.timedelta(weeks=1)  # get the date from a week ago

    week_ago = Analysis.date_to_str(start_date)
    current_time = pandas.Timestamp(date.hour, date.minute, date.second)  # time is the same a week ago as now


    # store data from within a week in a pickle
    data[(data['Date'] > week_ago) | ((data['Date'] == week_ago) & (data['Time'] > current_time))].to_pickle(DATASTORE)
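A hedged round trip for the snippet above, assuming get_data() from the earlier example and that DATASTORE points at a writable pickle path:

import datetime
import pandas

store(get_data(), datetime.datetime.now())  # keep roughly the last week of rows
recent = pandas.read_pickle(DATASTORE)      # read the cached window back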
Example #22
    def __init__(self, analysis_config, baseLineMode):

        self.basePower = "Base Power"
        self.baseLineMode = baseLineMode

        Status.add("Baseline Mode: %s" % self.baseLineMode)

        Analysis.__init__(self, analysis_config)

        self.calculateBase()

        self.calculateHubBenchmark()
        self.calculateREWSBenchmark()
        self.calculateTurbRenormBenchmark()
        self.calculationCombinedBenchmark()
        self.calculatePowerDeviationMatrixBenchmark()

        self.dataFrame.to_csv("Debug.dat")
Example #23
def read_csv(filename, start, days, update, axis, complexity):
    stocks = pd.read_csv(filename, header=None)
    reference = []
    for s in stocks.values:
        code = str(s[0])
        analysis = Analysis(code=code,
                            name=s[1],
                            start=start,
                            days=int(days),
                            csvfile="".join(['stock_', str(s[0]), '.csv']),
                            update=True,
                            reference=reference,
                            axis=int(axis),
                            complexity=int(complexity))
        result = analysis.run()
        if code == "N225" and result:
            reference = result.stock_raw['Adj Close']
    return result
Example #24
    def get_end_results(self):
        # get optimized pulse and propagation

        # get and save inter vects

        self.anly = Analysis(self.sys_para, self.tfs.final_state,
                             self.tfs.ops_weight, self.tfs.unitary_scale,
                             self.tfs.inter_vecs)
        self.save_data()
        self.display()
        if not self.show_plots:
            self.conv.save_evol(self.anly)

        self.uks = self.Get_uks()
        if not self.sys_para.state_transfer:
            self.Uf = self.anly.get_final_state()
        else:
            self.Uf = []
Example #25
    def get_analysis(self,analyzed_model):
        for i in self.analysis_model_list:
            if i.model == analyzed_model:
                return i

        analysis_model = Analysis(analyzed_model)
        self.analysis_model_list.append(analysis_model)

        return analysis_model
Example #26
def test_run(
    code="N225",
    name="日経平均株価",
    start="2014-01-01",
    days=180,
    csvfile=os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "test", "stock_N225.csv"),
    update=False,
    axis=2,
    complexity=3,
    reference=[],
):

    analysis = Analysis(code=code, name=name, start=start, days=days, csvfile=csvfile, update=update)
    ti = analysis.run()

    eq_("N225", analysis.code)
    eq_("日経平均株価", analysis.name)
    eq_("2014-01-01", analysis.start)
    eq_(-180, analysis.days)
    eq_("stock_N225.csv", os.path.basename(analysis.csvfile))
    eq_(False, analysis.update)
    eq_("clf_N225.pickle", analysis.clffile)

    expected = 18791.39
    result = round(ti.stock.ix["2015-03-20", "sma25"], 2)
    eq_(expected, result)

    filename = "ti_N225.csv"
    expected = True
    eq_(expected, os.path.exists(filename))
    if os.path.exists(filename):
        os.remove(filename)

    filename = "chart_N225.png"
    expected = True
    eq_(expected, os.path.exists(filename))
    if os.path.exists(filename):
        os.remove(filename)

    stock_length = len(ti.stock)
    expected = 180
    eq_(expected, stock_length)

    return result
Example #27
def n_players_plots(alg_list, noise=0.1, ranking=Copeland, horizon=1000):
	alg_names = [str(alg(0, ranking)) for alg in alg_list]
	print('alg_names', alg_names)
	players = [1000,200,100,40,20,10] # [20,18,16,14,12,10]
	n_trials = [1, 10, 10, 30, 30,30] # [10,10,10,10,10,10] #
	cum_regs = np.zeros((len(players), len(alg_list)))
	for num_players in players:
		ds = GenDataset
		analysis = Analysis(alg_list, dataset=ds, name=str(ds(0,0))+'_noise'+str(noise)+'_Copeland', ranking_procedure=ranking, noise=noise, n_trials = n_trials[players.index(num_players)], n_players=num_players, horizon=horizon)
		cum_reg = analysis.get_cum_regret_n(t=horizon-1)
		cum_regs[players.index(num_players),:] = cum_reg
		print('cum_reg', cum_regs)
		with open('players_test_'+str(players.index(num_players))+'.txt','w') as f:
			f.write(str(cum_regs.tolist()))
		f.close()
		
	plot_n_cumregrets(cum_regs, alg_names, players)
	plot_n_cumregrets(cum_regs, alg_names, players, axis='log')
	plot_n_cumregrets(cum_regs, alg_names, players, axis='loglog')
Example #28
    def selectPowerCurve(self, powerCurveMode):

        if powerCurveMode == "InnerTurbulenceMeasured":

            if self.hasActualPower:
                return self.innerTurbulenceMeasuredPowerCurve
            else:
                raise Exception("Cannot use inner measured power curvve: Power data not specified")

        elif powerCurveMode == "OuterTurbulenceMeasured":

            if self.hasActualPower:
                return self.outerTurbulenceMeasuredPowerCurve
            else:
                raise Exception("Cannot use outer measured power curvve: Power data not specified")

        else:

            Analysis.selectPowerCurve(self, powerCurveMode)
Example #29
class SimpleAnalysisTestCase(unittest.TestCase):
  
  def setUp(self):
    data = {'ANALYSIS_GROUP': 'BLUEPRINT', 'ALIGNMENT_SOFTWARE': 'BWA',
            'ALIGNMENT_SOFTWARE_VERSION': '0.7.7', 'ANALYSIS_SOFTWARE': 'MACS2',
            'ANALYSIS_SOFTWARE_VERSION': '2.0.10.20131216'}
    self.analysis = Analysis(metadata=data)
  
  def test_analysis_metadata(self):
    analysis_data = self.analysis.get_analysis_data()
    self.assertEqual('BWA', analysis_data['alignment_software'])
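To run this test case on its own, the usual unittest entry point can be appended, assuming the class lives in its own module:

if __name__ == '__main__':
    unittest.main()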
Example #30
def test5():
    symbol_list = []
    lines = open(config.TRADABLE_STOCKS, 'r').read().split('\n')
    for line in lines:
        if len(line) > 0:
            symbol_list.append(line)

    #symbol_list = ['CSCO']

    q = Quote()
    a = Analysis()
    p = RenkoPatterns()
    spy_df = q.get('spy', 'google')

    for sym in symbol_list:
        df = q.get(sym, 'google')
        if df is not None:
            a.analysis(sym, df, spy_df)
            df.to_csv(DATA_DIR + sym + '.csv')
Example #31
def main():

    # Step 1: read the substrate network and the virtual network request files
    network_files_dir = 'networks/'
    sub_filename = 'subsj.txt'
    sub = Substrate(network_files_dir, sub_filename)
    event_queue1 = simulate_events_one('VNRequest/', 2000)

    # Step 2: choose the mapping algorithm
    algorithm = 'mcts'

    # Step 3: handle the virtual network request events
    start = time.time()
    sub.handle(event_queue1, algorithm)
    time_cost = time.time() - start
    print(time_cost)

    # Step 4: write the mapping result file
    tool = Analysis()
    tool.save_result(sub, '%s-VNE-0320-sj.txt' % algorithm)
Example #32
    def fun_analyze(self):
        device, chn_type, mode, window_size, window_lag, norm_min, norm_max = self._read_input()
        IP, port = self._read_osc()
        try:
            # starting analysis
            self.analysis = Analysis(discovery=self.discovery, mode=mode, chn_type=chn_type,
                                     corr_params=self.conn_params, OSC_params=[IP, port],
                                     window_params=[float(window_size), None],  # baseline lag not implemented
                                     norm_params=[float(norm_min), float(norm_max)])
            self.analysis.start()
            # starting output analysis
            # self.outputAnalysis = OutputAnalysis(self.analysis)
            # self.outputAnalysis.start()
            # update state variable and buttons
            self.analysis_running = True
            self.btn_stop.setEnabled(True)
            self._enableEdit(False)

        except Exception as e:
            self.param_check.setText("Error message: " + str(e))
Example #33
    def getRecentMonthCateStatistic(self, recentNumber):
        year = datetime.date.today().year
        month = datetime.date.today().month
        sumPerc, sumCost = Statistic().getRecentMonthCateStatistic(
            recentNumber)
        allCate = Analysis().getAllCategoryAndId()
        csvStr = ''
        csvStr += '分类'
        csvPerc = ''
        csvPerc += '分类'
        # write the table header row
        for i in range(0, recentNumber):
            thisMonth = month - i
            if thisMonth < 1:
                thisMonth = 12 + thisMonth
                thisYear = year - 1
            else:
                thisYear = year
            csvPerc += ",%s-%s(%%)" % (thisYear, thisMonth)
            csvStr += ",%s-%s(¥)" % (thisYear, thisMonth)
        csvStr += "\n"
        csvPerc += "\n"
        # manually add the overall total ('总计') row
        csvStr += '总计'
        for i in range(0, recentNumber):
            thisMonth = month - i
            if thisMonth < 1:
                thisMonth = 12 + thisMonth
            csvStr += ',' + str(sumCost['总计'][thisMonth])
        csvStr += "\n"

        for cate in allCate:
            # loop over categories
            csvStr += "%s" % cate['name']
            csvPerc += "%s" % cate['name']
            for i in range(0, recentNumber):
                # loop over months
                thisMonth = month - i
                if thisMonth < 1:
                    thisMonth = 12 + thisMonth

                # print percent for this category
                try:
                    csvPerc += ',' + sumPerc[cate['name']][thisMonth] + '%'
                except KeyError:
                    csvPerc += ',0%'
                # print cost of cate
                try:
                    csvStr += ',' + str(sumCost[cate['name']][thisMonth])
                except KeyError:
                    csvStr += ',0'
            csvStr += '\n'
            csvPerc += "\n"
        return csvStr + '\n' + csvPerc
Example #34
def read_csv(filename, start, days, update, axis, complexity):
    stocks = pd.read_csv(filename, header=None)
    reference = []
    for s in stocks.values:
        code = str(s[0])
        analysis = Analysis(code=code,
                            name=s[1],
                            fullname=s[2],
                            start=start,
                            days=int(days),
                            csvfile="".join(['stock_',
                                             str(s[0]), '.csv']),
                            update=True,
                            reference=reference,
                            axis=int(axis),
                            complexity=int(complexity))
        result = analysis.run()
        if code == "N225" and result:
            reference = result.stock_raw['Adj Close']
    return result
Example #35
    def __init__(self, analysis_config):

        Analysis.__init__(self, analysis_config)

        if self.hasActualPower:

            Status.add("Calculating actual power curves...")

            self.innerTurbulenceMeasuredPowerCurve = self.calculateMeasuredPowerCurve(2, self.cutInWindSpeed, self.cutOutWindSpeed, self.ratedPower, self.actualPower, 'Inner Turbulence')
            self.outerTurbulenceMeasuredPowerCurve = self.calculateMeasuredPowerCurve(2, self.cutInWindSpeed, self.cutOutWindSpeed, self.ratedPower, self.actualPower, 'Outer Turbulence')

        if self.rewsActive and self.rewsDefined:

            if self.hasShear: self.rewsMatrixInnerShear = self.calculateREWSMatrix(3)
            if self.hasShear: self.rewsMatrixOuterShear = self.calculateREWSMatrix(6)

            Status.add("Actual Power Curves Complete.")

        self.calculateBase()
        self.calculate_additional_power_deviation_matrices()
Example #36
def start_software(size):
    """
    Creates all the classes and tells the classes how to communicate with
    each other
    Given screen 'size' as (x,y) tuple and gives it to view
    """
    pygame.init()

    #initializes model, view, controller
    model = CircuitModel()
    view = PyGameWindowView(size)
    controller = Controller()
    analysis = Analysis()

    #gives model, view, controller references to the other ones
    model.view = view
    model.controller = controller
    model.analysis = analysis

    view.model = model
    view.controller = controller

    controller.model = model
    controller.view = view

    analysis.model = model

    #runs software
    running = True

    while running:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                running = False
            controller.handle_event(event)

        model.update()
        view.draw()
        controller.update()

    pygame.quit()
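A minimal entry point for the function above; the 640x480 window size is an assumption, not taken from the source:

if __name__ == '__main__':
    start_software((640, 480))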
Example #37
    def twitter_callback(self, tweet):
        """Analyzes Trump tweets, trades stocks, and tweets about it."""

        # Initialize the Analysis, Logs, Trading, and Twitter instances inside
        # the callback to create separate httplib2 instances per thread.
        analysis = Analysis(logs_to_cloud=LOGS_TO_CLOUD)
        logs = Logs(name="main-callback", to_cloud=LOGS_TO_CLOUD)

        # Analyze the tweet.
        companies = analysis.find_companies(tweet)
        logs.info("Using companies: %s" % companies)
        if not companies:
            return

        # Trade stocks.
        trading = Trading(logs_to_cloud=LOGS_TO_CLOUD)
        # trading.make_trades(companies)

        # Tweet about it.
        twitter = Twitter(logs_to_cloud=LOGS_TO_CLOUD)
        twitter.tweet(companies, tweet)
Example #38
    def get_base_filter(self):

        base_filter = Analysis.get_base_filter(self)

        if self.baseLineMode == "Hub":
            return base_filter & self.dataFrame[
                self.inputHubWindSpeed].notnull()
        elif self.baseLineMode == "Measured":
            return base_filter
        else:
            raise Exception("Unrecognised baseline mode: %s" %
                            self.baseLineMode)
Example #39
    def twitter_callback(self, tweet):
        """Analyzes Trump tweets, trades stocks, and tweets about it."""

        # Initialize the Analysis, Logs, Trading, and Twitter instances inside
        # the callback to create separate httplib2 instances per thread.
        analysis = Analysis(logs_to_cloud=LOGS_TO_CLOUD)
        logs = Logs(name="main-callback", to_cloud=LOGS_TO_CLOUD)

        # Analyze the tweet.
        companies = analysis.find_companies(tweet)
        logs.info("Using companies: %s" % companies)
        if not companies:
            return

        # Trade stocks.
        trading = Trading(logs_to_cloud=LOGS_TO_CLOUD)
        trading.make_trades(companies)

        # Tweet about it.
        twitter = Twitter(logs_to_cloud=LOGS_TO_CLOUD)
        twitter.tweet(companies, tweet)
Example #40
def main():

	papers = read_file(sys.argv[1])
	analysis = Analysis()

	column_labels = papers[0]
	data = papers[1:]

	write_json('confCount.json',analysis.per_conf_crawler_count(data))
	write_json('confUserInt.json',analysis.per_conf_user_interactions_count(data))
	write_json('yearCount.json',analysis.per_year_crawler_count(data))
	write_json('confYear.json',analysis.per_conf_year_count(data))
	write_json('confUndefined.json',analysis.per_conf_undefined_count(data))
	write_json('catCount.json',analysis.per_category_crawler_count(data))
Example #41
 def read_adc_channels(self, file, directory, nodeId, outputname, outputdir,
                       n_readings):
     """Start actual CANopen communication
     This function contains an endless loop in which it is looped over all
     ADC channels. Each value is read using
     :meth:`read_sdo_can_thread` and written to its corresponding
     """
     dev = AnalysisUtils().open_yaml_file(file=file, directory=directory)
     # yaml file is needed to get the object dictionary items
     dictionary_items = dev["Application"]["index_items"]
     _adc_channels_reg = dev["adc_channels_reg"]["adc_channels"]
     _adc_index = list(dev["adc_channels_reg"]["adc_index"])[0]
     _channelItems = [int(channel) for channel in list(_adc_channels_reg)]
     # Write header to the data
     out_file_csv = AnalysisUtils().open_csv_file(outname=outputname,
                                                  directory=outputdir)
     fieldnames = [
         'Time', 'Channel', "nodeId", "ADCChannel", "ADCData",
         "ADCDataConverted"
     ]
     writer = csv.DictWriter(out_file_csv, fieldnames=fieldnames)
     writer.writeheader()
     csv_writer = csv.writer(
         out_file_csv
     )  # , delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
     monitoringTime = time.time()
     for point in np.arange(0, n_readings):
         # Read ADC channels
         pbar = tqdm(total=len(_channelItems) + 1,
                     desc="ADC channels",
                     iterable=True)
         for c in np.arange(len(_channelItems)):
             channel = _channelItems[c]
             subindex = channel - 2
             data_point = self.read_sdo_can(nodeId, int(_adc_index, 16),
                                            subindex, 1000)
             ts = time.time()
             elapsedtime = ts - monitoringTime
             if data_point is not None:
                 adc_converted = Analysis().adc_conversion(
                     _adc_channels_reg[str(channel)], data_point)
                 adc_converted = round(adc_converted, 3)
                 csv_writer.writerow(
                     (str(round(elapsedtime,
                                1)), str(self.get_channel()), str(nodeId),
                      str(subindex), str(data_point), str(adc_converted)))
                 self.logger.info(
                     f'Got data for channel {channel}: = {adc_converted}')
             pbar.update(1)
         pbar.close()
     self.logger.notice("ADC data are saved to %s%s" %
                        (outputdir, outputname))
Example #42
def update_output(contents, filename):
    if contents is not None:
        success = Analysis(filename)
        if (success):
            return {
                'display': 'none'
            }, 'Result is ready', "", {
                'display': 'block'
            }, {
                'display': 'none'
            }
        else:
            return {'display': 'block'}, 'Analysis failed', "", {}, {}
Example #43
    def twitter_callback(self, tweet):        
        """Analyzes Trump tweets, trades stocks, and tweets about it."""

        # save the tweet
        alltweets = []
        screen_name = "realDonaldTrump"
        toList(tweet, alltweets)  
        writeToFile(alltweets, screen_name)
        writeToDB(alltweets, screen_name)

        # Initialize the Analysis, Logs, Trading, and Twitter instances inside
        # the callback to create separate httplib2 instances per thread.
        analysis = Analysis()
        logs = Logs(name="main-callback")
        self.logs.info("twitter_callback starts") 
        
        #Analyze the tweet.
        companies = analysis.find_companies(tweet)
        
        logs.info("Using companies: %s" % companies)
        if not companies:
             return
Example #44
def main():

    # Step 1: read the substrate network and the virtual network request files
    network_files_dir = 'networks/'
    sub_filename = 'subts.txt'
    sub = Substrate(network_files_dir, sub_filename)
    event_queue1 = simulate_events_one('VNRequest/', 2000)
    # event_queue1 = simulate_events_one('Mine//', 2000)

    # Step 2: choose the mapping algorithm
    algorithm = 'RLN'
    arg = 10

    # Step 3: handle the virtual network request events
    start = time.time()
    sub.handle(event_queue1, algorithm, arg)
    time_cost = time.time() - start
    print(time_cost)

    # Step 4: write the mapping result file
    tool = Analysis()
    tool.save_result(sub, '%s-VNE-0410-%s-cacln-1.txt' % (algorithm, arg))
Example #45
    def update_and_save(self):

        if not self.end:

            if (self.iterations % self.conv.update_step == 0):
                self.anly = Analysis(self.sys_para, self.tfs.final_state,
                                     self.tfs.ops_weight,
                                     self.tfs.unitary_scale,
                                     self.tfs.inter_vecs)
                self.save_data()
                self.display()
            if (self.iterations % self.conv.evol_save_step == 0):
                if not (self.sys_para.show_plots == True and
                        (self.iterations % self.conv.update_step == 0)):
                    self.anly = Analysis(self.sys_para, self.tfs.final_state,
                                         self.tfs.ops_weight,
                                         self.tfs.unitary_scale,
                                         self.tfs.inter_vecs)
                    if not (self.iterations % self.conv.update_step == 0):
                        self.save_data()
                    self.conv.save_evol(self.anly)

            self.iterations += 1
Example #46
def main():

    if DEBUG:
        logging.basicConfig(
            stream=sys.stdout,
            level=logging.INFO,
            format=
            '[%(asctime)s] {%(filename)s:%(lineno)d} %(levelname)s - %(message)s',
            datefmt="%H:%M:%S")

    parser = argparse.ArgumentParser()
    parser.add_argument('--task', type=lambda x: is_dir(x))
    parser.add_argument(
        '--test_types',
        nargs="+",
        choices=['first_match', 'all_matches', 'consecutive_matches'])
    parser.add_argument('--log_files', nargs='+', type=argparse.FileType())
    parser.set_defaults(
        test_types=['first_match', 'all_matches', 'consecutive_matches'])

    args = parser.parse_args()

    if args.log_files:
        logging.info('starting analysis')

        Analysis(files=args.log_files).analyze_logs()

        logging.info('finished analysis')
    else:
        logging.info('starting collection')

        Collector(args.task).collect()

        logging.info('finished collection')
        logging.info('starting analysis')

        Analysis(logs_dir=DEFAULT_LOG_FILE_DIR).analyze_logs()
Example #47
def n_noise_plots(alg_list, noises, n_trials=10, num_players=10, ranking=Copeland, horizon=100, ds=NormalGenDataset):
	alg_names = [str(alg(0, ranking)) for alg in alg_list]
	print('alg_names', alg_names)
	n_trials = [n_trials] * len(noises)
        
	cum_regs = np.zeros((len(noises), len(alg_list)))
	for noise in noises:
		analysis = Analysis(alg_list, dataset=ds, name=str(ds(0,0))+'_noise'+str(noise)+'_Copeland', ranking_procedure=ranking, noise=noise, n_trials = n_trials[noises.index(noise)], n_players=num_players, horizon=horizon)
		cum_reg = analysis.get_cum_regret_n(t=horizon-1)
		cum_regs[noises.index(noise),:] = cum_reg
		print('cum_reg', cum_regs)
		with open('noises_test_'+str(noises.index(noise))+'.txt','w') as f:
			f.write(str(cum_regs.tolist()))
		f.close()

	if ds == NormalGenDataset:
		ds_label = "Dataset Generated by Normal"
	elif ds == GenDataset:
		ds_label = "Dataset Generated by Bernoulli"
	else:
		raise ValueError("Unrecognized dataset")
	plot_n_cumregrets(cum_regs, alg_names, noises, ds_label=ds_label)
	plot_n_cumregrets(cum_regs, alg_names, noises, axis='log', ds_label=ds_label)
	plot_n_cumregrets(cum_regs, alg_names, noises, axis='loglog', ds_label=ds_label)
Example #48
    def chart_delegate(self, dt):
        try:
            analysis = self.screen_manager.get_screen('Charts')
        except ScreenManagerException:
            analysis = Analysis(self.screen_manager,
                                name='Charts',
                                updated=self.updated)
            # analysis.add_widgets()
            self.screen_manager.add_widget(analysis)
        if len(tryout.product_dict) > 0:
            self.screen_manager.current = 'Charts'
        else:
            self.no_data_popup.open()

        self.processing = False
Example #49
def main():
    parser = argparse.ArgumentParser(description='')

    # Add the arguments
    parser.add_argument("-p",
                        "--path",
                        help="Specify a particular data frame",
                        required=True)

    parser.add_argument("-c",
                        "--column",
                        help="Specify a particular column",
                        required=True)

    parser.add_argument("-d",
                        "--day",
                        help="Specify time window day",
                        action="store_true")

    parser.add_argument("-m",
                        "--month",
                        help="Specify time window month",
                        action="store_false")

    parser.add_argument("-w",
                        "--week",
                        help="Specify time window week",
                        action="store_true")

    args = parser.parse_args()

    file = args.path
    if not os.path.isfile(file):
        print('The path specified does not exist or is not a file')
        sys.exit()
        # TODO Check File type ?

    col = args.column
    day = args.day
    month = args.month
    week = args.week

    df = _get_df(file)

    Analysis(df, col, day, month, week).generate_visuals_and_stats()
Example #50
def check_server(address, date):
    # create client side socket
    checker = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

    # create the proper request string with a helper function
    request = Analysis.date_to_request(date)

    # inside a try in case the connection fails the exception can be handled differently to prevent data loss
    # its not necessary currently but could be if things are added to it
    try:
        # connect to server and send request
        checker.connect(address)
        checker.send(request.encode(encoding='utf-8'))

        json_data = checker.recv(10000).decode(encoding='utf-8')  # get response from server and decode

        return pandas.read_json(json_data)  # return
    finally:
        checker.close()  # close connection no matter what
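A hedged usage sketch, assuming the companion server from this project is listening locally on port 9999 (both address values are placeholders):

import datetime

recent_frame = check_server(('127.0.0.1', 9999), datetime.datetime.now())
print(recent_frame.head())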
Example #51
 def print_general_info(self):
     if self.data is None:
         Analysis.print_general_info(self)
     else:
         Analysis.print_general_info_with_data(self, self.data)
Example #52
    if t == 0:
        plotter.plot_state_actions(mdp.pi, rewards=rewards, sinks=sinks,
                filename='comparisons/boost_dt_comparisons/boost_dt_classic_il_state_action.png')        

    classic_il_data[t,:] = classic_il_r
    value_iter_data[t,:] = value_iter_r
    classic_il_acc[t,:] = acc





#DAGGER

dagger_data = np.zeros((TRIALS, ITER))
dagger_analysis = Analysis(H, W, ITER, rewards = grid.reward_states, sinks=grid.sink_states, desc="Dagger's policy progression")
dagger_acc = np.zeros((TRIALS, ITER))
for t in range(TRIALS):
    print "DAgger Trial: " + str(t)
    mdp.load_policy()
    dagger = SVMDagger(grid, mdp)
    dagger.svm.nonlinear=False
    dagger.rollout()
    r = np.zeros(ITER)
    acc = np.zeros(ITER)
    for _ in range(ITER):
        print "     Iteration: " + str(_)
        print "     Retraining with " + str(len(dagger.net.data)) + " examples"
        dagger.retrain()
        acc[_] = dagger.svm.acc()
        iteration_states = []
Example #53
def get_tweet_text(tweet_id):
    """Looks up the text for a single tweet."""

    tweet = get_tweet(tweet_id)
    analysis = Analysis(logs_to_cloud=False)
    return analysis.get_expanded_text(tweet)
Example #54
# ====================================
# PROCESS DATA WITH ROOT
# ====================================
# switch off printing the canvas for the save mode
if args.save:
    gROOT.SetBatch(1)

# start class instance
do_histo = False
if not do_histo:
    z = RootGraphs(x, run_mode, args.number)
    z.init_loop()
else:
    z = RootGraphs(x, run_mode, args.number)
    test = Analysis(x, run_mode, args.number)
    test.main_loop()


# ====================================
# UPDATE LOOP
# ====================================
if loop_mode:
    signal.signal(signal.SIGINT, signal_handler)
    print "\nStarting loop-mode!"
    while True:
        print "\rlast update:", strftime("%H:%M:%S", localtime()),
        sys.stdout.flush()
        x.update_data()
        z.refresh_graphs()
        z.make_margins()
Example #55
class OSC_engine:
	def __init__(self,layer_ratio = 111,send_all = False, frontload = False): 
		self.threads=[]
		# Init OSC
		self.client = OSC.OSCClient()
		self.client.connect(('127.0.0.1', 5002)) # first argument is the IP of the host, second argument is the port to use
		self.data = None
		self.track_time = float(layer_ratio[0])
		self.calo_time = float(layer_ratio[1])
		self.rpc_time = float(layer_ratio[2])
		#self.track_time = random.uniform(1,2)
		#self.calo_time = random.uniform(1,2)
		#self.rpc_time = random.uniform(1,2)
		self.t0 = time.time() 
		self.sendall = send_all
		self.frontload = frontload
		
	def set_data(self,ev,ev_id,beats,geo,seconds,unif,poly): 
		event = Event(ev)
		self.event_id = ev_id
		#print "Muons: " +str(event.muons)
		self.anal = Analysis(event,beats,geo,seconds,unif,poly)
		self.data = self.anal.event
		self.unif = unif
		
	def pkg_msg(self, data, addr): 
		oscmsg = OSC.OSCMessage(address="/%d%s" % (self.event_id, addr))
		#oscmsg.setAddress(address)
		oscmsg.append(data)
		return oscmsg
	
	def pkg_str(self, data, address): 
		oscmsg = OSC.OSCMessage('s')
		oscmsg.setAddress(address)
		oscmsg.append(data)
		return oscmsg
			
	def osc_scan_send(self,data,addr,delay): #sends data at a rate determined by geometric coordinate. NOTE: if I switch to bundle/thread method, can get rid of eta as parameter
		oscmsg = OSC.OSCMessage(address="/%d%s" % (self.event_id, addr))
		for a in range(len(data)): 
			oscmsg.append(data[a])
			self.client.send(oscmsg)
			oscmsg.clearData()
			if not self.sendall: 
				time.sleep(delay[a])
		return True
	
	def osc_scan_sendall(self,datasets,addr,delay): #sends data as single message per physics object 
		oscmsg = OSC.OSCMessage(address="/%d%s" % (self.event_id, addr))
		count = len(datasets[0])
		time.sleep(delay[0])
		for a in range(count): 
			for dataset in datasets: 
				oscmsg.append(dataset[a])
			self.client.send(oscmsg) 
			oscmsg.clearData()
			if not self.sendall: 
				time.sleep(delay[a+1])
		return True
				
			
	
		#This is the final delay step between the last particle hit and the edge of the detector
		#time.sleep(delay[len(delay)-1]) 
	
	def osc_bnd_send(self,oscmsgs): 
		oscbnd = OSC.OSCBundle()
		for oscmsg in oscmsgs: 
			oscbnd.append(oscmsg) 
		self.client.send(oscbnd) 
		
	def new_scan_thread(self,data, address,delay):
		thread=Thread(target=self.osc_scan_send, args=(data,address,delay))
		self.threads.append(thread)
		thread.start()
	
	def new_scan_all_thread(self,datasets, address,delay):
		thread=Thread(target=self.osc_scan_sendall, args=(datasets,address,delay))
		self.threads.append(thread)
		thread.start()

	def preamble(self):
		#self.client.send(self.pkg_msg(str(self.data.etmiss.getE()),'/EventInfo/etmiss'))
		#self.client.send(self.pkg_msg(self.data.Tracks.getEffectiveSum(),'/EventInfo/effectivesum'))
	
		msg1 = 0
		msg2 = 0
		
		#test_bundles
		if self.data.etmiss:
			msg1 = self.pkg_msg(self.data.etmiss.E, '/EventInfo/etmiss')
		print "effective sum: " + str(self.data.Tracks.getEffectiveSum())
		if self.data.Tracks.getEffectiveSum(): 
			msg2 = self.pkg_msg(self.data.Tracks.getEffectiveSum(),'/EventInfo/effectivesum')
		
		msglast = self.pkg_msg(1.0, '/EventInfo/start')
		if msg1 and msg2: 
			self.osc_bnd_send([msg1,msg2])
		self.client.send(msglast)
		if self.frontload: 
			print "waiting 5 seconds"
			time.sleep(5)

		 
	def run(self):
		print "running non-spatialize"

 		if self.unif: 
			self.anal.wait_for_beat(self.t0) 
		# opted to front-load sending event info 
		print "starting time: " + str(time.time())
		
		self.preamble()
		
		# sending remaining data multithreaded
		
		if self.data.RPC.size(): 
			#print "RPC size: " + str(len(self.data.RPC.getEta()))
			#print "RPC delay size: " + str(len(self.data.RPC.delays))
			RPC_delay=self.data.RPC.delays
			self.new_scan_all_thread([self.data.RPC.getEta(),self.data.RPC.getPhi(),self.data.RPC.getTheta()], '/RPC',RPC_delay)
			 
		if self.data.Tracks.size(): 
			Track_delay = self.data.Tracks.delays
			#print "Track size: " + str(len(self.data.Tracks.getEta()))
			#print "Track delay size: " + str(len(self.data.Tracks.delays))
			self.new_scan_all_thread([self.data.Tracks.getEta(),self.data.Tracks.getPhi(),self.data.Tracks.getE(),self.data.Tracks.getTheta(),self.data.Tracks.getPt()], '/Tracks',Track_delay)


			#print "track E: " + str(self.data.Tracks.getE())
		if self.data.LArHits.size(): 
			LAr_delay=self.data.LArHits.delays
			self.new_scan_all_thread([self.data.LArHits.getEta(),self.data.LArHits.getPhi(),self.data.LArHits.getE(),self.data.LArHits.getTheta()], '/LArHits',LAr_delay)
			
		if self.data.HECHits.size():
			HEC_delay=self.data.HECHits.delays
			self.new_scan_all_thread([self.data.HECHits.getEta(),self.data.HECHits.getPhi(),self.data.HECHits.getE(),self.data.HECHits.getTheta()], '/HECHits',HEC_delay)

		# THREADS #
		for thread in self.threads: 
			thread.join()
		
		self.t0 = time.time()
		
	def run_spatialize(self): 
		print "running spatialize"
		
		if self.unif: 
			self.anal.wait_for_beat(self.t0)

		self.preamble()
		
		if self.data.Tracks.size(): 
			Track_delay = self.data.Tracks.delays
 
 			#single_track_delay = self.data.Tracks.single_track_delay
			
			#Final adjustment of timing based on user preferences 
			Track_delay = [i/self.track_time for i in Track_delay]
			
			#ToDo: 
			#Recalculate delays based on how long each detector segment should be processed for
			
			self.new_scan_all_thread([self.data.Tracks.getEta(),self.data.Tracks.getPhi(),self.data.Tracks.getE(),self.data.Tracks.getTheta(),self.data.Tracks.getPt()], '/Tracks',Track_delay)

			#Code below is to stream polyline associated with track. Removed for now in favor of 
			#only streaming Pt
			
			for thread in self.threads:
				thread.join()
			
		
		self.threads = []
 
		if self.data.LArHits.size(): 
			LAr_delay=self.data.LArHits.delays
			
			#Final adjustment of timing based on user preferences 
			LAr_delay = [i/self.calo_time for i in LAr_delay]
			self.new_scan_all_thread([self.data.LArHits.getEta(),self.data.LArHits.getPhi(),self.data.LArHits.getE(),self.data.LArHits.getTheta()], '/LArHits',LAr_delay)

		if self.data.HECHits.size():
			HEC_delay=self.data.HECHits.delays
			
			#Final adjustment of timing based on user preferences 
			HEC_delay = [i/self.calo_time for i in HEC_delay]
			self.new_scan_all_thread([self.data.HECHits.getEta(),self.data.HECHits.getPhi(),self.data.HECHits.getE(),self.data.HECHits.getTheta()], '/HECHits',HEC_delay)

			for thread in self.threads: 
				thread.join()
				
		self.threads = []	
			
		if self.data.RPC.size(): 
			print self.data.RPC.delays
			RPC_delay=self.data.RPC.delays
			
			#Final adjustment of timing based on user preferences
			RPC_delay = [i/self.rpc_time for i in RPC_delay]
			self.new_scan_all_thread([self.data.RPC.getEta(),self.data.RPC.getPhi(),self.data.RPC.getTheta()], '/RPC',RPC_delay)

		for thread in self.threads: 
			thread.join()
	  
		self.t0 = time.time()
		print "final time: " + str(self.t0)
		
		"""	
		################## For web streaming via socketio

		def sendtoweb(self, data, delay):
			for a in range(len(data)): 
				#time.sleep(self.anal.SECONDS/self.anal.TOTRANGE*delay[a])
				time.sleep(3) 
				socketio.emit('my response', {'data': 'Muon generated event', 'value': data[a]},
				namespace='/music') 
		
		def socket_stream(self, data, delay):
			thread=Thread(target=sendtoweb, args=(data,delay))
			self.webthreads.append(thread) 
			thread.start() 

		##################	
		"""
		return True
Example #56
value_iter_pi = mdp.pi
plotter.plot_state_actions(value_iter_pi, rewards = grid.reward_states, sinks = grid.sink_states,
        filename='comparisons/svm_comparisons/value_iter_state_action.png')


# VALUE ITERATION AND CLASSIC IL
value_iter_data = np.zeros([TRIALS, ITER])
classic_il_data = np.zeros([TRIALS, ITER])
classic_il_acc = np.zeros([TRIALS, ITER])
for t in range(TRIALS):
    mdp.load_policy()
    sup = SVMSupervise(grid, mdp)
    sup.sample_policy()

    value_iter_analysis = Analysis(W, H, ITER, rewards=rewards, sinks=sinks,
            desc='Value iter policy')

    r = 0.0
    for _ in range(ITER * SAMP):
        sup.rollout()
        r = r + sup.get_reward() / (ITER * SAMP)
    print "Value iter reward: " + str(r)
    if t == 0:
        value_iter_analysis.count_states(sup.get_states())
        value_iter_analysis.save_states("comparisons/svm_comparisons/value_iter.png")
        value_iter_analysis.show_states()
    sup.train()

    classic_il_acc[t,:] = np.zeros(ITER) + sup.svm.acc()            

    value_iter_data[t,:] = np.zeros(ITER) + r
Example #57
            date.replace(hour=0, minute=0, second=0)):
        return False

    # The strategy needs to be active.
    if strategy["action"] == "hold":
        return False

    # We need to know the stock price.
    if not strategy["price_at"] or not strategy["price_eod"]:
        return False

    return True


if __name__ == "__main__":
    analysis = Analysis(logs_to_cloud=False)
    trading = Trading(logs_to_cloud=False)
    twitter = Twitter(logs_to_cloud=False)

    # Look up the metadata for the tweets.
    tweets = twitter.get_tweets(SINCE_TWEET_ID)

    events = []
    for tweet in tqdm(tweets):
        event = {}

        timestamp_str = tweet["created_at"]
        timestamp = trading.utc_to_market_time(datetime.strptime(
            timestamp_str, "%a %b %d %H:%M:%S +0000 %Y"))
        text = twitter.get_tweet_text(tweet)
        event["timestamp"] = timestamp
Example #58
def main():
    f = open('try_3.txt','w')
    g = open('accs.txt', 'w')
    g.close()
    task = MarioTask("testbed", initMarioMode = 2)
    task.env.initMarioMode = 2
    task.env.levelDifficulty = 1

    results = [] 
    names = [] 

    with open('type.txt', 'w') as f:
        f.write('ent')
    
    # # #test dagger
    # iterations = 1
    # rounds = 1
    
    iterations = 50
    rounds = 15
    #agent = Dagger(IT,useKMM = False)
    #exp = EpisodicExperiment(task, agent) 
    #T = Tester(agent,exp)
    #dagger_results = T.test(rounds = rounds,iterations = iterations)
    #dagger_data = dagger_results[-1]
    #dagger_results = dagger_results[:-1]
    #results.append(dagger_results)
    #names.append('dagger')
    #pickle.dump(results,open('results.p','wb'))

    #agent = Dagger(IT, useKMM=False)
    #exp = EpisodicExperiment(task, agent)
    #T = Tester(agent, exp)
    #dagger_data, _, acc = T.test(rounds = rounds, iterations = iterations)
    
    agent = Supervise(IT,useKMM = False)
    exp = EpisodicExperiment(task, agent) 
    T = Tester(agent,exp)
    prefix = 'dt-noisy-sup-change-entropy'
    sl_data, sup_data, acc = T.test(rounds = rounds, iterations = iterations, prefix = prefix)

    np.save('./data/' + prefix + '-sup_data.npy', sup_data)
    np.save('./data/' + prefix + '-sl_data.npy', sl_data)
    np.save('./data/' + prefix + '-acc.npy', acc)    
    
    # IPython.embed()

    analysis = Analysis()
    analysis.get_perf(sup_data, range(iterations))
    analysis.get_perf(sl_data, range(iterations))
    analysis.plot(names=['Supervisor', 'Supervised Learning'], label='Reward', filename='./results/' + prefix + '-return_plots.eps')#, ylims=[0, 1600])

    acc_a = Analysis()
    acc_a.get_perf(acc, range(iterations))
    acc_a.plot(names=['Supervised Learning Acc.'], label='Accuracy', filename='./results/' + prefix + '-acc_plots.eps')

    """


    agent = Dagger(IT,useKMM = False)
    exp = EpisodicExperiment(task, agent) 
    T = Tester(agent,exp)
    dagger_data, _, acc = T.test(rounds = rounds, iterations = iterations)

    np.save('./data/dagger_data.npy', dagger_data)
    np.save('./data/acc.npy', acc)    
    
    IPython.embed()

    analysis = Analysis()
    analysis.get_perf(dagger_data, range(iterations))
    analysis.plot(names=['DAgger'], label='Reward', filename='./results/return_plots.eps')

    acc_a = Analysis()
    acc_a.get_perf(acc, range(iterations))
    acc_a.plot(names=['DAgger Acc.'], label='Accuracy', filename='./results/acc_plots.eps')

    """
    
    #agent = Supervise(IT,useKMM = False)
    #exp = EpisodicExperiment(task, agent) 
    #T = Tester(agent,exp)
    #supervise_results = T.test(rounds = rounds, iterations = iterations)
    #supervise_data = supervise_results[-1]
    #supervise_results = supervise_results[:-1]
    #results.append(supervise_results)
    #names.append('supervise')
    #pickle.dump(results,open('results.p','wb'))

    #IPython.embed()

    #analysis = Analysis()
    #analysis.get_perf(supervise_data, results[1][5])
    #analysis.get_perf(dagger_data, results[0][5])
    #analysis.plot(names=['Supervise', 'DAgger'], label='Reward', filename='./return_plot.eps')#, ylims=[-1, 0])




    # agent = Sheath(IT,useKMM = False,sigma = 1.0)
    # exp = EpisodicExperiment(task, agent) 
    # T = Tester(agent,exp)
    # dagger_results = T.test(rounds = 10,iterations = 35)
    # results.append(dagger_results)
    # names.append('sheath_1')
    # pickle.dump(results,open('results.p','wb'))

    # agent = Sheath(IT,useKMM = False,sigma = 1e-1)
    # exp = EpisodicExperiment(task, agent) 
    # T = Tester(agent,exp)
    # dagger_results = T.test(rounds = 10,iterations = 35)
    # results.append(dagger_results)
    # names.append('sheath_1')
    # pickle.dump(results,open('results.p','wb'))


    
    # agent = Sheath(IT,useKMM = False,sigma = 0.5)
    # exp = EpisodicExperiment(task, agent) 
    # T = Tester(agent,exp)
    # dagger_results = T.test(rounds = 10,iterations = 35)
    # results.append(dagger_results)
    # names.append('sheath_1')

    # pickle.dump(results,open('results.p','wb'))
    # agent = Sheath(IT,useKMM = False,sigma = 1e-1)
    # exp = EpisodicExperiment(task, agent) 
    # T = Tester(agent,exp)
    # dagger_results = T.test(rounds = 4,iterations = 35)
    # results.append(dagger_results)
    # names.append('sheath_1')
    

    # agent = Sheath(IT,useKMM = False,sigma = 1e-2)
    # exp = EpisodicExperiment(task, agent) 
    # T = Tester(agent,exp)
    # dagger_results = T.test(rounds = 4,iterations = 35)
    # results.append(dagger_results)
    # names.append('sheath_1')
    # # # # # #test big ahude
    # agent = Ahude(IT,f,gamma = 1e-2,labelState = True, useKMM = True)
    # exp = EpisodicExperiment(task, agent) 
    # T = Tester(agent,exp)
    # ahude_big_results = T.test(rounds = 3)
    # results.append(ahude_big_results)
    # names.append('ahude_1e-1')

    # pickle.dump(results,open('results.p','wb'))


    # # # # # #test med ahude
    # agent = Ahude(IT,f,gamma = 1e-2,labelState = False,useKMM = True)
    # exp = EpisodicExperiment(task, agent) 
    # T = Tester(agent,exp)
    # ahude_med_results = T.test(rounds = 3)
    # results.append(ahude_med_results)
    # names.append('ahude_1e-2')
    
    # # #

    # # # # # # #test small ahude 
    # agent = Ahude(IT,f,gamma = 1e-3)
    # exp = EpisodicExperiment(task, agent) 
    # T = Tester(agent,exp)
    # ahude_small_results = T.test() 
    # results.append(ahude_small_results)
    # names.append('ahude_1e-3')
    
 
    # pickle.dump(results,open('results.p','wb'))

    #plt.figure(1)
    #for i in range(len(results)):
    #    plt.plot(results[i][5],results[i][1])
    
    
    #plt.legend(names,loc='upper left')

    # plt.figure(2)
    # for i in range(len(results)):
    #     plt.plot(results[i][0])

    # plt.legend(names,loc='upper left')

    # plt.figure(3)
    # for i in range(0,len(results)):
    #     plt.plot(results[i][3])

    # plt.legend(names,loc='upper left')


    plt.show()
    
    # IPython.embed()
    f.close()           
       

    #agent.saveModel()
    print "finished"