Example #1
def main(argv = sys.argv[1:]):
    args = parser.parse_args(argv)
    if args.statFiles == None:
        print 'Filenames missing.'
        sys.exit(-1)

    aucData = []
    aucLabels = []
    for statFile in args.statFiles:
        s = Statistics()
        s.load(statFile)
        avgRecall = [float(x)/s.problems for x in s.recallData]
        figure('Recall')
        plot(range(s.cutOff),avgRecall,label=statFile)
        legend(loc='lower right')
        ylabel('Average Recall')
        xlabel('Highest ranked premises')
        axis([0,s.cutOff,0.0,1.0])
        figure('100%Recall')
        plot(range(s.cutOff),s.recall100Data,label=statFile)
        legend(loc='lower right')
        ylabel('100%Recall')
        xlabel('Highest ranked premises')
        axis([0,s.cutOff,0,s.problems])
        aucData.append(s.aucData)
        aucLabels.append(statFile)
    figure('AUC Histogram')
    hist(aucData,bins=args.bins,label=aucLabels,histtype='bar')
    legend(loc='upper left')
    ylabel('Problems')
    xlabel('AUC')

    show()
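
Note: this snippet leans on names defined elsewhere in its module. A minimal sketch of what it appears to assume, namely pylab-style plotting helpers and a module-level argparse parser (only the option names appear in the snippet; the defaults and description below are illustrative assumptions, and the Statistics class is imported from elsewhere in the project):

import sys
from argparse import ArgumentParser
from matplotlib.pylab import figure, plot, legend, ylabel, xlabel, axis, hist, show  # pylab re-exports the pyplot helpers used above

parser = ArgumentParser(description='Plot statistics files.')            # hypothetical description
parser.add_argument('--statFiles', nargs='+', help='statistics files')   # option name taken from the snippet
parser.add_argument('--bins', type=int, default=50)                      # default value is an assumption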
Example #2
def calc_statistics(args, app=None):
    if not app:
        app = create_app(config_override=CONFIG)
    with app.app_context():
        from stats import Statistics
        statistics = Statistics()
        statistics.calc_all()
        db.session.commit()
Example #3
def diff(db, trace, **kwargs):
    doopconn = doop.Connector(db)
    probe = gxl.Probe(trace)

    # Get statistics from a doop static analysis
    static = Statistics.from_doop(doopconn)

    # Get dynamic analysis statistics from a gxl trace
    dynamic = Statistics.from_trace(probe, doopconn)

    # Compute dynamic \ static
    diff = Statistics.difference(dynamic, static)

    # Construct a transformation chain
    diffchain = [(diff, 'Default')]

    def add_filter(refinement, msg):
        last = diffchain[-1][0]
        diffchain.append((refinement(last), msg))
        refinement.report()

    # filter false positive calls to <clinit>
    add_filter(clinit.ClassInitRefinement(doopconn), '<clinit>')

    # filter native methods
    add_filter(native.Refinement(doopconn), 'No native')

    # filter calls to java.lang.ClassLoader methods
    add_filter(classloader.ClassLoaderRefinement(doopconn), 'No ClassLoader methods')

    # Compute (dynamic \ static) \ synthetic
    classpath = kwargs.get('cp', None)

    if classpath:
        add_filter(synthetic.Refinement(classpath), 'No synthetic')

    # Statically unknown classes / methods
    add_filter(reflect.NoFactsRefinement(doopconn), 'No facts')

    # Prune call-edges with unreachable origin
    add_filter(unreachable.Refinement(doopconn), 'Unreachable origin')
    
    nSteps = len(diffchain)
    steps  = range(1, 1 + nSteps)

    for ((diff, msg), step) in zip(diffchain, steps):
        print "\n--- {0}. Step {1}/{2} ---\n".format(msg, step, nSteps)
        display(static, dynamic, diff)

    for tp in ('a2a', 'a2l', 'l2a', 'l2l'):
        if tp in kwargs.get('toprint', []):
            print '\nPrinting {0} edges...'.format(tp)
            for (s,t) in getattr(diffchain[-1][0], tp):
                print s, '===>', t

    return static, dynamic, diffchain
Example #4
    def __init__(self):
        self.__match_maker = MatchMaker()
        self.__stats = Statistics()
        self.__app = QtWidgets.QApplication(sys.argv)
        self.__MainWindow = QtWidgets.QMainWindow()
        super().__init__()
        super().setupUi(self.__MainWindow)
        self.__init_logic()
        self.__display_round()
        self.__display_stats()
Example #5
File: train.py Project: yyht/daga
def validate(model, val_iter):
    """Validate with mini-batches."""
    model.eval()
    batch_stats = Statistics()
    with torch.no_grad():
        for batch in val_iter:
            sents = batch.sent
            _, stats = model(sents)
            batch_stats.update(stats)
            torch.cuda.empty_cache()
    return batch_stats
Example #6
    def initPages(self):
        self.mode_stack = QtGui.QStackedWidget()

        main_menu = MainMenu(self)  #0
        self.stats = Statistics(self)  #1
        self.trainer = Trainer(self, self.stats)  #2
        self.generator = Generator(self)  #3

        self.mode_stack.addWidget(main_menu)
        self.mode_stack.addWidget(self.stats)
        self.mode_stack.addWidget(self.trainer)
        self.mode_stack.addWidget(self.generator)

        self.setCentralWidget(self.mode_stack)
Example #7
    def print_stat_comparison(self):
        real_orders, _, _ = DataLoader.load_split_data(self.config.real_root, self.sampling_window_start_time,
                                                       self.sampling_window_end_time, self.config.product)
        sim_orders = self.sim_analysis.all_sims[0][0].compute()

        real_stats = Statistics.get_order_stats(real_orders)
        sim_stats = Statistics.get_order_stats(sim_orders)

        print_str = ""

        for k in real_stats.keys():
            print_str += k + "\t\t\t\tReal: " + str(real_stats[k]) + "\t\t\tSim: " + str(sim_stats[k]) + "\n"

        print(print_str)
Example #8
    def test_from_many_sources(self):

        # create one mutation which is present in multiple sources
        m = models.Mutation()
        metadata_1 = models.InheritedMutation(mutation=m)
        metadata_2 = models.MC3Mutation(mutation=m)
        db.session.add_all([metadata_1, metadata_2])

        from stats import Statistics
        statistics = Statistics()

        in_many_sources = statistics.from_more_than_one_source()

        assert in_many_sources == 1
Example #9
    def get_relative_prices(trades_df: dd, other_df: dd) -> dd:
        price_over_time: dd = Statistics().get_price_over_time(
            trades_df).groupby(
                ['time'])['most_recent_trade_price'].mean().to_frame()
        other_df = DataUtils().fuzzy_join(other_df, price_over_time, on='time')
        relative_prices = other_df['relative_price']
        relative_prices = DataUtils().remove_tails(relative_prices, 3)
        return relative_prices
Example #10
    def test_hurst_windowed(self):
        day = 17
        product = "LTC-USD"
        for i in range(0, 1):
            # day += 1
            month = 5
            st = datetime.datetime(2018, month, day, 0, 0, 0)
            et = datetime.datetime(2018, month, day, 23, 59, 59)
            _, trades, _ = DataLoader.load_split_data("/Users/jamesprince/project-data/data/consolidated-feed/"
                                                      + product + "/",
                                                      st,
                                                      et, product)

            window_minutes = 10
            step_minutes = 10
            times, hurst_exps = Statistics.get_hurst_exponent_over_time(trades, st, et, step_minutes, window_minutes)
            Statistics.plot_metric_daily(times, hurst_exps, product, st, step_minutes, window_minutes, "Hurst Exponent")
Example #11
def start_simulation_sim_time(no_of_jobs_server_1, no_of_jobs_server_2, arrival_rate, job_distribution, sim_time):
    list_of_servers = []
    for _ in range(0,2): # Create 2 servers
        scheduler_i = FIFO()
        job_size_i = Expo(job_distribution)
        server_i = Server(job_size_i, scheduler_i)
        list_of_servers.append(server_i)
    policy_i = ShortestQueue() #Join shortest queue policy
    dispatcher = Dispatcher(policy_i, list_of_servers) #Create a dispatcher with JSQ policy
    statistics = Statistics()
    world = Global(statistics)
    p_arrivals = PoissonArrival(float(arrival_rate))

    #For each job in server 1 we:
    #Create a job with arrival at time 0, then increase # of jobs in the server.
    #We get time the job enters service, either immediately or after the total processing done.
    #Set the enter service and service time needed.
    #Next calculate departure time and set it, then we can schedule the departure.
    server_1_processing_time = 0
    #Forloops used to create a given # of jobs before starting sim
    for job in range(0, int(no_of_jobs_server_1)):
        job = Job(0) #All arrive at time 0
        list_of_servers[0]._total_jobs +=1
        enter_service = max(list_of_servers[0]._total_processing_time, 0) #Takes processing time (time when server is idle) and the time of arrival. Max of those is enter service for this job
        job.set_enter_service(enter_service) #Setting the enter service, first is 0, next is 0+service_1, next is 0+service_1 + service_2.. ...
        job.set_service(list_of_servers[0].get_service_time()) #Generates service time AND increases processing time for server
        departure_time = job._enter_service + job._service_time # Enter service will be correct because of processing time of server bookkeeping
        job.set_departure(departure_time) #Set the departure time as enter service+service time. (Its a timeperiod on the timeline)
        world.schedule_event(list_of_servers[0].departure, job._departure_time, [job, world]) #Schedule this departure to the world queue
    for job in range(0, int(no_of_jobs_server_2)):
        job = Job(0)
        list_of_servers[1]._total_jobs +=1
        enter_service = max(list_of_servers[1]._total_processing_time, 0)
        job.set_enter_service(enter_service)
        job.set_service(list_of_servers[1].get_service_time())
        departure_time = job._enter_service + job._service_time
        job.set_departure(departure_time)
        world.schedule_event(list_of_servers[1].departure, job._departure_time, [job, world])
    
    initial_arrival = random.expovariate(p_arrivals._rate)
    params = [dispatcher, world]
    world.schedule_event(p_arrivals.generate_arrival, initial_arrival, params) #Schedule first arrival to start chain

    world.number_of_arr_dep = 0 #resetting the number of events before we start
    # Now we need to schedule the initial arrivals to start the chain of events.
    for x in range(1, 11):
        # Now that each dispatcher has an arrival, we can start looping through events
        while world.next_event() <= float(sim_time)*(x*0.1): # while the virtual time of next event is less than our simulation time..
            world.process_event() # We take the event and process it (running the function(s))
        print("{}%".format(x*10))
    #for loop to step between while loops (every 10%)while world.next

    total_no_jobs = world._stats.number_of_jobs
    print(total_no_jobs)
    world._stats.print_stats()
Example #12
def main():
    """
    The scanner's entry point.
    """

    stats = Statistics()
    args = parse_cmd_args()

    # Create and set the given directories.

    if args.tor_dir and not os.path.exists(args.tor_dir):
        os.makedirs(args.tor_dir)

    logging.getLogger("stem").setLevel(
        logging.__dict__[args.verbosity.upper()])
    log_format = "%(asctime)s %(name)s [%(levelname)s] %(message)s"
    logging.basicConfig(format=log_format,
                        level=logging.__dict__[args.verbosity.upper()],
                        filename=args.logfile)

    log.debug("Command line arguments: %s" % str(args))

    socks_port, control_port = bootstrap_tor(args)
    controller = Controller.from_port(port=control_port)
    stem.connection.authenticate(controller)

    # Redirect Tor's logging to work around the following problem:
    # https://bugs.torproject.org/9862

    log.debug("Redirecting Tor's logging to /dev/null.")
    controller.set_conf("Log", "err file /dev/null")

    # We already have the current consensus, so we don't need additional
    # descriptors or the streams fetching them.

    controller.set_conf("FetchServerDescriptors", "0")

    cached_consensus_path = os.path.join(args.tor_dir, "cached-consensus")
    if args.first_hop and (not util.relay_in_consensus(args.first_hop,
                                                       cached_consensus_path)):
        log.critical("Given first hop \"%s\" not found in consensus.  Is it"
                     " offline?" % args.first_hop)
        return 1

    for module_name in args.module:

        if args.analysis_dir is not None:
            datestr = time.strftime("%Y-%m-%d_%H:%M:%S%z") + "_" + module_name
            util.analysis_dir = os.path.join(args.analysis_dir, datestr)

        try:
            run_module(module_name, args, controller, socks_port, stats)
        except error.ExitSelectionError as err:
            log.error("Failed to run because : %s" % err)
    return 0
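
A typical way such an entry point is wired up at module level (a sketch, not taken from the project):

import sys

if __name__ == "__main__":
    sys.exit(main())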
Example #13
def main():
    input_file = sys.argv[1]
    output_file = sys.argv[2]

    total_start = time.clock()
    #contours = np.load("contours.npy")
    contours = FloodEdgeDetector.find_contours(input_file)
    print("Contours Time", time.clock() - total_start)
    lines = Statistics.evaluate_contours(output_file, contours)
    print("Overall Time", time.clock() - total_start)
    ImageUtilities.show_contours_in_image(input_file, contours, lines)
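
Note that time.clock() was deprecated in Python 3.3 and removed in 3.8. On current interpreters the equivalent timing would use time.perf_counter(), e.g. (a sketch under that assumption):

total_start = time.perf_counter()
contours = FloodEdgeDetector.find_contours(input_file)
print("Contours Time", time.perf_counter() - total_start)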
Example #14
    def generateChart(self):
        self.graphView.setScrollView(self.scrollView)

        stat = Statistics(self.startDate, self.endDate)

        if self.reportType == "tasks":
            self.graphView.setData(stat.countTasks(), self.reportType)

        elif self.reportType == "projects":
            self.graphView.setData(stat.countProjects(), self.reportType)

        elif self.reportType == "slacking":
            self.graphView.setData(stat.countSlacking(), self.reportType)

        self.graphView.setScale(stat.maxValue)
        self.lblWorkTotal.setStringValue_(secToTimeStr(stat.totalWork))
        self.lblAvgWork.setStringValue_(secToTimeStr(stat.avgWork))
        self.lblSlackTotal.setStringValue_(secToTimeStr(stat.totalSlacking))
        self.lblAvgSlack.setStringValue_(secToTimeStr(stat.avgSlacking))
        self.graphView.setNeedsDisplay_(True)
Example #15
  def init_tlb(self, tlb_lst, tlb_len, tlb_stats, yconfig, data=True):
    for l in range(tlb_len):
      if yconfig[l]['private'] or data:
        if yconfig[l]['type'] == 'fully-associative':
          tlb_lst.append(Tlb(Structure.FULLY_ASSOCIATIVE, yconfig[l]['entry']))
        elif yconfig[l]['type'] == '2-way':
          tlb_lst.append(Tlb(Structure.SET2_ASSOCIATIVE, yconfig[l]['entry']))
        elif yconfig[l]['type'] == '4-way':
          tlb_lst.append(Tlb(Structure.SET4_ASSOCIATIVE, yconfig[l]['entry']))
        else:
          raise NotImplementedError("unimplemented tlb type")

        tlb_stats.append(Statistics())
        tlb_lst[-1].set_stats(tlb_stats[-1])
      else:
        # shared tlb -> let the data and instruction tlbs have
        # the same object.
        tlb_lst.append(self.dtlb[l])
        tlb_stats.append(self.dtlb_stats[l])

    tlb_stats.append(Statistics())
Example #16
    def test_lypaunov_windowed(self):
        st = datetime.datetime(2018, 5, 17, 1, 0, 0)
        et = datetime.datetime(2018, 5, 17, 23, 0, 0)

        conf = configparser.ConfigParser()
        conf.read("../config/backtest.ini")
        config = BacktestConfig(conf)

        ob_seq, ob_state = reconstruct_orderbook(config, st, logging.getLogger("test"))

        orderbook_evo = OrderBookEvolutor(ob_state, st, ob_seq)

        feed_df = DataLoader.load_feed(self.root, st, et, "LTC-USD")
        evo = orderbook_evo.evolve_orderbook_discrete(feed_df, 1)

        window_minutes = 30
        step_minutes = 5
        num_samples = int((et - st).total_seconds() / (step_minutes * 60))

        times = []
        lyap_exps = []

        for i in range(0, num_samples):
            window_st = st + datetime.timedelta(seconds=i * step_minutes * 60)
            window_et = window_st + datetime.timedelta(seconds=window_minutes * 60)

            evo_filt = evo[evo['time'] > window_st]
            evo_filt = evo_filt[evo_filt['time'] < window_et]
            midprices = evo_filt['midprice'].dropna()

            prices = np.asarray(midprices, dtype=np.float32)
            print(prices)

            res = nolds.lyap_e(prices)
            print(res)

            times.append(window_st)
            lyap_exps.append(res[0])

        Statistics.plot_lyapunov_exponent(times, lyap_exps, "LTC-USD", st, step_minutes, window_minutes)
Example #17
    def initPages(self):
        self.mode_stack = QtGui.QStackedWidget()

        main_menu = MainMenu(self) #0
        self.stats = Statistics(self) #1
        self.trainer = Trainer(self,self.stats) #2
        self.generator = Generator(self) #3
        
        self.mode_stack.addWidget(main_menu)
        self.mode_stack.addWidget(self.stats)
        self.mode_stack.addWidget(self.trainer)
        self.mode_stack.addWidget(self.generator)
        
        self.setCentralWidget(self.mode_stack)
Example #18
    def reset_world(first_arrival):
        nonlocal list_of_servers, dispatcher, statistics, world # if we want to modify variable from closure, must place as nonlocal
        list_of_servers.clear() #clear the 2 servers
        for _ in range(0,2): # Create 2 servers
            scheduler_i = FIFO()
            job_size_i = Expo(job_distribution) # use job_distr from closure
            server_i = Server(job_size_i, scheduler_i) # create a new server to place in list
            list_of_servers.append(server_i)
        dispatcher = Dispatcher(policy_i, list_of_servers) # resetting the dispatcher with new servers
        statistics = Statistics()
        world = Global(statistics)
        set_up_servers()
        params = [dispatcher, world]
        world.schedule_event(p_arrivals.generate_arrival, first_arrival, params) #Schedule first arrival to start chain
Example #19
def main():
    """
    The scanner's entry point.
    """

    stats = Statistics()
    args = parse_cmd_args()

    # Create and set the given directories.

    if args.tor_dir and not os.path.exists(args.tor_dir):
        os.makedirs(args.tor_dir)
    if args.analysis_dir and not os.path.exists(args.analysis_dir):
        os.makedirs(args.analysis_dir)
    util.analysis_dir = args.analysis_dir

    logger.setLevel(logging.__dict__[args.verbosity.upper()])

    logger.debug("Command line arguments: %s" % str(args))

    socks_port, control_port = bootstrap_tor(args)
    controller = Controller.from_port(port=control_port)
    stem.connection.authenticate(controller)

    # Redirect Tor's logging to work around the following problem:
    # https://bugs.torproject.org/9862

    logger.debug("Redirecting Tor's logging to /dev/null.")
    controller.set_conf("Log", "err file /dev/null")

    # We already have the current consensus, so we don't need additional
    # descriptors or the streams fetching them.

    controller.set_conf("FetchServerDescriptors", "0")

    cached_consensus_path = os.path.join(args.tor_dir, "cached-consensus")
    if args.first_hop and (not util.relay_in_consensus(args.first_hop,
                                                       cached_consensus_path)):
        raise error.PathSelectionError("Given first hop \"%s\" not found in "
                                       "consensus.  Is it offline?" %
                                       args.first_hop)

    for module_name in args.module:
        try:
            run_module(module_name, args, controller, socks_port, stats)
        except error.ExitSelectionError as err:
            logger.error("failed to run because : %s" % err)
    return 0
Example #20
    def _forward(self, sents):
        src = sents[:-1]
        tgt = sents[1:]
        tgt = tgt.view(-1)

        dec_out, _ = self.decoder(src)
        dec_out = self.dropout(dec_out)
        logit = self.generator(dec_out.view(-1, dec_out.size(2)))
        recon_loss = self.criterion(logit, tgt).sum()
        n_correct, n_words = self._correct(logit, tgt)
        stats = Statistics(
            loss=recon_loss.item(),
            n_correct=n_correct,
            n_words=n_words,
            n_sents=sents.size(1),
        )
        return recon_loss, stats
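
Here Statistics acts as a simple accumulator of loss and token counts that is later combined with update() and queried for reporting (see Examples #22 and #24). A minimal sketch of such a container, written as an assumption about its interface rather than the project's actual class:

import math

class Statistics:
    # Accumulates loss and token counts across batches (hypothetical interface).
    def __init__(self, loss=0.0, n_correct=0, n_words=0, n_sents=0):
        self.loss = loss
        self.n_correct = n_correct
        self.n_words = n_words
        self.n_sents = n_sents

    def update(self, other):
        # Fold another batch's statistics into this accumulator.
        self.loss += other.loss
        self.n_correct += other.n_correct
        self.n_words += other.n_words
        self.n_sents += other.n_sents

    def accuracy(self):
        return 100.0 * self.n_correct / max(self.n_words, 1)

    def ppl(self):
        return math.exp(min(self.loss / max(self.n_words, 1), 100))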
Example #21
    def _decode(self, sents, mu, logvar, beta):
        src = sents[:-1]
        tgt = sents[1:]
        tgt = tgt.view(-1)
        recon_loss, n_correct, n_words = 0.0, 0, 0
        dec_outs = 0.0

        for _ in range(self.num_z_samples):
            z = self._reparameterize(mu, logvar)
            h = self.z2h(z)
            dec_state = self._build_dec_state(h)
            dec_out, _ = self.decoder(src, dec_state, z)
            dec_out = self.dropout(dec_out)
            if self.use_avg:
                dec_outs += dec_out
            else:
                logit = self.generator(dec_out.view(-1, dec_out.size(2)))
                recon_loss += self.criterion(logit, tgt).sum()
                n_correct_, n_words_ = self._correct(logit, tgt)
                n_correct += n_correct_
                n_words += n_words_

        if self.use_avg:
            dec_outs /= self.num_z_samples
            logit = self.generator(dec_outs.view(-1, dec_outs.size(2)))
            recon_loss = self.criterion(logit, tgt).sum()
            n_correct, n_words = self._correct(logit, tgt)
        else:
            recon_loss /= self.num_z_samples
            n_correct /= self.num_z_samples
            n_words /= self.num_z_samples

        kl_loss = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
        loss = recon_loss + (beta * kl_loss)
        stats = Statistics(
            loss.item(),
            recon_loss.item(),
            kl_loss.item(),
            n_correct,
            n_words,
            sents.size(1),
        )
        return loss, stats
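
_reparameterize is presumably the standard VAE reparameterization trick, drawing z = mu + sigma * eps with eps ~ N(0, I) so that sampling stays differentiable in mu and logvar. A minimal sketch in plain PyTorch (an assumption, not the project's code):

def _reparameterize(self, mu, logvar):
    std = torch.exp(0.5 * logvar)  # sigma = exp(log(sigma^2) / 2)
    eps = torch.randn_like(std)    # eps ~ N(0, I)
    return mu + eps * std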
Example #22
File: train.py Project: yyht/daga
def train(model, optimizer, train_iter, epoch, args):
    """Train with mini-batches."""
    model.train()
    total_stats = Statistics()
    batch_stats = Statistics()
    num_batches = len(train_iter)
    for i, batch in enumerate(train_iter):
        if args.warmup > 0:
            args.beta = min(1, args.beta + 1.0 / (args.warmup * num_batches))

        sents = batch.sent
        loss, stats = model(sents, args.beta)
        optimizer.zero_grad()
        loss.backward()
        utils.clip_grad_norm(optimizer, args)
        optimizer.step()
        total_stats.update(stats)
        batch_stats.update(stats)
        batch_stats = report_batch(batch_stats, epoch, i, num_batches, args)
        torch.cuda.empty_cache()
    return total_stats
Example #23
def start_simulation(no_of_servers, server_scheduler, no_of_dispatchers, dispatcher_policy, arrival_rate, job_distribution, sim_time):
    list_of_servers = []
    list_of_dispatchers = []
    for _ in range(0, int(no_of_servers)):
        scheduler_i = FIFO() if server_scheduler == 1 else PS()
        job_size_i = Expo(job_distribution)
        server_i = Server(job_size_i, scheduler_i)
        list_of_servers.append(server_i)
    for _ in range(0, int(no_of_dispatchers)):
        policy_i = RND() if dispatcher_policy == 1 else ShortestQueue()
        dispatcher_i = Dispatcher(policy_i, list_of_servers)
        list_of_dispatchers.append(dispatcher_i)

    statistics = Statistics()
    world = Global(statistics)
    p_arrivals = PoissonArrival(float(arrival_rate)/len(list_of_dispatchers))

    # Loop to generate first arrivals for each dispatcher
    for d in list_of_dispatchers:
        arrival = random.expovariate(p_arrivals._rate)
        params = [d, world]
        world.schedule_event(p_arrivals.generate_arrival, arrival, params) # Schedule the first arrivals for each dispatcher
    
    # Now that each dispatcher has an arrival, we can start looping through events
    for x in range(1, 101):
        # Now that each dispatcher has an arrival, we can start looping through events
        while world.next_event() <= float(sim_time)*(x*0.01): # while the virtual time of next event is less than our simulation time..
            world.process_event() # We take the event and process it (running the function(s))
        world._stats.write_to_file_jobs()
        print("{}%".format(x))
    #for loop to step between while loops (every 10%)while world.next

    total_no_jobs = world._stats.number_of_jobs
    print(total_no_jobs)
    world._stats.print_stats()
    ## Should add an interactive stage here to see if user wants to save data
    save_stats          = input("Do you want to save these stats to file Y/N?").upper()
    if save_stats == 'Y':
        file_name        = input("What name for the file? > ")
        print("Thank you, your stats will be saved in the Simulation_results directory.")
        world._stats.write_to_file_stats(file_name) # write only when the user chose to save (file_name is undefined otherwise)
Example #24
File: train.py Project: yyht/daga
def report_batch(batch_stats, epoch, step, num_batches, args):
    """Report batch statistics."""
    if step % args.report_every == -1 % args.report_every:
        r = batch_stats
        t = r.elapsed_time()
        logger.info(
            "Epoch %3d | %4d/%4d batches | acc %5.2f | "
            "nll %8.2f | kl %6.2f | ppl %8.2f | "
            "beta %4.2f | %.0f tok/s",
            epoch,
            step + 1,
            num_batches,
            r.accuracy(),
            r.nll(),
            r.kl(),
            r.ppl(),
            args.beta,
            r.n_words / (t + 1e-5),
        )
        sys.stdout.flush()
        batch_stats = Statistics()
    return batch_stats
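
The guard `step % args.report_every == -1 % args.report_every` fires on the last step of each reporting window, because in Python -1 % n == n - 1. A quick illustration:

report_every = 5
hits = [step for step in range(12) if step % report_every == -1 % report_every]
print(hits)  # [4, 9] -> every 5th batch, counting from step 0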
Example #25
File: test.py Project: yyht/daga
def report_iw_nll(model, test_iter, n_iw_iter, n_iw_samples):
    """Calculate the importance-weighted estimate of NLL."""
    model.eval()
    loss, n_sents, n_words = 0.0, 0, 0
    with torch.no_grad():
        for batch in tqdm.tqdm(test_iter, total=len(test_iter)):
            sents = batch.sent
            n_sents += sents.size(1)
            for i in range(sents.size(1)):
                sent = sents[:, i]  # get one sentence
                sent = sent.masked_select(sent.ne(model.padding_idx))  # trim pad token
                n_words += sent.size(0) - 1  # skip start symbol
                sent = sent.unsqueeze(1)
                logw = []
                for _ in range(n_iw_iter):
                    logw.extend(model.estimate_log_prob(sent, n_iw_samples))
                logw = torch.cat(logw)
                logp = torch.logsumexp(logw, dim=-1) - math.log(len(logw))
                loss += logp.item()
                torch.cuda.empty_cache()

    return Statistics(loss=-loss, n_words=n_words, n_sents=n_sents)
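
The per-sentence quantity accumulated here is the importance-weighted estimate of the log-likelihood: given the K collected log-weights \log w_k (here K = len(logw)),

\[
\log \hat{p}(x) \;=\; \log \frac{1}{K} \sum_{k=1}^{K} w_k
\;=\; \operatorname{logsumexp}_k\!\big(\log w_k\big) \;-\; \log K ,
\]

which is exactly `torch.logsumexp(logw, dim=-1) - math.log(len(logw))` above.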
Example #26
def main():
    """
    The scanner's entry point.
    """

    stats = Statistics()
    args = parse_cmd_args()

    logger.setLevel(logging.__dict__[args.verbosity.upper()])

    logger.debug("Command line arguments: %s" % str(args))

    bootstrap_tor(args)
    controller = Controller.from_port(port=45679)
    stem.connection.authenticate(controller)

    # Redirect Tor's logging to work around the following problem:
    # https://bugs.torproject.org/9862

    logger.debug("Redirecting Tor's logging to /dev/null.")
    controller.set_conf("Log", "err file /dev/null")

    # We already have the current consensus, so we don't need additional
    # descriptors or the streams fetching them.

    controller.set_conf("FetchServerDescriptors", "0")

    if args.first_hop and \
       (not util.relay_in_consensus(args.first_hop,
                                    util.get_consensus_path(args))):
        raise error.PathSelectionError("Given first hop \"%s\" not found in "
                                       "consensus.  Is it offline?" %
                                       args.first_hop)

    for module_name in args.module:
        run_module(module_name, args, controller, stats)

    return 0
Example #27
def esa_3_server(arr_rate, job_through, no_servers, file_name):
    list_of_servers = init_servers(no_servers, 1, 1)
    list_of_dispatchers = init_dispatchers(1, 2, list_of_servers)
    statistics = Statistics()
    world = Global(statistics)
    p_arrivals = PoissonArrival(float(arr_rate))

    init_first_jobs(world, list_of_dispatchers,
                    p_arrivals)  #Create the first arrival to initiate loop

    #Main loop
    while world.next_event(
    ):  # while the virtual time of next event is less than our simulation time..
        if world._stats.number_of_jobs > job_through:  # If we've reached the desired # of jobs
            data = world._stats.get_mean_sd_sojourn()  #format of 'mean,sd'
            with open(
                    './Simulation_results/esa_' + str(no_servers) +
                    '_subfile_' + str(file_name) + '_server_test2.txt',
                    'a') as myfile:
                myfile.write(data + "\n")
            break
        world.process_event(
        )  # We take the event and process it (running the function(s))
Example #28
    def test_interactions(self):

        from models import Protein, Site, Kinase, KinaseGroup

        p1 = Protein(sites=[
            Site(),
            Site(kinases=[Kinase()], kinase_groups=[KinaseGroup()])
        ])
        db.session.add(p1)
        p2 = Protein(sites=[Site(kinases=[Kinase()])])
        db.session.add(p2)

        u_all_interactions = 0
        u_kinases_covered = set()
        u_kinase_groups_covered = set()
        u_proteins_covered = set()
        for protein in models.Protein.query.all():
            for site in protein.sites:
                kinases = site.kinases
                kinase_groups = site.kinase_groups
                u_all_interactions += len(kinases) + len(kinase_groups)
                u_kinases_covered.update(kinases)
                u_kinase_groups_covered.update(kinase_groups)

                if kinases or kinase_groups:
                    u_proteins_covered.add(protein)

        from stats import Statistics
        statistics = Statistics()
        all_interactions = statistics.interactions()
        kinases_covered = statistics.kinases_covered()
        kinase_groups_covered = statistics.kinase_groups_covered()
        proteins_covered = statistics.proteins_covered()

        assert all_interactions == u_all_interactions
        assert kinases_covered == len(u_kinases_covered)
        assert kinase_groups_covered == len(u_kinase_groups_covered)
        assert proteins_covered == len(u_proteins_covered)
Example #29
    "KeyCode":("character(kind=c_char)", "c_char"),
    "KeySym":("integer(c_long)", "c_long"),
    # enum GWin32OSType
    "GWin32OSType":("integer(c_int)", "c_int")
}

# Two words types:
TYPES2_DICT = {
    "long double": ("real(c_long_double)", "c_long_double"),
    "unsigned long":("integer(c_long)", "c_long"),
    "unsigned short":("integer(c_short)", "c_short"),
    "unsigned int":("integer(c_int)", "c_int")
}

# An instance of the Statistics class:
my_stats = Statistics()

# An instance of the Errors class:
my_errors = Errors()

#*************************************************************************
# Pass 1: scan all header files to find all enum types, all pointers to
# functions (funptr) and add derived GTK types
#*************************************************************************
print("\033[1m Pass 1: looking for enumerators, funptr and derived types...\033[0m")

gtk_types = []

# These lists will be used by the iso_c_binding() function:
gtk_enums = []
gtk_funptr = []
Example #30
def mash(argv = sys.argv[1:]):
    # Initializing command-line arguments
    args = parser.parse_args(argv)
    
    # Set up logging
    logging.basicConfig(level=logging.DEBUG,
                        format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
                        datefmt='%d-%m %H:%M:%S',
                        filename=args.log,
                        filemode='w')    
    logger = logging.getLogger('main.py')
    
    #"""
    # remove old handler for tester
    #logger.root.handlers[0].stream.close()
    logger.root.removeHandler(logger.root.handlers[0])
    file_handler = logging.FileHandler(args.log)
    file_handler.setLevel(logging.DEBUG)
    formatter = logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
    file_handler.setFormatter(formatter)
    logger.root.addHandler(file_handler)
    #"""
    if args.quiet:
        logger.setLevel(logging.WARNING)
        #console.setLevel(logging.WARNING)
    else:
        console = logging.StreamHandler(sys.stdout)
        console.setLevel(logging.INFO)
        formatter = logging.Formatter('# %(message)s')
        console.setFormatter(formatter)
        logging.getLogger('').addHandler(console)
        
    if not os.path.exists(args.outputDir):
        os.makedirs(args.outputDir)

    logger.info('Using the following settings: %s',args)
    # Pick algorithm
    if args.nb:
        logger.info('Using sparse Naive Bayes for learning.')
        model = sparseNBClassifier(args.NBDefaultPriorWeight,args.NBPosWeight,args.NBDefVal)
    elif args.snow:
        logger.info('Using naive bayes (SNoW) for learning.')
        model = SNoW()
    elif args.predef:
        logger.info('Using predefined predictions.')
        model = Predefined(args.predef)
    else:
        logger.info('No algorithm specified. Using sparse Naive Bayes.')
        model = sparseNBClassifier(args.NBDefaultPriorWeight,args.NBPosWeight,args.NBDefVal)

    # Initializing model
    if args.init:
        logger.info('Initializing Model.')
        startTime = time()

        # Load all data
        dicts = Dictionaries()
        dicts.init_all(args)
        
        # Create Model
        trainData = dicts.featureDict.keys()
        model.initializeModel(trainData,dicts)

        if args.learnTheories:
            depFile = os.path.join(args.inputDir,args.depFile)
            theoryModels = TheoryModels(args.theoryDefValPos,args.theoryDefValNeg,args.theoryPosWeight)
            theoryModels.init(depFile,dicts)
            theoryModels.save(args.theoryFile)
            
        model.save(args.modelFile)
        dicts.save(args.dictsFile)

        logger.info('All Done. %s seconds needed.',round(time()-startTime,2))
        return 0
    # Create predictions and/or update model
    else:
        lineCounter = 1
        statementCounter = 1
        computeStats = False
        dicts = Dictionaries()
        theoryModels = TheoryModels(args.theoryDefValPos,args.theoryDefValNeg,args.theoryPosWeight)
        # Load Files
        if os.path.isfile(args.dictsFile):
            #logger.info('Loading Dictionaries')
            #startTime = time()
            dicts.load(args.dictsFile)            
            #logger.info('Done %s',time()-startTime)
        if os.path.isfile(args.modelFile):
            #logger.info('Loading Model')
            #startTime = time()
            model.load(args.modelFile)            
            #logger.info('Done %s',time()-startTime)
        if os.path.isfile(args.theoryFile) and args.learnTheories:
            #logger.info('Loading Theory Models')
            #startTime = time()
            theoryModels.load(args.theoryFile)
            #logger.info('Done %s',time()-startTime)
        logger.info('All loading completed')

        # IO Streams
        OS = open(args.predictions,'w')
        IS = open(args.inputFile,'r')

        # Statistics
        if args.statistics:
            stats = Statistics(args.cutOff)
            if args.learnTheories:
                theoryStats = TheoryStatistics()

        predictions = None
        predictedTheories = None
        #Reading Input File
        for line in IS:
#           try:
            if True:
                if line.startswith('!'):
                    problemId = dicts.parse_fact(line)    
                    # Statistics
                    if args.statistics and computeStats:
                        computeStats = False
                        # Assume '!' comes after '?'
                        if args.predef:
                            predictions = model.predict(problemId)
                        if args.learnTheories:
                            tmp = [dicts.idNameDict[x] for x in dicts.dependenciesDict[problemId]]
                            usedTheories = set([x.split('.')[0] for x in tmp]) 
                            theoryStats.update((dicts.idNameDict[problemId]).split('.')[0],predictedTheories,usedTheories,len(theoryModels.accessibleTheories))                        
                        stats.update(predictions,dicts.dependenciesDict[problemId],statementCounter)
                        if not stats.badPreds == []:
                            bp = string.join([str(dicts.idNameDict[x]) for x in stats.badPreds], ',')
                            logger.debug('Bad predictions: %s',bp)

                    statementCounter += 1
                    # Update Dependencies, p proves p
                    dicts.dependenciesDict[problemId] = [problemId]+dicts.dependenciesDict[problemId]
                    if args.learnTheories:
                        theoryModels.update(problemId,dicts.featureDict[problemId],dicts.dependenciesDict[problemId],dicts)
                    if args.snow:
                        model.update(problemId,dicts.featureDict[problemId],dicts.dependenciesDict[problemId],dicts)
                    else:
                        model.update(problemId,dicts.featureDict[problemId],dicts.dependenciesDict[problemId])
                elif line.startswith('p'):
                    # Overwrite old proof.
                    problemId,newDependencies = dicts.parse_overwrite(line)
                    newDependencies = [problemId]+newDependencies
                    model.overwrite(problemId,newDependencies,dicts)
                    if args.learnTheories:
                        theoryModels.overwrite(problemId,newDependencies,dicts)
                    dicts.dependenciesDict[problemId] = newDependencies
                elif line.startswith('?'):               
                    startTime = time()
                    computeStats = True
                    if args.predef:
                        continue
                    name,features,accessibles,hints = dicts.parse_problem(line)  
                        
                    # Create predictions
                    logger.info('Starting computation for problem on line %s',lineCounter)
                    # Update Models with hints
                    if not hints == []:
                        if args.learnTheories:
                            accessibleTheories = set([(dicts.idNameDict[x]).split('.')[0] for x in accessibles])
                            theoryModels.update_with_acc('hints',features,hints,dicts,accessibleTheories)
                        if args.snow:
                            pass
                        else:
                            model.update('hints',features,hints)

                    # Predict premises
                    if args.learnTheories:
                        predictedTheories,accessibles = theoryModels.predict(features,accessibles,dicts)

                    # Add additional features on premise lvl if sine is enabled
                    if args.sineFeatures:
                        origFeatures = [f for f,_w in features]
                        secondaryFeatures = []
                        for f in origFeatures:
                            if dicts.featureCountDict[f] == 1:
                                continue
                            triggeredFormulas = dicts.featureTriggeredFormulasDict[f]                                
                            for formula in triggeredFormulas: 
                                tFeatures = dicts.triggerFeaturesDict[formula]                                
                                #tFeatures = [ff for ff,_fw in dicts.featureDict[formula]]
                                newFeatures = set(tFeatures).difference(secondaryFeatures+origFeatures)
                            for fNew in newFeatures:
                                secondaryFeatures.append((fNew,args.sineWeight))
                        predictionsFeatures = features+secondaryFeatures
                    else:
                        predictionsFeatures = features                    
                    predictions,predictionValues = model.predict(predictionsFeatures,accessibles,dicts)
                    assert len(predictions) == len(predictionValues)
                    
                    # Delete hints
                    if not hints == []:
                        if args.learnTheories:
                            theoryModels.delete('hints',features,hints,dicts)
                        if args.snow:
                            pass
                        else:
                            model.delete('hints',features,hints)

                    logger.info('Done. %s seconds needed.',round(time()-startTime,2))
                    # Output        
                    predictionNames = [str(dicts.idNameDict[p]) for p in predictions[:args.numberOfPredictions]]
                    predictionValues = [str(x) for x in predictionValues[:args.numberOfPredictions]]
                    predictionsStringList = ['%s=%s' % (predictionNames[i],predictionValues[i]) for i in range(len(predictionNames))]
                    predictionsString = string.join(predictionsStringList,' ')
                    outString = '%s: %s' % (name,predictionsString)
                    OS.write('%s\n' % outString)
                else:
                    logger.warning('Unspecified input format: \n%s',line)
                    sys.exit(-1)
                lineCounter += 1
            """
            except:
                logger.warning('An error occurred on line %s .',line)
                lineCounter += 1
                continue
            """
        OS.close()
        IS.close()

        # Statistics
        if args.statistics:
            if args.learnTheories:
                theoryStats.printAvg()
            stats.printAvg()

        # Save
        if args.saveModel:
            model.save(args.modelFile)
            if args.learnTheories:
                theoryModels.save(args.theoryFile)
        dicts.save(args.dictsFile)
        if not args.saveStats == None:
            if args.learnTheories:
                theoryStatsFile = os.path.join(args.outputDir,'theoryStats')
                theoryStats.save(theoryStatsFile)
            statsFile = os.path.join(args.outputDir,args.saveStats)
            stats.save(statsFile)
    return 0
Example #31
def start_simulation_state(no_of_jobs_server_1, no_of_jobs_server_2, arrival_rate, job_distribution, given_x, given_y, sim_time):
    list_of_servers = []
    for _ in range(0,2): # Create 2 servers
        scheduler_i = FIFO()
        job_size_i = Expo(job_distribution)
        server_i = Server(job_size_i, scheduler_i)
        list_of_servers.append(server_i)
    # Create dispatcher
    policy_i = ShortestQueue() #Join shortest queue policy
    dispatcher = Dispatcher(policy_i, list_of_servers) #Create a dispatcher with JSQ policy

    statistics = Statistics()
    world = Global(statistics)
    p_arrivals = PoissonArrival(float(arrival_rate))
    arrival = 0

    
    def set_up_servers():
        nonlocal list_of_servers, world
        for job in range(0, int(no_of_jobs_server_1)):
            job = Job(0) #All arrive at time 0
            list_of_servers[0]._total_jobs +=1
            enter_service = max(list_of_servers[0]._total_processing_time, 0) #Takes processing time (time when server is idle) and the time of arrival. Max of those is enter service for this job
            job.set_enter_service(enter_service) #Setting the enter service, first is 0, next is 0+service_1, next is 0+service_1 + service_2.. ...
            job.set_service(list_of_servers[0].get_service_time()) #Generates service time AND increases processing time for server
            departure_time = job._enter_service + job._service_time # Enter service will be correct because of processing time of server bookkeeping
            job.set_departure(departure_time) #Set the departure time as enter service+service time. (Its a timeperiod on the timeline)
            world.schedule_event(list_of_servers[0].departure, job._departure_time, [job, world]) #Schedule this departure to the world queue
        # Now we have some jobs already in the server(s) before we start the main loop of adding arrivals e.t.c.
        for job in range(0, int(no_of_jobs_server_2)):
            job = Job(0)
            list_of_servers[1]._total_jobs +=1
            enter_service = max(list_of_servers[1]._total_processing_time, 0)
            job.set_enter_service(enter_service)
            job.set_service(list_of_servers[1].get_service_time())
            departure_time = job._enter_service + job._service_time
            job.set_departure(departure_time)
            world.schedule_event(list_of_servers[1].departure, job._departure_time, [job, world])
        #cost function is basically sojourn time in these cases.    

    #### IMPORTANT
    # Need here to make a check to see if we're out of bounds before each process_event
    # If we're out of bounds we 'stop' and save the stats.
    # Then reset the 'world' and run again, until the sim_time is over. 
    # May be best to do a sub-routine to 'reset' the world (can be used for both simulation processes)
    
    def reset_world(first_arrival):
        nonlocal list_of_servers, dispatcher, statistics, world # if we want to modify variable from closure, must place as nonlocal
        list_of_servers.clear() #clear the 2 servers
        for _ in range(0,2): # Create 2 servers
            scheduler_i = FIFO()
            job_size_i = Expo(job_distribution) # use job_distr from closure
            server_i = Server(job_size_i, scheduler_i) # create a new server to place in list
            list_of_servers.append(server_i)
        dispatcher = Dispatcher(policy_i, list_of_servers) # resetting the dispatcher with new servers
        statistics = Statistics()
        world = Global(statistics)
        set_up_servers()
        params = [dispatcher, world]
        world.schedule_event(p_arrivals.generate_arrival, first_arrival, params) #Schedule first arrival to start chain
        #Now we have created two new servers, reset them and created a dispatcher with the new servers
        #Then we reset the world(nonlocal) and statistics to get a clean slate
        #Then we called function to set the initial jobs in the servers again (not same jobs!)

    reset_world(0) # Call function to setup our world

    # Now we need to schedule the initial arrivals to start the chain of events.
    for x in range(1, 11):
        # Now that each dispatcher has an arrival, we can start looping through events
        while world.next_event() <= float(sim_time)*(x*0.1): # while the virtual time of next event is less than our simulation time..
            if list_of_servers[0]._total_jobs > int(given_x) or list_of_servers[1]._total_jobs > int(given_y):
                next_arrival_new_world = world.next_event() #Get the time for the first event for new world
                #reset the world here (e.g. clear all stats and world queue, then reset jobs in servers and 'restart')
                print('resetting world')
                world._stats.write_to_file_intermediate_stats('given_end_of_'+given_x+'_and_'+given_y)
                reset_world(next_arrival_new_world) # This function should reset statistics, world event queue, and server states
                #also remember to log the stats before reset.
            world.process_event() # We take the event and process it (running the function(s))
        print("{}%".format(x*10))
    #for loop to step between while loops (every 10%)while world.next

    total_no_jobs = world._stats.number_of_jobs
    print(total_no_jobs)
    world._stats.print_stats()
Example #32
def start_simulation_less_than_n(no_of_jobs_server_1, no_of_jobs_server_2, arrival_rate, job_distribution, sim_time):
    list_of_servers = []
    global jobs_ran
    global final_data
    stopping_n = no_of_jobs_server_2 
    for _ in range(0,2): # Create 2 servers
        scheduler_i = FIFO()
        job_size_i = Expo(job_distribution)
        server_i = Server(job_size_i, scheduler_i)
        list_of_servers.append(server_i)
    # Create dispatcher
    policy_i = ShortestQueue() #Join shortest queue policy
    dispatcher = Dispatcher(policy_i, list_of_servers) #Create a dispatcher with JSQ policy

    statistics = Statistics()
    world = Global(statistics)
    p_arrivals = PoissonArrival(float(arrival_rate))
    arrival = 0

    #For each job in server 1 we:
    #Create a job with arrival at time 0, then increase # of jobs in the server.
    #We get time the job enters service, either immediately or after the total processing done.
    #Set the enter service and service time needed.
    #Next calculate departure time and set it, then we can schedule the departure.
    server_1_processing_time = 0
    for job in range(0, int(no_of_jobs_server_1)):
        job = Job(0) #All arrive at time 0
        list_of_servers[0]._total_jobs +=1
        enter_service = max(list_of_servers[0]._total_processing_time, 0) #Takes processing time (time when server is idle) and the time of arrival. Max of those is enter service for this job
        job.set_enter_service(enter_service) #Setting the enter service, first is 0, next is 0+service_1, next is 0+service_1 + service_2.. ...
        job.set_service(list_of_servers[0].get_service_time()) #Generates service time AND increases processing time for server
        departure_time = job._enter_service + job._service_time # Enter service will be correct because of processing time of server bookkeeping
        job.set_departure(departure_time) #Set the departure time as enter service+service time. (Its a timeperiod on the timeline)
        world.schedule_event(list_of_servers[0].departure, job._departure_time, [job, world]) #Schedule this departure to the world queue
    for job in range(0, int(no_of_jobs_server_2)):
        job = Job(0)
        list_of_servers[1]._total_jobs +=1
        enter_service = max(list_of_servers[1]._total_processing_time, 0)
        job.set_enter_service(enter_service)
        job.set_service(list_of_servers[1].get_service_time())
        departure_time = job._enter_service + job._service_time
        job.set_departure(departure_time)
        world.schedule_event(list_of_servers[1].departure, job._departure_time, [job, world])
    
    initial_arrival = random.expovariate(p_arrivals._rate)
    params = [dispatcher, world]
    world.schedule_event(p_arrivals.generate_arrival, initial_arrival, params) #Schedule first arrival to start chain

    last_event = 0
    world.number_of_arr_dep = 0 #resetting the number of events before we start
    # Now we need to schedule the initial arrivals to start the chain of events.
    # Now that each dispatcher has an arrival, we can start looping through events
    while world.next_event() <= float(sim_time): # while the virtual time of next event is less than our simulation time..
        if(list_of_servers[0]._total_jobs<=stopping_n and list_of_servers[1]._total_jobs <= stopping_n):
            break
        last_event = world.next_event()
        world.process_event() # We take the event and process it (running the function(s))
    #for loop to step between while loops (every 10%)while world.next
    #We've reached a stopping state. Record event parameters and print to file
    jobs_ran += world._stats.number_of_jobs # We stopped, we add the number of jobs ran this time to global variable
    recorded_x = list_of_servers[0]._total_jobs
    recorded_y = list_of_servers[1]._total_jobs
    recorded_T = last_event #Last event that happened (e.g. departure that caused the total jobs to be < 4)
    recorded_N = world.number_of_arr_dep #Get the number of events that happened'
    final_data.append((recorded_x, recorded_y, recorded_T, recorded_N))
Example #33
class MainWindow(QtGui.QMainWindow):
    
    def __init__(self):
        super(MainWindow, self).__init__()        
        
        self.setWindowTitle("Algae Count Estimator")
        
        self.setSizePolicy(QtGui.QSizePolicy.Fixed,
                           QtGui.QSizePolicy.Fixed)
        
        self.setFixedSize(QtCore.QSize(800,620))
        
        self.initPages()
        self.initMenuBar()       
        
        self.show()
    
    """
    initPages():
        Creates and populates a QStackedWidget that contains the different
        modes (i.e. pages) of the application.
    """
    def initPages(self):
        self.mode_stack = QtGui.QStackedWidget()

        main_menu = MainMenu(self) #0
        self.stats = Statistics(self) #1
        self.trainer = Trainer(self,self.stats) #2
        self.generator = Generator(self) #3
        
        self.mode_stack.addWidget(main_menu)
        self.mode_stack.addWidget(self.stats)
        self.mode_stack.addWidget(self.trainer)
        self.mode_stack.addWidget(self.generator)
        
        self.setCentralWidget(self.mode_stack)
    
    """
    initMenuBar():
        Creates a menu bar that is visible from any mode of the application.
        The options under the menus are called "actions".
    """
    def initMenuBar(self):
        menu_bar = self.menuBar()
        
        #Create File menu and actions.
        file_menu = menu_bar.addMenu('&File')
        
        to_menu_action = QtGui.QAction('Main Menu', self)
        to_menu_action.triggered.connect(lambda: self.changeMode(ModeEnum.MENU))
        
        exit_action = QtGui.QAction('Exit', self)
        exit_action.triggered.connect(self.exitProgram )
        
        file_menu.addAction(to_menu_action)
        file_menu.addAction(exit_action)
        
        #Create Help menu and actions.
        help_menu = menu_bar.addMenu('&Help')

        about_action = QtGui.QAction('About', self)
        about_action.triggered.connect(self.aboutMenu)
        help_menu.addAction(about_action)
        
    """
    changeMode(page_num):
        Changes the view of the user to the mode associated with page_num.
        page_num is an integer defined by ModeEnum. (see enum module).
        
        Special conditions that need to be met before switching to a mode
        may be addressed here.
    """
    def changeMode(self, page_num):
        #Perform any work that needs to be done before switching modes.
        if page_num == ModeEnum.STATS:
            #Update the stats page before switching the view.
            self.stats.updateStatsUI()

        elif page_num == ModeEnum.TRAINER:
            #Attempt to start a new Trainer session if there is not
            #an active session.
            if (not self.trainer.has_active_session):
                if (not self.trainer.startNewSession()):
                    return

        #Switch the mode.
        self.mode_stack.setCurrentIndex(page_num)

    """
    exitProgram():
    This exit routine is called when the user exits via File->Exit.
    Writes the stats to disk and closes the application.
    """
    def exitProgram(self):
        self.mode_stack.widget(ModeEnum.STATS).writeStatsToFile()
        QtGui.qApp.quit()
    
        
    """
    closeEvent(event):
    This exit routine is an overloaded version of Qt's closeEvent,
    and normally occurs when the user exits by clicking the 'X'.
    Before accepting the close event, stats are written to disk.
    """
    def closeEvent(self, event):
        self.mode_stack.widget(ModeEnum.STATS).writeStatsToFile()
        event.accept()
        
        
    def aboutMenu(self):
        QtGui.QMessageBox.about(self, 'About ACE',
            '''<p><strong>ACE 1.0.0</strong><br/>The Algae Count Estimator</p><p>Copyright &copy; 2014 Samuel Dunlap, Josh Minor, Matt Ralphs, Andrew Young<br/>Licensed under the terms of the MIT License</p>
                <p>Build for those who require assistance in the counting of blue-green algae,
                ACE is a tool that graphically generates algae 
                colonies with known counts. Users may test their ability to count these
                colonies with this program and be provided statistics regarding how they've done.</p>
                <p>For more info, see the User's Manual located in the Docs folder.</p>
                <p>This project was originally commissioned for the Institute for Watershed Studies
                of Western Washington University.</p>
                ''')
Example #34
    def compare_returns(df, compare_hurst_exp: bool = False, compare_lyapunov_exponent=False, window: int = 10):
        """Compare returns of the simulated markets with the real market"""
        # TODO: change this hardcoded mess
        product = "LTC-USD"

        plt = Evaluation.get_plt(window)

        df = Evaluation.calculate_aux_rows(df, window)
        Evaluation.print_sim_summary(df)

        Evaluation.plot_linregress(df['rp_diff'], df['sp_diff'],
                                   window, xlabel="Real 5 minute returns",
                                   ylabel="Simulated 5 minute returns")

        if compare_lyapunov_exponent:
            step_minutes = 1
            window_minutes = 240

            st_str = df['start_time'].min()
            et_str = df['start_time'].max()

            st = datetime.datetime.strptime(st_str, "%Y-%m-%dT%H:%M:%S") + datetime.timedelta(
                minutes=window_minutes - 60)
            et = datetime.datetime.strptime(et_str, "%Y-%m-%dT%H:%M:%S") + datetime.timedelta(
                minutes=window_minutes - 60)
            _, trades, _ = DataLoader.load_split_data("/Users/jamesprince/project-data/data/consolidated-feed/LTC-USD/",
                                                      st,
                                                      et,
                                                      product)

            times, lyap_exps = Statistics.get_lyapunov_exponent_over_time(trades, st, et, step_minutes, window_minutes)
            lyap_df = pd.DataFrame({'start_time': list(map(lambda t: t.isoformat(), times)), 'lyap_exp': lyap_exps})

            joined_df = pd.merge(df, lyap_df, how='left', on='start_time')
            dropped_df = joined_df.dropna(subset=['lyap_exp'])

            x1 = dropped_df['lyap_exp']
            x2 = dropped_df['rp_sp_diff']

            plt.xlim(-0.2, 0.2)
            plt.ylim(-10, 10)

            Evaluation.plot_linregress(x1, x2,
                                       window,
                                       show_regression_line=True,
                                       xlabel="Lyapunov Exponent",
                                       ylabel="Diff between real and simulated returns")

        if compare_hurst_exp:
            st_str = df['start_time'].min()
            et_str = df['start_time'].max()

            step_minutes = 5
            window_minutes = 5

            st = datetime.datetime.strptime(st_str, "%Y-%m-%dT%H:%M:%S") - datetime.timedelta(minutes=window_minutes)
            et = datetime.datetime.strptime(et_str, "%Y-%m-%dT%H:%M:%S") - datetime.timedelta(minutes=window_minutes)
            _, trades, _ = DataLoader.load_split_data("/Users/jamesprince/project-data/data/consolidated-feed/LTC-USD/",
                                                      st,
                                                      et,
                                                      product)

            times, lyap_exps = Statistics.get_hurst_exponent_over_time(trades, st, et, step_minutes, window_minutes)
            lyap_df = pd.DataFrame({'start_time': list(map(lambda t: t.isoformat(), times)), 'hurst_exp': lyap_exps})

            print(lyap_df)

            joined_df = pd.merge(df, lyap_df, how='left', on='start_time')
            dropped_df = joined_df.dropna(subset=['hurst_exp'])

            x1 = dropped_df['hurst_exp']
            x2 = dropped_df['rp_sp_diff']

            Evaluation.plot_linregress(x1, x2,
                                       window,
                                       show_regression_line=True,
                                       xlabel="Hurst Exponent",
                                       ylabel="Diff between real and simulated returns")