Example #1
def calc_statistics(args, app=None):
    if not app:
        app = create_app(config_override=CONFIG)
    with app.app_context():
        from stats import Statistics
        statistics = Statistics()
        statistics.calc_all()
        db.session.commit()
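A minimal usage sketch (hypothetical, not part of the project shown): create_app, CONFIG and db come from the surrounding module, and the argparse wiring below is only a stand-in.

# Hypothetical CLI entry point for the helper above.
import argparse

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Recalculate cached statistics")
    calc_statistics(parser.parse_args())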
Example #2
 def get_relative_prices(trades_df: dd, other_df: dd) -> dd:
     price_over_time: dd = Statistics().get_price_over_time(
         trades_df).groupby(
             ['time'])['most_recent_trade_price'].mean().to_frame()
     other_df = DataUtils().fuzzy_join(other_df, price_over_time, on='time')
     relative_prices = other_df['relative_price']
     relative_prices = DataUtils().remove_tails(relative_prices, 3)
     return relative_prices
Example #3
 def __init__(self):
     self.__match_maker = MatchMaker()
     self.__stats = Statistics()
     self.__app = QtWidgets.QApplication(sys.argv)
     self.__MainWindow = QtWidgets.QMainWindow()
     super().__init__()
     super().setupUi(self.__MainWindow)
     self.__init_logic()
     self.__display_round()
     self.__display_stats()
Example #4
def main():
    """
    The scanner's entry point.
    """

    stats = Statistics()
    args = parse_cmd_args()

    # Create and set the given directories.

    if args.tor_dir and not os.path.exists(args.tor_dir):
        os.makedirs(args.tor_dir)

    logging.getLogger("stem").setLevel(
        logging.__dict__[args.verbosity.upper()])
    log_format = "%(asctime)s %(name)s [%(levelname)s] %(message)s"
    logging.basicConfig(format=log_format,
                        level=logging.__dict__[args.verbosity.upper()],
                        filename=args.logfile)

    log.debug("Command line arguments: %s" % str(args))

    socks_port, control_port = bootstrap_tor(args)
    controller = Controller.from_port(port=control_port)
    stem.connection.authenticate(controller)

    # Redirect Tor's logging to work around the following problem:
    # https://bugs.torproject.org/9862

    log.debug("Redirecting Tor's logging to /dev/null.")
    controller.set_conf("Log", "err file /dev/null")

    # We already have the current consensus, so we don't need additional
    # descriptors or the streams fetching them.

    controller.set_conf("FetchServerDescriptors", "0")

    cached_consensus_path = os.path.join(args.tor_dir, "cached-consensus")
    if args.first_hop and (not util.relay_in_consensus(args.first_hop,
                                                       cached_consensus_path)):
        log.critical("Given first hop \"%s\" not found in consensus.  Is it"
                     " offline?" % args.first_hop)
        return 1

    for module_name in args.module:

        if args.analysis_dir is not None:
            datestr = time.strftime("%Y-%m-%d_%H:%M:%S%z") + "_" + module_name
            util.analysis_dir = os.path.join(args.analysis_dir, datestr)

        try:
            run_module(module_name, args, controller, socks_port, stats)
        except error.ExitSelectionError as err:
            log.error("Failed to run because: %s" % err)
    return 0
Example #5
def start_simulation_sim_time(no_of_jobs_server_1, no_of_jobs_server_2, arrival_rate, job_distribution, sim_time):
    list_of_servers = []
    for _ in range(0,2): # Create 2 servers
        scheduler_i = FIFO()
        job_size_i = Expo(job_distribution)
        server_i = Server(job_size_i, scheduler_i)
        list_of_servers.append(server_i)
    policy_i = ShortestQueue() #Join shortest queue policy
    dispatcher = Dispatcher(policy_i, list_of_servers) #Create a dispatcher with JSQ policy
    statistics = Statistics()
    world = Global(statistics)
    p_arrivals = PoissonArrival(float(arrival_rate))

    #For each job in server 1 we:
    #Create a job with arrival at time 0, then increase # of jobs in the server.
    #We get time the job enters service, either immediately or after the total processing done.
    #Set the enter service and service time needed.
    #Next calculate departure time and set it, then we can schedule the departure.
    server_1_processing_time = 0
    #Forloops used to create a given # of jobs before starting sim
    for job in range(0, int(no_of_jobs_server_1)):
        job = Job(0) #All arrive at time 0
        list_of_servers[0]._total_jobs +=1
        enter_service = max(list_of_servers[0]._total_processing_time, 0) #Takes processing time (time when server is idle) and the time of arrival. Max of those is enter service for this job
        job.set_enter_service(enter_service) #Setting the enter service, first is 0, next is 0+service_1, next is 0+service_1 + service_2.. ...
        job.set_service(list_of_servers[0].get_service_time()) #Generates service time AND increases processing time for server
        departure_time = job._enter_service + job._service_time # Enter service will be correct because of processing time of server bookkeeping
        job.set_departure(departure_time) #Set the departure time as enter service+service time. (Its a timeperiod on the timeline)
        world.schedule_event(list_of_servers[0].departure, job._departure_time, [job, world]) #Schedule this departure to the world queue
    for job in range(0, int(no_of_jobs_server_2)):
        job = Job(0)
        list_of_servers[1]._total_jobs +=1
        enter_service = max(list_of_servers[1]._total_processing_time, 0)
        job.set_enter_service(enter_service)
        job.set_service(list_of_servers[1].get_service_time())
        departure_time = job._enter_service + job._service_time
        job.set_departure(departure_time)
        world.schedule_event(list_of_servers[1].departure, job._departure_time, [job, world])
    
    initial_arrival = random.expovariate(p_arrivals._rate)
    params = [dispatcher, world]
    world.schedule_event(p_arrivals.generate_arrival, initial_arrival, params) #Schedule first arrival to start chain

    world.number_of_arr_dep = 0 #resetting the number of events before we start
    # The initial arrival is scheduled, so we can start looping through events.
    # The outer loop advances the simulation in 10% steps so progress can be reported.
    for x in range(1, 11):
        while world.next_event() <= float(sim_time)*(x*0.1): # while the virtual time of the next event is within the current 10% slice of the simulation time
            world.process_event() # Take the event and process it (running its function(s))
        print("{}%".format(x*10))

    total_no_jobs = world._stats.number_of_jobs
    print(total_no_jobs)
    world._stats.print_stats()
Example #6
File: train.py Project: yyht/daga
def validate(model, val_iter):
    """Validate with mini-batches."""
    model.eval()
    batch_stats = Statistics()
    with torch.no_grad():
        for batch in val_iter:
            sents = batch.sent
            _, stats = model(sents)
            batch_stats.update(stats)
            torch.cuda.empty_cache()
    return batch_stats
Example #7
  def init_tlb(self, tlb_lst, tlb_len, tlb_stats, yconfig, data=True):
    for l in range(tlb_len):
      if yconfig[l]['private'] or data:
        if yconfig[l]['type'] == 'fully-associative':
          tlb_lst.append(Tlb(Structure.FULLY_ASSOCIATIVE, yconfig[l]['entry']))
        elif yconfig[l]['type'] == '2-way':
          tlb_lst.append(Tlb(Structure.SET2_ASSOCIATIVE, yconfig[l]['entry']))
        elif yconfig[l]['type'] == '4-way':
          tlb_lst.append(Tlb(Structure.SET4_ASSOCIATIVE, yconfig[l]['entry']))
        else:
          raise NotImplementedError("unimplemented tlb type")

        tlb_stats.append(Statistics())
        tlb_lst[-1].set_stats(tlb_stats[-1])
      else:
        # shared tlb -> let the data and instruction tlbs have
        # the same object.
        tlb_lst.append(self.dtlb[l])
        tlb_stats.append(self.dtlb_stats[l])

    tlb_stats.append(Statistics())
Example #8
File: train.py Project: yyht/daga
def train(model, optimizer, train_iter, epoch, args):
    """Train with mini-batches."""
    model.train()
    total_stats = Statistics()
    batch_stats = Statistics()
    num_batches = len(train_iter)
    for i, batch in enumerate(train_iter):
        if args.warmup > 0:
            args.beta = min(1, args.beta + 1.0 / (args.warmup * num_batches))

        sents = batch.sent
        loss, stats = model(sents, args.beta)
        optimizer.zero_grad()
        loss.backward()
        utils.clip_grad_norm(optimizer, args)
        optimizer.step()
        total_stats.update(stats)
        batch_stats.update(stats)
        batch_stats = report_batch(batch_stats, epoch, i, num_batches, args)
        torch.cuda.empty_cache()
    return total_stats
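For context, a hypothetical driver that calls train() (above) and validate() (Example #6) once per epoch could look like the sketch below; args.epochs and the printed format are assumptions, not part of the project's shown code.

# Hypothetical per-epoch driver; model, optimizer, the iterators and args come from the project's setup code.
for epoch in range(1, args.epochs + 1):
    train_stats = train(model, optimizer, train_iter, epoch, args)
    val_stats = validate(model, val_iter)
    print("epoch %d | val acc %.2f | val nll %.2f"
          % (epoch, val_stats.accuracy(), val_stats.nll()))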
Example #9
    def initPages(self):
        self.mode_stack = QtGui.QStackedWidget()

        main_menu = MainMenu(self)  #0
        self.stats = Statistics(self)  #1
        self.trainer = Trainer(self, self.stats)  #2
        self.generator = Generator(self)  #3

        self.mode_stack.addWidget(main_menu)
        self.mode_stack.addWidget(self.stats)
        self.mode_stack.addWidget(self.trainer)
        self.mode_stack.addWidget(self.generator)

        self.setCentralWidget(self.mode_stack)
Example #10
    def test_from_many_sources(self):

        # create one mutation which is present in multiple sources
        m = models.Mutation()
        metadata_1 = models.InheritedMutation(mutation=m)
        metadata_2 = models.MC3Mutation(mutation=m)
        db.session.add_all([metadata_1, metadata_2])

        from stats import Statistics
        statistics = Statistics()

        in_many_sources = statistics.from_more_than_one_source()

        assert in_many_sources == 1
Example #11
 def reset_world(first_arrival):
     nonlocal list_of_servers, dispatcher, statistics, world # if we want to modify variable from closure, must place as nonlocal
     list_of_servers.clear() #clear the 2 servers
     for _ in range(0,2): # Create 2 servers
         scheduler_i = FIFO()
         job_size_i = Expo(job_distribution) # use job_distr from closure
         server_i = Server(job_size_i, scheduler_i) # create a new server to place in list
         list_of_servers.append(server_i)
     dispatcher = Dispatcher(policy_i, list_of_servers) # resetting the dispatcher with new servers
     statistics = Statistics()
     world = Global(statistics)
     set_up_servers()
     params = [dispatcher, world]
     world.schedule_event(p_arrivals.generate_arrival, first_arrival, params) #Schedule first arrival to start chain
Example #12
def main():
    """
    The scanner's entry point.
    """

    stats = Statistics()
    args = parse_cmd_args()

    # Create and set the given directories.

    if args.tor_dir and not os.path.exists(args.tor_dir):
        os.makedirs(args.tor_dir)
    if args.analysis_dir and not os.path.exists(args.analysis_dir):
        os.makedirs(args.analysis_dir)
    util.analysis_dir = args.analysis_dir

    logger.setLevel(logging.__dict__[args.verbosity.upper()])

    logger.debug("Command line arguments: %s" % str(args))

    socks_port, control_port = bootstrap_tor(args)
    controller = Controller.from_port(port=control_port)
    stem.connection.authenticate(controller)

    # Redirect Tor's logging to work around the following problem:
    # https://bugs.torproject.org/9862

    logger.debug("Redirecting Tor's logging to /dev/null.")
    controller.set_conf("Log", "err file /dev/null")

    # We already have the current consensus, so we don't need additional
    # descriptors or the streams fetching them.

    controller.set_conf("FetchServerDescriptors", "0")

    cached_consensus_path = os.path.join(args.tor_dir, "cached-consensus")
    if args.first_hop and (not util.relay_in_consensus(args.first_hop,
                                                       cached_consensus_path)):
        raise error.PathSelectionError("Given first hop \"%s\" not found in "
                                       "consensus.  Is it offline?" %
                                       args.first_hop)

    for module_name in args.module:
        try:
            run_module(module_name, args, controller, socks_port, stats)
        except error.ExitSelectionError as err:
            logger.error("failed to run because: %s" % err)
    return 0
Example #13
    def _forward(self, sents):
        src = sents[:-1]
        tgt = sents[1:]
        tgt = tgt.view(-1)

        dec_out, _ = self.decoder(src)
        dec_out = self.dropout(dec_out)
        logit = self.generator(dec_out.view(-1, dec_out.size(2)))
        recon_loss = self.criterion(logit, tgt).sum()
        n_correct, n_words = self._correct(logit, tgt)
        stats = Statistics(
            loss=recon_loss.item(),
            n_correct=n_correct,
            n_words=n_words,
            n_sents=sents.size(1),
        )
        return recon_loss, stats
Example #14
    def _decode(self, sents, mu, logvar, beta):
        src = sents[:-1]
        tgt = sents[1:]
        tgt = tgt.view(-1)
        recon_loss, n_correct, n_words = 0.0, 0, 0
        dec_outs = 0.0

        for _ in range(self.num_z_samples):
            z = self._reparameterize(mu, logvar)
            h = self.z2h(z)
            dec_state = self._build_dec_state(h)
            dec_out, _ = self.decoder(src, dec_state, z)
            dec_out = self.dropout(dec_out)
            if self.use_avg:
                dec_outs += dec_out
            else:
                logit = self.generator(dec_out.view(-1, dec_out.size(2)))
                recon_loss += self.criterion(logit, tgt).sum()
                n_correct_, n_words_ = self._correct(logit, tgt)
                n_correct += n_correct_
                n_words += n_words_

        if self.use_avg:
            dec_outs /= self.num_z_samples
            logit = self.generator(dec_outs.view(-1, dec_outs.size(2)))
            recon_loss = self.criterion(logit, tgt).sum()
            n_correct, n_words = self._correct(logit, tgt)
        else:
            recon_loss /= self.num_z_samples
            n_correct /= self.num_z_samples
            n_words /= self.num_z_samples

        kl_loss = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
        loss = recon_loss + (beta * kl_loss)
        stats = Statistics(
            loss.item(),
            recon_loss.item(),
            kl_loss.item(),
            n_correct,
            n_words,
            sents.size(1),
        )
        return loss, stats
Example #15
    def generateChart(self):
        self.graphView.setScrollView(self.scrollView)

        stat = Statistics(self.startDate, self.endDate)

        if self.reportType == "tasks":
            self.graphView.setData(stat.countTasks(), self.reportType)

        elif self.reportType == "projects":
            self.graphView.setData(stat.countProjects(), self.reportType)

        elif self.reportType == "slacking":
            self.graphView.setData(stat.countSlacking(), self.reportType)

        self.graphView.setScale(stat.maxValue)
        self.lblWorkTotal.setStringValue_(secToTimeStr(stat.totalWork))
        self.lblAvgWork.setStringValue_(secToTimeStr(stat.avgWork))
        self.lblSlackTotal.setStringValue_(secToTimeStr(stat.totalSlacking))
        self.lblAvgSlack.setStringValue_(secToTimeStr(stat.avgSlacking))
        self.graphView.setNeedsDisplay_(True)
Example #16
def start_simulation(no_of_servers, server_scheduler, no_of_dispatchers, dispatcher_policy, arrival_rate, job_distribution, sim_time):
    list_of_servers = []
    list_of_dispatchers = []
    for _ in range(0, int(no_of_servers)):
        scheduler_i = FIFO() if server_scheduler == 1 else PS()
        job_size_i = Expo(job_distribution)
        server_i = Server(job_size_i, scheduler_i)
        list_of_servers.append(server_i)
    for _ in range(0, int(no_of_dispatchers)):
        policy_i = RND() if dispatcher_policy == 1 else ShortestQueue()
        dispatcher_i = Dispatcher(policy_i, list_of_servers)
        list_of_dispatchers.append(dispatcher_i)

    statistics = Statistics()
    world = Global(statistics)
    p_arrivals = PoissonArrival(float(arrival_rate)/len(list_of_dispatchers))

    # Loop to generate first arrivals for each dispatcher
    for d in list_of_dispatchers:
        arrival = random.expovariate(p_arrivals._rate)
        params = [d, world]
        world.schedule_event(p_arrivals.generate_arrival, arrival, params) # Schedule the first arrivals for each dispatcher
    
    # Now that each dispatcher has an arrival, we can start looping through events.
    # The outer loop advances the simulation in 1% steps so progress can be reported.
    for x in range(1, 101):
        while world.next_event() <= float(sim_time)*(x*0.01): # while the virtual time of the next event is within the current 1% slice of the simulation time
            world.process_event() # Take the event and process it (running its function(s))
        world._stats.write_to_file_jobs()
        print("{}%".format(x))

    total_no_jobs = world._stats.number_of_jobs
    print(total_no_jobs)
    world._stats.print_stats()
    # Interactive stage: ask whether the user wants to save the data.
    save_stats = input("Do you want to save these stats to file Y/N?").upper()
    if save_stats == 'Y':
        file_name = input("What name for the file? > ")
        print("Thank you, your stats will be saved in the Simulation_results directory.")
        world._stats.write_to_file_stats(file_name) # Only write when a file name was provided
Example #17
File: train.py Project: yyht/daga
def report_batch(batch_stats, epoch, step, num_batches, args):
    """Report batch statistics."""
    if step % args.report_every == -1 % args.report_every:  # report on the last batch of every report_every-sized window
        r = batch_stats
        t = r.elapsed_time()
        logger.info(
            "Epoch %3d | %4d/%4d batches | acc %5.2f | "
            "nll %8.2f | kl %6.2f | ppl %8.2f | "
            "beta %4.2f | %.0f tok/s",
            epoch,
            step + 1,
            num_batches,
            r.accuracy(),
            r.nll(),
            r.kl(),
            r.ppl(),
            args.beta,
            r.n_words / (t + 1e-5),
        )
        sys.stdout.flush()
        batch_stats = Statistics()
    return batch_stats
Example #18
File: test.py Project: yyht/daga
def report_iw_nll(model, test_iter, n_iw_iter, n_iw_samples):
    """Calculate the importance-weighted estimate of NLL."""
    model.eval()
    loss, n_sents, n_words = 0.0, 0, 0
    with torch.no_grad():
        for batch in tqdm.tqdm(test_iter, total=len(test_iter)):
            sents = batch.sent
            n_sents += sents.size(1)
            for i in range(sents.size(1)):
                sent = sents[:, i]  # get one sentence
                sent = sent.masked_select(sent.ne(model.padding_idx))  # trim pad token
                n_words += sent.size(0) - 1  # skip start symbol
                sent = sent.unsqueeze(1)
                logw = []
                for _ in range(n_iw_iter):
                    logw.extend(model.estimate_log_prob(sent, n_iw_samples))
                logw = torch.cat(logw)
                logp = torch.logsumexp(logw, dim=-1) - math.log(len(logw))
                loss += logp.item()
                torch.cuda.empty_cache()

    return Statistics(loss=-loss, n_words=n_words, n_sents=n_sents)
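The examples only show how Statistics is consumed (update, accuracy, nll, kl, ppl, elapsed_time). As a rough mental model, here is a plausible accumulator consistent with those calls; it is a hypothetical sketch, not the daga project's actual class.

import math
import time


class Statistics:
    """Hypothetical loss/accuracy accumulator inferred from the calls in the examples above."""

    def __init__(self, loss=0.0, recon_loss=0.0, kl_loss=0.0,
                 n_correct=0, n_words=0, n_sents=0):
        self.loss = loss
        self.recon_loss = recon_loss
        self.kl_loss = kl_loss
        self.n_correct = n_correct
        self.n_words = n_words
        self.n_sents = n_sents
        self.start_time = time.time()

    def update(self, other):
        # Accumulate another batch's counts into this instance.
        self.loss += other.loss
        self.recon_loss += other.recon_loss
        self.kl_loss += other.kl_loss
        self.n_correct += other.n_correct
        self.n_words += other.n_words
        self.n_sents += other.n_sents

    def accuracy(self):
        return 100.0 * self.n_correct / max(self.n_words, 1)

    def nll(self):
        # Which loss term nll()/ppl() normalise over is a guess in this sketch.
        return self.loss / max(self.n_sents, 1)

    def kl(self):
        return self.kl_loss / max(self.n_sents, 1)

    def ppl(self):
        return math.exp(min(self.loss / max(self.n_words, 1), 100))

    def elapsed_time(self):
        return time.time() - self.start_time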
Example #19
    def test_interactions(self):

        from models import Protein, Site, Kinase, KinaseGroup

        p1 = Protein(sites=[
            Site(),
            Site(kinases=[Kinase()], kinase_groups=[KinaseGroup()])
        ])
        db.session.add(p1)
        p2 = Protein(sites=[Site(kinases=[Kinase()])])
        db.session.add(p2)

        u_all_interactions = 0
        u_kinases_covered = set()
        u_kinase_groups_covered = set()
        u_proteins_covered = set()
        for protein in models.Protein.query.all():
            for site in protein.sites:
                kinases = site.kinases
                kinase_groups = site.kinase_groups
                u_all_interactions += len(kinases) + len(kinase_groups)
                u_kinases_covered.update(kinases)
                u_kinase_groups_covered.update(kinase_groups)

                if kinases or kinase_groups:
                    u_proteins_covered.add(protein)

        from stats import Statistics
        statistics = Statistics()
        all_interactions = statistics.interactions()
        kinases_covered = statistics.kinases_covered()
        kinase_groups_covered = statistics.kinase_groups_covered()
        proteins_covered = statistics.proteins_covered()

        assert all_interactions == u_all_interactions
        assert kinases_covered == len(u_kinases_covered)
        assert kinase_groups_covered == len(u_kinase_groups_covered)
        assert proteins_covered == len(u_proteins_covered)
Example #20
def main():
    """
    The scanner's entry point.
    """

    stats = Statistics()
    args = parse_cmd_args()

    logger.setLevel(logging.__dict__[args.verbosity.upper()])

    logger.debug("Command line arguments: %s" % str(args))

    bootstrap_tor(args)
    controller = Controller.from_port(port=45679)
    stem.connection.authenticate(controller)

    # Redirect Tor's logging to work around the following problem:
    # https://bugs.torproject.org/9862

    logger.debug("Redirecting Tor's logging to /dev/null.")
    controller.set_conf("Log", "err file /dev/null")

    # We already have the current consensus, so we don't need additional
    # descriptors or the streams fetching them.

    controller.set_conf("FetchServerDescriptors", "0")

    if args.first_hop and \
       (not util.relay_in_consensus(args.first_hop,
                                    util.get_consensus_path(args))):
        raise error.PathSelectionError("Given first hop \"%s\" not found in "
                                       "consensus.  Is it offline?" %
                                       args.first_hop)

    for module_name in args.module:
        run_module(module_name, args, controller, stats)

    return 0
Example #21
def esa_3_server(arr_rate, job_through, no_servers, file_name):
    list_of_servers = init_servers(no_servers, 1, 1)
    list_of_dispatchers = init_dispatchers(1, 2, list_of_servers)
    statistics = Statistics()
    world = Global(statistics)
    p_arrivals = PoissonArrival(float(arr_rate))

    init_first_jobs(world, list_of_dispatchers,
                    p_arrivals)  #Create the first arrival to initiate loop

    # Main loop
    while world.next_event():  # loop while there is a pending event on the virtual timeline
        if world._stats.number_of_jobs > job_through:  # If we've reached the desired # of jobs
            data = world._stats.get_mean_sd_sojourn()  # format: 'mean,sd'
            with open('./Simulation_results/esa_' + str(no_servers) +
                      '_subfile_' + str(file_name) + '_server_test2.txt',
                      'a') as myfile:
                myfile.write(data + "\n")
            break
        world.process_event()  # Take the event and process it (running its function(s))
Example #22
 def exposed_stats(self, limit_to):
     from stats import Statistics
     s = Statistics()
     s.calc_all(limit_to=limit_to)
     return s.get_all()
Example #23
def start_simulation_state(no_of_jobs_server_1, no_of_jobs_server_2, arrival_rate, job_distribution, given_x, given_y, sim_time):
    list_of_servers = []
    for _ in range(0,2): # Create 2 servers
        scheduler_i = FIFO()
        job_size_i = Expo(job_distribution)
        server_i = Server(job_size_i, scheduler_i)
        list_of_servers.append(server_i)
    # Create dispatcher
    policy_i = ShortestQueue() #Join shortest queue policy
    dispatcher = Dispatcher(policy_i, list_of_servers) #Create a dispatcher with JSQ policy

    statistics = Statistics()
    world = Global(statistics)
    p_arrivals = PoissonArrival(float(arrival_rate))
    arrival = 0

    
    def set_up_servers():
        nonlocal list_of_servers, world
        for job in range(0, int(no_of_jobs_server_1)):
            job = Job(0) #All arrive at time 0
            list_of_servers[0]._total_jobs +=1
            enter_service = max(list_of_servers[0]._total_processing_time, 0) #Takes processing time (time when server is idle) and the time of arrival. Max of those is enter service for this job
            job.set_enter_service(enter_service) #Setting the enter service, first is 0, next is 0+service_1, next is 0+service_1 + service_2.. ...
            job.set_service(list_of_servers[0].get_service_time()) #Generates service time AND increases processing time for server
            departure_time = job._enter_service + job._service_time # Enter service will be correct because of processing time of server bookkeeping
            job.set_departure(departure_time) #Set the departure time as enter service+service time. (Its a timeperiod on the timeline)
            world.schedule_event(list_of_servers[0].departure, job._departure_time, [job, world]) #Schedule this departure to the world queue
        # Now we have some jobs already in the server(s) before we start the main loop of adding arrivals e.t.c.
        for job in range(0, int(no_of_jobs_server_2)):
            job = Job(0)
            list_of_servers[1]._total_jobs +=1
            enter_service = max(list_of_servers[1]._total_processing_time, 0)
            job.set_enter_service(enter_service)
            job.set_service(list_of_servers[1].get_service_time())
            departure_time = job._enter_service + job._service_time
            job.set_departure(departure_time)
            world.schedule_event(list_of_servers[1].departure, job._departure_time, [job, world])
        #cost function is basically sojourn time in these cases.    

    #### IMPORTANT
    # Before each process_event we need to check whether we're out of bounds.
    # If we are, we 'stop' and save the stats, then reset the 'world' and run
    # again until sim_time is over. A sub-routine that resets the world is the
    # cleanest approach (it can be reused by both simulation processes).
    
    def reset_world(first_arrival):
        nonlocal list_of_servers, dispatcher, statistics, world # if we want to modify variable from closure, must place as nonlocal
        list_of_servers.clear() #clear the 2 servers
        for _ in range(0,2): # Create 2 servers
            scheduler_i = FIFO()
            job_size_i = Expo(job_distribution) # use job_distr from closure
            server_i = Server(job_size_i, scheduler_i) # create a new server to place in list
            list_of_servers.append(server_i)
        dispatcher = Dispatcher(policy_i, list_of_servers) # resetting the dispatcher with new servers
        statistics = Statistics()
        world = Global(statistics)
        set_up_servers()
        params = [dispatcher, world]
        world.schedule_event(p_arrivals.generate_arrival, first_arrival, params) #Schedule first arrival to start chain
        #Now we have created two new servers, reset them and created a dispatcher with the new servers
        #Then we reset the world(nonlocal) and statistics to get a clean slate
        #Then we called function to set the initial jobs in the servers again (not same jobs!)

    reset_world(0) # Call function to setup our world

    # reset_world scheduled the first arrival, so we can start looping through events.
    for x in range(1, 11):
        while world.next_event() <= float(sim_time)*(x*0.1): # while the virtual time of next event is less than our simulation time..
            if list_of_servers[0]._total_jobs > int(given_x) or list_of_servers[1]._total_jobs > int(given_y):
                next_arrival_new_world = world.next_event() #Get the time for the first event for new world
                #reset the world here (e.g. clear all stats and world queue, then reset jobs in servers and 'restart')
                print('resetting world')
                world._stats.write_to_file_intermediate_stats('given_end_of_'+given_x+'_and_'+given_y)
                reset_world(next_arrival_new_world) # This function should reset statistics, world event queue, and server states
                #also remember to log the stats before reset.
            world.process_event() # We take the event and process it (running the function(s))
        print("{}%".format(x*10))
    # The for loop above advances the simulation in 10% steps between the while loops.

    total_no_jobs = world._stats.number_of_jobs
    print(total_no_jobs)
    world._stats.print_stats()
Example #24
def start_simulation_less_than_n(no_of_jobs_server_1, no_of_jobs_server_2, arrival_rate, job_distribution, sim_time):
    list_of_servers = []
    global jobs_ran
    global final_data
    stopping_n = int(no_of_jobs_server_2) # cast like the other job-count parameters so the comparisons below work
    for _ in range(0,2): # Create 2 servers
        scheduler_i = FIFO()
        job_size_i = Expo(job_distribution)
        server_i = Server(job_size_i, scheduler_i)
        list_of_servers.append(server_i)
    # Create dispatcher
    policy_i = ShortestQueue() #Join shortest queue policy
    dispatcher = Dispatcher(policy_i, list_of_servers) #Create a dispatcher with JSQ policy

    statistics = Statistics()
    world = Global(statistics)
    p_arrivals = PoissonArrival(float(arrival_rate))
    arrival = 0

    #For each job in server 1 we:
    #Create a job with arrival at time 0, then increase # of jobs in the server.
    #We get time the job enters service, either immediately or after the total processing done.
    #Set the enter service and service time needed.
    #Next calculate departure time and set it, then we can schedule the departure.
    server_1_processing_time = 0
    for job in range(0, int(no_of_jobs_server_1)):
        job = Job(0) #All arrive at time 0
        list_of_servers[0]._total_jobs +=1
        enter_service = max(list_of_servers[0]._total_processing_time, 0) #Takes processing time (time when server is idle) and the time of arrival. Max of those is enter service for this job
        job.set_enter_service(enter_service) #Setting the enter service, first is 0, next is 0+service_1, next is 0+service_1 + service_2.. ...
        job.set_service(list_of_servers[0].get_service_time()) #Generates service time AND increases processing time for server
        departure_time = job._enter_service + job._service_time # Enter service will be correct because of processing time of server bookkeeping
        job.set_departure(departure_time) #Set the departure time as enter service+service time. (Its a timeperiod on the timeline)
        world.schedule_event(list_of_servers[0].departure, job._departure_time, [job, world]) #Schedule this departure to the world queue
    for job in range(0, int(no_of_jobs_server_2)):
        job = Job(0)
        list_of_servers[1]._total_jobs +=1
        enter_service = max(list_of_servers[1]._total_processing_time, 0)
        job.set_enter_service(enter_service)
        job.set_service(list_of_servers[1].get_service_time())
        departure_time = job._enter_service + job._service_time
        job.set_departure(departure_time)
        world.schedule_event(list_of_servers[1].departure, job._departure_time, [job, world])
    
    initial_arrival = random.expovariate(p_arrivals._rate)
    params = [dispatcher, world]
    world.schedule_event(p_arrivals.generate_arrival, initial_arrival, params) #Schedule first arrival to start chain

    last_event = 0
    world.number_of_arr_dep = 0 #resetting the number of events before we start
    # The first arrival is scheduled, so we can start looping through events.
    while world.next_event() <= float(sim_time): # while the virtual time of the next event is less than our simulation time
        if list_of_servers[0]._total_jobs <= stopping_n and list_of_servers[1]._total_jobs <= stopping_n:
            break
        last_event = world.next_event()
        world.process_event() # Take the event and process it (running its function(s))
    #We've reached a stopping state. Record event parameters and print to file
    jobs_ran += world._stats.number_of_jobs # We stopped, we add the number of jobs ran this time to global variable
    recorded_x = list_of_servers[0]._total_jobs
    recorded_y = list_of_servers[1]._total_jobs
    recorded_T = last_event #Last event that happened (e.g. departure that caused the total jobs to be < 4)
    recorded_N = world.number_of_arr_dep # Get the number of events that happened
    final_data.append((recorded_x, recorded_y, recorded_T, recorded_N))
Example #25
def mash(argv=sys.argv[1:]):
    # Initializing command-line arguments
    args = parser.parse_args(argv)

    # Set up logging
    logging.basicConfig(
        level=logging.DEBUG,
        format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
        datefmt='%d-%m %H:%M:%S',
        filename=args.log,
        filemode='w')
    logger = logging.getLogger('main.py')

    #"""
    # remove old handler for tester
    #logger.root.handlers[0].stream.close()
    logger.root.removeHandler(logger.root.handlers[0])
    file_handler = logging.FileHandler(args.log)
    file_handler.setLevel(logging.DEBUG)
    formatter = logging.Formatter(
        '%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
    file_handler.setFormatter(formatter)
    logger.root.addHandler(file_handler)
    #"""
    if args.quiet:
        logger.setLevel(logging.WARNING)
        #console.setLevel(logging.WARNING)
    else:
        console = logging.StreamHandler(sys.stdout)
        console.setLevel(logging.INFO)
        formatter = logging.Formatter('# %(message)s')
        console.setFormatter(formatter)
        logging.getLogger('').addHandler(console)

    if not os.path.exists(args.outputDir):
        os.makedirs(args.outputDir)

    logger.info('Using the following settings: %s', args)
    # Pick algorithm
    if args.nb:
        logger.info('Using sparse Naive Bayes for learning.')
        model = sparseNBClassifier(args.NBDefaultPriorWeight, args.NBPosWeight,
                                   args.NBDefVal)
    elif args.snow:
        logger.info('Using naive bayes (SNoW) for learning.')
        model = SNoW()
    elif args.predef:
        logger.info('Using predefined predictions.')
        model = Predefined(args.predef)
    else:
        logger.info('No algorithm specified. Using sparse Naive Bayes.')
        model = sparseNBClassifier(args.NBDefaultPriorWeight, args.NBPosWeight,
                                   args.NBDefVal)

    # Initializing model
    if args.init:
        logger.info('Initializing Model.')
        startTime = time()

        # Load all data
        dicts = Dictionaries()
        dicts.init_all(args)

        # Create Model
        trainData = dicts.featureDict.keys()
        model.initializeModel(trainData, dicts)

        if args.learnTheories:
            depFile = os.path.join(args.inputDir, args.depFile)
            theoryModels = TheoryModels(args.theoryDefValPos,
                                        args.theoryDefValNeg,
                                        args.theoryPosWeight)
            theoryModels.init(depFile, dicts)
            theoryModels.save(args.theoryFile)

        model.save(args.modelFile)
        dicts.save(args.dictsFile)

        logger.info('All Done. %s seconds needed.',
                    round(time() - startTime, 2))
        return 0
    # Create predictions and/or update model
    else:
        lineCounter = 1
        statementCounter = 1
        computeStats = False
        dicts = Dictionaries()
        theoryModels = TheoryModels(args.theoryDefValPos, args.theoryDefValNeg,
                                    args.theoryPosWeight)
        # Load Files
        if os.path.isfile(args.dictsFile):
            #logger.info('Loading Dictionaries')
            #startTime = time()
            dicts.load(args.dictsFile)
            #logger.info('Done %s',time()-startTime)
        if os.path.isfile(args.modelFile):
            #logger.info('Loading Model')
            #startTime = time()
            model.load(args.modelFile)
            #logger.info('Done %s',time()-startTime)
        if os.path.isfile(args.theoryFile) and args.learnTheories:
            #logger.info('Loading Theory Models')
            #startTime = time()
            theoryModels.load(args.theoryFile)
            #logger.info('Done %s',time()-startTime)
        logger.info('All loading completed')

        # IO Streams
        OS = open(args.predictions, 'w')
        IS = open(args.inputFile, 'r')

        # Statistics
        if args.statistics:
            stats = Statistics(args.cutOff)
            if args.learnTheories:
                theoryStats = TheoryStatistics()

        predictions = None
        predictedTheories = None
        #Reading Input File
        for line in IS:
            #           try:
            if True:
                if line.startswith('!'):
                    problemId = dicts.parse_fact(line)
                    # Statistics
                    if args.statistics and computeStats:
                        computeStats = False
                        # Assume '!' comes after '?'
                        if args.predef:
                            predictions = model.predict(problemId)
                        if args.learnTheories:
                            tmp = [
                                dicts.idNameDict[x]
                                for x in dicts.dependenciesDict[problemId]
                            ]
                            usedTheories = set([x.split('.')[0] for x in tmp])
                            theoryStats.update(
                                (dicts.idNameDict[problemId]).split('.')[0],
                                predictedTheories, usedTheories,
                                len(theoryModels.accessibleTheories))
                        stats.update(predictions,
                                     dicts.dependenciesDict[problemId],
                                     statementCounter)
                        if not stats.badPreds == []:
                            bp = string.join([
                                str(dicts.idNameDict[x])
                                for x in stats.badPreds
                            ], ',')
                            logger.debug('Bad predictions: %s', bp)

                    statementCounter += 1
                    # Update Dependencies, p proves p
                    dicts.dependenciesDict[problemId] = [
                        problemId
                    ] + dicts.dependenciesDict[problemId]
                    if args.learnTheories:
                        theoryModels.update(problemId,
                                            dicts.featureDict[problemId],
                                            dicts.dependenciesDict[problemId],
                                            dicts)
                    if args.snow:
                        model.update(problemId, dicts.featureDict[problemId],
                                     dicts.dependenciesDict[problemId], dicts)
                    else:
                        model.update(problemId, dicts.featureDict[problemId],
                                     dicts.dependenciesDict[problemId])
                elif line.startswith('p'):
                    # Overwrite old proof.
                    problemId, newDependencies = dicts.parse_overwrite(line)
                    newDependencies = [problemId] + newDependencies
                    model.overwrite(problemId, newDependencies, dicts)
                    if args.learnTheories:
                        theoryModels.overwrite(problemId, newDependencies,
                                               dicts)
                    dicts.dependenciesDict[problemId] = newDependencies
                elif line.startswith('?'):
                    startTime = time()
                    computeStats = True
                    if args.predef:
                        continue
                    name, features, accessibles, hints = dicts.parse_problem(
                        line)

                    # Create predictions
                    logger.info('Starting computation for problem on line %s',
                                lineCounter)
                    # Update Models with hints
                    if not hints == []:
                        if args.learnTheories:
                            accessibleTheories = set([
                                (dicts.idNameDict[x]).split('.')[0]
                                for x in accessibles
                            ])
                            theoryModels.update_with_acc(
                                'hints', features, hints, dicts,
                                accessibleTheories)
                        if args.snow:
                            pass
                        else:
                            model.update('hints', features, hints)

                    # Predict premises
                    if args.learnTheories:
                        predictedTheories, accessibles = theoryModels.predict(
                            features, accessibles, dicts)

                    # Add additional features on premise lvl if sine is enabled
                    if args.sineFeatures:
                        origFeatures = [f for f, _w in features]
                        secondaryFeatures = []
                        for f in origFeatures:
                            if dicts.featureCountDict[f] == 1:
                                continue
                            triggeredFormulas = dicts.featureTriggeredFormulasDict[
                                f]
                            for formula in triggeredFormulas:
                                tFeatures = dicts.triggerFeaturesDict[formula]
                                #tFeatures = [ff for ff,_fw in dicts.featureDict[formula]]
                                newFeatures = set(tFeatures).difference(
                                    secondaryFeatures + origFeatures)
                            for fNew in newFeatures:
                                secondaryFeatures.append(
                                    (fNew, args.sineWeight))
                        predictionsFeatures = features + secondaryFeatures
                    else:
                        predictionsFeatures = features
                    predictions, predictionValues = model.predict(
                        predictionsFeatures, accessibles, dicts)
                    assert len(predictions) == len(predictionValues)

                    # Delete hints
                    if not hints == []:
                        if args.learnTheories:
                            theoryModels.delete('hints', features, hints,
                                                dicts)
                        if args.snow:
                            pass
                        else:
                            model.delete('hints', features, hints)

                    logger.info('Done. %s seconds needed.',
                                round(time() - startTime, 2))
                    # Output
                    predictionNames = [
                        str(dicts.idNameDict[p])
                        for p in predictions[:args.numberOfPredictions]
                    ]
                    predictionValues = [
                        str(x)
                        for x in predictionValues[:args.numberOfPredictions]
                    ]
                    predictionsStringList = [
                        '%s=%s' % (predictionNames[i], predictionValues[i])
                        for i in range(len(predictionNames))
                    ]
                    predictionsString = string.join(predictionsStringList, ' ')
                    outString = '%s: %s' % (name, predictionsString)
                    OS.write('%s\n' % outString)
                else:
                    logger.warning('Unspecified input format: \n%s', line)
                    sys.exit(-1)
                lineCounter += 1
            """
            except:
                logger.warning('An error occurred on line %s .',line)
                lineCounter += 1
                continue
            """
        OS.close()
        IS.close()

        # Statistics
        if args.statistics:
            if args.learnTheories:
                theoryStats.printAvg()
            stats.printAvg()

        # Save
        if args.saveModel:
            model.save(args.modelFile)
            if args.learnTheories:
                theoryModels.save(args.theoryFile)
        dicts.save(args.dictsFile)
        if args.saveStats is not None:
            if args.learnTheories:
                theoryStatsFile = os.path.join(args.outputDir, 'theoryStats')
                theoryStats.save(theoryStatsFile)
            statsFile = os.path.join(args.outputDir, args.saveStats)
            stats.save(statsFile)
    return 0
Example #26
 def exposed_stats(self):
     from stats import Statistics
     s = Statistics()
     s.calc_all()
     return s.get_all()
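The exposed_ prefix suggests this method sits on an RPyC service. A hypothetical client call (host and port are assumptions) could look like:

import rpyc

# Hypothetical client; RPyC maps the service's exposed_stats() to root.stats().
conn = rpyc.connect("localhost", 18861)
print(conn.root.stats())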
Example #27
from defaults import Threshold
from nature import Nature
from stats import Statistics

if __name__ == "__main__":
    nature = Nature()
    round = 1
    while len(nature.total_population) < Threshold.POPULATION:  # keep evolving until the population reaches the threshold
        print(f'Round: {round}')
        nature.enforce_darwinism()
        nature.select()
        nature.age()
        round += 1

    stats = Statistics(nature.male_population, nature.female_population)
    print('##########################')
    print(f'Total Population: {len(nature.total_population)}')
    print(f'Total Fit Population %: {stats.calculate_total_fit_percent()}')
    print(f'Total Male: {len(nature.male_population)}')
    print(f'Total Fit Male %: {stats.calculate_total_fit_male_percent()}')
    print('')
    print(f'Total Female: {len(nature.female_population)}')
    print(f'Total Fit Female %: {stats.calculate_total_fit_female_percent()}')
    print('##########################')
Example #28
    "KeyCode":("character(kind=c_char)", "c_char"),
    "KeySym":("integer(c_long)", "c_long"),
    # enum GWin32OSType
    "GWin32OSType":("integer(c_int)", "c_int")
}

# Two words types:
TYPES2_DICT = {
    "long double": ("real(c_long_double)", "c_long_double"),
    "unsigned long":("integer(c_long)", "c_long"),
    "unsigned short":("integer(c_short)", "c_short"),
    "unsigned int":("integer(c_int)", "c_int")
}

# An instance of the Statistics class:
my_stats = Statistics()

# An instance of the Errors class:
my_errors = Errors()

#*************************************************************************
# Pass 1: scan all header files to find all enum types, all pointers to
# functions (funptr) and add derived GTK types
#*************************************************************************
print("\033[1m Pass 1: looking for enumerators, funptr and derived types...\033[0m")

gtk_types = []

# These lists will be used by the iso_c_binding() function:
gtk_enums = []
gtk_funptr = []
Example #29
import cv2  # OpenCV, needed for VideoCapture and the window calls below
from obj_tracker import Obj_tracker
# from sort import Sort
from stats import Statistics
from rectifier import Rectifier
import pandas as pd

# background subtraction model
bg_sub = Bg_subtractor()

# pedestrians detection model
ped_det = Obj_detector()

# ped_tr = Sort()

# performance statistics
stats = Statistics("../groundtruth.txt")

cap = cv2.VideoCapture('../pedestrians.mp4')
width = int(cap.get(3))   # CAP_PROP_FRAME_WIDTH
height = int(cap.get(4))  # CAP_PROP_FRAME_HEIGHT

# rectifying model
rect = Rectifier(width, height, shift=50, up=True, central_zoom=80)

# create a named windows and move it
cv2.namedWindow('video')
cv2.moveWindow('video', 70, 30)

next_frame, frame = cap.read()
play = True