Example #1
    def deploy_job(self, job):
        current_time = ProvisionerConfig().simulate_time
        instance_types = ProvisionerConfig().instance_types
        for resource in self.resources:
            if resource.state == "IDLE":
                for instance in instance_types:
                    # check that it fits this instance
                    if (resource.type == instance.type
                            and self.check_requirements(instance, job)):

                        # The job fits this instance, so place it here.
                        resource.job_id = job.id
                        # Set the time for the job to finish: first convert
                        # the job's execution time for this instance type.
                        exec_seconds = self.exec_time(job, resource.type)

                        logger.debug("SIMULATION CONDOR: Deploying " +
                                     "job %s to resource %s for %s" %
                                     (job.id, resource.id, exec_seconds))
                        # convert the job's request time into a timestamp

                        req_time = job.req_time
                        ProvisionerConfig().dbconn.execute(
                            ("insert into jobs (test, job_id, start_time, "
                             "req_time) values ('%s', %s, '%s', '%s');" %
                             (ProvisionerConfig().run_name, int(
                                 job.id), self.get_fake_time(), req_time)))

                        resource.job_finish = current_time + \
                            datetime.timedelta(seconds=exec_seconds)
                        resource.state = "EXECUTING"
                        job.sim_status = "EXECUTING"
                        self.executing_jobs = self.executing_jobs + [job.id]
                        return
Example #2
    def get_spot_prices(self, resource, tenant):
        """
        Get the current spot price for each instance type.
        """

        new_time = ProvisionerConfig().simulate_time
        now = new_time.strftime('%Y-%m-%d %H:%M:%S')
        start_time = (ProvisionerConfig().simulate_time -
                      datetime.timedelta(seconds=60))
        start_time_z = start_time.strftime('%Y-%m-%d %H:%M:%S')

        conn = boto.connect_ec2(tenant.access_key, tenant.secret_key)
        jobCost = 0
        timeStr = str(now).replace(" ", "T") + "Z"
        startTimeStr = str(start_time_z).replace(" ", "T") + "Z"
        prices = conn.get_spot_price_history(
            instance_type=resource.type,
            product_description="Linux/UNIX (Amazon VPC)",
            end_time=timeStr,
            start_time=startTimeStr)
        lowest_price = 1000000
        for price in prices:
            for key, val in tenant.subnets.iteritems():
                if (price.availability_zone == key and val == resource.subnet
                        and float(price.price) < lowest_price):
                    lowest_price = float(price.price)
        return lowest_price
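The snippet above builds the ISO-8601 timestamps boto expects by formatting to '%Y-%m-%d %H:%M:%S' and then patching in the 'T' and trailing 'Z'. A small standalone sketch (the datetime values here are made up) shows that strftime can produce the same strings directly:

import datetime

now = datetime.datetime(2017, 1, 1, 12, 0, 0)     # stands in for simulate_time
start = now - datetime.timedelta(seconds=60)

# Format straight to the "YYYY-MM-DDTHH:MM:SSZ" form used for
# start_time/end_time above, without the replace(" ", "T") + "Z" step.
time_str = now.strftime('%Y-%m-%dT%H:%M:%SZ')
start_str = start.strftime('%Y-%m-%dT%H:%M:%SZ')
print(time_str + " " + start_str)  # 2017-01-01T12:00:00Z 2017-01-01T11:59:00Z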
Example #3
    def make_distributions(self):
        """
        Fit log-normal distributions to the measured negotiation and
        contextualisation times from launch_stats and store lists of
        sampled values. The fulfilled-time distribution uses fixed
        normal parameters.
        """
        cmd = ("select extract(epoch from(exec_start_time - join_time)) from"
               " launch_stats where exec_start_time is not null and "
               "extract(epoch from(exec_start_time - join_time)) < 300;")
        data = ProvisionerConfig().dbconn.execute(cmd)
        neg_list = []
        for r in data:
            neg_list.append(r['date_part'])
        mu, sigma = stats.norm.fit(np.log(neg_list))
        shape, loc, scale = stats.lognorm.fit(neg_list, floc=0)
        dist = stats.lognorm(shape, loc, scale)
        self.negotiate_time_dist = list(dist.rvs(size=10000))

        mu, sigma = 7.118134, 0.895632  # mean and standard deviation
        self.fulfilled_time_dist = list(np.random.normal(mu, sigma, 10000))

        cmd = ("select extract(epoch from(join_time - fulfilled_time)) from "
               "launch_stats where exec_start_time is not null and "
               "extract(epoch from(join_time - fulfilled_time)) < 300;")
        data = ProvisionerConfig().dbconn.execute(cmd)
        context_list = []
        for r in data:
            context_list.append(r['date_part'])
        mu, sigma = stats.norm.fit(np.log(context_list))
        shape, loc, scale = stats.lognorm.fit(context_list, floc=0)
        dist = stats.lognorm(shape, loc, scale)
        self.contextualise_time_dist = list(dist.rvs(size=10000))
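The fitting pattern above (fit a log-normal with the location pinned at zero, freeze it, then draw a pool of samples) can be exercised on its own. A minimal sketch with synthetic data standing in for the launch_stats measurements:

import numpy as np
from scipy import stats

# Synthetic stand-in for the measured times pulled from launch_stats (seconds).
samples = np.random.lognormal(mean=2.0, sigma=0.5, size=500)

# Fit with the location fixed at zero, freeze the distribution, then sample.
shape, loc, scale = stats.lognorm.fit(samples, floc=0)
dist = stats.lognorm(shape, loc, scale)
pool = list(dist.rvs(size=10000))
print("shape=%.3f scale=%.3f sample mean=%.1f" % (shape, scale, np.mean(pool)))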
Example #4
def cancel_unmigrated_requests(tenants):
    """
    There are two cases to handle here. Either there are no idle jobs, so
    all requests should be cancelled.
    Or there are idle jobs but the existing requests could not be migrated
    to them. In this case, any orphaned requests should also be cancelled.
    """
    for tenant in tenants:
        # start by grabbing all of the open spot requests for this tenant
        ids_to_check = ProvisionerConfig().simulator.get_open_requests()

        # Get the set of idle job numbers
        idle_job_numbers = []
        for job in tenant.jobs:
            if job.sim_status == 'IDLE':
                idle_job_numbers.append(job.id)

        # now get all of the orphaned requests
        reqs = get_orphaned_requests(tenant, ids_to_check, idle_job_numbers)
        reqs_to_cancel = []
        # build a nice list we can pass to boto
        for req in reqs:
            reqs_to_cancel.append(req['request_id'])

        # now cancel all of these requests
        try:
            if len(reqs_to_cancel) > 0:
                logger.debug("Cancelling unmigrated requests: %s" %
                             reqs_to_cancel)
                ProvisionerConfig().simulator.cancel_spot_instance_requests(
                    reqs_to_cancel)
        except Exception as e:
            logger.exception("Error removing spot instance requests.")
            raise e
Example #5
def instance_acquired(inst, request, tenant, conn):
    """
    A new instance has been acquired, so insert a record into the instance
    table and tag it with the tenant name
    """

    launch_time = ProvisionerConfig().simulator.get_fake_time()
    # insert it into the database
    ProvisionerConfig().dbconn.execute(
        ("insert into instance (request_id, instance_id, fulfilled_time, " +
         "public_dns, private_dns) values ('%s', '%s', '%s', '%s', '%s')") %
        (request['id'], inst.id, launch_time, 'pubdns', 'privdns'))
    logger.debug("An instance has been acquired. " +
                 "Tenant={0}; Request={1}, Instance={2}".format(
                     tenant.name, repr(request), repr(inst)))

    # now tag the request
    api.tag_requests(inst.id, tenant.name, conn)

    # if the job is still in the idle queue, we should remove it as the
    # instance was now launched for it
    for job in tenant.jobs:
        logger.debug("Checking {0} vs {1}".format(repr(job), repr(request)))
        if job.id == request['job_runner_id']:
            logger.debug("Launched an instance for job %s - removing it." %
                         request['job_runner_id'])
            job.fulfilled = True
Example #6
 def provision_resources(self):
     # Use each tenant's credentials to query the AWS API for spot price
     # data; the prices are stored on the Instance objects.
     for t in self.tenants:
         if len(t.idle_jobs) == 0:
             continue
         if (ProvisionerConfig().DrAFTS or
                 ProvisionerConfig().DrAFTSProfiles):
             if ProvisionerConfig().simulate:
                 # when simulating only load it every 5 mins.
                 if ((ProvisionerConfig().simulate_time -
                      ProvisionerConfig().sim_time).total_seconds() %
                         300 == 0):
                     self.load_drafts_data()
             else:
                 if self.run_iterations % 300 == 0:
                     self.load_drafts_data()
         # Get the spot prices for this tenant's AZ's
         if ProvisionerConfig().simulate:
             simaws.api.get_spot_prices(
                 ProvisionerConfig().instance_types, t)
         else:
             aws.api.get_spot_prices(ProvisionerConfig().instance_types,
                                     t)
         # Select a request to make for each job
         self.select_instance_type(ProvisionerConfig().instance_types)
         # Make the requests for the resources
         if ProvisionerConfig().simulate:
             simaws.api.request_resources(t)
         else:
             aws.api.request_resources(t)
Example #7
    def run_condor(self, tenants):
        """
        Be the condor agent. This will manage putting jobs on
        the resources etc.
        """
        logger.debug("SIMULATION CONDOR: starting.")
        instance_types = ProvisionerConfig().instance_types

        current_time = ProvisionerConfig().simulate_time

        # logger.debug("SIMULATION CONDOR: loaded tenants.")
        # now i need to add status to each of the jobs
        # Run through the jobs and set their states so they
        # are ignored by other things
        for t in tenants:
            for job in list(t.jobs):
                if job.id in self.finished_jobs:
                    job.sim_status = "FINISHED"
                    if job in t.idle_jobs:
                        t.idle_jobs.remove(job)
                    t.jobs.remove(job)

                elif job.id in self.executing_jobs:
                    job.sim_status = "EXECUTING"
                    if job in t.idle_jobs:
                        t.idle_jobs.remove(job)
            for t in tenants:
                for job in t.jobs:
                    for resource in self.resources:
                        if (job.id == resource.job_id
                                and resource.job_finish is not None
                                and resource.job_finish < current_time):
                            # Mark it as all done
                            job.sim_status = "FINISHED"
                            resource.state = "IDLE"
                            if job.id in self.executing_jobs:
                                self.executing_jobs.remove(job.id)
                            if job.id not in self.finished_jobs:

                                self.finished_jobs = self.finished_jobs + \
                                    [job.id]
                                logger.debug("SIMULATION CONDOR: Finished " +
                                             "job %s." % (job.id))
                                # resource.state = "IDLE"
                                ProvisionerConfig().dbconn.execute(
                                    ("update jobs set end_time = '%s' " +
                                     "where job_id = %s and test = '%s';") %
                                    (ProvisionerConfig().simulate_time,
                                     int(job.id),
                                     ProvisionerConfig().run_name))

            logger.debug("SIMULATION CONDOR: deploying new jobs.")
            for t in tenants:
                for job in t.jobs:
                    # check if it can fit on the instance
                    if job.sim_status == "IDLE":
                        self.deploy_job(job)
Example #8
 def get_fake_time(self, time=None):
     """
     Return the simulated time, optionally offset by the given number
     of seconds.
     """
     if time is None:
         return ProvisionerConfig().simulate_time
     offset = datetime.timedelta(seconds=time)
     return ProvisionerConfig().simulate_time + offset
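A standalone sketch of the corrected helper's behaviour; ProvisionerConfig is replaced here by a plain module-level datetime so the example runs on its own (the date is made up):

import datetime

# Hypothetical stand-in for ProvisionerConfig().simulate_time.
simulate_time = datetime.datetime(2017, 1, 1, 12, 0, 0)

def get_fake_time(time=None):
    # No offset when no value is passed; otherwise shift by that many seconds.
    if time is None:
        return simulate_time
    return simulate_time + datetime.timedelta(seconds=time)

print(get_fake_time())      # 2017-01-01 12:00:00
print(get_fake_time(90))    # 2017-01-01 12:01:30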
Example #9
File: api.py Project: globus-labs/SCRIMP
def launch_spot_request(conn, request, tenant, job):
    try:

        cost_aware_req = job.cost_aware
        drafts_req = job.DrAFTS
        drafts_avg = job.DrAFTSAvg

        # These assignments override the ones above, so all three request
        # variables currently point at the cost-aware selection.
        cost_aware_req = job.cost_aware
        drafts_req = job.cost_aware
        drafts_avg = job.cost_aware

        mapping = None

        my_req_ids = ProvisionerConfig().simulator.request_spot_instances(
            price=request.bid, image_id=request.ami,
            subnet_id=tenant.subnets[request.zone],
            count=request.count,
            key_name=tenant.key_pair,
            security_group_ids=[tenant.security_group],
            instance_type=request.instance_type,
            user_data=customise_cloudinit(tenant, job),
            block_device_map=mapping,
            job=job)
        for req in my_req_ids:
            # tag each request
            tag_requests(req, tenant.name, conn)

            ProvisionerConfig().dbconn.execute(
                ("insert into instance_request (tenant, instance_type, " +
                 "price, job_runner_id, request_type, request_id, " +
                 "subnet, cost_aware_ins, cost_aware_bid, " +
                 "cost_aware_subnet, " +
                 "drafts_ins, drafts_bid, drafts_subnet, selected_avg_price, "
                 "cost_aware_avg_price, drafts_avg_price, drafts_avg_ins, " +
                 "drafts_avg_bid, drafts_avg_subnet, drafts_avg_avg_price) " +
                 "values ('%s', '%s', %s, '%s', '%s', '%s', %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)") %
                (tenant.db_id, request.instance.db_id, request.OraclePrice,
                 job.id,
                 "spot", req, tenant.subnets_db_id[request.zone],
                 cost_aware_req.instance.db_id, cost_aware_req.bid,
                 tenant.subnets_db_id[cost_aware_req.zone],
                 drafts_req.instance.db_id,
                 drafts_req.DrAFTS, tenant.subnets_db_id[drafts_req.zone],
                 request.AvgPrice, cost_aware_req.AvgPrice,
                 drafts_req.AvgPrice,
                 drafts_avg.instance.db_id, drafts_avg.DrAFTS,
                 tenant.subnets_db_id[
                     drafts_avg.zone],
                 drafts_avg.AvgPrice))

        return my_req_ids
    except boto.exception.EC2ResponseError:
        logger.exception("There was an error communicating with EC2.")
Example #10
def get_orphaned_requests(tenant, ids_to_check, idle_job_numbers):
    """
    Check if there are any requests that don't belong to a job in the idle
    queue
    """

    res = []

    if len(ids_to_check) > 0:
        try:
            # Add quotes and commas to the list items for psql.
            sir_ids = (', '.join('\'' + item + '\'' for item in ids_to_check))
            # Get any requests that do not belong to an idle job
            rows = []
            logger.debug("Open spot request ids: %s; idle jobs: %s" %
                         (sir_ids, idle_job_numbers))
            if len(idle_job_numbers) > 0:
                rows = ProvisionerConfig().dbconn.execute(
                    ("select instance_request.id, instance_type.type, " +
                     "instance_request.job_runner_id, " +
                     "instance_request.request_id from instance_request, " +
                     "instance_type where instance_request.instance_type = " +
                     "instance_type.id and job_runner_id not in (%s) and " +
                     "request_id in (%s) and request_type = 'spot' and " +
                     "tenant = %s") % (",".join(
                         '\'' + num + '\''
                         for num in idle_job_numbers), sir_ids, tenant.db_id))
            else:
                rows = ProvisionerConfig().dbconn.execute(
                    ("select instance_request.id, instance_type.type, " +
                     "instance_request.job_runner_id, " +
                     "instance_request.request_id from instance_request, " +
                     "instance_type where instance_request.instance_type = " +
                     "instance_type.id and " +
                     "request_id in (%s) and request_type = 'spot' and " +
                     "tenant = %s") % (sir_ids, tenant.db_id))
            # The rows object may be closed after returning, so copy each
            # row into a plain dict first.
            for row in rows:
                res.append({
                    'id': row['id'],
                    'type': row['type'],
                    'job_runner_id': row['job_runner_id'],
                    'request_id': row['request_id']
                })
                logger.warn("Orphaned request %s" % row['request_id'])

        except psycopg2.Error:
            logger.exception("Error migrating instances.")

    return res
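The quoting step above turns a Python list of request ids into the comma-separated, single-quoted list that the SQL in (...) clauses expect. A tiny sketch with made-up ids:

ids_to_check = ['sir-abc123', 'sir-def456']   # illustrative spot request ids
sir_ids = ', '.join("'" + item + "'" for item in ids_to_check)
print(sir_ids)   # 'sir-abc123', 'sir-def456'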
Example #11
def update_launch_stats(inst, request, conn):
    """
    Update the launch stats so we record how long instances take to be spun up.
    """

    cmd = "update launch_stats set instance_id = '%s' where request_id = '%s'" % (
        inst.id, request['request_id'])
    ProvisionerConfig().dbconn.execute(cmd)
    cmd = "update launch_stats set fulfilled_time = '%s' where request_id = '%s'" % (
        inst.launch_time, request['request_id'])
    ProvisionerConfig().dbconn.execute(cmd)
    cmd = "update launch_stats set private_dns = '%s' where request_id = '%s'" % (
        inst.private_dns_name, request['request_id'])
    ProvisionerConfig().dbconn.execute(cmd)
Example #12
    def get_bid_price(self, job, tenant, req):
        """
        This function is not totally necessary at the moment, but it could be
        expanded to include more complex logic when placing a bid.
        Currently it computes bid_percent percent of the resource's
        on-demand price and checks that it is below the maximum bid.
        """
        if ProvisionerConfig().DrAFTS or ProvisionerConfig().DrAFTSProfiles:
            return req.price

        bid = float(tenant.bid_percent) / 100 * float(req.odp)
        if bid <= tenant.max_bid_price:
            return bid
        else:
            # Fall back to a fixed default bid when the computed bid would
            # exceed the tenant's maximum.
            return 0.40
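A worked sketch of the bid arithmetic above, with made-up numbers (bid_percent, odp and max_bid_price are illustrative, not values from the source):

bid_percent = 80        # tenant.bid_percent
odp = 0.45              # req.odp, the on-demand price in USD per hour
max_bid_price = 0.50    # tenant.max_bid_price

bid = float(bid_percent) / 100 * float(odp)   # 0.8 * 0.45 = 0.36
if bid <= max_bid_price:
    print(bid)     # 0.36 is used as the spot bid
else:
    print(0.40)    # otherwise the fixed default bid is used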
Example #13
    def instance_acquired(self, resource):
        launch_time = ProvisionerConfig().simulator.get_fake_time()

        data = ProvisionerConfig().dbconn.execute(
            "select id from instance_request where request_id = '%s';" %
            resource.reqid)
        reqid = 0
        for r in data:
            reqid = r['id']
        # insert it into the database
        ProvisionerConfig().dbconn.execute(
            ("insert into instance (request_id, instance_id, " +
             "fulfilled_time, " + "public_dns, private_dns) values " +
             "('%s', '%s', '%s', '%s', '%s')") %
            (reqid, resource.id, resource.launch_time, 'pubdns', 'privdns'))
Example #14
def instance_acquired(inst, request, tenant, conn):
    """
    A new instance has been acquired, so insert a record into the instance
    table and tag it with the tenant name
    """
    launch_time = datetime.datetime.strptime(inst.launch_time,
                                             "%Y-%m-%dT%H:%M:%S.000Z")
    # insert it into the database
    ProvisionerConfig().dbconn.execute(
        ("insert into instance (request_id, instance_id, fulfilled_time, " +
         "public_dns, private_dns) values ('%s', '%s', '%s', '%s', '%s')") %
        (request['id'], inst.id, launch_time, inst.public_dns_name,
         inst.private_dns_name))
    logger.debug("An instance has been acquired. " +
                 "Tenant={0}; Request={1}, Instance={2}".format(
                     tenant.name, repr(request), repr(inst)))

    # Update the launch stats table too.
    update_launch_stats(inst, request, conn)

    # now tag the request
    api.tag_requests(inst.id, tenant.name, conn)

    # if the job is still in the idle queue, we should remove it as the
    # instance was now launched for it
    for job in tenant.jobs:
        logger.debug("Checking {0} vs {1}".format(repr(job), repr(request)))
        if int(job.id) == int(request['job_runner_id']):
            logger.debug("Launched an instance for job %s - removing it." %
                         request['job_runner_id'])
            job.fulfilled = True
Example #15
 def print_cheapest_options(self, sorted_instances):
     # Print out the top three options
     logger.info("Top three to select from:")
     top_three = 3
     for ins in sorted_instances:
         if top_three == 0:
             break
         if ProvisionerConfig().DrAFTS:
             logger.info("DrAFTS:  %s %s %s %s" %
                         (ins.instance_type, ins.zone, ins.price,
                          ins.DrAFTS))
         elif ProvisionerConfig().DrAFTSAvgPrice:
             logger.info("DrAFTS Oracle Price: %s %s %s %s" %
                         (ins.instance_type, ins.zone, ins.price,
                          ins.OraclePrice))
         else:
             logger.info("    %s %s %s" %
                         (ins.instance_type, ins.zone, ins.price))
         top_three = top_three - 1
Example #16
    def load_tenants_and_jobs(self):
        """
        Get all of the tenants from the database and then read the condor
        queue to get their respective jobs.
        """
        # Load all of the tenants

        # Load all of the jobs from condor and associate them with the tenants.
        # This will also remove jobs that should not be processed (e.g. an
        # instance has been fulfilled for them already).
        if ProvisionerConfig().simulate:
            # Only load the tenants from the database once.
            if ProvisionerConfig().relative_time is None:
                self.tenants = tenant.load_from_db()
            self.sched.only_load_jobs(self.tenants)

        else:
            self.tenants = tenant.load_from_db()
            self.sched.load_jobs(self.tenants)
Example #17
def check_for_terminated_instances(reservations):
    for r in reservations:
        if r.state == 'TERMINATED':
            if r.id not in ProvisionerConfig().simulator.already_terminated:
                ProvisionerConfig().simulator.already_terminated.append(r.id)
                # Sadly, I can't seem to get the actual shutdown time
                # i.state_reason does not contain it and i.state does not
                # exist. So instead, we will just flag it as now and sort
                # out determining the full hour when computing cost.
                ProvisionerConfig().dbconn.execute((
                    "update instance set terminate_time = '%s', reason = '%s' "
                    + "where instance_id = '%s' and terminate_time is null;"
                ) % (ProvisionerConfig().simulator.get_fake_time(), r.reason,
                     r.id))
                print((
                    "update instance set terminate_time = '%s', reason = '%s' "
                    "where instance_id = '%s' and terminate_time is null;") %
                    (ProvisionerConfig().simulator.get_fake_time(), r.reason,
                     r.id))
Example #18
 def __init__(self, price, subnet, ins_type, reqid, sleep_time, job):
     self.type = ins_type
     self.price = price
     self.subnet = subnet
     self.sleep_time = sleep_time
     self.job_runner_id = job
     self.reqid = reqid
     self.request_time = ProvisionerConfig().simulate_time
     self.ready_time = self.request_time + \
         datetime.timedelta(seconds=int(sleep_time))
Example #19
File: api.py Project: globus-labs/SCRIMP
def insert_launch_stats(req, request, tenant):
    """
    Record that the instance was launched.
    """
    cmd = ("insert into launch_stats (type, zone, bid, current_price, " +
           "request_id, request_time) values " +
           "('%s','%s','%s','%s','%s', NOW());" %
           (request.instance_type, request.zone, request.price, 0, req))
    logger.debug(cmd)
    ProvisionerConfig().dbconn.execute(cmd)
Example #20
    def only_load_jobs(self, tenants):
        """
        Only do the job load. This is so the ignore stuff can be run after
        the simulator
        has ordered things into executing/finished etc.
        """
        # Clear out the lists then reload them.
        for t in tenants:
            t.idle_jobs = []
            t.jobs = []
        all_jobs = self.get_global_queue()
        if ProvisionerConfig().simulate:
            if ProvisionerConfig().relative_time is None:

                self.job_data = None
                utc = timezone('UTC')
                ProvisionerConfig().relative_time = datetime.datetime.now(utc)

        # Associate the jobs from the global queue with each of the tenants
        self.process_global_queue(all_jobs, tenants)
Example #21
def update_database(tenants):
    """
    Record when an instance is started in the database. This should also
    try to record when an instance is terminated.
    In future work this should probably calculate the cost of the instance
    as well.
    """

    for tenant in tenants:
        try:
            # First get all operating instances (instances probably are not
            # yet tagged, so don't filter them yet.)
            reservations = ProvisionerConfig().simulator.get_all_instances()
            instance_spot_ids = \
                ProvisionerConfig().simulator.get_spot_instances()

            check_for_terminated_instances(reservations)

        except psycopg2.Error:
            logger.exception("Error updating database.")
Example #22
    def __init__(self):
        self.tenants = []

        self.drafts_mapping = {'us-east-1a': 'us-east-1e',
                               'us-east-1b': 'us-east-1d',
                               'us-east-1c': 'us-east-1a',
                               'us-east-1d': 'us-east-1b',
                               'us-east-1e': 'us-east-1c', }

        # Read in any config data and set up the database connection
        ProvisionerConfig()
Example #23
    def load_drafts_data(self):
        """
        To speed this up, load in all the drafts data once per
        provisioning cycle
        """
        cur_time = datetime.datetime.utcnow()
        if ProvisionerConfig().simulate:
            cur_time = ProvisionerConfig().simulator.get_fake_time()

        minus_ten = cur_time - datetime.timedelta(seconds=600)
        query = ("select * from drafts_price where timestamp < "
                 "'%s'::TIMESTAMP and timestamp > '%s'::TIMESTAMP") % (
            cur_time.strftime("%Y-%m-%d %H:%M"),
            minus_ten.strftime("%Y-%m-%d %H:%M"))
        self.drafts_data = []
        logger.debug('getting drafts data: ' + query)
        rows = ProvisionerConfig().dbconn.execute(query)
        for row in rows:
            data = {'time': row['time'], 'price': row['price'],
                    'zone': row['zone'], 'type': row['type']}
            self.drafts_data.append(data)
Example #24
    def get_global_queue(self):
        """
        Read in the jobs that should have started prior to the
        current sim time.
        Create a new job object for each then return a list of them.
        """

        if self.job_data is None:
            with open(ProvisionerConfig().jobs_file) as data_file:
                logger.debug("SIMULATION: READING DATA")
                self.job_data = json.load(data_file)

        # NOTE: this does not currently work for multiple tenants because
        # the queue is kept in self.jobs; change it back to re-reading the
        # full file each iteration if multiple tenants are needed.

        # Work out how many seconds have passed since starting the test
        rel_time = (ProvisionerConfig().simulate_time -
                    ProvisionerConfig().sim_time).total_seconds()
        to_delete = []
        for j in self.job_data:

            if int(j['relative_time']) < rel_time:
                to_delete.append(j)
                description = {}
                description['instype'] = j['instance_type']
                description['duration'] = float(j['duration'])
                req_time = ProvisionerConfig().sim_time + \
                    datetime.timedelta(seconds=int(j['relative_time']))
                newjob = Job('tenant_addr',
                             "%s%s" % (j['id'], ProvisionerConfig().run_id), 1,
                             req_time, 1, 1, 1, description)

                self.jobs.append(newjob)
            else:
                break
        for j in to_delete:
            self.job_data.remove(j)

        return self.jobs
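The release logic above only hands back jobs whose relative_time offset has already elapsed since the simulation start. A self-contained sketch with made-up jobs and times:

import datetime

# Illustrative values; in the method above these come from ProvisionerConfig().
sim_start = datetime.datetime(2017, 1, 1, 0, 0, 0)            # sim_time
simulate_time = sim_start + datetime.timedelta(seconds=250)   # current sim clock

job_data = [{'id': 'a', 'relative_time': 100},
            {'id': 'b', 'relative_time': 200},
            {'id': 'c', 'relative_time': 400}]   # sorted by relative_time

rel_time = (simulate_time - sim_start).total_seconds()
released = [j for j in job_data if int(j['relative_time']) < rel_time]
print([j['id'] for j in released])   # ['a', 'b'] -- 'c' is not yet due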
Example #25
def check_for_terminated_instances(reservations):
    for r in reservations:
        for i in r.instances:
            if i.state == 'terminated':
                # Sadly, I can't seem to get the actual shutdown time
                # i.state_reason does not contain it and i.state does not
                # exist. So instead, we will just flag it as now and sort
                # out determining the full hour when computing cost.
                ProvisionerConfig().dbconn.execute(
                    ("update instance set terminate_time = NOW(), "
                     "reason = '%s' where instance_id = '%s' and "
                     "terminate_time is null;") %
                    (i.state_reason['message'], i.id))
Example #26
    def load_jobs(self, tenants):
        """
        Read in the condor queue and manage the removal of jobs that should
        not be processed.
        """
        # Assess the global queue

        # Clear out the lists then reload them.
        for t in tenants:
            t.idle_jobs = []
            t.jobs = []
        t1 = datetime.datetime.now()
        all_jobs = self.get_global_queue()
        t2 = datetime.datetime.now()
        if ProvisionerConfig().simulate:
            if ProvisionerConfig().relative_time is None:

                self.job_data = None
                utc = timezone('UTC')
                ProvisionerConfig().relative_time = datetime.datetime.now(utc)

        # Associate the jobs from the global queue with each of the tenants
        self.process_global_queue(all_jobs, tenants)
        t3 = datetime.datetime.now()

        ignore_fulfilled_jobs(tenants)
        t4 = datetime.datetime.now()
        # Stop resources being requested too frequently
        stop_over_requesting(tenants)
        t5 = datetime.datetime.now()

        queue_time = (t2 - t1).total_seconds()
        process_time = (t3 - t2).total_seconds()
        ignore_time = (t4 - t3).total_seconds()
        stop_time = (t5 - t4).total_seconds()
        logger.debug("SIMULATION load times: queue (%s), process (%s), "
                     "ignore (%s), stop (%s)" %
                     (queue_time, process_time, ignore_time, stop_time))
Example #27
    def __init__(self, db_id, name, p_addr, c_addr, ip_addr, zone, subnet,
                 subnet_id, vpc, security_group, domain, max_bid, bid_percent,
                 timeout, access_key, secret_key, key_pair):
        self.db_id = db_id
        self.name = name
        self.public_address = p_addr
        self.condor_address = c_addr
        self.public_ip = ip_addr
        self.zone = zone
        self.subnet = subnet
        self.subnet_id = subnet_id
        self.vpc = vpc
        self.security_group = security_group
        self.max_bid_price = max_bid
        self.bid_percent = bid_percent
        self.timeout = timeout
        self.access_key = access_key
        self.secret_key = secret_key
        self.key_pair = key_pair
        self.domain = domain
        self.subnets = {}
        self.subnets_db_id = {}

        # TODO Add the option for varying idle times (how long a job must be
        # in queue before being processed) and request rates to the database

        self.idle_time = 120
        # self.idle_time = 128 # for the 1000 job test the clock was out by 8
        # seconds. This will change wait times.

        if ProvisionerConfig().simulate:
            self.idle_time = ProvisionerConfig().idle_time

        self.request_rate = 120
        self.jobs = []
        self.idle_jobs = []

        self.AvgDrAFTSPrice = {}
Example #28
    def restrict_instances(self, job):
        """
        Filter out instances that do not meet the requirements of a job then
        return a list of the eligible instances.
        """
        eligible_instances = []

        # Check if the instance is viable for the job
        instance_types = ProvisionerConfig().instance_types
        for instance in instance_types:
            if aws.manager.check_requirements(instance, job):
                eligible_instances.append(instance)

        return eligible_instances
Example #29
    def manage_resources(self):
        """
        Use the resource manager to keep the database up to date and manage
        aws requests and resources.
        """
        # Build a set of instances and their current spot prices so we don't
        # need to keep revisiting the AWS API

        if ProvisionerConfig().simulate:
            simaws.manager.process_resources(self.tenants)
        else:
            aws.manager.process_resources(self.tenants)

            scheduler.base_scheduler.ignore_fulfilled_jobs(self.tenants)
Example #30
def migrate_request_to_job(request, job):
    """
    Check if an instance can be repurposed to another job and update the
    database.
    """
    # Check to see if the job can be fulfilled by the requested instance
    if check_requirements(request['type'], job):
        next_idle_job_id = job.id
        try:
            logger.debug(
                "Migrating instance request %s from job %s to job %s." %
                (request['id'], request['job_runner_id'], next_idle_job_id))
            ProvisionerConfig().dbconn.execute(
                ("update instance_request set job_runner_id = '%s' " +
                 "where id = %s") % (next_idle_job_id, request['id']))
            ProvisionerConfig().dbconn.execute(
                ("insert into request_migration " +
                 "(request_id, from_job, to_job, migration_time) " +
                 "values (%s, %s, %s, NOW())") %
                (request['id'], request['job_runner_id'], next_idle_job_id))
            return True
        except psycopg2.Error:
            logger.exception("Error performing migration in database.")