Code example #1
File: views.py Project: environmentalomics/eos-db
def extend_boost_on_server(request):
    """Extends the Boost period on a server by adding a new deboost timeout, if
       the user can afford it, and debiting the cost.
    """
    vm_id, actor_id = _resolve_vm(request)
    hours = int(request.POST['hours'])

    #See what level of boost we have just now.
    cores, ram = server.get_latest_specification(vm_id)

    cost = server.check_and_remove_credits(actor_id, ram, cores, hours)

    if not cost:
        #Either we can't afford it or we can't determine the cost.
        return HTTPBadRequest()

    #Work out when the new de-boost should be.  First get the remaining boost time as
    #hours.  It's unlikely to be a whole number.  If the boost has expired somehow then
    #don't be mean - count from now.
    remaining_time = (server.get_time_until_deboost(vm_id)[1] or 0) / 3600.0
    if remaining_time < 0: remaining_time = 0

    #Schedule a later de-boost
    server.touch_to_add_deboost(vm_id, hours + remaining_time)

    return dict(vm_id=vm_id, cost=cost)
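The arithmetic around the new deboost time is easy to misread, so here is a minimal standalone sketch of the same calculation. The helper name and sample values are illustrative only and not from the project; get_time_until_deboost() element [1] is taken to be the seconds remaining, which may be falsy or negative.

def new_deboost_hours(remaining_seconds, extra_hours):
    # Convert the remaining boost time to hours, treat an absent or expired
    # deboost as zero remaining time, then add the newly purchased hours.
    remaining_hours = (remaining_seconds or 0) / 3600.0
    if remaining_hours < 0:
        remaining_hours = 0
    return extra_hours + remaining_hours

assert new_deboost_hours(None, 2) == 2      # no deboost scheduled yet
assert new_deboost_hours(-600, 2) == 2      # boost already expired
assert new_deboost_hours(1800, 2) == 2.5    # half an hour still remaining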
Code example #2
File: views.py Project: environmentalomics/eos-db
def deboost_server(request):
    """Deboost a server: ie:
        Credit the users account
        Cancel any scheduled De-Boost
        Set the CPUs and RAM to the previous state
        Put the server in a "Pre_Deboosting" status

    Note that a user can Deboost at ANY time, but they only get credit if credit is due.
    Deboosting a non-boosted server just amounts to a restart.

    :param {vm or name}: ID of VApp which we want to deboost.
    :returns: dict(touch_id, vm_id, credit) where credit is the refunded amount
    """
    vm_id, actor_id = _resolve_vm(request)

    credit = server.get_time_until_deboost(vm_id)[3]
    server.touch_to_add_credit(actor_id, credit)

    #Scheduled timeouts don't need cancelling as they are ignored on unboosted servers,
    #and if the user re-boosts then the new timeout will mask the old one.

    #Previous semantics would return the VM to the previous state, but this is not
    #what I really want - altering the baseline in the config should lead to all VMs
    #ending up in the new state after a Boost/Deboost.
    new_cores, new_ram = server.get_baseline_specification(vm_id)

    server.touch_to_add_specification(vm_id, new_cores, new_ram)

    # Tell the agents to get to work.
    touch_id = server.touch_to_state(actor_id, vm_id, "Pre_Deboosting")

    return dict(touch_id=touch_id, vm_id=vm_id, credit=credit)
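Both views above index into the tuple returned by server.get_time_until_deboost(): element [1] is treated as the seconds remaining (falsy or negative once there is nothing left to count down) and element [3] as the credit that would be refunded. A small hedged sketch of that convention, assuming the same server module the views import; the wrapper and its key names are made up for readability and are not project code.

def deboost_summary(vm_id):
    # Illustrative wrapper only: the tuple indices come from the views above,
    # the dictionary keys are invented for this sketch.
    tud = server.get_time_until_deboost(vm_id)
    return dict(seconds_remaining=tud[1], refundable_credit=tud[3])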
Code example #3
File: views.py Project: cedadev/eos-db
def extend_boost_on_server(request):
    """Extends the Boost period on a server by adding a new deboost timeout, if
       the user can afford it, and debiting the cost.
    """
    vm_id, actor_id = _resolve_vm(request)
    hours = int(request.POST["hours"])

    # See what level of boost we have just now.  Again, need to FIXME that hard-coding
    cores, ram = server.get_latest_specification(vm_id)

    cost = server.check_and_remove_credits(actor_id, ram, cores, hours)

    if not cost:
        # Either we can't afford it or we can't determine the cost.
        return HTTPBadRequest()

    # Work out when the new de-boost should be.  First get the remaining boost time as
    # hours.  It's unlikely to be a whole number.  If the boost has expired somehow then
    # don't be mean - count from now.
    remaining_time = (server.get_time_until_deboost(vm_id)[1] or 0) / 3600.0
    if remaining_time < 0:
        remaining_time = 0

    # Schedule a later de-boost
    server.touch_to_add_deboost(vm_id, hours + remaining_time)

    return dict(vm_id=vm_id, cost=cost)
Code example #4
    def test_get_deboost_jobs(self):
        #Create 4 servers and boost them all to 40GB + 2cores.
        #Set deboost times at 14hrs ago, 1 hr ago, 0, and 1 hour hence
        #Call /deboost_jobs?past=24;future=12 should see all 4
        #Call /deboost_jobs?past=12 should see 2
        #Deboost VM2, then /get_deboost_jobs?past=12 should see 1

        #/get_deboost_jobs returns [ dict(boost_remain=123, artifact_id=..., artifact_name=...)]
        app = self._get_test_app()

        #Need to set a baseline boost level or all machines just show as unboosted
        new_BL = server.get_boost_levels()
        new_BL['levels'] = [{
            "label": "is_boosted",
            "ram": 40,
            "cores": 2,
            "cost": 1
        }]
        server.set_config(dict(BoostLevels=new_BL))

        servers = ['srv1', 'srv2', 'srv3', 'srv4']
        times = [-14, -1, 0, 1]
        user_id = create_user('someuser')
        for s, hours in zip(servers, times):
            vm_id = create_server(s, user_id)
            server.touch_to_add_specification(vm_id, 2, 40)
            server.touch_to_add_deboost(vm_id, hours)

        #Negative time deboost should be OK, and should show as Expired
        #Confirm the negative-time deboost worked (external + internal view)
        server_1_info = app.get('/servers/srv1').json
        self.assertEqual(server_1_info['boostremaining'], "Expired")

        server_1_tud = server.get_time_until_deboost(
            server_1_info['artifact_id'])
        self.assertTrue(server_1_tud[1] < (-13 * 60 * 60))

        #Look for all jobs - should be 4
        dj1 = app.get('/deboost_jobs', dict(past=24 * 60, future=12 * 60)).json
        self.assertEqual(len(dj1), 4)

        #Look for jobs in last 12 hours (what the deboost_daemon will normally do)
        dj2 = app.get('/deboost_jobs', dict(past=12 * 60)).json
        self.assertEqual(set(s['artifact_name'] for s in dj2),
                         set(('srv2', 'srv3')))

        #And if we deboost VM2 (via an API call to set the default baseline)...
        app.post('/servers/srv2/specification', dict(cores=1, ram=2))
        dj3 = app.get('/deboost_jobs', dict(past=12 * 60)).json
        self.assertEqual(set(s['artifact_name'] for s in dj3), set(('srv3', )))
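The test encodes the contract the deboost daemon depends on: /deboost_jobs takes past and future windows in minutes and returns a list of dicts carrying boost_remain, artifact_id and artifact_name, and posting a smaller specification to /servers/<name>/specification is what actually deboosts a machine. A hypothetical daemon step built only on those two calls might look like the following sketch; the function itself, the 12-hour window and the baseline values are assumptions, not project code.

def poll_and_deboost(app, baseline_cores=1, baseline_ram=16):
    # Hypothetical sketch: fetch servers whose deboost time fell within the
    # last 12 hours and push each back to an assumed baseline specification,
    # reusing the two endpoints exercised by the test above.
    jobs = app.get('/deboost_jobs', dict(past=12 * 60)).json
    for job in jobs:
        app.post('/servers/%s/specification' % job['artifact_name'],
                 dict(cores=baseline_cores, ram=baseline_ram))
    return [job['artifact_name'] for job in jobs]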
Code example #5
File: views.py Project: cedadev/eos-db
def deboost_server(request):
    """Deboost a server: ie:
        Credit the users account
        Cancel any scheduled De-Boost
        Set the CPUs and RAM to the previous state
        Put the server in a "Pre_Deboosting" status

    Note that a user can Deboost at ANY time, but they only get credit if credit is due.
    Deboosting a non-boosted server just amounts to a restart.

    :param {vm or name}: ID of VApp which we want to deboost.
    :returns: dict(touch_id, vm_id, credit) where credit is the refunded amount
    """
    vm_id, actor_id = _resolve_vm(request)

    credit = server.get_time_until_deboost(vm_id)[3]
    server.touch_to_add_credit(actor_id, credit)

    #Scheduled timeouts don't need cancelling as they are ignored on unboosted servers.

    #FIXME - yet more hard-coding for cores/RAM
    prev_cores = 1
    prev_ram = 16
    try:
        prev_cores, prev_ram = server.get_previous_specification(vm_id)
    except:
        #OK, use the defaults.
        pass

    #If we're not careful, with this "go back to previous config" semantics, if a user de-boosts
    #a server twice they will actually end up setting their baseline config to the boosted specs.
    #Therefore do a check.
    current_cores, current_ram = server.get_latest_specification(vm_id)

    if not (prev_ram > current_ram):
        server.touch_to_add_specification(vm_id, prev_cores, prev_ram)

    # Tell the agents to get to work.
    touch_id = server.touch_to_state(actor_id, vm_id, "Pre_Deboosting")

    return dict(touch_id=touch_id, vm_id=vm_id, credit=credit)
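The guard before touch_to_add_specification() is the subtle part: the "previous" specification is only restored when it is actually no larger than what the VM currently runs, otherwise a second deboost in a row would record the boosted figures as the new baseline. A standalone illustration of that check follows; the helper name and the sample numbers are illustrative only.

def spec_to_restore(prev_spec, current_spec):
    # prev_spec and current_spec are (cores, ram) tuples, mirroring the values
    # returned by get_previous_specification() and get_latest_specification().
    prev_cores, prev_ram = prev_spec
    current_cores, current_ram = current_spec
    if not (prev_ram > current_ram):
        return prev_cores, prev_ram       # normal case: drop back down
    return current_cores, current_ram     # double deboost: keep current spec

assert spec_to_restore((1, 16), (2, 40)) == (1, 16)   # boosted server deboosts
assert spec_to_restore((2, 40), (1, 16)) == (1, 16)   # already deboosted, unchanged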
Code example #6
File: views.py Project: cedadev/eos-db
def deboost_server(request):
    """Deboost a server: ie:
        Credit the users account
        Cancel any scheduled De-Boost
        Set the CPUs and RAM to the previous state
        Put the server in a "Pre_Deboosting" status

    Note that a user can Deboost at ANY time, but they only get credit if credit is due.
    Deboosting a non-boosted server just amounts to a restart.

    :param {vm or name}: ID of VApp which we want to deboost.
    :returns: dict(touch_id, vm_id, credit) where credit is the refunded amount
    """
    vm_id, actor_id = _resolve_vm(request)

    credit = server.get_time_until_deboost(vm_id)[3]
    server.touch_to_add_credit(actor_id, credit)

    # Scheduled timeouts don't need cancelling as they are ignored on unboosted servers.

    # FIXME - yet more hard-coding for cores/RAM
    prev_cores = 1
    prev_ram = 16
    try:
        prev_cores, prev_ram = server.get_previous_specification(vm_id)
    except:
        # OK, use the defaults.
        pass

    # If we're not careful, with this "go back to previous config" semantics, if a user de-boosts
    # a server twice they will actually end up setting their baseline config to the boosted specs.
    # Therefore do a check.
    current_cores, current_ram = server.get_latest_specification(vm_id)

    if not (prev_ram > current_ram):
        server.touch_to_add_specification(vm_id, prev_cores, prev_ram)

    # Tell the agents to get to work.
    touch_id = server.touch_to_state(actor_id, vm_id, "Pre_Deboosting")

    return dict(touch_id=touch_id, vm_id=vm_id, credit=credit)
Code example #7
File: test_agent_api.py Project: cedadev/eos-db
    def test_get_deboost_jobs(self):
        #Create 4 servers and boost them all to 40GB + 2cores.
        #Set deboost times at 14hrs ago, 1 hr ago, 0, and 1 hour hence
        #Call /deboost_jobs?past=24;future=12 should see all 4
        #Call /deboost_jobs?past=12 should see 2
        #Deboost VM2, then /get_deboost_jobs?past=12 should see 1

        #/get_deboost_jobs returns [ dict(boost_remain=123, artifact_id=..., artifact_name=...)]
        app = self._get_test_app()

        servers = ['srv1', 'srv2', 'srv3', 'srv4']
        times   = [  -14 ,    -1 ,     0 ,     1 ]
        user_id = create_user('someuser')
        for s, hours in zip(servers, times):
            vm_id = create_server(s, user_id)
            server.touch_to_add_specification(vm_id, 2, 40)
            server.touch_to_add_deboost(vm_id, hours)

        #Negative time deboost should be OK, and should show as Expired
        #Confirm the negative-time deboost worked (external + internal view)
        server_1_info = app.get('/servers/srv1').json
        self.assertEqual(server_1_info['boostremaining'], "Expired")

        server_1_tud = server.get_time_until_deboost(server_1_info['artifact_id'])
        self.assertTrue(server_1_tud[1] < (-13 * 60 * 60))

        #Look for all jobs - should be 4
        dj1 = app.get('/deboost_jobs', dict(past=24*60, future=12*60)).json
        self.assertEqual(len(dj1), 4)

        #Look for jobs in last 12 hours (what the deboost_daemon will normally do)
        dj2 = app.get('/deboost_jobs', dict(past=12*60)).json
        self.assertEqual( set(s['artifact_name'] for s in dj2), set(('srv2', 'srv3')) )

        #And if we deboost VM2 (via an API call, why not!)...
        app.post('/servers/srv2/specification', dict(cores=1, ram=16))
        dj3 = app.get('/deboost_jobs', dict(past=12*60)).json
        self.assertEqual( set(s['artifact_name'] for s in dj3), set(('srv3',)) )