Code example #1
0
File: test_vm_actions.py  Project: cedadev/eos-db
 def test_add_specification(self):
     """Store a cores/RAM specification against a machine, then read it back."""
     vm = self.my_create_appliance("testspecification")
     s.touch_to_add_specification(vm, 2, 4)
     got_cores, got_ram = s.get_latest_specification(vm)
     self.assertEqual(got_cores, 2)
     self.assertEqual(got_ram, 4)
Code example #2
0
File: views.py  Project: environmentalomics/eos-db
def deboost_server(request):
    """Deboost a server, i.e.:
        Credit the user's account
        Cancel any scheduled De-Boost
        Set the CPUs and RAM back to the baseline
        Put the server in a "Pre_Deboosting" status

    A user can Deboost at ANY time, but they only get credit if credit is due.
    Deboosting a non-boosted server just amounts to a restart.

    :param {vm or name}: ID of VApp which we want to deboost.
    :returns: dict(touch_id, vm_id, credit) where credit is the refunded amount
    """
    vm_id, actor_id = _resolve_vm(request)

    # Refund whatever credit is owed for the unused boost time.
    refund = server.get_time_until_deboost(vm_id)[3]
    server.touch_to_add_credit(actor_id, refund)

    #Scheduled timeouts don't need cancelling: they are ignored on unboosted
    #servers, and a later re-boost masks the old timeout with a new one.

    #Deliberately reset to the *baseline* spec rather than the previous one,
    #so that altering the baseline in the config leads to all VMs ending up
    #in the new state after a Boost/Deboost cycle.
    baseline_cores, baseline_ram = server.get_baseline_specification(vm_id)

    server.touch_to_add_specification(vm_id, baseline_cores, baseline_ram)

    # Flag the state change so the agents pick the job up.
    touch_id = server.touch_to_state(actor_id, vm_id, "Pre_Deboosting")

    return dict(touch_id=touch_id, vm_id=vm_id, credit=refund)
Code example #3
0
    def test_get_deboost_jobs(self):
        #Create 4 servers, boost each to 2 cores + 40GB RAM, and schedule
        #deboosts at 14hrs ago, 1hr ago, right now, and 1 hour hence.
        # /deboost_jobs?past=24;future=12 should list all 4
        # /deboost_jobs?past=12 should list 2
        #After deboosting VM2, /deboost_jobs?past=12 should list 1
        #
        # /deboost_jobs returns [dict(boost_remain=..., artifact_id=..., artifact_name=...)]
        app = self._get_test_app()

        #Without a baseline boost level every machine just shows as unboosted,
        #so install a single 2-core/40GB level first.
        boost_conf = server.get_boost_levels()
        boost_conf['levels'] = [dict(label="is_boosted",
                                     ram=40,
                                     cores=2,
                                     cost=1)]
        server.set_config(dict(BoostLevels=boost_conf))

        owner = create_user('someuser')
        for name, hours in [('srv1', -14), ('srv2', -1), ('srv3', 0), ('srv4', 1)]:
            vm_id = create_server(name, owner)
            server.touch_to_add_specification(vm_id, 2, 40)
            server.touch_to_add_deboost(vm_id, hours)

        #A negative-time deboost should be OK, and should show as Expired.
        #Confirm it worked from both the external and the internal view.
        srv1_info = app.get('/servers/srv1').json
        self.assertEqual(srv1_info['boostremaining'], "Expired")

        srv1_tud = server.get_time_until_deboost(srv1_info['artifact_id'])
        self.assertTrue(srv1_tud[1] < (-13 * 60 * 60))

        #All four jobs fall in the past-24h / next-12h window.
        all_jobs = app.get('/deboost_jobs', dict(past=24 * 60, future=12 * 60)).json
        self.assertEqual(len(all_jobs), 4)

        #The last-12-hours query is what the deboost_daemon will normally make.
        recent = app.get('/deboost_jobs', dict(past=12 * 60)).json
        self.assertEqual({job['artifact_name'] for job in recent},
                         {'srv2', 'srv3'})

        #Deboosting VM2 (via an API call to set the default baseline) removes
        #it from the pending list.
        app.post('/servers/srv2/specification', dict(cores=1, ram=2))
        remaining = app.get('/deboost_jobs', dict(past=12 * 60)).json
        self.assertEqual({job['artifact_name'] for job in remaining}, {'srv3'})
Code example #4
0
File: views.py  Project: environmentalomics/eos-db
def set_server_specification(request):
    """ Set number of cores and amount of RAM for a VM directly.

        Regular users can only do this indirectly via boost/deboost, which
        will check that the levels match one of the approved values.

        :param {vm or name}: ID of the VApp to modify.
        :returns: dict(cores, ram, artifact_id) on success, or HTTPBadRequest
                  when cores/ram are missing, non-numeric, or not positive.
    """
    vm_id, actor_id = _resolve_vm(request)

    # A missing field makes .get() return None (int() -> TypeError) and a
    # non-numeric field raises ValueError.  Both are malformed requests and
    # should yield a 400, not an unhandled 500.
    try:
        cores = int(request.POST.get('cores'))
        ram = int(request.POST.get('ram'))
    except (TypeError, ValueError):
        return HTTPBadRequest()

    if (cores <= 0) or (ram <= 0):
        return HTTPBadRequest()

    server.touch_to_add_specification(vm_id, cores, ram)
    return dict(cores=cores, ram=ram, artifact_id=vm_id)
Code example #5
0
File: views.py  Project: cedadev/eos-db
def set_server_specification(request):
    """ Set number of cores and amount of RAM for a VM. These numbers should
        only match the given specification types listed below.
        Regular users can only do this indirectly via boost/deboost.
    """
    vm_id, actor_id = _resolve_vm(request)

    # FIXME - This really shouldn't be hardcoded.
    valid_cores = ('1', '2', '4', '16')
    valid_ram = ('1', '4', '8', '16', '400')

    cores = request.POST.get('cores')
    ram = request.POST.get('ram')
    if cores in valid_cores and ram in valid_ram:
        server.touch_to_add_specification(vm_id, cores, ram)
        return dict(cores=cores, ram=ram, artifact_id=vm_id)
    return HTTPBadRequest()
Code example #6
0
File: views.py  Project: cedadev/eos-db
def set_server_specification(request):
    """ Set number of cores and amount of RAM for a VM. These numbers should
        only match the given specification types listed below.
        Regular users can only do this indirectly via boost/deboost.
    """
    vm_id, actor_id = _resolve_vm(request)

    # FIXME - This really shouldn't be hardcoded.
    allowed_cores = {"1", "2", "4", "16"}
    allowed_ram = {"1", "4", "8", "16", "400"}

    cores = request.POST.get("cores")
    ram = request.POST.get("ram")
    if (cores not in allowed_cores) or (ram not in allowed_ram):
        return HTTPBadRequest()

    server.touch_to_add_specification(vm_id, cores, ram)
    return dict(cores=cores, ram=ram, artifact_id=vm_id)
Code example #7
0
    def test_boost_avail(self):

        #Now the capacity feature is implemented, each boost level should
        #report an indicator of whether it is currently available.

        #Load up the test configuration.
        conf = self._get_conf_for_test()
        server.set_config(conf)

        def availability():
            """Current 'available' flag for each boost level, in order."""
            levels = self.testapp.get('/boostlevels').json['levels']
            return [lev['available'] for lev in levels]

        #Initially every level should have available == 1.
        flags = availability()
        self.assertEqual(flags, [1] * len(flags))

        levels = conf['BoostLevels']['levels']
        baseline = conf['BoostLevels']['baseline']

        #Now make 10 machines (the name doubles as both create args).
        machines = [server.create_appliance('machine_%i' % n, 'machine_%i' % n)
                    for n in range(10)]

        #Boost 5 of them to L1 - all levels should remain OK.
        # We don't need an owner or credit - just set the spec directly.
        for idx in (1, 2, 3, 4, 5):
            server.touch_to_add_specification(machines[idx],
                                              levels[0]['cores'],
                                              levels[0]['ram'])

        self.assertEqual(availability(), [1, 1, 1])

        #Boost 1 to L2 - levels 1 and 2 should stay available.
        server.touch_to_add_specification(machines[6],
                                          levels[1]['cores'],
                                          levels[1]['ram'])

        self.assertEqual(availability(), [1, 1, 0])

        #Boost 2 more to L2 - now no level should be available.
        for idx in (7, 8):
            server.touch_to_add_specification(machines[idx],
                                              levels[1]['cores'],
                                              levels[1]['ram'])

        self.assertEqual(availability(), [0, 0, 0])

        #Deboost one from L1 - L1 should be available again but no others.
        server.touch_to_add_specification(machines[1],
                                          baseline['cores'],
                                          baseline['ram'])

        self.assertEqual(availability(), [1, 0, 0])
Code example #8
0
File: views.py  Project: cedadev/eos-db
def deboost_server(request):
    """Deboost a server: ie:
        Credit the users account
        Cancel any scheduled De-Boost
        Set the CPUs and RAM to the previous state
        Put the server in a "Pre_Deboosting" status

    Note that a user can Deboost at ANY time, but they only get credit if credit is due.
    Deboosting a non-boosted server just amounts to a restart.

    :param {vm or name}: ID of VApp which we want to deboost.
    :returns: dict(touch_id, vm_id, credit) where credit is the refunded amount.
    """
    vm_id, actor_id = _resolve_vm(request)

    credit = server.get_time_until_deboost(vm_id)[3]
    server.touch_to_add_credit(actor_id, credit)

    #Scheduled timeouts don't need cancelling as they are ignored on unboosted servers.

    #FIXME - yet more hard-coding for cores/RAM
    prev_cores = 1
    prev_ram = 16
    try:
        prev_cores, prev_ram = server.get_previous_specification(vm_id)
    except Exception:
        # Best-effort: fall back to the defaults above.  Catch Exception, not
        # a bare "except:", so SystemExit/KeyboardInterrupt still propagate.
        pass

    #If we're not careful, with this "go back to previous config" semantics, if a user de-boosts
    #a server twice they will actually end up setting their baseline config to the boosted specs.
    #Therefore only restore the previous spec when it isn't an upgrade.
    current_cores, current_ram = server.get_latest_specification(vm_id)

    if prev_ram <= current_ram:
        server.touch_to_add_specification(vm_id, prev_cores, prev_ram)

    # Tell the agents to get to work.
    touch_id = server.touch_to_state(actor_id, vm_id, "Pre_Deboosting")

    return dict(touch_id=touch_id, vm_id=vm_id, credit=credit)
Code example #9
0
File: views.py  Project: cedadev/eos-db
def deboost_server(request):
    """Deboost a server: ie:
        Credit the users account
        Cancel any scheduled De-Boost
        Set the CPUs and RAM to the previous state
        Put the server in a "Pre_Deboosting" status

    Note that a user can Deboost at ANY time, but they only get credit if credit is due.
    Deboosting a non-boosted server just amounts to a restart.

    :param {vm or name}: ID of VApp which we want to deboost.
    :returns: dict(touch_id, vm_id, credit) where credit is the refunded amount.
    """
    vm_id, actor_id = _resolve_vm(request)

    credit = server.get_time_until_deboost(vm_id)[3]
    server.touch_to_add_credit(actor_id, credit)

    # Scheduled timeouts don't need cancelling as they are ignored on unboosted servers.

    # FIXME - yet more hard-coding for cores/RAM
    prev_cores = 1
    prev_ram = 16
    try:
        prev_cores, prev_ram = server.get_previous_specification(vm_id)
    except Exception:
        # Best-effort: fall back to the defaults above.  Catch Exception, not
        # a bare "except:", so SystemExit/KeyboardInterrupt still propagate.
        pass

    # If we're not careful, with this "go back to previous config" semantics, if a user de-boosts
    # a server twice they will actually end up setting their baseline config to the boosted specs.
    # Therefore only restore the previous spec when it isn't an upgrade.
    current_cores, current_ram = server.get_latest_specification(vm_id)

    if prev_ram <= current_ram:
        server.touch_to_add_specification(vm_id, prev_cores, prev_ram)

    # Tell the agents to get to work.
    touch_id = server.touch_to_state(actor_id, vm_id, "Pre_Deboosting")

    return dict(touch_id=touch_id, vm_id=vm_id, credit=credit)
Code example #10
0
File: test_agent_api.py  Project: cedadev/eos-db
    def test_get_deboost_jobs(self):
        #Create 4 servers, boost each to 2 cores + 40GB RAM, and schedule
        #deboosts at 14hrs ago, 1hr ago, right now, and 1 hour hence.
        # /deboost_jobs?past=24;future=12 should list all 4
        # /deboost_jobs?past=12 should list 2
        #After deboosting VM2, /deboost_jobs?past=12 should list 1
        #
        # /deboost_jobs returns [dict(boost_remain=..., artifact_id=..., artifact_name=...)]
        app = self._get_test_app()

        owner = create_user('someuser')
        for name, hours in [('srv1', -14), ('srv2', -1), ('srv3', 0), ('srv4', 1)]:
            vm_id = create_server(name, owner)
            server.touch_to_add_specification(vm_id, 2, 40)
            server.touch_to_add_deboost(vm_id, hours)

        #A negative-time deboost should be OK, and should show as Expired.
        #Confirm it worked from both the external and the internal view.
        srv1_info = app.get('/servers/srv1').json
        self.assertEqual(srv1_info['boostremaining'], "Expired")

        srv1_tud = server.get_time_until_deboost(srv1_info['artifact_id'])
        self.assertTrue(srv1_tud[1] < (-13 * 60 * 60))

        #All four jobs fall in the past-24h / next-12h window.
        all_jobs = app.get('/deboost_jobs', dict(past=24 * 60, future=12 * 60)).json
        self.assertEqual(len(all_jobs), 4)

        #The last-12-hours query is what the deboost_daemon will normally make.
        recent = app.get('/deboost_jobs', dict(past=12 * 60)).json
        self.assertEqual({j['artifact_name'] for j in recent}, {'srv2', 'srv3'})

        #And if we deboost VM2 (via an API call, why not!) it drops off the list.
        app.post('/servers/srv2/specification', dict(cores=1, ram=16))
        remaining = app.get('/deboost_jobs', dict(past=12 * 60)).json
        self.assertEqual({j['artifact_name'] for j in remaining}, {'srv3'})
Code example #11
0
File: views.py  Project: environmentalomics/eos-db
def boost_server(request):
    """Boost a server: ie:
        Debit the users account
        Schedule a De-Boost
        Set the CPUs and RAM
        Put the server in a "preparing" status

    :param {vm or name}: ID of VApp which we want to boost.
    :ram: ram wanted
    :cores: cores wanted
    :hours: hours of boost wanted
    :returns: JSON containing VApp ID and job ID for progress calls, or
              HTTPBadRequest for malformed or unaffordable requests.
    """
    vm_id, actor_id = _resolve_vm(request)

    # A missing field raises KeyError and a non-numeric one ValueError; both
    # are malformed client requests and should yield a 400, not a 500.
    try:
        hours = int(request.POST['hours'])
        cores = int(request.POST['cores'])
        ram = int(request.POST['ram'])
    except (KeyError, ValueError):
        return HTTPBadRequest()

    # FIXME: Really the user should boost to a named level, rather than directly
    # specifying RAM and cores.  For now I'm just going to work out the cost based
    # on the cores requested, and assume the RAM level matches it.
    cost = server.check_and_remove_credits(actor_id, ram, cores, hours)

    if not cost:
        #Either we can't afford it or we can't determine the cost.
        return HTTPBadRequest()

    #Schedule a de-boost
    server.touch_to_add_deboost(vm_id, hours)

    # Set spec
    server.touch_to_add_specification(vm_id, cores, ram)

    # Tell the agents to get to work.
    touch_id = server.touch_to_state(actor_id, vm_id, "Preparing")

    return dict(touch_id=touch_id, vm_id=vm_id, cost=cost)
Code example #12
0
File: views.py  Project: cedadev/eos-db
def boost_server(request):
    """Boost a server: ie:
        Debit the users account
        Schedule a De-Boost
        Set the CPUs and RAM
        Put the server in a "preparing" status

    :param {vm or name}: ID of VApp which we want to boost.
    :ram: ram wanted
    :cores: cores wanted
    :hours: hours of boost wanted
    :returns: JSON containing VApp ID and job ID for progress calls, or
              HTTPBadRequest for malformed or unaffordable requests.
    """
    vm_id, actor_id = _resolve_vm(request)

    # A missing field raises KeyError and a non-numeric one ValueError; both
    # are malformed client requests and should yield a 400, not a 500.
    try:
        hours = int(request.POST["hours"])
        cores = int(request.POST["cores"])
        ram = int(request.POST["ram"])
    except (KeyError, ValueError):
        return HTTPBadRequest()

    # FIXME: Really the user should boost to a named level, rather than directly
    # specifying RAM and cores.  For now I'm just going to work out the cost based
    # on the cores requested, and assume the RAM level matches it.
    cost = server.check_and_remove_credits(actor_id, ram, cores, hours)

    if not cost:
        # Either we can't afford it or we can't determine the cost.
        return HTTPBadRequest()

    # Schedule a de-boost
    server.touch_to_add_deboost(vm_id, hours)

    # Set spec
    server.touch_to_add_specification(vm_id, cores, ram)

    # Tell the agents to get to work.
    touch_id = server.touch_to_state(actor_id, vm_id, "Preparing")

    return dict(touch_id=touch_id, vm_id=vm_id, cost=cost)