Example #1
def test_now_returns_transaction_time(self):
     date_now = now()
     # Perform a write database operation.
     obj = TimestampedModelTestModel()
     obj.save()
     transaction.commit()
     self.assertLessEqual(date_now, now())
Example #2
 def test_created_bracketed_by_before_and_after_time(self):
     before = now()
     obj = TimestampedModelTestModel()
     obj.save()
     transaction.commit()
     after = now()
     self.assertLessEqual(before, obj.created)
     self.assertGreaterEqual(after, obj.created)
Example #4
    def _update(self):
        """Repopulate the database with process, endpoint, and connection
        information."""
        # Get the region controller and update its hostname and last
        # updated time.
        region_obj = RegionController.objects.get_running_controller()
        hostname = gethostname()
        if region_obj.hostname != hostname:
            region_obj.hostname = hostname
            region_obj.save()

        # Get all the existing processes for the region controller. This is
        # used to remove the old processes that we did not update.
        previous_process_ids = set(
            RegionControllerProcess.objects.filter(
                region=region_obj).values_list("id", flat=True))

        # Loop through all the current workers to update the records in the
        # database. Caution is needed because other region controllers can
        # remove expired processes.
        for pid, conn in self.connections.items():
            process = self._getProcessObjFor(pid)
            process.updated = now()
            process.save()
            if conn['rpc']['port']:
                # Update the endpoints for the provided port.
                self._updateEndpoints(
                    process, self._getListenAddresses(conn['rpc']['port']))
            else:
                # RPC is not running, no endpoints.
                self._updateEndpoints(process, [])
            self._updateConnections(process, conn['rpc']['connections'])
            previous_process_ids.discard(process.id)

        # Delete all the old processes that are dead.
        if previous_process_ids:
            RegionControllerProcess.objects.filter(
                id__in=previous_process_ids).delete()

        # Remove any old processes not owned by this controller. Every
        # controller should update its processes every `UPDATE_INTERVAL`;
        # any that are older than `REMOVE_INTERVAL` are dropped.
        remove_before_time = now() - timedelta(seconds=self.REMOVE_INTERVAL)
        RegionControllerProcess.objects.exclude(region=region_obj).filter(
            updated__lte=remove_before_time).delete()

        # Update the status of this regiond service for this region based on
        # the number of running processes.
        self._updateService(region_obj)

        # Update the status of all regions that have no processes running.
        for other_region in RegionController.objects.exclude(
                system_id=region_obj.id).prefetch_related("processes"):
            # Use len with `all` so the prefetch cache is used.
            if len(other_region.processes.all()) == 0:
                Service.objects.mark_dead(other_region, dead_region=True)
Example #5
 def test_resets_status_expires(self):
     node = factory.make_Node(status=NODE_STATUS.DEPLOYING,
                              status_expires=factory.make_date(),
                              with_empty_script_sets=True)
     payload = {
         'event_type':
         random.choice(['start', 'finish']),
         'origin':
         'curtin',
         'name':
         random.choice([
             'cmd-install', 'cmd-install/stage-early',
             'cmd-install/stage-late'
         ]),
         'description':
         'Installing',
         'timestamp':
         datetime.utcnow(),
     }
     self.processMessage(node, payload)
     node = reload_object(node)
     # Testing for the exact time will fail because now() differs between
     # reset_status_expires and here. Pad by 1 minute to make sure it is
     # reset without making the test flaky.
     expected_time = now() + timedelta(
         minutes=get_node_timeout(NODE_STATUS.DEPLOYING))
     self.assertGreaterEqual(node.status_expires,
                             expected_time - timedelta(minutes=1))
     self.assertLessEqual(node.status_expires,
                          expected_time + timedelta(minutes=1))
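
The padding in the comment above is needed because reset_status_expires calls now() itself when it recomputes the deadline. As a rough sketch of what that reset amounts to (assuming the same get_node_timeout helper and now() the test uses; not necessarily the exact MAAS implementation):

from datetime import timedelta

def reset_status_expires_sketch(node):
    # Hypothetical sketch: push the node's deadline out by the per-status
    # timeout, measured from the transaction time returned by now().
    node.status_expires = now() + timedelta(
        minutes=get_node_timeout(node.status))
    node.save(update_fields=['status_expires'])
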
Example #6
    def test_uses_param_runtime(self):
        node, script_set = self.make_node()
        current_time = now()
        script_set.last_ping = current_time
        script_set.save()
        passed_script_result = factory.make_ScriptResult(
            script_set=script_set, status=SCRIPT_STATUS.PASSED)
        failed_script_result = factory.make_ScriptResult(
            script_set=script_set, status=SCRIPT_STATUS.FAILED)
        pending_script_result = factory.make_ScriptResult(
            script_set=script_set, status=SCRIPT_STATUS.PENDING)
        script = factory.make_Script(timeout=timedelta(minutes=2))
        running_script_result = factory.make_ScriptResult(
            script_set=script_set, status=SCRIPT_STATUS.RUNNING, script=script,
            started=current_time - timedelta(minutes=50),
            parameters={'runtime': {
                'type': 'runtime',
                'value': 60 * 60,
                }})

        mark_nodes_failed_after_missing_script_timeout(current_time, 20)
        node = reload_object(node)

        self.assertEquals(self.status, node.status)
        self.assertThat(self.mock_stop, MockNotCalled())
        self.assertEquals(
            SCRIPT_STATUS.PASSED, reload_object(passed_script_result).status)
        self.assertEquals(
            SCRIPT_STATUS.FAILED, reload_object(failed_script_result).status)
        self.assertEquals(
            SCRIPT_STATUS.PENDING,
            reload_object(pending_script_result).status)
        self.assertEquals(
            SCRIPT_STATUS.RUNNING,
            reload_object(running_script_result).status)
Example #7
 def _updateLastPing(self, node, message):
     """
     Update the last ping in any status which uses a script_set whenever a
     node in that status contacts us.
     """
     script_set_statuses = {
         NODE_STATUS.COMMISSIONING: 'current_commissioning_script_set_id',
         NODE_STATUS.TESTING: 'current_testing_script_set_id',
         NODE_STATUS.DEPLOYING: 'current_installation_script_set_id',
     }
     script_set_property = script_set_statuses.get(node.status)
     if script_set_property is not None:
         script_set_id = getattr(node, script_set_property)
         if script_set_id is not None:
             try:
                 script_set = ScriptSet.objects.select_for_update(
                     nowait=True).get(id=script_set_id)
             except ScriptSet.DoesNotExist:
                  # Weird that it would be deleted, but let's not cause a
                  # stack trace for this error.
                 pass
             except DatabaseError:
                 # select_for_update(nowait=True) failed instantly. Raise
                 # error so @transactional will retry the whole operation.
                 raise make_serialization_failure()
             else:
                 current_time = now()
                 if (script_set.last_ping is None
                         or current_time > script_set.last_ping):
                     script_set.last_ping = current_time
                     script_set.save(update_fields=['last_ping'])
Example #8
    def test_update_updates_updated_time_on_processes(self):
        current_time = now()
        self.patch(timestampedmodel, "now").return_value = current_time

        master = self.make_IPCMasterService()
        yield master.startService()

        pid = random.randint(1, 512)
        yield master.registerWorker(pid, MagicMock())

        def set_to_old_time(procId):
            old_time = current_time - timedelta(seconds=90)
            region_process = RegionControllerProcess.objects.get(id=procId)
            region_process.created = region_process.updated = old_time
            region_process.save()
            return region_process

        region_process = yield deferToDatabase(
            set_to_old_time, master.connections[pid]["process_id"])

        yield master.update()

        region_process = yield deferToDatabase(reload_object, region_process)
        self.assertEquals(current_time, region_process.updated)

        yield master.stopService()
Example #9
def create_staticipaddresses_for_bmcs(apps, schema_editor):
    now = timestampedmodel.now()
    BMC = apps.get_model("maasserver", "BMC")
    StaticIPAddress = apps.get_model("maasserver", "StaticIPAddress")

    for bmc in BMC.objects.all().order_by('id'):
        # parse power_parameters and create new ip addresses
        new_ip = extract_ip_address(bmc.power_type, bmc.power_parameters)
        old_ip = bmc.ip_address.ip if bmc.ip_address else None
        if new_ip != old_ip:
            try:
                if new_ip is None:
                    # Set ip to None, save, then delete the old ip.
                    old_ip_address = bmc.ip_address
                    bmc.ip_address = None
                    bmc.save()
                    if old_ip_address is not None:
                        old_ip_address.delete()
                else:
                    subnet_id = raw_subnet_id_containing_ip(new_ip)
                    # Update or create new StaticIPAddress.
                    if bmc.ip_address:
                        bmc.ip_address.ip = new_ip
                        bmc.ip_address.subnet_id = subnet_id
                        bmc.ip_address.save()
                    else:
                        ip_address = StaticIPAddress(
                            created=now, updated=now, subnet_id=subnet_id,
                            ip=new_ip, alloc_type=IPADDRESS_TYPE.STICKY)
                        ip_address.save()
                        bmc.ip_address = ip_address
                        bmc.save()
            except Exception:
                # Extracting the IP is best-effort.
                pass
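
The migration above leans on extract_ip_address to pull an address out of a BMC's power parameters. A purely illustrative sketch of that idea (hypothetical; the real MAAS helper understands many more power types and key names) might look like:

import json
from ipaddress import ip_address

def extract_ip_address_sketch(power_type, power_parameters):
    # Hypothetical: look for a conventional "power_address" key and return
    # it only if it parses as a literal IP address.
    if not power_type or not power_parameters:
        return None
    if isinstance(power_parameters, str):
        try:
            power_parameters = json.loads(power_parameters)
        except ValueError:
            return None
    candidate = power_parameters.get("power_address")
    if not candidate:
        return None
    try:
        return str(ip_address(candidate))
    except ValueError:
        # Could be a hostname or URL; the real helper does more parsing.
        return None
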
Example #10
def update_last_image_sync(system_id):
    """Update rack controller's last_image_sync.

    For :py:class:`~provisioningserver.rpc.region.UpdateLastImageSync`.
    """
    RackController.objects.filter(system_id=system_id).update(
        last_image_sync=now())
Example #11
    def get_next_version_name(self):
        """Return the version a `BootResourceSet` should use when adding to
        this resource.

        The version naming is specific to how the resource sets will be sorted
        by simplestreams. The version name is YYYYmmdd, with an optional
        revision index (e.g. 20140822.1).

        This method gets the current date and checks whether a revision
        already exists in the database. If it doesn't, just the current date
        is returned. If it does exist, the next revision for that date is
        returned.

        :return: Name of version to use for a new set on this `BootResource`.
        :rtype: string
        """
        version_name = now().strftime("%Y%m%d")
        sets = self.sets.filter(version__startswith=version_name).order_by(
            "version"
        )
        if not sets.exists():
            return version_name
        max_idx = 0
        for resource_set in sets:
            if "." in resource_set.version:
                _, set_idx = resource_set.version.split(".")
                set_idx = int(set_idx)
                if set_idx > max_idx:
                    max_idx = set_idx
        return "%s.%d" % (version_name, max_idx + 1)
Example #12
def _gen_cluster_nodes_power_parameters(nodes, limit):
    """Generate power parameters for `nodes`.

    These fulfil a subset of the return schema for the RPC call for
    :py:class:`~provisioningserver.rpc.region.ListNodePowerParameters`.

    :return: A generator yielding `dict`s.
    """
    five_minutes_ago = now() - timedelta(minutes=5)
    queryable_power_types = [
        driver.name for _, driver in PowerDriverRegistry if driver.queryable
    ]

    qs = (
        nodes.exclude(status=NODE_STATUS.BROKEN)
        .filter(bmc__power_type__in=queryable_power_types)
        .filter(
            Q(power_state_queried=None)
            | Q(power_state_queried__lte=five_minutes_ago)
        )
        .order_by(F("power_state_queried").asc(nulls_first=True), "system_id")
        .distinct()
    )
    for node in qs[:limit]:
        power_info = node.get_effective_power_info()
        if power_info.power_type is not None:
            yield {
                "system_id": node.system_id,
                "hostname": node.hostname,
                "power_state": node.power_state,
                "power_type": power_info.power_type,
                "context": power_info.power_parameters,
            }
Example #13
def list_cluster_nodes_power_parameters(system_id, limit=10):
    """Return power parameters that a rack controller should power check,
    in priority order.

    For :py:class:`~provisioningserver.rpc.region.ListNodePowerParameters`.

    :param limit: Limit the number of nodes for which to return power
        parameters. Pass `None` to remove this numerical limit; there is still
        a limit on the quantity of power information that will be returned.
    """
    try:
        rack = RackController.objects.get(system_id=system_id)
    except RackController.DoesNotExist:
        raise NoSuchCluster.from_uuid(system_id)

    # Generate all the power queries that will fit into the response.
    nodes = rack.get_bmc_accessible_nodes()
    details = _gen_cluster_nodes_power_parameters(nodes, limit)
    details = _gen_up_to_json_limit(details, 60 * (2 ** 10))  # 60kiB
    details = list(details)

    # Update the queried time on all of the nodes at once, so another
    # rack controller does not also query them. This is done for all
    # nodes in a single query.
    system_ids = [detail["system_id"] for detail in details]
    Node.objects.filter(system_id__in=system_ids).update(
        power_state_queried=now()
    )

    return details
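
The _gen_up_to_json_limit call above caps the response at roughly 60 kiB of serialized power parameters. A minimal sketch of a size-limited generator in that spirit (an assumption about the idea, not the actual MAAS code):

import json

def gen_up_to_json_limit_sketch(details, limit_bytes):
    # Hypothetical: yield dicts until their cumulative JSON size would
    # exceed limit_bytes, then stop.
    total = 0
    for detail in details:
        size = len(json.dumps(detail))
        if total + size > limit_bytes:
            break
        total += size
        yield detail
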
Example #14
def move_package_repositories(apps, schema_editor):
    Config = apps.get_model("maasserver", "Config")
    PackageRepository = apps.get_model("maasserver", "PackageRepository")

    # Copied from PackageRepository model.
    MAIN_ARCHES = ["amd64", "i386"]
    PORTS_ARCHES = ["armhf", "arm64", "powerpc", "ppc64el"]

    now = timestampedmodel.now()

    for config in Config.objects.filter(name="main_archive"):
        PackageRepository.objects.create(
            name=config.name,
            description=config.name,
            url=config.value,
            arches=MAIN_ARCHES,
            default=True,
            enabled=True,
            created=now,
            updated=now,
        )
        config.delete()

    if not PackageRepository.objects.filter(name="main_archive").exists():
        PackageRepository.objects.create(
            name="main_archive",
            description="main_archive",
            url="http://archive.ubuntu.com/ubuntu",
            arches=MAIN_ARCHES,
            default=True,
            enabled=True,
            created=now,
            updated=now,
        )

    for config in Config.objects.filter(name="ports_archive"):
        PackageRepository.objects.create(
            name=config.name,
            description=config.name,
            url=config.value,
            arches=PORTS_ARCHES,
            default=True,
            enabled=True,
            created=now,
            updated=now,
        )
        config.delete()

    if not PackageRepository.objects.filter(name="ports_archive").exists():
        PackageRepository.objects.create(
            name="ports_archive",
            description="ports_archive",
            url="http://ports.ubuntu.com/ubuntu-ports",
            arches=PORTS_ARCHES,
            default=True,
            enabled=True,
            created=now,
            updated=now,
        )
Example #15
 def make_Node(self, power_type=None, power_state_queried=None, **kwargs):
     if power_state_queried is None:
         # Ensure that this node was last queried at least 5 minutes ago.
         power_state_queried = now() - timedelta(minutes=randint(6, 16))
     node = factory.make_Node(power_type=power_type,
                              power_state_queried=power_state_queried,
                              **kwargs)
     return node
Example #16
    def test__updates_last_image_sync(self):
        rack = factory.make_RackController()
        previous_sync = rack.last_image_sync = now()
        rack.save()

        update_last_image_sync(rack.system_id)

        self.assertNotEqual(previous_sync, reload_object(rack).last_image_sync)
Example #17
def migrate_power_data_from_node_to_bmc(apps, schema_editor):
    now = timestampedmodel.now()
    Node = apps.get_model("maasserver", "Node")
    BMC = apps.get_model("maasserver", "BMC")
    for node in Node.objects.all().order_by('id'):
        # update bmc info in new BMC tables as if we're re-saving the form
        update_power_type_and_parameters(BMC, node, now)
        node.save()
    clean_orphaned_bmcs(Node, BMC)
Example #18
    def test_mark_nodes_failed_after_builtin_commiss_script_overrun(self):
        user = factory.make_admin()
        node = factory.make_Node(status=NODE_STATUS.COMMISSIONING, owner=user)
        script_set = ScriptSet.objects.create_commissioning_script_set(node)
        node.current_commissioning_script_set = script_set
        node.save()
        current_time = now()
        script_set.last_ping = current_time
        script_set.save()
        pending_script_results = list(script_set.scriptresult_set.all())
        passed_script_result = pending_script_results.pop()
        passed_script_result.status = SCRIPT_STATUS.PASSED
        passed_script_result.save()
        failed_script_result = pending_script_results.pop()
        failed_script_result.status = SCRIPT_STATUS.FAILED
        failed_script_result.save()
        running_script_result = pending_script_results.pop()
        running_script_result.status = SCRIPT_STATUS.RUNNING
        running_script_result.started = current_time - timedelta(minutes=10)
        running_script_result.save()

        mark_nodes_failed_after_missing_script_timeout(current_time, 20)
        node = reload_object(node)

        self.assertEquals(NODE_STATUS.FAILED_COMMISSIONING, node.status)
        self.assertEquals(
            "%s has run past it's timeout(%s)" % (
                running_script_result.name,
                str(NODE_INFO_SCRIPTS[running_script_result.name]["timeout"]),
            ),
            node.error_description,
        )
        self.assertIn(
            call("%s: %s has run past it's timeout(%s)" % (
                node.hostname,
                running_script_result.name,
                str(NODE_INFO_SCRIPTS[running_script_result.name]["timeout"]),
            )),
            self.maaslog.call_args_list,
        )
        if node.enable_ssh:
            self.assertThat(self.mock_stop, MockNotCalled())
        else:
            self.assertThat(self.mock_stop, MockCalledOnce())
            self.assertIn(
                call("%s: Stopped because SSH is disabled" % node.hostname),
                self.maaslog.call_args_list,
            )
        self.assertEquals(SCRIPT_STATUS.PASSED,
                          reload_object(passed_script_result).status)
        self.assertEquals(SCRIPT_STATUS.FAILED,
                          reload_object(failed_script_result).status)
        self.assertEquals(SCRIPT_STATUS.TIMEDOUT,
                          reload_object(running_script_result).status)
        for script_result in pending_script_results:
            self.assertEquals(SCRIPT_STATUS.ABORTED,
                              reload_object(script_result).status)
Example #19
    def test_mark_nodes_failed_after_script_overrun(self):
        node, script_set = self.make_node()
        current_time = now()
        script_set.last_ping = current_time
        script_set.save()
        passed_script_result = factory.make_ScriptResult(
            script_set=script_set, status=SCRIPT_STATUS.PASSED)
        failed_script_result = factory.make_ScriptResult(
            script_set=script_set, status=SCRIPT_STATUS.FAILED)
        pending_script_result = factory.make_ScriptResult(
            script_set=script_set, status=SCRIPT_STATUS.PENDING)
        script = factory.make_Script(timeout=timedelta(seconds=60))
        running_script_result = factory.make_ScriptResult(
            script_set=script_set,
            status=SCRIPT_STATUS.RUNNING,
            script=script,
            started=current_time - timedelta(minutes=10),
        )

        mark_nodes_failed_after_missing_script_timeout(current_time, 20)
        node = reload_object(node)

        self.assertEquals(self.failed_status, node.status)
        self.assertEquals(
            "%s has run past it's timeout(%s)" % (
                running_script_result.name,
                str(running_script_result.script.timeout),
            ),
            node.error_description,
        )
        self.assertIn(
            call("%s: %s has run past it's timeout(%s)" % (
                node.hostname,
                running_script_result.name,
                str(running_script_result.script.timeout),
            )),
            self.maaslog.call_args_list,
        )
        if node.enable_ssh:
            self.assertThat(self.mock_stop, MockNotCalled())
        else:
            self.assertThat(self.mock_stop, MockCalledOnce())
            self.assertIn(
                call("%s: Stopped because SSH is disabled" % node.hostname),
                self.maaslog.call_args_list,
            )
        self.assertEquals(SCRIPT_STATUS.PASSED,
                          reload_object(passed_script_result).status)
        self.assertEquals(SCRIPT_STATUS.FAILED,
                          reload_object(failed_script_result).status)
        self.assertEquals(SCRIPT_STATUS.ABORTED,
                          reload_object(pending_script_result).status)
        self.assertEquals(SCRIPT_STATUS.TIMEDOUT,
                          reload_object(running_script_result).status)
Example #20
 def make_old_processes():
     old_time = now() - timedelta(seconds=90)
     region = RegionController.objects.get_running_controller()
     other_region = factory.make_RegionController()
     old_region_process = RegionControllerProcess.objects.create(
         region=region, pid=random.randint(1, 1000), created=old_time,
         updated=old_time)
     old_other_region_process = RegionControllerProcess.objects.create(
         region=other_region, pid=random.randint(1000, 2000),
         created=old_time, updated=old_time)
     return old_region_process, old_other_region_process
Example #21
    def test__excludes_broken_nodes(self):
        rack = factory.make_RackController(power_type='')
        node_queryable = self.make_Node(bmc_connected_to=rack)

        self.make_Node(status=NODE_STATUS.BROKEN, bmc_connected_to=rack)
        self.make_Node(status=NODE_STATUS.BROKEN,
                       power_state_queried=(now() - timedelta(minutes=10)),
                       bmc_connected_to=rack)

        power_parameters = list_cluster_nodes_power_parameters(rack.system_id)
        system_ids = [params["system_id"] for params in power_parameters]

        self.assertItemsEqual([node_queryable.system_id], system_ids)
Example #22
    def test_mark_nodes_handled_last_ping_None(self):
        node, script_set = self.make_node()
        script_set.last_ping = None
        script_set.save()
        for _ in range(3):
            factory.make_ScriptResult(script_set=script_set,
                                      status=SCRIPT_STATUS.PENDING)

        # No exception should be raised.
        mark_nodes_failed_after_missing_script_timeout(now(), 20)
        node = reload_object(node)
        self.assertEquals(self.status, node.status)
        self.assertThat(self.maaslog, MockNotCalled())
Example #23
    def test_excludes_no_power_type(self):
        rack = factory.make_RackController(power_type="")
        node_queryable = self.make_Node(bmc_connected_to=rack)

        factory.make_Device(power_type="")
        factory.make_Device(power_type="")
        factory.make_Device(power_type="",
                            power_state_queried=(now() -
                                                 timedelta(minutes=10)))

        power_parameters = list_cluster_nodes_power_parameters(rack.system_id)
        system_ids = [params["system_id"] for params in power_parameters]

        self.assertItemsEqual([node_queryable.system_id], system_ids)
Example #24
 def test_skips_those_that_have_not_expired(self):
     maaslog = self.patch(status_monitor.maaslog, "info")
     self.useFixture(SignalsDisabled("power"))
     current_time = now()
     expired_time = current_time + timedelta(minutes=1)
     nodes = [
         factory.make_Node(status=status, status_expires=expired_time)
         for status in NODE_FAILURE_MONITORED_STATUS_TRANSITIONS.keys()
     ]
     mark_nodes_failed_after_expiring(current_time, 20)
     failed_statuses = [reload_object(node).status for node in nodes]
     self.assertItemsEqual(NODE_FAILURE_MONITORED_STATUS_TRANSITIONS.keys(),
                           failed_statuses)
     self.assertThat(maaslog, MockNotCalled())
Example #25
    def test_mark_nodes_failed_after_missing_timeout_prefetches(self):
        self.patch(Node, "mark_failed")
        current_time = now()
        node, script_set = self.make_node()
        script_set.last_ping = current_time
        script_set.save()
        script = factory.make_Script(timeout=timedelta(seconds=60))
        factory.make_ScriptResult(
            script_set=script_set,
            status=SCRIPT_STATUS.RUNNING,
            script=script,
            started=current_time - timedelta(minutes=3),
        )

        counter_one = CountQueries()
        with counter_one:
            mark_nodes_failed_after_missing_script_timeout(current_time, 20)

        nodes = []
        for _ in range(6):
            node, script_set = self.make_node()
            script_set.last_ping = current_time
            script_set.save()
            script = factory.make_Script(timeout=timedelta(seconds=60))
            factory.make_ScriptResult(
                script_set=script_set,
                status=SCRIPT_STATUS.RUNNING,
                script=script,
                started=current_time - timedelta(minutes=3),
            )
            nodes.append(node)

        counter_many = CountQueries()
        with counter_many:
            mark_nodes_failed_after_missing_script_timeout(current_time, 20)

        # Lookup takes 7 queries no matter the number of Nodes:
        # 1. Get all Nodes in commissioning or testing
        # 2. Get all commissioning ScriptSets
        # 3. Get all testing ScriptSets
        # 4. Get all commissioning ScriptResults
        # 5. Get all testing ScriptResults
        # 6. Get all commissioning Scripts
        # 7. Get all testing Scripts
        self.assertEquals(7, counter_one.num_queries)
        self.assertEquals(7, counter_many.num_queries)
Example #26
    def test__returns_unchecked_nodes_first(self):
        rack = factory.make_RackController(power_type='')
        datetime_10_minutes_ago = now() - timedelta(minutes=10)
        nodes = [
            self.make_Node(bmc_connected_to=rack,
                           power_state_queried=datetime_10_minutes_ago)
            for _ in range(5)
        ]
        node_unchecked = random.choice(nodes)
        node_unchecked.power_state_queried = None
        node_unchecked.save()

        power_parameters = list_cluster_nodes_power_parameters(rack.system_id)
        system_ids = [params["system_id"] for params in power_parameters]

        # The unchecked node is always the first out.
        self.assertEqual(node_unchecked.system_id, system_ids[0])
Example #27
def mark_nodes_failed_after_expiring():
    """Mark all nodes in that database as failed where the status did not
    transition in time. `status_expires` is checked on the node to see if the
    current time is newer than the expired time.
    """
    current_db_time = now()
    expired_nodes = Node.objects.filter(
        status__in=NODE_FAILURE_MONITORED_STATUS_TRANSITIONS.keys(),
        status_expires__isnull=False,
        status_expires__lte=current_db_time)
    for node in expired_nodes:
        comment = "Node operation '%s' timed out after %s minutes." % (
            NODE_STATUS_CHOICES_DICT[node.status],
            NODE_FAILURE_MONITORED_STATUS_TIMEOUTS[node.status],
        )
        node.mark_failed(comment=comment,
                         script_result_status=SCRIPT_STATUS.ABORTED)
Example #28
    def test_mark_nodes_failed_after_missing_timeout_heartbeat(self):
        node, script_set = self.make_node()
        current_time = now()
        node_timeout = Config.objects.get_config("node_timeout")
        script_set.last_ping = current_time - timedelta(
            minutes=(node_timeout + 1)
        )
        script_set.save()
        script_results = [
            factory.make_ScriptResult(
                script_set=script_set, status=SCRIPT_STATUS.PENDING
            )
            for _ in range(3)
        ]

        mark_nodes_failed_after_missing_script_timeout(
            current_time, node_timeout
        )
        node = reload_object(node)

        self.assertEquals(self.failed_status, node.status)
        self.assertEquals(
            "Node has not been heard from for the last %s minutes"
            % node_timeout,
            node.error_description,
        )
        self.assertIn(
            call(
                "%s: Has not been heard from for the last %s minutes"
                % (node.hostname, node_timeout)
            ),
            self.maaslog.call_args_list,
        )
        if node.enable_ssh:
            self.assertThat(self.mock_stop, MockNotCalled())
        else:
            self.assertThat(self.mock_stop, MockCalledOnce())
            self.assertIn(
                call("%s: Stopped because SSH is disabled" % node.hostname),
                self.maaslog.call_args_list,
            )
        for script_result in script_results:
            self.assertEquals(
                SCRIPT_STATUS.TIMEDOUT, reload_object(script_result).status
            )
Example #29
    def backwards(self, orm):
        # Create the "use-fastpath-installer" tag.
        current_time = now()
        fpi_tag, _ = orm['maasserver.Tag'].objects.get_or_create(
            name="use-fastpath-installer",
            defaults={
                'created': current_time,
                'updated': current_time,
            })

        # Add the "use-fastpath-installer" tag, to nodes that use that
        # boot_type.
        for node in orm['maasserver.Node'].objects.all():
            if node.boot_type == NODE_BOOT.FASTPATH:
                node.tags.add(fpi_tag)
            elif node.boot_type == NODE_BOOT.DEBIAN:
                if fpi_tag in node.tags.all():
                    node.tags.remove(fpi_tag)
            node.save()
Example #30
 def test__resets_status_expires(self):
     rack_controller = factory.make_RackController()
     local_ip = factory.make_ip_address()
     remote_ip = factory.make_ip_address()
     status = random.choice(MONITORED_STATUSES)
     node = self.make_node(
         status=status, status_expires=factory.make_date())
     mac = node.get_boot_interface().mac_address
     get_config(
         rack_controller.system_id, local_ip, remote_ip, mac=mac)
     node = reload_object(node)
     # Testing for the exact time will fail because now() differs between
     # reset_status_expires and here. Pad by 1 minute to make sure it is
     # reset without making the test flaky.
     expected_time = now() + timedelta(minutes=get_node_timeout(status))
     self.assertGreaterEqual(
         node.status_expires, expected_time - timedelta(minutes=1))
     self.assertLessEqual(
         node.status_expires, expected_time + timedelta(minutes=1))
Example #31
def _gen_cluster_nodes_power_parameters(nodes):
    """Generate power parameters for `nodes`.

    These fulfil a subset of the return schema for the RPC call for
    :py:class:`~provisioningserver.rpc.region.ListNodePowerParameters`.

    :return: A generator yielding `dict`s.
    """
    five_minutes_ago = now() - timedelta(minutes=5)
    queryable_power_types = [
        driver.name
        for _, driver in PowerDriverRegistry
        if driver.queryable
    ]

    nodes_unchecked = (
        nodes
        .filter(power_state_queried=None)
        .filter(bmc__power_type__in=queryable_power_types)
        .exclude(status=NODE_STATUS.BROKEN)
        .distinct()
    )
    nodes_checked = (
        nodes
        .exclude(power_state_queried=None)
        .exclude(power_state_queried__gt=five_minutes_ago)
        .filter(bmc__power_type__in=queryable_power_types)
        .exclude(status=NODE_STATUS.BROKEN)
        .order_by("power_state_queried", "system_id")
        .distinct()
    )

    for node in chain(nodes_unchecked, nodes_checked):
        power_info = node.get_effective_power_info()
        if power_info.power_type is not None:
            yield {
                'system_id': node.system_id,
                'hostname': node.hostname,
                'power_state': node.power_state,
                'power_type': power_info.power_type,
                'context': power_info.power_parameters,
            }
Example #32
 def test_now_returns_datetime(self):
     self.assertIsInstance(now(), datetime)
Example #33
 def test_now_returns_same_datetime_inside_transaction(self):
     date_now = now()
     self.assertEqual(date_now, now())
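
These tests hold because now() reports the database's transaction timestamp, and PostgreSQL keeps current_timestamp constant for the duration of a transaction. A minimal sketch of such a helper, assuming a Django database connection (not necessarily the exact MAAS implementation):

from django.db import connection

def now_sketch():
    # PostgreSQL freezes current_timestamp at the start of a transaction,
    # so repeated calls inside one transaction return the same value.
    with connection.cursor() as cursor:
        cursor.execute("SELECT current_timestamp")
        return cursor.fetchone()[0]
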
Example #34
 def test_now_returns_transaction_time(self):
     date_now = now()
     # Perform a write database operation.
     factory.make_node()
     transaction.commit()
     self.assertLessEqual(date_now, now())