Example #1
    def test_nest_empty(self):
        data = DataArray()

        self.assertEqual(data.shape, ())

        mock_set_array = 'not really an array but we don\'t check'
        mock_set_array2 = 'another one'

        data.nest(2, action_index=44, set_array=mock_set_array)
        data.nest(3, action_index=66, set_array=mock_set_array2)

        # the array doesn't exist until you initialize it
        self.assertIsNone(data.ndarray)

        # but other attributes are set
        self.assertEqual(data.shape, (3, 2))
        self.assertEqual(data.action_indices, (66, 44))
        self.assertEqual(data.set_arrays, (mock_set_array2, mock_set_array))

        data.init_data()
        self.assertEqual(data.ndarray.shape, (3, 2))

        # after initializing data, you can't nest anymore because this isn't
        # a preset array
        with self.assertRaises(RuntimeError):
            data.nest(4)
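
A minimal standalone sketch of the same nesting behaviour, assuming the legacy qcodes.data.data_array import path that these tests exercise: each nest() call prepends a dimension, and the ndarray only exists after init_data().

# Sketch only; the import path is an assumption based on legacy qcodes.
from qcodes.data.data_array import DataArray

setpoints = DataArray(name='x_set', shape=(2,), is_setpoint=True)
signal = DataArray(name='signal')                     # scalar, shape () until nested
signal.nest(2, action_index=0, set_array=setpoints)   # shape becomes (2,)
signal.init_data()                                    # now the ndarray exists
print(signal.shape)                                   # -> (2,)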
Example #2
    def test_init_data_error(self):
        data = DataArray(preset_data=[1, 2])
        data.shape = (3, )

        # not sure when this would happen... but if you call init_data
        # and it notices an inconsistency between shape and the actual
        # data that's already there, it raises an error
        with self.assertRaises(ValueError):
            data.init_data()
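
The shape mismatch in this test is forced by hand; normally the shape is derived from the preset data, as the sketch below (same assumed import as above) shows.

# Sketch of what the test forces, with the same assumed legacy import.
arr = DataArray(preset_data=[1, 2])
print(arr.shape)     # -> (2,), derived from the preset data
arr.shape = (3,)     # deliberately inconsistent with the stored ndarray
# arr.init_data()    # would now raise ValueError, as the test asserts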
Example #3
    def test_complete(self):
        array = DataArray(name='y', shape=(5,))
        array.init_data()
        data = new_data(arrays=(array,), location=False)
        self.syncing_array = array
        self.sync_index = 0
        data.sync = self.mock_sync
        bf = DataSet.background_functions
        bf['fail'] = self.failing_func
        bf['log'] = self.logging_func

        with LogCapture() as logs:
            # grab info and warnings but not debug messages
            logging.getLogger().setLevel(logging.INFO)
            data.complete(delay=0.001)

        logs = logs.value

        expected_logs = [
            'waiting for DataSet <False> to complete',
            'DataSet: 0% complete',
            'RuntimeError: it is called failing_func for a reason!',
            'background at index 1',
            'DataSet: 20% complete',
            'RuntimeError: it is called failing_func for a reason!',
            'background function fail failed twice in a row, removing it',
            'background at index 2',
            'DataSet: 40% complete',
            'background at index 3',
            'DataSet: 60% complete',
            'background at index 4',
            'DataSet: 80% complete',
            'background at index 5',
            'DataSet <False> is complete'
        ]

        log_index = 0
        for line in expected_logs:
            self.assertIn(line, logs, logs)
            try:
                log_index_new = logs.index(line, log_index)
            except ValueError:
                raise ValueError('line {} not found after {} in: \n {}'.format(
                    line, log_index, logs))
            self.assertTrue(log_index_new >= log_index, logs)
            log_index = log_index_new + len(line) + 1  # +1 for \n
        self.assertEqual(log_index, len(logs), logs)
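
For context, DataSet.background_functions is a plain dict of callables that complete() keeps invoking while it waits; the expected logs above show them interleaved with the progress messages, and a function that fails twice in a row is removed. A minimal registration sketch, assuming the legacy qcodes.data import paths; the complete() call is left commented out because it blocks until the DataSet is marked complete.

# Sketch only; the import paths are an assumption based on legacy qcodes.
from qcodes.data.data_array import DataArray
from qcodes.data.data_set import DataSet, new_data

def report_progress():
    print('still measuring...')

arr = DataArray(name='y', shape=(5,))
arr.init_data()
ds = new_data(arrays=(arr,), location=False)

DataSet.background_functions['report'] = report_progress
# ds.complete(delay=0.001)   # would block until the DataSet is complete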
Example #4
    def test_fraction_complete(self):
        data = DataArray(shape=(5, 10))
        self.assertIsNone(data.ndarray)
        self.assertEqual(data.fraction_complete(), 0.0)

        data.init_data()
        self.assertEqual(data.fraction_complete(), 0.0)

        # flat index = 1 * 10 + 7 = 17; add 1 (indices start at 0) to get
        # 18 points, and each point is 2% of the total, so this is 36%
        data[1, 7] = 1
        self.assertEqual(data.fraction_complete(), 18 / 50)

        # add a last_saved_index but modified_range is still bigger
        data.mark_saved(13)
        self.assertEqual(data.fraction_complete(), 18 / 50)

        # now last_saved_index wins
        data.mark_saved(19)
        self.assertEqual(data.fraction_complete(), 20 / 50)

        # now pretend we get more info from syncing
        data.synced_index = 22
        self.assertEqual(data.fraction_complete(), 23 / 50)
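
The 18/50 above is just the flattened row-major index plus one; numpy can double-check the arithmetic (ravel_multi_index is used here only for the calculation, it is not part of the DataArray API).

import numpy as np

flat = np.ravel_multi_index((1, 7), (5, 10))   # -> 17
print((flat + 1) / 50)                         # -> 0.36, i.e. 18/50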
Example #5
    def test_preset_data(self):
        onetwothree = [
            # lists and tuples work
            [1.0, 2.0, 3.0],
            (1.0, 2.0, 3.0),

            # iterators get automatically cast to floats
            (i + 1 for i in range(3)),
            map(float, range(1, 4)),

            # and of course numpy arrays themselves work
            np.array([1.0, 2.0, 3.0]),
        ]

        expected123 = [1.0, 2.0, 3.0]

        for item in onetwothree:
            data = DataArray(preset_data=item)
            self.assertEqual(data.ndarray.tolist(), expected123)
            self.assertEqual(data.shape, (3, ))

        # you can re-initialize a DataArray with the same shape data,
        # but not with a different shape
        list456 = [4, 5, 6]
        data.init_data(data=list456)
        self.assertEqual(data.ndarray.tolist(), list456)
        with self.assertRaises(ValueError):
            data.init_data([1, 2])
        self.assertEqual(data.ndarray.tolist(), list456)
        self.assertEqual(data.shape, (3, ))

        # you can call init_data again with no data, and nothing changes
        data.init_data()
        self.assertEqual(data.ndarray.tolist(), list456)
        self.assertEqual(data.shape, (3, ))

        # multidimensional works too
        list2d = [[1, 2], [3, 4]]
        data2 = DataArray(preset_data=list2d)
        self.assertEqual(data2.ndarray.tolist(), list2d)
        self.assertEqual(data2.shape, (2, 2))
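
Because the preset data is copied into the array up front, even a one-shot iterator only needs to be consumed once; a short check using the same assumed import as the earlier sketches.

gen = (i + 1 for i in range(3))     # one-shot generator
arr = DataArray(preset_data=gen)
print(arr.ndarray.tolist())         # -> [1.0, 2.0, 3.0], cast to floats
print(arr.shape)                    # -> (3,)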
Example #6
    def read_one_file(self, data_set, f, ids_read):
        # I have written more flexible code in my life
        if len(data_set.arrays) != 0:
            return
        line, data_set.header = self._read_header(f, "# ")
        # interpret metadata (header)

        set_columns = []
        set_arrays = ()
        data_columns = []
        data_arrays = []
        c = [i + 1 for i in range(len(line.split('\t')))]
        print(c)
        for i in c:
            if 'Column ' + str(i) not in data_set.header:
                raise KeyError('File header does not match number of columns '
                               '(Column ' + str(i) + ')')

            info = data_set.header['Column ' + str(i)]
            if info['type'] == 'coordinate':
                set_columns.append(['Column ' + str(i), i - 1])
            elif info['type'] == 'value':
                data_columns.append(['Column ' + str(i), i - 1])

        set_columns.reverse()
        shape = ()
        for n in set_columns:
            size = data_set.header[n[0]]['size']
            shape += (int(size), )
            name = data_set.header[n[0]]['name']
            set_array = DataArray(label=name,
                                  array_id="DataArray" + str(n[1]),
                                  set_arrays=set_arrays,
                                  shape=shape,
                                  is_setpoint=True,
                                  snapshot=None)
            set_array.init_data()
            data_set.add_array(set_array)
            set_arrays = set_arrays + (set_array, )

        for n in data_columns:
            name = data_set.header[n[0]]['name']
            data_array = DataArray(label=name,
                                   array_id="DataArray" + str(n[1]),
                                   set_arrays=set_arrays,
                                   shape=shape,
                                   snapshot=None)
            data_array.init_data()
            data_set.add_array(data_array)
            data_arrays.append(data_array)

        npoints = 1
        print(shape)
        for n in shape:
            npoints = npoints * n
        print(npoints)

        indices = [0] * len(set_arrays)

        elements = line.split('\t')
        set_values = []
        for i in set_columns:
            set_values.append(float(elements[i[1]]))
        p = 0
        watchdog = 0
        while True:
            # print(indices)
            # handle all elements in this line
            if len(elements) != len(data_set.arrays):
                raise Exception("Wrong number of columns in data row " +
                                str(p))

            # Check hypercubic consistency
            set_value_check = []
            for i in set_columns:
                set_value_check.append(float(elements[i[1]]))

            for a, b in zip(set_value_check, set_values):
                if a != b:
                    raise Exception("data is not hypercubic")

            # add data points to the dataset
            for c in data_columns:
                i = tuple(indices)
                data_set.arrays['DataArray' + str(c[1])][i] = float(
                    elements[c[1]])

            # add set_points to the dataset

            for j, c in enumerate(set_columns):
                i = tuple(indices)[0:j + 1]
                data_set.arrays['DataArray' + str(c[1])][i] = float(
                    elements[c[1]])

            p += 1
            if p >= npoints:
                break

            line = ""
            watchdog = 0
            while line is "" and watchdog < 100:
                line = f.readline()
                line = line.rstrip('\n\t\r')
                watchdog += 1
            if watchdog >= 100:
                break

            elements = line.split('\t')
            # update indices and set new set_values to check hypercubicity
            pointer = len(indices) - 1

            indices[pointer] += 1
            # new set value and update it to dataset
            val = float(elements[set_columns[pointer][1]])
            set_values[pointer] = val

            while pointer >= 0 and indices[pointer] >= shape[pointer]:
                # reset and decrease pointer
                indices[pointer] = 0
                pointer -= 1

                indices[pointer] += 1
                # new set value and update it to dataset
                val = float(elements[set_columns[pointer][1]])
                set_values[pointer] = val

        if watchdog >= 100:
            print("unexpected EOF")
            print(p)
            print(npoints)

        line = f.readline()
        line = line.rstrip('\n\t\r')
        if line is not "":
            raise Exception("File is supposed to be empty, but nonempty line "
                            "found (" + line + ")")
Example #7
    def _create_data_array(
        self,
        action_indices: Tuple[int, ...],
        result,
        parameter: Parameter = None,
        is_setpoint: bool = False,
        name: str = None,
        label: str = None,
        unit: str = None,
    ):
        """Create a data array from a parameter and result.

        The data array shape is extracted from the result shape, and the current
        loop dimensions.

        The data array is added to the current data set.

        Args:
            parameter: Parameter for which to create a DataArray. Can also be a
                string, in which case it is the data_array name
            result: Result returned by the Parameter
            action_indices: Action indices for which to store parameter
            is_setpoint: Whether the Parameter is used for sweeping or measuring
            name: Data array name. Must be provided if no parameter is given;
                if a parameter is also given, this overrides its full name.
            label: Data array label. If not provided, the parameter label is
                used. If the parameter is a name string, the label is extracted
                from the name.
            unit: Data array unit. If not provided, the parameter unit is used.

        Returns:
            Newly created data array

        """
        if parameter is None and name is None:
            raise SyntaxError(
                "When creating a data array, must provide either a parameter or a name"
            )

        if len(running_measurement().data_arrays) >= self.max_arrays:
            raise RuntimeError(
                f"Number of arrays in dataset exceeds "
                f"Measurement.max_arrays={self.max_arrays}. Perhaps you forgot"
                f"to encapsulate a loop with a Sweep()?"
            )

        array_kwargs = {
            "is_setpoint": is_setpoint,
            "action_indices": action_indices,
            "shape": self.loop_shape,
        }

        if is_setpoint or isinstance(result, (np.ndarray, list)):
            array_kwargs["shape"] += np.shape(result)

        # Use dummy index (1, ) if measurement is performed outside a Sweep
        if not array_kwargs["shape"]:
            array_kwargs["shape"] = (1,)

        if isinstance(parameter, Parameter):
            array_kwargs["parameter"] = parameter
            # Add a custom name
            if name is not None:
                array_kwargs["full_name"] = name
            if label is not None:
                array_kwargs["label"] = label
            if unit is not None:
                array_kwargs["unit"] = unit
        else:
            array_kwargs["name"] = name
            if label is None:
                label = name[0].capitalize() + name[1:].replace("_", " ")
            array_kwargs["label"] = label
            array_kwargs["unit"] = unit or ""

        # Add setpoint arrays
        if not is_setpoint:
            array_kwargs["set_arrays"] = self._add_set_arrays(
                action_indices, result, parameter=parameter, name=(name or parameter.name)
            )

        data_array = DataArray(**array_kwargs)

        data_array.array_id = data_array.full_name
        data_array.array_id += "_" + "_".join(str(k) for k in action_indices)

        data_array.init_data()

        self.dataset.add_array(data_array)
        with self.timings.record(['dataset', 'save_metadata']):
            self.dataset.save_metadata()

        # Add array to set_arrays or to data_arrays of this Measurement
        if is_setpoint:
            self.set_arrays[action_indices] = data_array
        else:
            self.data_arrays[action_indices] = data_array

        return data_array
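
For reference, the array_id built near the end is just the array's full name with the action indices appended; 'voltage' and (0, 1) below are hypothetical values used only to illustrate the string construction.

full_name = 'voltage'          # hypothetical parameter name
action_indices = (0, 1)        # hypothetical position in the measurement
array_id = full_name + '_' + '_'.join(str(k) for k in action_indices)
print(array_id)                # -> voltage_0_1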