Example #1
    def __call__(self):
        """ generator, each one should be a row index number and corresponding dict content, the dict keys
        which may be table headers or alias you give.
        """
        keys = self.headers
        sheet = self.sheet
        start_col = self.start_col
        content_bak = {}
        if self.merge_cell:
            for row_index in _range(self.start_row+1, sheet.nrows):
                content = _RowProcess(sheet, keys, start_col)(row_index)
                if not content:
                    continue

                # merged cells: reuse the value from the first cell of the merged range
                for key in keys:
                    if not content[key]:
                        content[key] = content_bak.get(key, '')

                content_bak = content.copy()
                yield row_index+1, content
        else:
            for row_index in _range(self.start_row+1, sheet.nrows):
                content = _RowProcess(sheet, keys, start_col)(row_index)
                yield row_index+1, content
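For reference, the merged-cell branch above just carries values forward: an empty cell takes the value remembered from the previous row. A minimal standalone sketch of that fill-forward step, using made-up row dicts rather than the library's classes:

rows = [{'name': 'alice', 'dept': 'ops'},
        {'name': '', 'dept': ''},      # cells hidden under a merged range read back as ''
        {'name': 'bob', 'dept': 'dev'}]
content_bak = {}
for row_index, content in enumerate(rows):
    for key in content:
        if not content[key]:
            content[key] = content_bak.get(key, '')
    content_bak = content.copy()
    print(row_index + 1, content)
# 1 {'name': 'alice', 'dept': 'ops'}
# 2 {'name': 'alice', 'dept': 'ops'}
# 3 {'name': 'bob', 'dept': 'dev'}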
Example #2
    def __call__(self):
        """ generator, each one should be a row index number and corresponding dict content, the dict keys
        which may be table headers or alias you give.
        """
        keys = self.headers
        sheet = self.sheet
        start_col = self.start_col
        content_bak = {}
        if self.merge_cell:
            for row_index in _range(self.start_row + 1, sheet.nrows):
                content = _RowProcess(sheet, keys, start_col)(row_index)
                if not content:
                    continue

                # merged cells: reuse the value from the first cell of the merged range
                for key in keys:
                    if not content[key]:
                        content[key] = content_bak.get(key, '')

                content_bak = content.copy()
                yield row_index + 1, content
        else:
            for row_index in _range(self.start_row + 1, sheet.nrows):
                content = _RowProcess(sheet, keys, start_col)(row_index)
                yield row_index + 1, content
Example #3
    def _get_sheets_by_index(self, path, index_sheets, merge_cell):
        """ index sheets means only process index in index_sheets,
        :param path:
        :param index_sheets: it's a dict value, key is the sheet index, value is the header alias
        :param merge_cell:
        :return:
        """
        all_sheets = get_sheets(path)
        sheets = {}
        try:
            for index in index_sheets:
                sheets[int(index)] = index_sheets[index]
        except ValueError:
            raise ValueError('sheet index should be an int value')
        all_index_set = set(_range(len(all_sheets)))
        index_set = all_index_set - set(sheets.keys())
        missing_set = set(sheets.keys()) - all_index_set
        if missing_set:
            raise ValueError('sheet index: {} does not exist'.format(missing_set))
        self.sheets = {
            i: _SheetProcess(all_sheets[i],
                             alias=sheets[i],
                             merge_cell=merge_cell)
            for i in sheets
        }

        if self.patch_sheet:
            if index_set:
                self.sheets.update({
                    i: _SheetProcess(all_sheets[i], merge_cell=merge_cell)
                    for i in index_set
                })
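The check above reduces to a set comparison between the requested sheet indices and the indices that actually exist in the workbook. A minimal sketch of that validation in isolation, with made-up numbers:

requested = {0, 3}
available = set(range(3))    # a workbook with sheets 0, 1 and 2
missing = requested - available
if missing:                  # {3} was requested but does not exist
    raise ValueError('sheet index: {} does not exist'.format(missing))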
Example #4
    def _get_sheets_by_index(self, path, index_sheets, merge_cell):
        """ index sheets means only process index in index_sheets,
        :param path:
        :param index_sheets: it's a dict value, key is the sheet index, value is the header alias
        :param merge_cell:
        :return:
        """
        all_sheets = get_sheets(path)
        sheets = {}
        try:
            for index in index_sheets:
                sheets[int(index)] = index_sheets[index]
        except ValueError:
            raise ValueError('sheet index should be an int value')
        all_index_set = set(_range(len(all_sheets)))
        index_set = all_index_set - set(sheets.keys())
        missing_set = set(sheets.keys()) - all_index_set
        if missing_set:
            raise ValueError('sheet index: {} does not exist'.format(missing_set))
        self.sheets = {i: _SheetProcess(all_sheets[i], alias=sheets[i], merge_cell=merge_cell) for i in sheets}

        if self.patch_sheet:
            if index_set:
                self.sheets.update({i: _SheetProcess(all_sheets[i], merge_cell=merge_cell) for i in index_set})
Example #5
 def _fetch_start_row(self):
     """find start row which should be a table header
     :return:
     """
     self.start_row = self.MAX+1
     try:
         for i in _range(self.MAX):
             row = self.sheet.row(i)
             for j in _range(len(row)):
                 if row[j].value.strip():
                     self.start_row = i
                     return
     except IndexError:
         raise ValueError('empty sheet exists, please check')
     if self.start_row >= self.MAX+1:
         raise ValueError('scanned {} rows but did not find the content header'.format(self.MAX))
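In other words, the method scans at most self.MAX rows and records the first one that contains any non-empty cell. The same idea on a plain list of lists, with illustrative data:

rows = [['', ''], ['', ''], ['name', 'age'], ['alice', '30']]
start_row = next(i for i, row in enumerate(rows)
                 if any(str(cell).strip() for cell in row))
print(start_row)   # 2 -- the header row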
Example #6
def solve_ode(t0, t1, h, y0, f, queue, wait_for=None):
    ctx = queue.context
    dev = queue.device
    y_type = y0.dtype
    weight_type = (np.float64 if y_type in (np.float64, np.complex128)
                   else np.float32)
    nsteps = int((t1 - t0) / h)
    # Arrays for the results at each step
    ys = [cl_array.Array(queue, y0.shape, y_type, strides=y0.strides)
          for i in _range(nsteps + 1)]
    ks = [cl_array.Array(queue, y0.shape, y_type, strides=y0.strides)
          for i in _range(4)]
    tmp_y = cl_array.Array(queue, y0.shape, y_type, strides=y0.strides)
    total_size = ys[0].size
    # initialize
    prev_evt = cl.enqueue_copy(queue, ys[0].base_data, y0,
                               device_offset=ys[0].offset, is_blocking=False,
                               wait_for=wait_for)
    h_8 = h / 8.
    h3_8 = h * 3 / 8.
    h_3 = h / 3.
    h2_3 = h * 2 / 3.
    comb_knls = [lin_comb_diff_kernel(ctx, y_type, y_type, y_type, weight_type,
                                      i, name='ode_lin_diff_%d' % i)
                 for i in _range(1, 5)]
    g_size, l_size = get_group_sizes(total_size, dev, comb_knls[0])

    def _run_comb_knls(l, wait_for, *args):
        return _run_elwise(comb_knls[l], queue, g_size, l_size,
                           total_size, wait_for, *args)

    for i in _range(nsteps):
        prev_y = ys[i]
        next_y = ys[i + 1]
        tn = t0 + i * h
        prev_evt = f(tn, prev_y, ks[0], wait_for=[prev_evt])
        prev_evt = _run_comb_knls(0, [prev_evt], tmp_y, prev_y, ks[0], h_3)
        prev_evt = f(tn + h_3, tmp_y, ks[1], wait_for=[prev_evt])
        prev_evt = _run_comb_knls(1, [prev_evt], tmp_y, prev_y, ks[0], ks[1],
                                  -h_3, h)
        prev_evt = f(tn + h2_3, tmp_y, ks[2], wait_for=[prev_evt])
        prev_evt = _run_comb_knls(2, [prev_evt], tmp_y, prev_y, ks[0], ks[1],
                                  ks[2], h, -h, h)
        prev_evt = f(tn + h, tmp_y, ks[3], wait_for=[prev_evt])
        prev_evt = _run_comb_knls(3, [prev_evt], next_y, prev_y, ks[0], ks[1],
                                  ks[2], ks[3], h_8, h3_8, h3_8, h_8)
    return ys, prev_evt
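The stage points (t, t + h/3, t + 2h/3, t + h) and the final weights h/8, 3h/8, 3h/8, h/8 used above are those of the 3/8-rule fourth-order Runge-Kutta scheme. A plain NumPy sketch of the same stage structure, useful as a CPU cross-check on small problems (the function name and dtype handling here are illustrative, not part of the library):

import numpy as np

def rk4_38_reference(t0, t1, h, y0, f):
    """Integrate dy/dt = f(t, y) with the 3/8-rule Runge-Kutta scheme."""
    nsteps = int((t1 - t0) / h)
    ys = [np.array(y0, copy=True)]
    for i in range(nsteps):
        y, tn = ys[-1], t0 + i * h
        k0 = f(tn, y)
        k1 = f(tn + h / 3, y + h / 3 * k0)
        k2 = f(tn + 2 * h / 3, y - h / 3 * k0 + h * k1)
        k3 = f(tn + h, y + h * (k0 - k1 + k2))
        ys.append(y + h / 8 * (k0 + 3 * k1 + 3 * k2 + k3))
    return ys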
Example #7
def lin_comb_kernel(ctx, res_type, ary_type, weight_type, length, name=None):
    res_type = np.dtype(res_type)
    ary_type = np.dtype(ary_type)
    weight_type = np.dtype(weight_type)

    res_fmt, mul_fmt, _ = _get_lin_comb_expr_fmts(ary_type, weight_type,
                                                  res_type)
    mul_fmt = mul_fmt % ('ary%d[i]', 'weight%d')
    expr = ' + '.join((mul_fmt % (i, i)) for i in _range(length))
    expr = res_fmt % expr

    name = name or 'lin_comb_kernel'
    return cl_elwise.get_elwise_kernel(
        ctx, [VectorArg(res_type, 'res', with_offset=True)] +
        [ConstArg(ary_type, 'ary%d' % i) for i in _range(length)] +
        [ScalarArg(weight_type, 'weight%d' % i) for i in _range(length)],
        'res[i] = ' + expr, name=name, auto_preamble=True)
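For orientation, assuming the helpers returned by _get_lin_comb_expr_fmts reduce to a plain multiply and assignment (an assumption, since that helper is not shown here), the element-wise expression generated for length=3 would look like this:

mul_fmt = '%s * %s' % ('ary%d[i]', 'weight%d')   # -> 'ary%d[i] * weight%d'
expr = ' + '.join((mul_fmt % (i, i)) for i in range(3))
print('res[i] = ' + expr)
# res[i] = ary0[i] * weight0 + ary1[i] * weight1 + ary2[i] * weight2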
Example #8
 def _fetch_start_row(self):
     """find start row which should be a table header
     :return:
     """
     self.start_row = self.MAX + 1
     try:
         for i in _range(self.MAX):
             row = self.sheet.row(i)
             for j in _range(len(row)):
                 if row[j].value.strip():
                     self.start_row = i
                     return
     except IndexError:
         raise ValueError('empty sheet exists, please check')
     if self.start_row >= self.MAX + 1:
         raise ValueError(
             'scanned {} rows but did not find the content header'.format(
                 self.MAX))
Example #9
def test_reprs():
    list_type = containers.List(primitives.Integer)
    obj = list_type.from_python_std(list(_range(3)))
    assert obj.short_string() == \
        "List<Integer>(len=3, [Integer(0), Integer(1), Integer(2)])"
    assert obj.verbose_string() == \
        "List<Integer>(\n" \
        "\tlen=3,\n" \
        "\t[\n" \
        "\t\tInteger(0),\n" \
        "\t\tInteger(1),\n" \
        "\t\tInteger(2)\n" \
        "\t]\n" \
        ")"

    nested_list_type = containers.List(containers.List(primitives.Integer))
    nested_obj = nested_list_type.from_python_std(
        [list(_range(3)), list(_range(3))])

    assert nested_obj.short_string() == \
        "List<List<Integer>>(len=2, [List<Integer>(len=3, [Integer(0), Integer(1), Integer(2)]), " \
        "List<Integer>(len=3, [Integer(0), Integer(1), Integer(2)])])"
    assert nested_obj.verbose_string() == \
        "List<List<Integer>>(\n" \
        "\tlen=2,\n" \
        "\t[\n" \
        "\t\tList<Integer>(\n" \
        "\t\t\tlen=3,\n" \
        "\t\t\t[\n" \
        "\t\t\t\tInteger(0),\n" \
        "\t\t\t\tInteger(1),\n" \
        "\t\t\t\tInteger(2)\n" \
        "\t\t\t]\n" \
        "\t\t),\n" \
        "\t\tList<Integer>(\n" \
        "\t\t\tlen=3,\n" \
        "\t\t\t[\n" \
        "\t\t\t\tInteger(0),\n" \
        "\t\t\t\tInteger(1),\n" \
        "\t\t\t\tInteger(2)\n" \
        "\t\t\t]\n" \
        "\t\t)\n" \
        "\t]\n" \
        ")"
Example #10
def chunks(arr, size):
    """Splits a list into chunks

    :param arr: list to split
    :type arr: :class:`list`
    :param size: number of elements in each chunk
    :type size: :class:`int`
    :return: generator object
    :rtype: :class:`generator`
    """
    for i in _range(0, len(arr), size):
        yield arr[i:i+size]
Example #11
def chunks(arr, size):
    """Splits a list into chunks

    :param arr: list to split
    :type arr: :class:`list`
    :param size: number of elements in each chunk
    :type size: :class:`int`
    :return: generator object
    :rtype: :class:`generator`
    """
    for i in _range(0, len(arr), size):
        yield arr[i:i + size]
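A quick usage example; note that the final chunk may be shorter than size:

print(list(chunks([1, 2, 3, 4, 5], 2)))
# [[1, 2], [3, 4], [5]]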
Example #12
 def _header_start_col(self):
     """find the header row corresponding column index
     """
     self.start_col = self.MAX+1
     try:
         for i in _range(self.MAX):
             if self.sheet.row(self.header_index)[i].value.strip():
                 self.start_col = i
                 break
     except IndexError:
         raise ValueError('header_index {} is an empty row'.format(self.header_index))
     if self.start_col >= self.MAX + 1:
         raise ValueError('scanned {} columns in row {}, but did not find the header'.format(
             self.MAX, self.header_index))
Example #13
 def _header_start_col(self):
     """find the header row corresponding column index
     """
     self.start_col = self.MAX + 1
     try:
         for i in _range(self.MAX):
             if self.sheet.row(self.header_index)[i].value.strip():
                 self.start_col = i
                 break
     except IndexError:
         raise ValueError('header_index {} is an empty row'.format(
             self.header_index))
     if self.start_col >= self.MAX + 1:
         raise ValueError(
             'scanned {} columns in row {}, but did not find the header'.format(
                 self.MAX, self.header_index))
Example #14
    def __call__(self):
        """ get json keys
        :return: header start column, json keys
        """
        col_list = []
        row = self.sheet.row(self.header_index)
        row_length = len(row)
        for i in _range(self.start_col, row_length):
            key = row[i].value.strip()
            if key:
                alias_key = self.alias.pop(key, None) or key
                col_list.append(alias_key)
            else:
                raise ValueError('header should not contain empty cells')
        if self.alias:
            raise ValueError('header alias {} is not valid'.format(self.alias))

        if len(set(col_list)) != row_length - self.start_col:
            raise ValueError('duplicate header found')
        return self.start_col, col_list
Example #15
    def __call__(self):
        """ get json keys
        :return: header start column, json keys
        """
        col_list = []
        row = self.sheet.row(self.header_index)
        row_length = len(row)
        for i in _range(self.start_col, row_length):
            key = row[i].value.strip()
            if key:
                alias_key = self.alias.pop(key, None) or key
                col_list.append(alias_key)
            else:
                raise ValueError('header should not contain empty cells')
        if self.alias:
            raise ValueError('header alias {} is not valid'.format(self.alias))

        if len(set(col_list)) != row_length - self.start_col:
            raise ValueError('duplicate header found')
        return self.start_col, col_list
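The alias handling above pops each matched header out of the alias dict, so anything left in the dict afterwards refers to a header that does not exist, which is why a leftover alias raises. A small illustration with made-up headers:

alias = {'Name': 'name', 'Agee': 'age'}        # 'Agee' is a typo and matches no header
headers = ['Name', 'Age']
keys = [alias.pop(h, None) or h for h in headers]
print(keys)    # ['name', 'Age']
print(alias)   # {'Agee': 'age'} -- non-empty, so the method above would raise ValueError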
Example #16
    def run(self, t0, t1, h, y0, queue, extra_args=(), wait_for=None):
        # TODO?
        # check y0 type?
        nsteps = int((t1 - t0) / h)
        # Arrays for the results at each step
        y_type = self.__y_type
        ys = [cl_array.Array(queue, y0.shape, y_type, strides=y0.strides)
              for i in _range(nsteps + 1)]
        ks = [cl_array.Array(queue, y0.shape, y_type, strides=y0.strides)
              for i in _range(3)]
        tmp_y1 = cl_array.Array(queue, y0.shape, y_type, strides=y0.strides)
        tmp_y2 = cl_array.Array(queue, y0.shape, y_type, strides=y0.strides)
        total_size = ys[0].size
        # initialize
        prev_evt = cl.enqueue_copy(queue, ys[0].base_data, y0,
                                   device_offset=ys[0].offset,
                                   is_blocking=False, wait_for=wait_for)
        it1_knl = self.__prog.pyscical_ode_solver_iter1
        it2_knl = self.__prog.pyscical_ode_solver_iter2
        it3_knl = self.__prog.pyscical_ode_solver_iter3
        it4_knl = self.__prog.pyscical_ode_solver_iter4
        if self.__has_post:
            post_knl = self.__prog.pyscical_ode_solver_post
        g_size, l_size = get_group_sizes(total_size, self.__dev, it1_knl)

        t_type = self.__t_type.type
        it1_knl.set_args(t_type(t0), t_type(h), ys[0].base_data,
                         ks[0].base_data, tmp_y1.base_data,
                         np.int64(total_size), *extra_args)
        it2_knl.set_args(t_type(t0), t_type(h), tmp_y1.base_data,
                         ks[0].base_data, ks[1].base_data,
                         tmp_y2.base_data, np.int64(total_size), *extra_args)
        it3_knl.set_args(t_type(t0), t_type(h), tmp_y2.base_data,
                         ks[0].base_data, ks[1].base_data, ks[2].base_data,
                         tmp_y1.base_data, np.int64(total_size), *extra_args)
        it4_knl.set_args(t_type(t0), t_type(h), tmp_y1.base_data,
                         ks[0].base_data, ks[1].base_data, ks[2].base_data,
                         ys[1].base_data, np.int64(total_size), *extra_args)
        if self.__has_post:
            post_knl.set_args(t_type(t0), t_type(h), ys[1].base_data,
                              np.int64(total_size), *extra_args)

        for i in _range(nsteps):
            t = t_type(i * h + t0)
            it1_knl.set_arg(0, t)
            it1_knl.set_arg(2, ys[i].base_data)
            prev_evt = cl.enqueue_nd_range_kernel(queue, it1_knl, g_size,
                                                  l_size, None, [prev_evt])

            it2_knl.set_arg(0, t)
            prev_evt = cl.enqueue_nd_range_kernel(queue, it2_knl, g_size,
                                                  l_size, None, [prev_evt])

            it3_knl.set_arg(0, t)
            prev_evt = cl.enqueue_nd_range_kernel(queue, it3_knl, g_size,
                                                  l_size, None, [prev_evt])

            it4_knl.set_arg(0, t)
            it4_knl.set_arg(6, ys[i + 1].base_data)
            prev_evt = cl.enqueue_nd_range_kernel(queue, it4_knl, g_size,
                                                  l_size, None, [prev_evt])
            if self.__has_post:
                post_knl.set_arg(0, t)
                post_knl.set_arg(2, ys[i + 1].base_data)
                prev_evt = cl.enqueue_nd_range_kernel(queue, post_knl, g_size,
                                                      l_size, None, [prev_evt])

        return ys, prev_evt
Example #17
def evolve_sideband(ctx, queue, gamma_x, gamma_y, gamma_z, pump_branch,
                    omegas_x, omegas_y, omegas_z, h_t, gamma_total,
                    delta_xyz, omega_xyz, p_b, p_a=None, p_c=None):
    dim_x, d = gamma_x.shape
    if dim_x != d:
        raise ValueError("gamma_x is not a square matrix.")
    if gamma_x.dtype != np.float32:
        raise TypeError("The type of gamma_x should be float32.")

    dim_y, d = gamma_y.shape
    if dim_y != d:
        raise ValueError("gamma_y is not a square matrix.")
    if gamma_y.dtype != np.float32:
        raise TypeError("The type of gamma_y should be float32.")

    dim_z, d = gamma_z.shape
    if dim_z != d:
        raise ValueError("gamma_z is not a square matrix.")
    if gamma_z.dtype != np.float32:
        raise TypeError("The type of gamma_z should be float32.")

    total_dim = dim_x * dim_y * dim_z

    mf = cl.mem_flags
    events = []

    gamma_xyz = cl.Buffer(ctx, mf.READ_ONLY,
                          (dim_x**2 + dim_y**2 + dim_z**2) * 4)
    events.append(cl.enqueue_copy(queue, gamma_xyz, gamma_x,
                                  device_offset=0, is_blocking=False))
    events.append(cl.enqueue_copy(queue, gamma_xyz, gamma_y,
                                  device_offset=dim_x**2 * 4,
                                  is_blocking=False))
    events.append(cl.enqueue_copy(queue, gamma_xyz, gamma_z,
                                  device_offset=(dim_x**2 + dim_y**2) * 4,
                                  is_blocking=False))

    gidx_minmax_xyz = cl.Buffer(ctx, mf.READ_ONLY, (dim_x + dim_y + dim_z) * 8)
    is_cpu = queue.device.type == cl.device_type.CPU
    events.append(cl.enqueue_copy(queue, gidx_minmax_xyz,
                                  _get_gidx_minmax_xyz(dim_x, dim_y, dim_z,
                                                       gamma_x, gamma_y,
                                                       gamma_z,
                                                       align=not is_cpu),
                                  device_offset=0, is_blocking=False))

    if pump_branch.dtype != np.float32:
        raise TypeError("The type of pump_branch should be float32.")
    pump_branch_gpu = cl.Buffer(ctx, mf.READ_ONLY, 36)
    events.append(cl.enqueue_copy(queue, pump_branch_gpu, pump_branch,
                                  device_offset=0, is_blocking=False))

    num_omg_x, d = omegas_x.shape
    if dim_x != d:
        raise ValueError("The second dimension of omegas_x is not "
                         "the same with dim_x.")
    if omegas_x.dtype != np.float32:
        raise TypeError("The type of omegas_x should be float32.")

    num_omg_y, d = omegas_y.shape
    if dim_y != d:
        raise ValueError("The second dimension of omegas_y is not "
                         "the same with dim_y.")
    if omegas_y.dtype != np.float32:
        raise TypeError("The type of omegas_y should be float32.")

    num_omg_z, d = omegas_z.shape
    if dim_z != d:
        raise ValueError("The second dimension of omegas_z is not "
                         "the same with dim_z.")
    if omegas_z.dtype != np.float32:
        raise TypeError("The type of omegas_z should be float32.")

    omegas_gpu = cl.Buffer(ctx, mf.READ_ONLY,
                           (num_omg_x * dim_x + num_omg_y * dim_y +
                            num_omg_z * dim_z) * 4)
    events.append(cl.enqueue_copy(queue, omegas_gpu, omegas_x,
                                  device_offset=0, is_blocking=False))
    events.append(cl.enqueue_copy(queue, omegas_gpu, omegas_y,
                                  device_offset=num_omg_x * dim_x * 4,
                                  is_blocking=False))
    events.append(cl.enqueue_copy(queue, omegas_gpu, omegas_z,
                                  device_offset=(num_omg_x * dim_x +
                                                 num_omg_y * dim_y) * 4,
                                  is_blocking=False))

    h_t = np.float32(h_t)
    seq_len, d = gamma_total.shape
    t_len = (seq_len - 1) * h_t
    if d != 3:
        raise TypeError("Second dimension of gamma_total should be 3.")
    if gamma_total.dtype != np.float32:
        raise TypeError("The type of gamma_total should be float32.")
    gamma_total_gpu = cl.Buffer(ctx, mf.READ_ONLY, seq_len * 12)
    events.append(cl.enqueue_copy(queue, gamma_total_gpu, gamma_total,
                                  device_offset=0, is_blocking=False))

    d1, d2 = delta_xyz.shape
    if d1 != 3 or d2 != seq_len:
        raise TypeError("Dimensions of delta_xyz should be (3, seq_len).")
    if delta_xyz.dtype != np.uint32:
        raise TypeError("The type of delta_xyz should be uint32.")
    delta_xyz_gpu = cl.Buffer(ctx, mf.READ_ONLY, seq_len * 12)
    events.append(cl.enqueue_copy(queue, delta_xyz_gpu, delta_xyz,
                                  device_offset=0, is_blocking=False))

    d1, d2 = omega_xyz.shape
    if d1 != 3 or d2 != seq_len:
        raise TypeError("Dimensions of omega_xyz should be (3, seq_len).")
    if omega_xyz.dtype != np.uint32:
        raise TypeError("The type of omega_xyz should be uint32.")
    omega_xyz_offset = np.empty(seq_len * 3, np.uint32)
    for i in _range(seq_len):
        _omega_x = omega_xyz[0, i]
        if _omega_x >= num_omg_x:
            raise IndexError("omega_x index too larger")
        omega_xyz_offset[i] = _omega_x * dim_x
        _omega_y = omega_xyz[1, i]
        if _omega_y >= num_omg_y:
            raise IndexError("omega_y index too larger")
        omega_xyz_offset[seq_len + i] = _omega_y * dim_y + num_omg_x * dim_x
        _omega_z = omega_xyz[2, i]
        if _omega_z >= num_omg_z:
            raise IndexError("omega_z index too larger")
        omega_xyz_offset[seq_len * 2 + i] = (_omega_z * dim_z +
                                             num_omg_x * dim_x +
                                             num_omg_y * dim_y)
    omega_xyz_offset_gpu = cl.Buffer(ctx, mf.READ_ONLY, seq_len * 12)
    events.append(cl.enqueue_copy(queue, omega_xyz_offset_gpu, omega_xyz_offset,
                                  device_offset=0, is_blocking=False))


    dev = queue.device
    src = """
    #include <sideband.cl>
    """
    extra_args = (CLArg('dim_x', 'unsigned'),
                  CLArg('dim_y', 'unsigned'),
                  CLArg('dim_z', 'unsigned'),
                  CLArg('gamma_xyz', 'gcfloat_p'),
                  CLArg('gidx_minman_xyz', 'gcuint_p'),
                  CLArg('pump_branch', 'gcfloat_p'),
                  CLArg('omegas', 'gcfloat_p'),
                  CLArg('h_t', 'float'),
                  CLArg('seq_len', 'unsigned'),
                  CLArg('gamma_total', 'gcfloat_p'),
                  CLArg('delta_xyz', 'gcuint_p'),
                  CLArg('omega_xyz_offset', 'gcuint_p'))
    solver = ElwiseOdeSolver(ctx, dev, src, "calc_sbcooling_diff",
                             extra_args=extra_args,
                             options=['-I',
                                      _path.dirname(_path.abspath(__file__))],
                             post_func='calc_sbcooling_post')
    seq_len = np.uint32(seq_len)

    y0 = np.zeros(total_dim * 4, np.float32)

    if p_a is not None:
        if p_a.shape != (dim_x, dim_y, dim_z):
            raise ValueError("Initial value of p_a has wrong shape.")
        y0[:total_dim] = p_a.flatten()
    if p_b.shape != (dim_x, dim_y, dim_z):
        raise ValueError("Initial value of p_b has wrong shape.")
    y0[total_dim:total_dim * 2] = p_b.flatten()
    if p_c is not None:
        if p_c.shape != (dim_x, dim_y, dim_z):
            raise ValueError("Initial value of p_c has wrong shape.")
        y0[total_dim * 2:total_dim * 3] = p_c.flatten()

    extra_args_vals = (np.uint32(dim_x), np.uint32(dim_y), np.uint32(dim_z),
                       gamma_xyz, gidx_minmax_xyz, pump_branch_gpu, omegas_gpu,
                       h_t, seq_len, gamma_total_gpu, delta_xyz_gpu,
                       omega_xyz_offset_gpu)
    return solver.run_no_process(0, t_len, h_t, y0, queue,
                                 extra_args=extra_args_vals)
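For orientation, the initial state y0 built above is a flat float32 array of length 4 * total_dim: p_a, p_b and p_c occupy the first three blocks (left zero when omitted), and the fourth block also stays zero, its meaning being defined by the kernel source in sideband.cl, which is not shown here. A tiny illustration of that packing with made-up dimensions:

import numpy as np

dim_x = dim_y = dim_z = 2
total_dim = dim_x * dim_y * dim_z
p_b = np.full((dim_x, dim_y, dim_z), 1.0 / total_dim, np.float32)

y0 = np.zeros(total_dim * 4, np.float32)
y0[total_dim:total_dim * 2] = p_b.flatten()   # only p_b given, so the other blocks stay zero
print(y0.reshape(4, total_dim))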