Example #1
def normalise_callback(data,
                       normalise_func=normalise,
                       use_imagenet_stats=False):

    if use_imagenet_stats:
        m, s = imagenet_mean, imagenet_std
    else:
        x, _ = next(iter(data.train_dl))
        m, s = extract_mean_std(
            x[0]) if normalise_func == cap_normalise else extract_mean_std(x)
    norm = partial(normalise_func, mean=m.cuda(), std=s.cuda())
    return partial(BatchTransformCallback, norm)
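
The factory above returns a partial so the framework can instantiate the callback later, with the normalisation statistics already bound. A minimal sketch of the idiom, using simplified stand-ins for 'normalise' and 'BatchTransformCallback' (the real ones live in the source codebase):

from functools import partial

def normalise(x, mean, std):        # simplified stand-in
    return (x - mean) / std

class BatchTransformCallback:       # simplified stand-in
    def __init__(self, tfm):
        self.tfm = tfm

norm = partial(normalise, mean=0.5, std=0.25)    # stats bound up front
make_cb = partial(BatchTransformCallback, norm)  # what the factory returns
cb = make_cb()                                   # framework instantiates later
print(cb.tfm(0.75))                              # -> 1.0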
Example #2
    def update_clusters_cpu(self, timeout=2 * 60):
        cpu_family = self.get_ovirt_cpu_family()
        api = self.engine_vm().get_api_v4(check=True)
        clusters_service = api.system_service().clusters_service()
        clusters = clusters_service.list()

        if clusters is None:
            LOGGER.debug('no clusters found: skipping')
            return

        for cluster in clusters:
            if cluster.cpu.type == cpu_family:
                continue
            LOGGER.debug(
                ('found CPU cluster mismatch, current: {0}, required: '
                 '{1}').format(cluster.cpu.type, cpu_family))

            cluster_service = clusters_service.cluster_service(cluster.id)
            cluster_service.update(
                otypes.Cluster(cpu=otypes.Cpu(type=cpu_family)))

            def _assert_cluster_cpu(cluster):
                cluster = clusters_service.cluster_service(cluster.id).get()
                return cluster.cpu.type == cpu_family

            testlib.assert_true_within(partial(_assert_cluster_cpu, cluster),
                                       timeout=timeout)
            LOGGER.debug(('successfully changed cluster id {0} to cpu family: '
                          '{1}').format(cluster.id, cpu_family))
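
Several of these examples pass a partial(...) to testlib.assert_true_within, which turns a one-argument check into the zero-argument predicate a polling loop expects. A minimal sketch of such a helper, assuming the real lago/oVirt testlib implementation differs in its details:

import time
from functools import partial

def assert_true_within(predicate, timeout, allowed_exceptions=(), interval=3):
    """Poll a zero-argument predicate until it is truthy or time runs out."""
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        try:
            if predicate():
                return
        except allowed_exceptions:
            pass
        time.sleep(interval)
    raise AssertionError('condition not met within %s seconds' % timeout)

# partial() supplies the host/cluster/vm argument ahead of time:
assert_true_within(partial(lambda x: x > 0, 1), timeout=5)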
Example #3
def get_cap_dls(labelled_list, train_bs, valid_bs, collate_fn=None,
                shuffle=True, num_workers=0, flip=False):
    if valid_bs is None:
        valid_bs = train_bs
    if collate_fn is not None:
        collate_fn = partial(collate_fn, flip=flip,
                             pad_idx=labelled_list.train.py.tokenizer.pad_token_id)
    return (DataLoader(labelled_list.train, batch_size=train_bs, shuffle=shuffle,
                       collate_fn=collate_fn, num_workers=num_workers),
            DataLoader(labelled_list.valid, batch_size=valid_bs, shuffle=shuffle,
                       collate_fn=collate_fn, num_workers=num_workers))
Example #4
    def assert_vdsm_alive(self, timeout=2 * 60):
        """
        Assert service 'vdsmd' reports running on all vdsm hosts

        Args:
            timeout(int): timeout

        Returns:
            None

        Raises:
            AssertionError: if vdsmd is not reported running after the
                given timeout, or ssh is unreachable.
        """
        def _vdsm_up(host):
            status = host.service('vdsmd').alive()
            LOGGER.debug('vdsm status: %s', status)
            return status

        for host in self.host_vms():
            testlib.assert_true_within(
                partial(_vdsm_up, host),
                timeout=timeout,
                allowed_exceptions=self._get_check_running_allowed_exceptions(),
            )
Example #5
    def start_all_hosts(self, timeout=5 * 60):
        api = self.get_api_v4(check=True)
        hosts_service = api.system_service().hosts_service()
        hosts = hosts_service.list(search='status=maintenance')
        if hosts:

            def _host_is_up(host):
                h_service = hosts_service.host_service(host.id)
                host_obj = h_service.get()
                if host_obj.status == otypes.HostStatus.UP:
                    return True

                if host_obj.status == otypes.HostStatus.NON_OPERATIONAL:
                    raise RuntimeError('Host %s is in non operational state' %
                                       host.name)
                elif host_obj.status == otypes.HostStatus.INSTALL_FAILED:
                    raise RuntimeError('Host %s installation failed' %
                                       host.name)

            for host in hosts:
                host_service = hosts_service.host_service(host.id)
                host_service.activate()

            for host in hosts:
                testlib.assert_true_within(partial(_host_is_up, host),
                                           timeout=timeout)
Example #6
    def parse(self):
        """ Parses the code
        """
        def is_fn(x):
            return x.startswith("fn_")

        fns = filter(is_fn, self.__dict__)
        # materialize: tokenize() below tests membership repeatedly
        self.fns = [x[3:] for x in fns]

        def assign_line_nbr(line, linenbr):
            return (linenbr, line)

        def tokenize(x):
            try:
                code, value = versa_int(x)  # maybe scalar
                assert code == True
                return (code, value)
            except AssertionError:
                if x in self.fns:
                    return ("function", x)
            return ("unknown", x)

        def res(st):
            # tuple parameters 'def res((_, tokens))' are Python 2 only;
            # unpack explicitly instead
            linenbr, tokens = st
            return (linenbr, [tokenize(tok) for tok in tokens])

        ## we want non-empty lines
        lines = list(filter(not_empty, self.code.split("\n")))
        _lines = map(assign_line_nbr, lines,
                     range(len(lines)))  ## (linenbr, line_data)

        ## since the separator is the space,
        ## we want to make sure that comma-separated constructs
        ## are treated correctly:  compress ', ' to ','
        f = compose([partial(freplace, ", ", ","), partial(fsplit, " ")])
        _sts = map(f, _lines)

        self.sts = list(filter(f_not_empty, _sts))

        self.xsts = [res(st) for st in self.sts]

        return self
Example #7
    def check_sds_status(self, status=None, timeout=5 * 60):
        # the default status cannot be used in the function header, because
        # the v4 sdk might not be available.
        if status is None:
            status = otypes.StorageDomainStatus.ACTIVE
        api = self.get_api_v4(check=True)
        dcs_service = api.system_service().data_centers_service()
        for dc in dcs_service.list():

            def _sds_state(dc_id):
                dc_service = dcs_service.data_center_service(dc_id)
                sds = dc_service.storage_domains_service()
                return all(sd.status == status for sd in sds.list())

            testlib.assert_true_within(partial(_sds_state, dc_id=dc.id),
                                       timeout=timeout)
Example #8
    def stop_all_vms(self, timeout=5 * 60):
        api = self.get_api_v4(check=True)
        vms_service = api.system_service().vms_service()
        ids = self._search_vms(api, query='status=up')
        # plain loop instead of a side-effect list comprehension; also
        # avoid shadowing the builtin 'id'
        for vm_id in ids:
            vms_service.vm_service(vm_id).stop()

        def _vm_is_down(vm_id):
            vm_srv = vms_service.vm_service(vm_id)
            vm = vm_srv.get()
            if vm.status == otypes.VmStatus.DOWN:
                LOGGER.debug('Engine VM ID %s, is down', vm_id)
                return True

        for vm_id in ids:
            testlib.assert_true_within(partial(_vm_is_down, vm_id=vm_id),
                                       timeout=timeout)
Example #9
    def g(self):
        """Returns the left-hand side of the first equation of the system of
        non-linear PDE for the primal/dual variables (w,u):

        g(w,u) = - a div w + K* (Ku - z) ,

        where 'u' is the real image (volume), 'z' the observed image (
        projections), 'w' the variable dual to 'u', 'a' a coefficient
        balancing the trade-off between data fidelity '|| K u - z||^2_L2'
        and regularization 'TV(u) = || |grad u| ||_L1'.

        Returns
        -------
        numpy.array
            Array of the same dimension as the reconstruction volume 'u'.
        """

        ushape = self.u.shape
        assert self.K.num_voxel == ushape, "Volume data dimension mismatch"

        # K u
        self.K.set_volume_data(self.u)
        self.K.forward()

        assert self.z.shape == self.K.projection_shape, \
            "Projection data mismatch"

        # K* (K u - z)
        self.K.set_projection_data(self.K.projection_data - self.z)
        self.K.backward()

        # Create slice object in order to fix dimension mismatch after
        # taking derivative s = [slice(0, nn - 1) for nn in ushape]

        # In order to compute the divergence of w, a generator comprehension
        #  is used to create an iterable object containing the 1D
        # derivatives of 'w'. return - self.a * reduce(add, (np.diff(wn, 1,
        # n)[s] for (n, wn) in enumerate(self.w))) + self.K.volume_data[s]
        return -self.a * reduce(
            add, (partial(wn, n)
                  for (n, wn) in enumerate(self.w))) + self.K.volume_data
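
Note that in this snippet (and the tomography examples below) 'partial' is called as partial(array, dim, width), so it appears to be a finite-difference partial-derivative helper from the same codebase rather than functools.partial. A rough sketch of what such a helper could look like, assuming numpy.diff semantics as in the commented-out original:

import numpy as np

def partial(f, axis, dx=1.0):
    # assumed behaviour, inferred from call sites like
    # partial(vol.data.reshape(geom.vol_shape), dim, geom.voxel_width[dim]);
    # the real helper may differ in stencil and boundary handling
    return np.diff(f, n=1, axis=axis) / dx

u = np.arange(12.0).reshape(3, 4)
print(partial(u, 0).shape)  # (2, 4): first differences along axis 0
print(partial(u, 1).shape)  # (3, 3): first differences along axis 1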
Example #10
    def assert_engine_alive(self, timeout=2 * 60):
        """
        Assert service 'ovirt-engine' reports running on the engine VM

        Args:
            timeout(int): timeout

        Returns:
            None

        Raises:
            AssertionError: if ovirt-engine is not reported running after the
                given timeout, or ssh is unreachable.
        """
        def _ovirt_engine_up(host):
            status = host.service('ovirt-engine').alive()
            LOGGER.debug('ovirt-engine status: %s', status)
            return status

        testlib.assert_true_within(partial(_ovirt_engine_up, self.engine_vm()),
                                   timeout=timeout)
Example #11
def fastmatch(t1, t2):
  """
  Calculates a match between t1 and t2.
  See figure 10 in reference.
  """
  M = Bimap()

  label(t1)
  label(t2)
  depth = max(get_depth(t1), get_depth(t2))

  while 0 <= depth:
    nodes1 = get_depth_nodes(t1, depth)
    nodes2 = get_depth_nodes(t2, depth)

    equal = utils.partial(depth_equal, 0.6, 0.5, M)

    _match(nodes1, nodes2, M, equal)

    depth -= 1

  return M
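
utils.partial here presumably re-exports functools.partial; the call pins the two similarity thresholds and the bimap so that _match receives a plain two-argument predicate. Equivalent with the standard library, under an assumed depth_equal signature:

from functools import partial

def depth_equal(thresh1, thresh2, M, a, b):  # assumed five-argument signature
    return abs(a - b) <= thresh1

M = {}                                       # stand-in for the Bimap
equal = partial(depth_equal, 0.6, 0.5, M)    # equal(a, b) -> bool
print(equal(1.0, 1.5))                       # True (|1.0 - 1.5| <= 0.6)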
Example #12
    def __init__(self,
                 visual_model,
                 num_visual_features,
                 textual_features,
                 vocab_size,
                 pad_token_id,
                 max_len=49,
                 encoding_drop=0.1,
                 N=6,
                 heads=8,
                 attn_drop=0.1,
                 ff_drop=0.1,
                 d_ff=2048,
                 activation='GELU'):
        super().__init__()
        self.visual_backbone = visual_model
        self.th = Xcoder(True,
                         N,
                         textual_features,
                         h=heads,
                         d_ff=d_ff,
                         ff_drop=ff_drop,
                         attn_drop=attn_drop,
                         activation=activation)
        self.visual_features = []
        self.lin_projection = nn.Linear(num_visual_features, textual_features)
        self.embed = WordEmbedding(vocab_size,
                                   textual_features,
                                   padding_index=pad_token_id)
        self.pos_enc = PositionalEncoding(textual_features, max_len,
                                          encoding_drop)
        self._register_hook(self.visual_backbone,
                            partial(self.hook_function, self.visual_features))
        self.lin_out = nn.Linear(textual_features, vocab_size)
        self.lin_out.weight = self.embed.emb.weight
        self.pad_tok_id = pad_token_id
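
partial(self.hook_function, self.visual_features) pre-binds the storage list while leaving the (module, input, output) signature that PyTorch forward hooks expect. A self-contained sketch of the idiom, with an assumed hook_function:

import torch
from torch import nn
from functools import partial

def hook_function(storage, module, inputs, output):
    # assumed shape of the method used above: stash features as they flow
    storage.append(output)

features = []
backbone = nn.Linear(4, 2)                    # toy stand-in for the backbone
backbone.register_forward_hook(partial(hook_function, features))
backbone(torch.randn(1, 4))
print(len(features))                          # 1: one forward pass captured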
Example #13
    def matrix_norm(self, iterations, vol_init=1.0,
                    tv_norm=False, return_volume=False,
                    intermediate_results=False):
        """The matrix norm || K ||_2  of 'K' defined here as largest
        singular value of 'K'. Employs the generic power method to obtain a
        scalar 's' which tends to || K ||_2 as the iterations N increase.

        To be implemented: optionally return volume 'x', such that it can be
        re-used as initializer to continue the iteration.

        Parameters
        ----------
        :type iterations: int
        :param iterations: Number of iterations of the generic power method.
        :type vol_init: float | ndarray (default 1.0)
        :param vol_init: in I, initial image to start with.
        :type intermediate_results: bool
        :param intermediate_results: Returns list of intermediate results
        instead of scalar.
        :type return_volume: bool
        :param return_volume: Return volume in order to resume iteration via
        passing it over as initial volume.

        Returns
        -------
        :rtype: float | numpy.ndarray, numpy.ndarray (optional)
        :returns: s, vol
         s: Scalar of final iteration or numpy.ndarray containing all
         results during iteration.
         vol: Volume vector
        """

        geom = self.geom
        vol = self.recon_space.element(vol_init)
        proj = Rn(geom.proj_size).zero()
        # projector = Projector(geom, vol.space, proj.space)
        projector = Projector(geom)
        # print 'projector scaling factor', projector.scal_fac
        tmp = None

        if intermediate_results:
            s = np.zeros(iterations)
        else:
            s = 0

        # Power method loop
        for n in range(iterations):

            # step 4: x_{n+1} <- K^T K x_n
            if tv_norm:
                # K = (A, grad) instead of K = A
                # Compute: - div grad x_n
                # use sum over generator expression
                tmp = -reduce(add,
                              (partial(
                                  partial(vol.data.reshape(geom.vol_shape),
                                          dim, geom.voxel_width[dim]),
                                  dim, geom.voxel_width[dim]) for dim in
                               range(geom.vol_ndim)))

            # x_n <- A^T (A x_n)
            vol = projector.backward(projector.forward(vol))
            vol *= self.adj_scal_fac

            if tv_norm:
                # x_n <- x_n - div grad x_n
                # print 'n: {2}. vol: min = {0}, max = {1}'.format(
                #     vol.data.min(), vol.data.max(), n)
                # print 'n: {2}. tv: min = {0}, max = {1}'.format(tmp.min(),
                #                                            tmp.max(), n)
                vol.data[:] += tmp.ravel()

            # step 5:
            # x_n <- x_n/||x_n||_2
            vol /= vol.norm()

            # step 6:
            # s_n <-|| K x ||_2
            if intermediate_results:
                # proj <- A^T x_n
                proj = projector.forward(vol)
                s[n] = proj.norm()
                if tv_norm:
                    s[n] = np.sqrt(s[n] ** 2 +
                                   reduce(add,
                                          (np.linalg.norm(
                                              partial(vol.data.reshape(
                                                  geom.vol_shape), dim,
                                                  geom.voxel_width[dim])) ** 2
                                           for dim in range(geom.vol_ndim))))

        # step 6: || K x ||_2
        if not intermediate_results:
            proj = projector.forward(vol)
            s = proj.norm()
            if tv_norm:
                s = np.sqrt(s ** 2 + reduce(add,
                                            (np.linalg.norm(partial(
                                                vol.data.reshape(
                                                    geom.vol_shape), dim,
                                                geom.voxel_width[dim])) ** 2
                                             for dim in range(geom.vol_ndim))))

        # Clear ASTRA memory
        projector.clear_astra_memory()

        # Returns
        if not return_volume:
            return s
        else:
            return s, vol.data
Example #14
    def least_squares(self, iterations=1, L=None, tau=None, sigma=None,
                      theta=None, non_negativiy_constraint=False,
                      tv_norm=False,
                      verbose=True):
        """Least-squares problem with optional TV-regularisation and/or
        non-negativity constraint.

        Parameters
        ----------
        :type iterations: int (default 1)
        :param iterations: Number of iterations the optimization should
        run for.
        :type L: float (default: None)
        :param L: Matrix norm of forward projector. If 'None' matrix_norm is
        called with 20 iterations.
        :type tau: float (default 1/L)
        :param tau:
        :type sigma: float (default 1/L)
        :param sigma:
        :type theta: float (default 1)
        :param theta:
        :type non_negativiy_constraint: bool (default False)
        :param non_negativiy_constraint: Add non-negativity constraint to
        optimization problem (via indicator function).
        :type tv_norm: bool | float (default False)
        :param tv_norm: Unless False, coincides with the numerical value of
        the parameter lambda for TV-Regularisation.
        :type verbose: bool (default True)
        :param verbose: Show intermediate reconstructions and
        convergence measures during iteration.

        Returns
        -------
        :rtype: odl.Vector, odl.Vector, numpy.ndarray, numpy.ndarray
        :returns: u, p, cpd, l2_du
         u: vector of reconstructed volume
         p: vector of dual projection variable
         cpd: conditional primal-dual gap (convergence measure)
         l2_du: l2-norm of constraint-induced convergence measure
        """

        # step 1:
        if L is None:
            L = self.matrix_norm(20)
        if tau is None:
            tau = 1 / L
        if sigma is None:
            sigma = 1 / L
        if theta is None:
            theta = 1

        # print 'tau:', tau
        # print 'sigma:', sigma
        # print 'theta:', theta

        geom = self.geom
        g = self.proj  # domain: D

        # l2-norm of (volume update / tau)
        l2_du = np.zeros(iterations)
        # conditional primal-dual gap
        cpd = np.zeros(iterations)

        # step 2: initialize u and p with zeros
        u = self.recon_space.zero()  # domain: I
        p = g.space.zero()  # domain: D
        # q: spatial vector = list of ndarrays in I (not Rn vectors)
        if tv_norm:
            ndim = geom.vol_ndim
            # domain of q: V = [I, I, ...]
            q = [np.zeros(geom.vol_shape, dtype=u.data.dtype) for _ in range(
                ndim)]

        # step 3: ub <- u
        ub = u.copy()  # domain: I

        # initialize projector
        # A = Projector(geom, u.space, p.space)
        A = Projector(geom)

        # visual output instance
        disp = DisplayIntermediates(verbose=verbose, vol=u.data.reshape(
            geom.vol_shape), cpd=cpd, l2_du=l2_du)

        # step 4: repeat
        for n in range(iterations):

            # step 5: p_{n+1} <- (p_n + sigma(A^T ub_n - g)) / (1 + sigma)
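            # NOTE: n >= 0 always holds for n in range(iterations), so the
            # else branch below is never taken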
            if n >= 0:
                # with(Timer('proj:')):
                #     # p_tmp <- A ub
                #     p_tmp = A.forward(ub)
                #     # p_tmp <- p_tmp - g
                #     p_tmp -= g
                #     # p <- p + sigma * p_tmp
                #     p += sigma * p_tmp
                # p_n <- p_n + sigma(A ub -g )
                tmp = A.forward(ub)
                # print 'p:', p.data.shape, 'Au:', tmp.data.shape, 'g:', \
                #     g.data.shape
                p += sigma * (A.forward(ub) - g)
            else:
                p -= sigma * g
            # p <- p / (1 + sigma)
            p /= 1 + sigma

            # TV step 6: q_{n+1} <- lambda(q_n + sigma grad ub_n) /
            # max(lambda 1_I, |q_n + sigma grad ub_n|)
            if tv_norm:

                for dim in range(ndim):
                    # q_n <- q_n + sigma * grad ub_n
                    q[dim] += sigma * partial(ub.data.reshape(
                        self.geom.vol_shape), dim, geom.voxel_width[dim])

                # |q_n|: isotropic TV
                # use div_q to save memory, q = [qi] where qi are ndarrays
                div_q = np.sqrt(reduce(add, (qi ** 2 for qi in q)))

                # max(lambda 1_I, |q_n + sigma diff ub_n|)
                # print 'q_mag:', div_q.min(), div_q.max()
                div_q[div_q < tv_norm] = tv_norm

                # q_n <- lambda * q_n / |q_n|
                for dim in range(ndim):
                    q[dim] /= div_q
                    q[dim] *= tv_norm

                # div q_{n+1}
                div_q = reduce(add, (partial(qi, dim, geom.voxel_width[dim])
                                     for (dim, qi) in enumerate(q)))
                div_q *= tau

            # step 6: u_{n+1} <- u_{n} - tau * A^T p_{n+1}
            # TV step 7: u_{n+1} <- u_{n} - tau * A^T p_{n+1} + div q_{n+1}
            # ub_tmp <- A^T p
            ub_tmp = A.backward(p)
            ub_tmp *= tau
            ub_tmp *= self.adj_scal_fac
            # l2-norm per voxel of ub_tmp = A^T p
            l2_du[n:] = ub_tmp.norm()  # / u.data.size
            if tv_norm:
                l2_du[n:] += np.linalg.norm(div_q.ravel())  # / u.data.size
            # store current u_n temporarily in ub_n
            ub = -u.copy()
            # u <- u - tau ub_tmp
            u -= ub_tmp
            # TV: u <- u + tau div q
            if tv_norm:
                print('{0}: u - A^T p: min = {1}, max = {2}'.format(
                    n, u.data.min(), u.data.max()))
                print('{0}: div q: min = {1}, max = {2}'.format(
                    n, div_q.min(), div_q.max()))
                u.data[:] += div_q.ravel()

            # Positivity constraint
            if non_negativiy_constraint:
                u.data[u.data < 0] = 0
                # print '\nu:', u.data.min(), u.data.max()

            # conditional primal-dual gap for current u and p
            # 1/2||A u - g||_2^2 + 1/2||p||_2^2 + <p,g>_D
            # p_tmp <- A u
            # p_tmp = A.forward(u)
            # p_tmp -= g
            # cpd[n:] = (0.5 * p_tmp.norm() ** 2 +
            cpd[n:] = (0.5 * p.space.norm(A.forward(u) - g) ** 2 +
                       0.5 * p.norm() ** 2 +
                       p.inner(g))  # / p.data.size
            if tv_norm:
                cpd[n:] += tv_norm * np.linalg.norm(
                    reduce(add, (partial(u.data.reshape(geom.vol_shape),
                                         dim, geom.voxel_width[dim]) for dim
                                 in range(geom.vol_ndim))
                           ).ravel(), ord=1)  # / u.data.size

            # step 7 / TV step 8: ub_{n+1} <- u_{n+1} + theta(u_{n+1} - u_n)
            # ub <- ub + u_{n+1}, remember ub = -u_n
            ub += u
            # ub <- theta * ub
            ub *= theta
            # ub <- ub + u_{n+1}
            ub += u

            # visual output
            disp.update()

        A.clear_astra_memory()

        # Should avoid window freezing
        disp.show()

        return u, p, cpd, l2_du
Example #15
def fit_voxel_grid(voxel_grid,
                   max_num_fitted_models=5,
                   use_sphere=True,
                   use_cuboid=True,
                   use_capsule=True,
                   visualize_intermediate=False,
                   loss_type=LossType.BEST_EFFORT,
                   use_cuda=False,
                   cuda_device=None,
                   component_threshold=0.05):
    fitted_models = []

    num_voxels_total = voxel_grid.sum().item()

    voxels_remaining = voxel_grid.clone()
    connected_components = voxel.connected_components(voxels_remaining)

    i = 0
    while len(connected_components) > 0 and i < max_num_fitted_models:
        component = argmax(connected_components, lambda comp: comp.shape[0])

        if (float(component.shape[0]) /
                num_voxels_total) <= component_threshold:
            break

        component_points = component.float()
        if use_cuda:
            component_points = component_points.cuda(device=cuda_device)

        lambda_ = 1.0

        potential_models = []
        if use_sphere:
            potential_models.append(SphereModel(component_points, lambda_))
        if use_cuboid:
            potential_models.append(CuboidModel(component_points, lambda_))
        if use_capsule:
            potential_models.append(CapsuleModel(component_points, lambda_))

        if use_cuda:
            for model in potential_models:
                model.cuda(device=cuda_device)

        best_model = argmin(
            potential_models,
            partial(optimize, component_points, loss_type=loss_type))

        if visualize_intermediate:
            fig = plt.figure()
            ax = fig.gca(projection='3d')
            draw.draw_voxels(ax, voxels_remaining)
            best_model.draw(ax)
            plt.show()

        points_inside_mask = best_model.exact_containment(component_points)

        indices_covered = component[points_inside_mask, :]
        voxel.batch_set(voxels_remaining, indices_covered, False)

        if points_inside_mask.sum().item() > 0:
            fitted_models.append(best_model)
        else:
            # We failed to fit to any of the voxels in this component, so just ignore it
            # Todo: try splitting the component up and fit to those pieces
            voxel.batch_set(voxels_remaining, component, False)

        i += 1

        connected_components = voxel.connected_components(voxels_remaining)

    return fitted_models
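
argmax and argmin here take an iterable plus a key function, and partial(optimize, component_points, loss_type=loss_type) pre-binds everything except the candidate model. Plausible one-liners for those helpers, labeled as assumptions:

from functools import partial

def argmax(items, key):
    # assumed helper: element of items maximizing key(item)
    return max(items, key=key)

def argmin(items, key):
    # assumed helper: element of items minimizing key(item)
    return min(items, key=key)

# toy stand-in for optimize(points, model, loss_type=...):
def optimize(points, model, loss_type=None):
    return abs(model - points)

print(argmin([1, 2, 4], partial(optimize, 3)))  # -> 2 (loss 1)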
Example #16
    # This can be used once we want to support integer as integer values and
    # not as numeric anymore (using masked arrays ?).
    acls2dtype = {'real': np.float64, 'integer': np.float64,
                  'numeric': np.float64}
    acls2conv = {'real': safe_float, 'integer': safe_float,
                 'numeric': safe_float}
    descr = []
    convertors = []
    if not hasstr:
        for name, value in attr:
            type = parse_type(value)
            if type == 'date':
                raise ValueError("date type not supported yet, sorry")
            elif type == 'nominal':
                n = maxnomlen(value)
                descr.append((name, 'S%d' % n))
                pvalue = get_nom_val(value)
                convertors.append(partial(safe_nominal, pvalue=pvalue))
            else:
                descr.append((name, acls2dtype[type]))
                convertors.append(safe_float)
                #dc.append(acls2conv[type])
                #sdescr.append((name, acls2sdtype[type]))
    else:
        # How to support string efficiently ? Ideally, we should know the max
        # size of the string before allocating the numpy array.
        raise NotImplementedError("String attributes not supported yet, sorry")

    ni = len(convertors)

    # Get the delimiter from the first line of data:
    def next_data_line(row_iter):
        """Assumes we are already in the data part (eg after @data)."""
Example #17
    def label_by_df(cls, items, csv_path, index_name, column_name,
                    proc_x=None, proc_y=None):
        df = pd.read_csv(csv_path)
        df.set_index(index_name, inplace=True)
        return cls(items, label_all(items, partial(label_func, df, column_name)),
                   proc_x=proc_x, proc_y=proc_y)
Example #18
from pathlib import Path
from functools import partial
from optimizers import sgd_step, StatefulOptimizer
from callbacks import *
from torch import tensor
from torch.cuda import amp


def param_getter(m):
    return m.parameters()


def ifNone(a, b):
    return b if a is None else a


sgd_opt = partial(StatefulOptimizer, steppers=[sgd_step])


class Learner():
    def __init__(self,
                 model,
                 data,
                 loss_func,
                 opt_func=sgd_opt,
                 lr=1e-2,
                 splitter=param_getter,
                 cbs=None,
                 cb_funcs=None,
                 path=None):
        self.model, self.data, self.loss_func, self.opt_func, self.lr, self.splitter = model, data, loss_func, opt_func, lr, splitter
        self.in_train, self.logger, self.opt = False, print, None
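
sgd_opt is a factory: partial pre-binds the stepper list, so the Learner can later build the optimizer with just the parameters and learning rate. A minimal sketch of that flow, with simplified stand-ins for StatefulOptimizer and sgd_step:

from functools import partial

def sgd_step(p, lr):                    # simplified stepper stand-in
    p['value'] -= lr * p['grad']

class StatefulOptimizer:                # simplified stand-in
    def __init__(self, params, steppers, lr):
        self.params, self.steppers, self.lr = list(params), steppers, lr

    def step(self):
        for p in self.params:
            for stepper in self.steppers:
                stepper(p, self.lr)

sgd_opt = partial(StatefulOptimizer, steppers=[sgd_step])
opt = sgd_opt([{'value': 1.0, 'grad': 0.5}], lr=0.1)  # e.g. inside fit()
opt.step()
print(opt.params[0]['value'])           # 0.95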
Example #19
    def begin_fit(self):
        self.mbar = master_bar(range(self.epochs))
        self.mbar.on_iter_begin()
        self.run.logger = partial(self.mbar.write, table=True)
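
Binding table=True turns the progress bar's two-argument write method into a print-like, single-argument logger. A tiny sketch with a stand-in for fastprogress's master_bar:

from functools import partial

class Bar:                              # stand-in for fastprogress master_bar
    def write(self, line, table=False):
        print(('| ' if table else '') + str(line))

logger = partial(Bar().write, table=True)
logger('epoch 0  train_loss 0.42')      # callable exactly like print(...)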