Example #1
def vis_hybrid_image(hybrid_image):
    """
    Visualize a hybrid image by progressively downsampling it and
    concatenating all of the scales together.
    """
    scales = 5  # how many downsampled versions to create
    padding = 5  # how many pixels of padding between scales

    original_height = hybrid_image.shape[0]
    num_colors = hybrid_image.shape[2]  # how many color channels the input has
    output = hybrid_image
    cur_image = hybrid_image

    for i in range(2, scales + 1):
        # white vertical strip separating consecutive scales
        gap_array = np.ones((original_height, padding, num_colors))
        output = cat((output, gap_array), axis=1)

        cur_image = resize(cur_image,
                           (cur_image.shape[0] // 2, cur_image.shape[1] // 2),
                           anti_aliasing=True)

        # pad the downsampled image on top so it sits flush with the bottom edge
        vertical_gap = np.ones((original_height - cur_image.shape[0],
                                cur_image.shape[1], num_colors))
        tmp = cat((vertical_gap, cur_image), axis=0)
        output = cat((output, tmp), axis=1)

    return output
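A minimal usage sketch for the function above (hedged: the imports are inferred from the call sites, namely numpy as np, numpy.concatenate aliased as cat, and skimage.transform.resize):

import numpy as np
from numpy import concatenate as cat
from skimage.transform import resize

hybrid = np.random.rand(256, 256, 3)  # stand-in for a real hybrid image
vis = vis_hybrid_image(hybrid)
print(vis.shape)  # height stays 256; the width grows by one padded, downsampled copy per scale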
Example #2
def concatenate(data, axis):
    """Concatenate multiple trials into one trials, according to any dimension.

    Parameters
    ----------
    data : instance of DataTime, DataFreq, or DataTimeFreq

    axis : str
        axis that you want to concatenate (it can be 'trial')

    Returns
    -------
    instance of same class as input
        the data will always have only one trial

    Notes
    -----
    If axis is 'trial', it will add one more dimension, and concatenate based
    on it. It will then create a new axis, called 'trial_axis' (not 'trial'
    because that axis is hard-coded).

    If you want to concatenate across trials, you need:

    >>> expand_dims(data1.data[0], axis=1).shape
    """
    output = data._copy(axis=False)

    for dataaxis in data.axis:
        output.axis[dataaxis] = empty(1, dtype='O')

        if dataaxis == axis:
            output.axis[dataaxis][0] = cat(data.axis[dataaxis])
        else:
            output.axis[dataaxis][0] = data.axis[dataaxis][0]

        if len(unique(output.axis[dataaxis][0])) != len(
                output.axis[dataaxis][0]):
            lg.warning('Axis ' + dataaxis + ' does not have unique values')

    output.data = empty(1, dtype='O')
    if axis == 'trial':

        # create new axis
        new_axis = empty(1, dtype='O')
        n_trial = data.number_of('trial')
        trial_name = ['trial{0:06}'.format(x) for x in range(n_trial)]
        new_axis[0] = asarray(trial_name, dtype='U')
        output.axis['trial_axis'] = new_axis

        # concatenate along the extra dimension
        all_trial = []
        for one_trial in data.data:
            all_trial.append(expand_dims(one_trial, -1))
        output.data[0] = cat(all_trial, axis=-1)

    else:
        output.data[0] = cat(data.data, axis=output.index_of(axis))

    return output
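The Notes above describe adding a trailing axis per trial and then concatenating along it. A minimal numpy sketch of that pattern (the variable names here are illustrative, not part of the library):

import numpy as np
from numpy import concatenate as cat, expand_dims

# three fake trials, each channels x time
trials = [np.random.rand(4, 100) for _ in range(3)]

# give each trial a trailing 'trial' axis, then concatenate along it
stacked = cat([expand_dims(t, -1) for t in trials], axis=-1)
print(stacked.shape)  # (4, 100, 3)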
Example #3
def pos_to_mat(pos, rot_mat=None):
    """
    Given an (x, y, z) position, create a 4x4 homogeneous transformation matrix.
    """
    mat = eye(3) if rot_mat is None else rot_mat
    mat = cat((mat, A([pos]).T), axis=1)  # append the translation as a 3x1 column -> 3x4
    mat = cat((mat, A([(0, 0, 0, 1)])))   # append the homogeneous row (0, 0, 0, 1) -> 4x4
    return mat
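A quick check of the function above (hedged: it assumes the aliases A = np.array, eye = np.eye and cat = np.concatenate that these snippets appear to use):

import numpy as np
from numpy import concatenate as cat, eye

A = np.array  # alias assumed by the example above

mat = pos_to_mat((1.0, 2.0, 3.0))
print(mat)
# [[1. 0. 0. 1.]
#  [0. 1. 0. 2.]
#  [0. 0. 1. 3.]
#  [0. 0. 0. 1.]]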
Example #4
File: merge.py Project: gpiantoni/phypno
def concatenate(data, axis):
    """Concatenate multiple trials into one trials, according to any dimension.

    Parameters
    ----------
    data : instance of DataTime, DataFreq, or DataTimeFreq

    axis : str
        axis that you want to concatenate (it can be 'trial')

    Returns
    -------
    instance of same class as input
        the data will always have only one trial

    Notes
    -----
    If axis is 'trial', it will add one more dimension, and concatenate based
    on it. It will then create a new axis, called 'trial_axis' (not 'trial'
    because that axis is hard-coded).

    If you want to concatenate across trials, you need:

    >>> expand_dims(data1.data[0], axis=1).shape
    """
    output = data._copy(axis=False)

    for dataaxis in data.axis:
        output.axis[dataaxis] = empty(1, dtype='O')

        if dataaxis == axis:
            output.axis[dataaxis][0] = cat(data.axis[dataaxis])
        else:
            output.axis[dataaxis][0] = data.axis[dataaxis][0]

        if len(unique(output.axis[dataaxis][0])) != len(output.axis[dataaxis][0]):
            lg.warning('Axis ' + dataaxis + ' does not have unique values')

    output.data = empty(1, dtype='O')
    if axis == 'trial':

        # create new axis
        new_axis = empty(1, dtype='O')
        n_trial = data.number_of('trial')
        trial_name = ['trial{0:06}'.format(x) for x in range(n_trial)]
        new_axis[0] = asarray(trial_name, dtype='U')
        output.axis['trial_axis'] = new_axis

        # concatenate along the extra dimension
        all_trial = []
        for one_trial in data.data:
            all_trial.append(expand_dims(one_trial, -1))
        output.data[0] = cat(all_trial, axis=-1)

    else:
        output.data[0] = cat(data.data, axis=output.index_of(axis))

    return output
Example #5
    def compute_target_part_scoremap_slices(self, joint_id, coords, data_item, size, scale):
        dist_thresh = self.cfg.pos_dist_thresh * scale
        dist_thresh_sq = dist_thresh ** 2
        num_joints = self.cfg.num_joints

        # TODO: truncate size to agree with resnet depth
        size[0] = int(np.sqrt(size[0]))**2 # new

        scmap = np.zeros(cat([size, arr([num_joints])]))
        locref_size = cat([size, arr([num_joints * 2])])
        locref_mask = np.zeros(locref_size)
        locref_map = np.zeros(locref_size)
        width = size[2] # new
        height = size[1] # new
        truncated_depth = size[0] - 1 # new

        for person_id in range(len(coords)):
            for k, j_id in enumerate(joint_id[person_id]):
                joint_pt = coords[person_id][k, :]
                # print("Joint {} ".format(k), joint_pt)
                j_x = np.asscalar(joint_pt[0])
                j_y = np.asscalar(joint_pt[1])
                # Note: the annotations are XYZ, but the masks are DHW=ZXY
                # TODO: for now, truncate to depth of resnet output
                j_z = min(int(joint_pt[2])-1, truncated_depth) # new; starts at 1; not affected by stride or distance

                # don't loop over entire heatmap, but just relevant locations
                j_x_sm = round((j_x - self.half_stride) / self.stride)
                j_y_sm = round((j_y - self.half_stride) / self.stride)
                min_x = round(max(j_x_sm - dist_thresh - 1, 0))
                max_x = round(min(j_x_sm + dist_thresh + 1, width - 1))
                min_y = round(max(j_y_sm - dist_thresh - 1, 0))
                max_y = round(min(j_y_sm + dist_thresh + 1, height - 1))

                for j in range(min_y, max_y + 1):  # range(height):
                    pt_y = j * self.stride + self.half_stride
                    for i in range(min_x, max_x + 1):  # range(width):
                        # pt = arr([i*stride+half_stride, j*stride+half_stride])
                        # diff = joint_pt - pt
                        # The code above is too slow in python
                        pt_x = i * self.stride + self.half_stride
                        dx = j_x - pt_x
                        dy = j_y - pt_y
                        dist = dx ** 2 + dy ** 2
                        # print(la.norm(diff))
                        if dist <= dist_thresh_sq:
                            # New index: z, which is not iterated
                            scmap[j_z, j, i, j_id] = 1
                            locref_mask[j_z, j, i, j_id * 2 + 0] = 1
                            locref_mask[j_z, j, i, j_id * 2 + 1] = 1
                            locref_map[j_z, j, i, j_id * 2 + 0] = dx * self.locref_scale
                            locref_map[j_z, j, i, j_id * 2 + 1] = dy * self.locref_scale

        weights = self.compute_scmap_weights(scmap.shape, joint_id, data_item)

        return scmap, weights, locref_map, locref_mask
Example #6
    def compute_target_part_scoremap(self, joint_id, coords, data_item, size,
                                     scale):
        stride = self.cfg.stride
        dist_thresh = self.cfg.pos_dist_thresh * scale
        num_joints = self.cfg.num_joints
        half_stride = stride / 2
        scmap = np.zeros(cat([size, arr([num_joints])]))
        locref_size = cat([size, arr([num_joints * 2])])
        locref_mask = np.zeros(locref_size)
        locref_map = np.zeros(locref_size)

        locref_scale = 1.0 / self.cfg.locref_stdev
        dist_thresh_sq = dist_thresh**2

        width = size[1]
        height = size[0]

        for person_id in range(len(coords)):
            for k, j_id in enumerate(joint_id[person_id]):
                joint_pt = coords[person_id][k, :]
                j_x = np.asscalar(joint_pt[0])
                j_y = np.asscalar(joint_pt[1])

                if np.isnan(j_x) or np.isnan(j_y):
                    continue

                # don't loop over entire heatmap, but just relevant locations
                j_x_sm = round((j_x - half_stride) / stride)
                j_y_sm = round((j_y - half_stride) / stride)
                min_x = round(max(j_x_sm - dist_thresh - 1, 0))
                max_x = round(min(j_x_sm + dist_thresh + 1, width - 1))
                min_y = round(max(j_y_sm - dist_thresh - 1, 0))
                max_y = round(min(j_y_sm + dist_thresh + 1, height - 1))

                for j in range(int(min_y), int(max_y) + 1):  # range(height):
                    pt_y = j * stride + half_stride
                    for i in range(int(min_x),
                                   int(max_x) + 1):  # range(width):
                        # pt = arr([i*stride+half_stride, j*stride+half_stride])
                        # diff = joint_pt - pt
                        # The code above is too slow in python
                        pt_x = i * stride + half_stride
                        dx = j_x - pt_x
                        dy = j_y - pt_y
                        dist = dx**2 + dy**2
                        # print(la.norm(diff))
                        if dist <= dist_thresh_sq:
                            scmap[j, i, j_id] = 1
                            locref_mask[j, i, j_id * 2 + 0] = 1
                            locref_mask[j, i, j_id * 2 + 1] = 1
                            locref_map[j, i, j_id * 2 + 0] = dx * locref_scale
                            locref_map[j, i, j_id * 2 + 1] = dy * locref_scale

        weights = self.compute_scmap_weights(scmap.shape, joint_id, data_item)

        return scmap, weights, locref_map, locref_mask
Example #7
def __generate_linear_row(T, depth, zeros, dim):
    """Generates linear state transition matrix rows"""
    core = np.eye(dim)
    if depth == 1:
        return cat((np.zeros((dim, dim * zeros)), core), axis=1)
    depth = depth - 1
    return cat((__generate_linear_row(T, depth, zeros, dim),
                T**depth / math.factorial(depth) * core), axis=1)
Example #8
File: sigmoid.py Project: argju/cgptoolbox
def cgpstudy():
    """
    Connecting the building blocks of a cGP study.
    
    This implements the simulation pipeline in Gjuvsland et al. (2011).
    """
    from numpy import concatenate as cat
    gt = np.array(genotypes)
    par = cat([gt2par(list(g), hetpar, loc2par) for g in gt])
    ph = cat([par2ph(p) for p in par])
    return gt, par, ph
Example #9
File: ws.py Project: timseries/py_utils
 def subband_group_sum(self,s,group_type,average=True, energy=True):
     """
     Computes the sum (or average) of the (squared, if energy=True) coefficients
     of one group type for subband s.
     group_type can be 'parent_children', 'children', or 'parent_child'.
     """
     if group_type != 'children':
         w_parent = self.get_upsampled_parent(s) #THIS BAD, we have two functions calling each other (potentially)
         if energy:
             w_parent = np.abs(w_parent)**2
     w_child = self.get_subband(s)   
     if energy:
         w_child = np.abs(w_child)**2
     if group_type == 'parent_children' or group_type == 'children':
         w_child = np.sum(cat([w_child[self.ds_slices[i]][...,np.newaxis] 
                               for i in xrange(len(self.ds_slices))],
                               axis=self.int_dimension),axis=self.int_dimension)
         w_child_us = np.zeros(2*np.asarray(w_child.shape))
         for j in xrange(len(self.ds_slices)):
             w_child_us[self.ds_slices[j]] = w_child
         del w_child    
         if group_type == 'parent_children':             
             divisor = 2.0**self.int_dimension + 1
         else: #children only
             w_parent = 0
             divisor = 2.0**self.int_dimension    
     elif group_type == 'parent_child':
         w_child_us = w_child
         divisor = 2.0
     w_parent += w_child_us
     if average:
         w_parent /= divisor
     return w_parent    
Example #10
def __generate_linear_cols(T, depth, target, dim):
    """Carefully concatenates linear state transition matrix rows"""
    if depth == 1:
        return __generate_linear_row(T, depth, target, dim)
    return cat((__generate_linear_row(T, depth, target, dim),
                __generate_linear_cols(T, depth - 1, target + 1, dim)),
               axis=0)
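A hedged sketch of how the two recursive helpers above can be driven to build the block upper-triangular transition matrix of a linear state-space model (the top-level call and dt are assumptions, not part of the original):

import math
import numpy as np
from numpy import concatenate as cat

dt = 0.1   # assumed sample period
dim = 2    # e.g., x and y tracked jointly
F = __generate_linear_cols(dt, 3, 0, dim)
print(F.shape)  # (6, 6); the top block row is [I, dt*I, dt**2/2 * I]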
Example #11
    def add_input_key(self, cfg: GenomeConfig, k: int):
        """Extend the input-key list with the given key, and expand the corresponding weights."""
        # Update self.weight_xh_full if key never seen before
        if k not in self.input_keys_full:
            # Find the index to insert the key
            lst = [
                i + 1 for i in range(len(self.input_keys_full))
                if self.input_keys_full[i] < k
            ]  # List of indices
            i = lst[-1] if lst else 0  # Index to insert key in

            # Save key to list
            self.input_keys_full.insert(i, k)

            # Update weight_xh_full correspondingly by inserting random initialized tensor in correct position
            new_tensor = rnn.init(cfg, hid_dim=self.hid_dim, input_size=1)
            assert new_tensor.shape == (self.hid_dim, 1)
            self.weight_xh_full = cat((self.weight_xh_full[:, :i], new_tensor,
                                       self.weight_xh_full[:, i:]),
                                      axis=1)

        # Update input_keys (current key-set) analogously
        if k not in self.input_keys:
            lst = [
                i + 1 for i in range(len(self.input_keys))
                if self.input_keys[i] < k
            ]  # List of indices
            i = lst[-1] if lst else 0
            self.input_keys.insert(i, k)
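The column-insertion pattern used on weight_xh_full above, as a standalone numpy sketch (rnn.init and GenomeConfig are project-specific and not reproduced here):

import numpy as np
from numpy import concatenate as cat

W = np.zeros((3, 4))
col = np.ones((3, 1))
i = 2
W2 = cat((W[:, :i], col, W[:, i:]), axis=1)  # insert a column at index 2
print(W2.shape)  # (3, 5)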
Example #12
def cgpstudy():
    """
    Basic example of connecting the building blocks of a cGP study.
    
    This implements the simulation pipeline used in Gjuvsland et al. (2007) and
    Gjuvsland et al. (2011)
    
    :TODO: save and read into R
    :TODO: in R create classic lineplots for 3-locus GP mapping
    :TODO: in R use noia package to partition variance
    """
    from numpy import concatenate as cat

    gt = np.array(genotypes)
    par = cat([gt2par(list(g), hetpar, absvar) for g in gt])
    ph = cat([par2ph(p) for p in par])
Example #13
def generate_scmap(cfg, joint_id, coords, size):
    dist_thresh = cfg.pos_dist_thresh * cfg.global_scale
    num_joints = cfg.num_joints

    scmap = np.zeros(cat([size, arr([num_joints])]))

    dist_thresh_sq = dist_thresh ** 2

    width = size[1]
    height = size[0]

    for person_id in range(len(coords)):
        for k, j_id in enumerate(joint_id[person_id]):
            joint_pt = coords[person_id][k, :]
            j_x = np.asscalar(joint_pt[0])
            j_y = np.asscalar(joint_pt[1])
            
            # don't loop over entire heatmap, but just relevant locations
            min_x = int(round(max(j_x - dist_thresh - 1, 0)))
            max_x = int(round(min(j_x + dist_thresh + 1, width - 1)))
            min_y = int(round(max(j_y - dist_thresh - 1, 0)))
            max_y = int(round(min(j_y + dist_thresh + 1, height - 1)))

            for j in range(min_y, max_y + 1):  # range(height):
                pt_y = j
                for i in range(min_x, max_x + 1):  # range(width):
                    pt_x = i
                    dx = j_x - pt_x
                    dy = j_y - pt_y
                    dist = dx ** 2 + dy ** 2

                    if dist <= dist_thresh_sq:
                        scmap[j, i, j_id] = 1

    return scmap * 255
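The shape-building idiom shared by these scoremap examples, standalone (size is an array like [height, width]; arr and cat are the numpy array/concatenate aliases these snippets assume):

import numpy as np
from numpy import array as arr, concatenate as cat

size = arr([48, 64])  # [height, width] of the output map
num_joints = 17
scmap = np.zeros(cat([size, arr([num_joints])]))  # numpy accepts an integer array as a shape
print(scmap.shape)  # (48, 64, 17)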
Example #14
 def update(self, fore, back):
     template = self.template[:back.shape[0], :back.shape[1], :]
     if self.reset:
         id=torch.reshape(torch.arange(1,template.shape[0]*template.shape[1]+1), \
          (template.shape[0],template.shape[1]))
         self.slice.append(id)
         self.slice_volume = torch.stack(self.slice)
         self.max_id = torch.max(id)
         self.id_aligned = torch.cat([self.one.long(), id.view(-1)])
         self.id_len = torch.ones_like(self.id_aligned)
         self.reset = False
     else:
         flow_check, fore_warp = self.check(fore, back)
         id = torch.zeros_like(self.slice[-1])
         id[[fore_warp[:, :, 0],
             fore_warp[:, :, 1]]] = self.slice[-1] * flow_check.long()
         # self.id_len[id]+=1
         new = torch.where(id > 0, self.zero,
                           self.one).nonzero(as_tuple=True)
         id[new] = torch.arange(len(new[0])) + self.max_id + 1
         # self.cut(flow_check)
         # self.id_aligned=torch.cat([self.id_aligned,id[new]])
         # self.id_len=torch.cat([self.id_len,torch.ones_like(id[new])])
         self.max_id = torch.max(id)
         self.slice.append(id)
Example #15
    def __init__(self, data, embedding, layers):
        self.data = data
        self.embedding = embedding
        self.layers = []
        curr_length = data.shape[0]
        #Generate latent variables
        for i, (downsampling, latent_size, window, model,
                opt) in enumerate(layers):
            curr_length = curr_length // downsampling
            if (i < len(layers) - 1):
                next_window = layers[i + 1][2]
                offset = max(window // downsampling, next_window)
                padding = max(window // downsampling, next_window)
                latent_length = offset + curr_length + padding
            else:
                offset = window // downsampling
                padding = window // downsampling
                latent_length = offset + curr_length + padding

            if i == 0:
                pad = np.zeros((window, ), dtype=np.int32)
                self.data = np.concatenate((pad, data, pad))  # was np.cat, which numpy does not provide
            latent_var = LatentVar((latent_length, latent_size), offset=window)
            self.layers.append((offset, curr_length, latent_var, downsampling,
                                window, model, opt))
Example #16
    def batch_seq_pool(cls, seq_emb: Tensor, lengths: List[int]):
        """
        Concatenate the mean, max and last hidden representations of a batch of sequences.

        Parameters
        ----------
        seq_emb: Tensor
            Tensor of shape (bs, sequence length, dimension)
            This tensor represents the hidden states of the final layer of the encoder from a language model.
        lengths: List
            list of integers indicating the sequence lengths

        Returns
        -------
        Tensor
            Tensor of size (bs, 2400)
        """
        assert seq_emb.shape[0] == len(lengths), 'Number of elements in lengths should match the first dimension of seq_emb'

        # ignore information beyond the sequence length, which is padding
        embs = [seq_emb[i, :x, :] for i, x in enumerate(lengths)]

        # calculate the pooled features ignoring the padding
        features = [cat([emb.mean(axis=0), emb.max(axis=0), emb[-1,:]], axis=-1) for emb in embs]
        combined_features = stack(features)

        # check that the dimensionality of the document embedding is 3x the dimensionality of the
        # final hidden states of the encoder.
        assert combined_features.shape[-1] == (seq_emb.shape[-1] * 3)

        return combined_features
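A small usage sketch (hedged: it treats cat and stack as the numpy concatenate/stack aliases and calls the method as a plain function; in a torch setting the max call would return a (values, indices) pair and need adjusting):

import numpy as np
from numpy import concatenate as cat, stack

bs, max_len, d = 2, 5, 4
seq_emb = np.random.rand(bs, max_len, d)
lengths = [5, 3]  # the second sequence has two padded steps

pooled = batch_seq_pool(None, seq_emb, lengths)  # cls is unused in the body
print(pooled.shape)  # (2, 12) == (bs, 3 * d)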
Example #17
 def DWResidual(self):
     if 'matResidual' in locals():
         matResidual.mesh = self.mesh
     else:
         matResidual = discretization(self.coeff, self.mesh,
                                      self.enrichOrder)
     matGroup = matResidual.matGroup()
     A, B, BonQ, C, D, E, F, G, H, L, R = matGroup
     LHS = np.bmat([[A, -B, C], [BonQ, D, E]])
     RHS = cat((R, F))
     residual = np.zeros(self.numEle)
     numEnrich = self.numBasisFuncs + self.enrichOrder
     adjointGradState, adjointStateFace = self.separateSoln(
         self.adjointSoln)
     for i in np.arange(self.numEle):
         primalResidual = (LHS.dot(self.primalSoln) - RHS).A1
         uLength = self.numEle * numEnrich
         stepLength = i * numEnrich
         uDWR = primalResidual[stepLength:stepLength + numEnrich].dot(
             (1 - adjointGradState)[stepLength:stepLength + numEnrich])
         qDWR = primalResidual[uLength + stepLength:uLength +
                               stepLength + numEnrich]\
             .dot((1 - adjointGradState)[uLength + stepLength:uLength +
                                         stepLength + numEnrich])
         residual[i] = uDWR + qDWR
     # sort residual index
     residualIndex = np.argsort(np.abs(residual))
     # select top \theta% elements with the largest error
     theta = 0.15
     refineIndex = residualIndex[int(self.numEle *
                                     (1 - theta)):len(residual)] + 1
     return np.abs(np.sum(residual)), refineIndex
Example #18
    def solveAdjoint(self):
        """Solve the adjoint problem"""
        # solve in the enriched space
        _coeff = copy(self.coeff)
        _coeff.pOrder = _coeff.pOrder + 1
        if 'matAdjoint' in locals():
            matAdjoint.mesh = self.mesh
        else:
            matAdjoint = discretization(_coeff, self.mesh)
        matGroup = matAdjoint.matGroup()
        A, B, _, C, D, E, F, G, H, L, R = matGroup
        # add adjoint LHS conditions
        F = np.zeros(len(F))
        R[-1] = -boundaryCondition('adjoint')[1]
        # assemble global matrix LHS
        LHS = np.bmat([[A, -B, C], [B.T, D, E], [C.T, G, H]])
        sLHS = csr_matrix(LHS)
        RHS = cat((R, F, L))

        # solve in one shoot using GMRES
        def invRHS(vec):
            """Construct preconditioner"""
            matVec = spla.spsolve(sLHS, vec)
            return matVec

        n = len(RHS)
        preconditioner = spla.LinearOperator((n, n), invRHS)
        soln = spla.gmres(sLHS, RHS, M=preconditioner)[0]
        # soln = np.linalg.solve(LHS.T, RHS)
        self.adjointSoln = soln
Example #19
def xy_to_cxcy(boxes):
    '''
    Convert anchors from (x_min, y_min, x_max, y_max) form to (center_x, center_y, w, h) form.
    :param boxes: bounding boxes in boundary coordinates, an array of size (n_boxes, 4)
    :return: bounding boxes in center-size coordinates, an array of size (n_boxes, 4)
    '''
    return np.concatenate([(boxes[:, 2:] + boxes[:, :2]) / 2,  # center = (max + min) / 2
                           boxes[:, 2:] - boxes[:, :2]],       # size = max - min
                          axis=1)  # was np.cat, which numpy does not provide
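A quick check of the conversion (hedged sketch):

import numpy as np

boxes = np.array([[0., 0., 4., 2.]])  # one box in corner form
print(xy_to_cxcy(boxes))              # [[2. 1. 4. 2.]] -> center (2, 1), size 4 x 2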
Example #20
 def fit(self, train_x, train_y):
     self.train_y = train_y
     n, dim = train_x.shape
     self.w = np.zeros(dim + 1)
     ones = np.ones((n, 1))  # was np.ones(n, 1), an invalid call signature
     self.train_x = np.concatenate((train_x, ones), axis=-1)  # was np.cat(..., dim=-1)
     for i in range(self.max_iter):
         self._update_para()
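The bias-column trick used in fit, as a standalone sketch:

import numpy as np

X = np.arange(6, dtype=float).reshape(3, 2)
ones = np.ones((X.shape[0], 1))
Xb = np.concatenate((X, ones), axis=-1)  # append a bias column of ones
print(Xb.shape)  # (3, 3)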
Example #21
    def compute_target_part_scoremap_numpy(
        self, joint_id, coords, data_item, size, scale
    ):
        dist_thresh = float(self.cfg.pos_dist_thresh * scale)
        dist_thresh_sq = dist_thresh ** 2
        num_joints = self.cfg.num_joints

        scmap = np.zeros(cat([size, arr([num_joints])]))
        locref_size = cat([size, arr([num_joints * 2])])
        locref_mask = np.zeros(locref_size)
        locref_map = np.zeros(locref_size)

        width = size[1]
        height = size[0]
        grid = np.mgrid[:height, :width].transpose((1, 2, 0))

        for person_id in range(len(coords)):
            for k, j_id in enumerate(joint_id[person_id]):
                joint_pt = coords[person_id][k, :]
                j_x = np.asscalar(joint_pt[0])
                j_x_sm = round((j_x - self.half_stride) / self.stride)
                j_y = np.asscalar(joint_pt[1])
                j_y_sm = round((j_y - self.half_stride) / self.stride)
                min_x = round(max(j_x_sm - dist_thresh - 1, 0))
                max_x = round(min(j_x_sm + dist_thresh + 1, width - 1))
                min_y = round(max(j_y_sm - dist_thresh - 1, 0))
                max_y = round(min(j_y_sm + dist_thresh + 1, height - 1))
                x = grid.copy()[:, :, 1]
                y = grid.copy()[:, :, 0]
                dx = j_x - x * self.stride - self.half_stride
                dy = j_y - y * self.stride - self.half_stride
                dist = dx ** 2 + dy ** 2
                mask1 = dist <= dist_thresh_sq
                mask2 = (x >= min_x) & (x <= max_x)
                mask3 = (y >= min_y) & (y <= max_y)
                mask = mask1 & mask2 & mask3
                scmap[mask, j_id] = 1
                locref_mask[mask, j_id * 2 + 0] = 1
                locref_mask[mask, j_id * 2 + 1] = 1
                locref_map[mask, j_id * 2 + 0] = (dx * self.locref_scale)[mask]
                locref_map[mask, j_id * 2 + 1] = (dy * self.locref_scale)[mask]

        weights = scmap
        #weights = self.compute_scmap_weights(scmap.shape, joint_id, data_item)
        return scmap, weights, locref_map, locref_mask
Example #22
    def solvePrimal(self):
        """Solve the primal problem"""
        if 'matLocal' in locals():
            # if matLocal exists,
            # only change the mesh instead of initializing again
            matLocal.mesh = self.mesh
        else:
            matLocal = discretization(self.coeff, self.mesh)
        matGroup = matLocal.matGroup()
        A, B, _, C, D, E, F, G, H, L, R = matGroup
        # solve by exploiting the local global separation
        K = -cat((C.T, G), axis=1)\
            .dot(np.linalg.inv(np.bmat([[A, -B], [B.T, D]]))
                 .dot(cat((C, E)))) + H
        sK = csr_matrix(K)
        F_hat = np.array([L]).T - cat((C.T, G), axis=1)\
            .dot(np.linalg.inv(np.bmat([[A, -B], [B.T, D]])))\
            .dot(np.array([cat((R, F))]).T)

        def invRHS(vec):
            """Construct preconditioner"""
            matVec = spla.spsolve(sK, vec)
            return matVec

        n = len(F_hat)
        preconditioner = spla.LinearOperator((n, n), invRHS)
        stateFace = spla.gmres(sK, F_hat, M=preconditioner)[0]
        # stateFace = np.linalg.solve(K, F_hat)
        gradState = np.linalg.inv(np.asarray(np.bmat(
            [[A, -B], [B.T,
                       D]]))).dot(cat((R, F)) - cat((C, E)).dot(stateFace))
        self.primalSoln = cat((gradState, stateFace))
Example #23
File: basic.py Project: argju/cgptoolbox
def cgpstudy():
    """
    Basic example of connecting the building blocks of a cGP study.
    
    This top-level orchestration can be done in many different ways, depending 
    on personal preference and on the need for features such as storage, 
    caching, memory management or parallelization. Some examples are given in 
    :mod:`cgp.examples.hpc`.
    """
    from numpy import concatenate as cat

    gt = np.array(genotypes)
    par = cat([monogenicpar(g, hetpar=par0, absvar=absvar) for g in gt])
    ph = cat([par2ph(p) for p in par])
    agg = cat([ph2agg(p) for p in ph])
    
    summarize(gt, agg)
    plt.show()
Example #24
File: symplectic.py Project: punkdit/qupy
 def __call__(self, other):
     assert isinstance(other, CSSCode)
     assert other.n * 2 == self.n
     Lx, Lz, Hx, Tz, Hz, Tx, Gx, Gz = (other.Lx, other.Lz, other.Hx,
                                       other.Tz, other.Hz, other.Tx,
                                       other.Gx, other.Gz)
     assert Gx is None
     assert Gz is None
     A = self.A.transpose()
     LxLz = dot2(cat((Lx, Lz), axis=1), A)
     HxTz = dot2(cat((Hx, Tz), axis=1), A)
     TxHz = dot2(cat((Tx, Hz), axis=1), A)
     n = self.n // 2
     Lx, Lz = LxLz[:, :n], LxLz[:, n:]
     Hx, Tz = HxTz[:, :n], HxTz[:, n:]
     Tx, Hz = TxHz[:, :n], TxHz[:, n:]
     code = CSSCode(Lx=Lx, Lz=Lz, Hx=Hx, Tz=Tz, Hz=Hz, Tx=Tx)
     return code
Example #25
File: glue.py Project: punkdit/qupy
def glue_logops():

    m = argv.get("m", 4)
    n = argv.get("n", m + m + 1)
    dist = argv.get("dist", 3)
    N = argv.get("N", 2)
    M = argv.get("M", N)
    p = argv.get("p", 0.03)
    weight = argv.weight

    codes = []
    code = None
    for i in range(N):
        Hx, Hz = make_quantum(n, m, dist, weight)
        #Hx, Hz = make_surface()
        print("Hx, Hz:")
        print(shortstrx(Hx, Hz))
        c = CSSCode(Hx=Hx, Hz=Hz)
        codes.append(c)
        code = c if code is None else code + c

    A, B = codes
    code = A + B

    print(code)
    print("Lx, Lz:")
    print(shortstrx(code.Lx, code.Lz))
    print("Hx, Hz:")
    print(shortstrx(code.Hx, code.Hz))
    print()

    #Hx = cat((code.Lx, code.Hx))
    Hx = code.Hx
    Hz = cat((code.Lz, code.Hz))

    idxs = list(range(2 * n))
    idxs.sort(key=lambda i: (-code.Lz[:, i].sum(), ))
    Hx = Hx[:, idxs]
    Hz = Hz[:, idxs]

    print(shortstrx(Hx, Hz))

    i0 = argv.get("i0")
    i1 = argv.get("i1")
    if i0 is None:
        return


#    code = code.glue(0, n)
#    print(code)
#    print(shortstrx(code.Hx, code.Hz))

    Hx, Hz = glue1_quantum(Hx, Hz, i0, i1)
    print("Hx, Hz:")
    print(shortstrx(Hx, Hz))
Example #26
File: contour.py Project: hnmspirit/mlcb
def fig3():
    x = np.linspace(-4, 4, 50)
    y = np.linspace(-4, 4, 50)
    x, y = np.meshgrid(x, y)
    z = maximum(2 * x**2 + y**2 - x * y, abs(x) + 2 * abs(y))

    levels = cat((arange(0, 4, 1), arange(4, 16, 2)))
    plt.contour(x, y, z, levels=levels, cmap='jet')

    txt = r'$max(2x^2 + y^2 - xy, |x| + 2|y|)$'
    return txt
Example #27
File: contour.py Project: hnmspirit/mlcb
def fig5():
    x = np.linspace(0.001, 2, 50)
    y = np.linspace(0.001, 2, 50)
    x, y = np.meshgrid(x, y)
    z = x * log(x) + y * log(y)

    levels = cat((arange(-1, 0.2, 0.1), arange(0.2, 1, 0.2)))
    plt.contour(x, y, z, levels=levels, cmap='jet')

    txt = r'$xlog(x) + ylog(y)$'
    return txt
Example #28
File: contour.py Project: hnmspirit/mlcb
def fig2():
    x = np.linspace(-1, 1, 50)
    y = np.linspace(-1, 1, 50)
    x, y = np.meshgrid(x, y)
    z = x**2 + y**2

    levels = cat((arange(0, 0.1, 0.05), arange(0.1, 1, 0.15)))
    plt.contour(x, y, z, levels=levels, cmap='jet')

    txt = r'$x^2 + y^2$'
    return txt
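The level-list idiom shared by fig2, fig3 and fig5 above, standalone (assuming the from-numpy aliases these snippets rely on):

import numpy as np
from numpy import arange, concatenate as cat

levels = cat((arange(0, 0.1, 0.05), arange(0.1, 1, 0.15)))
print(levels)  # [0. 0.05 0.1 0.25 0.4 0.55 0.7 0.85]: dense near zero, coarser after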
Example #29
    def gaussian_scmap(self, joint_id, coords, data_item, size, scale):
        stride = self.cfg.stride
        #dist_thresh = float(self.cfg.pos_dist_thresh * scale)
        num_joints = self.cfg.num_joints
        half_stride = stride / 2
        scmap = np.zeros(cat([size, arr([num_joints])]))
        locref_size = cat([size, arr([num_joints * 2])])
        locref_mask = np.zeros(locref_size)
        locref_map = np.zeros(locref_size)

        width = size[1]
        height = size[0]
        dist_thresh = float((width + height) / 6)
        locref_scale = 1.0 / self.cfg.locref_stdev
        dist_thresh_sq = dist_thresh**2

        std = dist_thresh / 4
        # Grid of coordinates
        grid = np.mgrid[:height, :width].transpose((1, 2, 0))
        grid = grid * stride + half_stride
        for person_id in range(len(coords)):
            for k, j_id in enumerate(joint_id[person_id]):
                joint_pt = coords[person_id][k, :]
                j_x = np.asscalar(joint_pt[0])
                j_x_sm = round((j_x - half_stride) / stride)
                j_y = np.asscalar(joint_pt[1])
                j_y_sm = round((j_y - half_stride) / stride)
                # Distance between the joint point and each grid coordinate
                dist = np.linalg.norm(grid - (j_y, j_x), axis=2)**2
                scmap_j = np.exp(-dist / (2 * (std**2)))
                scmap[..., j_id] = scmap_j
                locref_mask[dist <= dist_thresh_sq, j_id * 2 + 0] = 1
                locref_mask[dist <= dist_thresh_sq, j_id * 2 + 1] = 1
                dx = j_x - grid.copy()[:, :, 1]
                dy = j_y - grid.copy()[:, :, 0]
                locref_map[..., j_id * 2 + 0] = dx * locref_scale
                locref_map[..., j_id * 2 + 1] = dy * locref_scale
        weights = self.compute_scmap_weights(scmap.shape, joint_id, data_item)
        return scmap, weights, locref_map, locref_mask
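The Gaussian bump that gaussian_scmap writes into each joint channel, isolated as a sketch (the grid construction is copied from the method; the joint location and std are made up):

import numpy as np

height, width, std = 8, 8, 2.0
grid = np.mgrid[:height, :width].transpose((1, 2, 0))
dist = np.linalg.norm(grid - (3.0, 4.0), axis=2) ** 2  # squared distance to the joint at (row 3, col 4)
heat = np.exp(-dist / (2 * std ** 2))
print(heat[3, 4])  # 1.0 exactly at the joint location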
Example #30
def preprocess(
    train_dir="data/in-hospital-mortality/train",
    test_dir="data/in-hospital-mortality/test",
    split=False,
):
    train_reader = InHospitalMortalityReader(
        dataset_dir=train_dir, listfile=f"{train_dir}/listfile.csv")
    test_reader = InHospitalMortalityReader(
        dataset_dir=test_dir, listfile=f"{test_dir}/listfile.csv")

    train_data = []
    test_data = []

    for i in range(train_reader.get_number_of_examples()):
        data = train_reader.read_example(i)
        index = np.array([[i] * data["X"].shape[0]]).T
        label = np.array([[data["y"]] * data["X"].shape[0]]).T
        tmp = np.concatenate((data["X"], label), axis=1)
        out = np.concatenate((index, tmp), axis=1)
        train_data.append(out)

    for j in range(test_reader.get_number_of_examples()):
        data = test_reader.read_example(j)
        index = np.array([[i + 1 + j] * data["X"].shape[0]]).T  # offset past the last train index (i + j collided with it)
        label = np.array([[data["y"]] * data["X"].shape[0]]).T
        tmp = np.concatenate((data["X"], label), axis=1)
        out = np.concatenate((index, tmp), axis=1)
        test_data.append(out)

    # Stack training data and testing data
    train_data = np.vstack(train_data)
    test_data = np.vstack(test_data)

    if split:
        # Create dataframe
        train_df = pd.DataFrame(train_data, index=None, columns=HEADERS)
        test_df = pd.DataFrame(test_data, index=None, columns=HEADERS)
        # Preprocess coma scales
        train_df = preprocess_coma_scales(train_df)
        test_df = preprocess_coma_scales(test_df)
        return train_df, test_df

    else:
        # Create dataframe
        all_data = np.concatenate((train_data, test_data))  # was np.cat(X); np.cat does not exist and X was undefined
        df = pd.DataFrame(all_data, index=None, columns=HEADERS)
        # Preprocess coma scales
        df = preprocess_coma_scales(df)
        return df
Example #31
    def bbox_transform_inv(cls, bbox, deltas):
        bbox_xywh = cls.xxyy2xywh(bbox)

        pred_ctr_x = deltas[:, 0] * bbox_xywh[:, 2] + bbox_xywh[:, 0]
        pred_ctr_y = deltas[:, 1] * bbox_xywh[:, 3] + bbox_xywh[:, 1]
        pred_w = np.exp(deltas[:, 2]) * bbox_xywh[:, 2]
        pred_h = np.exp(deltas[:, 3]) * bbox_xywh[:, 3]

        # The original mixed torch-style .view/dim= with numpy; rewritten as pure numpy
        pred_ctr_x = pred_ctr_x.reshape(-1, 1)
        pred_ctr_y = pred_ctr_y.reshape(-1, 1)
        pred_w = pred_w.reshape(-1, 1)
        pred_h = pred_h.reshape(-1, 1)

        pred_box = np.concatenate([pred_ctr_x, pred_ctr_y, pred_w, pred_h], axis=1)

        return cls.xywh2xxyy(pred_box)
Example #32
 def step(self, agents):
     """
     :param agents:
     :return:
     """
     for device, agent in zip(self.devices, agents):
         stage_state = self.state()
         device_state, available_action = device.get_state()
         state = np.concatenate([stage_state, device_state])  # was np.cat
         action = agent.choose_action(state=state,
                                      available_action=available_action)
         device.act(action)
         reward = device.get_reward()  # additional reward logic can be added here
         agent.learn(state=state,
                     available_action=available_action,
                     action=action,
                     reward=reward)
Example #33
def get_density(mu, var, pi, N=50, X_range=(0, 5), Y_range=(0, 5)):
    """ Get the mesh to compute the density on. """
    X = np.linspace(*X_range, N)
    Y = np.linspace(*Y_range, N)
    X, Y = np.meshgrid(X, Y)
    
    # get the design matrix
    points = np.concatenate([X.reshape(-1, 1), Y.reshape(-1, 1)], axis=1)  # was np.cat
    points = Variable(torch.from_numpy(points).float())
    
    # compute the densities under each mixture
    P = get_k_likelihoods(points, mu, var)

    # sum the densities to get mixture density
    Z = torch.sum(P, dim=0).data.numpy().reshape([N, N])
    
    return X, Y, Z
Example #34
    def run(self):
        trajectory = self.trajectory_generator.trajectory
        observation = self.sensor.observe(trajectory)
        time = self.time

        estimation = np.zeros(shape=(4, len(time)))
        estimation[:, 0] = cat((observation[:, 0], np.zeros(2)), axis=0)
        self.filter.init(estimation[:, 0])

        for k in range(1, len(time)):
            y = observation[:, k]
            self.filter.update(y)

            estimation[:, k] = self.filter.x

        self.estimation = estimation
        self.observation = observation
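The state-initialization line above pads the first observation with zero velocities; as a standalone sketch:

import numpy as np
from numpy import concatenate as cat

obs0 = np.array([1.0, 2.0])            # first position observation
x0 = cat((obs0, np.zeros(2)), axis=0)  # pad with zero velocities -> state of length 4
print(x0)  # [1. 2. 0. 0.]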
Example #35
def feature_extraction(Z, N):  # function Descriptors=feature_extraction(Z,N)
    #
    #% The features are the magnitude of the Zernike moments.
    #% Collect the moments into (2*l+1)-dimensional vectors
    #% and define the rotationally invariant 3D Zernike descriptors
    #% Fnl as norms of vectors Znl.
    #
    F = zeros((N + 1, N + 1)) + NaN  # F=zeros(N+1,N+1)+NaN;
    #% Calculate the magnitude
    for n in xrange(N + 1):  # Warn: xrange                              #for n=0:N
        for l in xrange(n + 1):  # Warn: xrange                          #    for l=0:n
            if mod(n - l, 2) == 0:  #        if mod(n-l,2)==0
                aux_1 = Z[
                    n, l, 0 : l + 1
                ]  # Warn: 0-1                       #            aux_1=Z(n+1,l+1,1:l+1); <----- need to keep the trailing l+1 for slicing
                #            aux_1=aux_1(:)';
                if l > 0:  #            if l>0
                    #                % Taking the values for m<0
                    aux_2 = conj(
                        aux_1[1 : l + 1]
                    )  # Warn: 0-1             #                aux_2=conj(aux_1(2:(l+1))); <---- Trailing +1 again
                    for m in xrange(l):  #                for m=1:l
                        aux_2[m] = aux_2[m] * power(
                            -1, m + 1
                        )  # Warn: 0-1  #                    aux_2(m)=aux_2(m)*power(-1,m);
                        #                end
                        #                % Sorting the vector
                    aux_2 = flipud(aux_2)  #                 aux_2=rot90(aux_2,2);
                    aux_1 = cat((aux_2, aux_1), axis=0)  # 1-D arrays: MATLAB cat(2,...) maps to axis=0 here   #                aux_1=cat(2,aux_2,aux_1);
                    #            end
                F[n, l] = norm(aux_1, ord=2)  # Warn: 0-1 axis            #            F(n+1,l+1)=norm(aux_1,2);
                #        end
                #    end
                # end
                #% Generate vector of Zernike descriptors
                #% Column vector that store the magnitude
                #% of the Zernike moments for n,l.

    F = transpose(F)  # Warn: Matlab col-major
    idx = F >= 0  # idx=find(F>=0);
    return F[idx]
Example #36
    def compute_targets_and_weights(self, joint_id, coords, data_item, size, scale, batch):
        stride = self.cfg.stride
        dist_thresh = self.cfg.pos_dist_thresh * scale
        num_joints = self.cfg.num_joints
        half_stride = stride / 2
        scmap = np.zeros(cat([size, arr([num_joints])]))

        locref_shape = cat([size, arr([num_joints * 2])])
        locref_mask = np.zeros(locref_shape)
        locref_map = np.zeros(locref_shape)

        pairwise_shape = cat([size, arr([num_joints * (num_joints - 1) * 2])])
        pairwise_mask = np.zeros(pairwise_shape)
        pairwise_map = np.zeros(pairwise_shape)

        dist_thresh_sq = dist_thresh ** 2

        width = size[1]
        height = size[0]

        for person_id in range(len(coords)):
            for k, j_id in enumerate(joint_id[person_id]):
                joint_pt = coords[person_id][k, :]
                j_x = np.asscalar(joint_pt[0])
                j_y = np.asscalar(joint_pt[1])

                # don't loop over entire heatmap, but just relevant locations
                j_x_sm = round((j_x - half_stride) / stride)
                j_y_sm = round((j_y - half_stride) / stride)
                min_x = round(max(j_x_sm - dist_thresh - 1, 0))
                max_x = round(min(j_x_sm + dist_thresh + 1, width - 1))
                min_y = round(max(j_y_sm - dist_thresh - 1, 0))
                max_y = round(min(j_y_sm + dist_thresh + 1, height - 1))

                for j in range(min_y, max_y + 1):  # range(height):
                    pt_y = j * stride + half_stride
                    for i in range(min_x, max_x + 1):  # range(width):
                        # pt = arr([i*stride+half_stride, j*stride+half_stride])
                        # diff = joint_pt - pt
                        # The code above is too slow in python
                        pt_x = i * stride + half_stride
                        dx = j_x - pt_x
                        dy = j_y - pt_y
                        dist = dx ** 2 + dy ** 2
                        # print(la.norm(diff))

                        if dist <= dist_thresh_sq:
                            locref_scale = 1.0 / self.cfg.locref_stdev
                            current_normalized_dist = dist * locref_scale ** 2
                            prev_normalized_dist = locref_map[j, i, j_id * 2 + 0] ** 2 + \
                                                   locref_map[j, i, j_id * 2 + 1] ** 2
                            update_scores = (scmap[j, i, j_id] == 0) or prev_normalized_dist > current_normalized_dist
                            if self.cfg.location_refinement and update_scores:
                                self.set_locref(locref_map, locref_mask, locref_scale, i, j, j_id, dx, dy)
                            if self.cfg.pairwise_predict and update_scores:
                                for k_end, j_id_end in enumerate(joint_id[person_id]):
                                    if k != k_end:
                                        self.set_pairwise_map(pairwise_map, pairwise_mask, i, j, j_id, j_id_end,
                                                              coords, pt_x, pt_y, person_id, k_end)
                            scmap[j, i, j_id] = 1

        scmap_weights = self.compute_scmap_weights(scmap.shape, joint_id, data_item)

        # Update batch
        batch.update({
            Batch.part_score_targets: scmap,
            Batch.part_score_weights: scmap_weights
        })
        if self.cfg.location_refinement:
            batch.update({
                Batch.locref_targets: locref_map,
                Batch.locref_mask: locref_mask
            })
        if self.cfg.pairwise_predict:
            batch.update({
                Batch.pairwise_targets: pairwise_map,
                Batch.pairwise_mask: pairwise_mask
            })

        return batch