Example #1
    def random_choose_abc(self):
        i = tf.range(self.NP)
        a = tf.random.uniform([self.NP],
                              maxval=self.NP - 1,
                              dtype=tf.dtypes.int32)
        b = tf.random.uniform([self.NP],
                              maxval=self.NP - 2,
                              dtype=tf.dtypes.int32)
        c = tf.random.uniform([self.NP],
                              maxval=self.NP - 3,
                              dtype=tf.dtypes.int32)

        a += tf.cast(a >= i, tf.dtypes.int32)

        ia = tf.sort([i, a], 0)

        for last in ia:
            b += tf.cast(b >= last, tf.dtypes.int32)

        iab = tf.sort([i, a, b], 0)

        for last in iab:
            c += tf.cast(c >= last, tf.dtypes.int32)

        return a, b, c
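A minimal standalone NumPy sketch of the same index-shifting trick (hypothetical helper, with NP passed in explicitly): each index is drawn from a shrunken range and then shifted past the already-chosen indices, so a, b, c and i end up pairwise distinct.

import numpy as np

def random_choose_abc_np(NP, rng=None):
    # NumPy analogue of random_choose_abc above; NP is the population size.
    if rng is None:
        rng = np.random.default_rng()
    i = np.arange(NP)
    a = rng.integers(0, NP - 1, size=NP)
    b = rng.integers(0, NP - 2, size=NP)
    c = rng.integers(0, NP - 3, size=NP)

    # Shift a past i, so that a != i.
    a += (a >= i).astype(a.dtype)

    # Shift b past i and a (processed per position in sorted order).
    for last in np.sort(np.stack([i, a]), axis=0):
        b += (b >= last).astype(b.dtype)

    # Shift c past i, a and b.
    for last in np.sort(np.stack([i, a, b]), axis=0):
        c += (c >= last).astype(c.dtype)

    return a, b, c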
    def update_sparse_weights(self, policy_gradients, task_name):
        for i in policy_gradients:
            gradient = policy_gradients[i]
            new_param = self._default_policy_params[task_name][i] + (
                np.abs(gradient)**2)
            new_mask = np.cast[np.int32](
                self._default_policy_params[task_name][i] >= 0)

            self._default_policy_grad[task_name][i] += gradient
            new_param, new_mask = self.reactivate_random(new_mask, new_param)
            self._default_policy_params[task_name][i] = new_param
            self._default_policy_masks[task_name][i] = new_mask

        for task in self._default_policy_masks:
            if task == task_name:
                continue  # skip

            other_policy_masks = self._default_policy_masks[task]
            other_policy_params = self._default_policy_params[task]
            other_policy_grad = self._default_policy_grad[task]
            for i in policy_gradients:
                gradient = policy_gradients[i]
                sign = 2 * np.cast[np.int32](
                    (gradient * other_policy_grad[i]) > 0) - 1
                new_param = other_policy_params[i] + sign * (np.abs(gradient)**2)
                new_mask = np.cast[np.int32](new_param >= 0)

                other_policy_grad[i] += gradient
                new_param, new_mask = self.reactivate_random(
                    new_mask, new_param)
                other_policy_masks[i] = new_mask
                other_policy_params[i] = new_param
def warp_keypoints_to_map(packed_arg):
    """
    Warp a map of keypoints (1 at keypoint locations and 0 elsewhere) with
    the INVERSE of the homography H.
    The inverse is used to be coherent with tf.contrib.image.transform
    Arguments:
        packed_arg: a tuple equal to (keypoints_map, H)
    Returns: a map of keypoints of the same size as the original keypoint_map.
    """
    warped_keypoints = np.int32(warp_keypoints_to_list(packed_arg))
    n_keypoints = np.shape(warped_keypoints)[0]
    shape = np.shape(packed_arg[0])

    # Remove points outside the image
    zeros = np.zeros([n_keypoints], dtype=bool)
    ones = np.ones([n_keypoints], dtype=bool)
    loc = np.logical_and(
        np.where(warped_keypoints[:, 0] >= 0, ones, zeros),
        np.where(warped_keypoints[:, 0] < shape[0], ones, zeros))
    loc = np.logical_and(loc, np.where(warped_keypoints[:, 1] >= 0, ones,
                                       zeros))
    loc = np.logical_and(
        loc, np.where(warped_keypoints[:, 1] < shape[1], ones, zeros))

    warped_keypoints = warped_keypoints[loc]  # keep only in-bounds keypoints

    # Output the new map of keypoints
    # Equivalent of scatter_nd: write a 1 at every surviving keypoint location.
    new_map = np.zeros(shape, dtype=np.float32)
    new_map[warped_keypoints[:, 0], warped_keypoints[:, 1]] = 1.
    return new_map
Example #4
    def _match(self, similarity_matrix, num_valid_rows=-1):
        """Bipartite matches a collection rows and columns. A greedy bi-partite.

    TODO(rathodv): Add num_valid_columns options to match only that many columns
    with all the rows.

    Args:
      similarity_matrix: Float tensor of shape [N, M] with pairwise similarity
        where higher values mean more similar.
      num_valid_rows: A scalar or a 1-D tensor with one element describing the
        number of valid rows of similarity_matrix to consider for the bipartite
        matching. If set to be negative, then all rows from similarity_matrix
        are used.

    Returns:
      match_results: int32 tensor of shape [M] with match_results[i]=-1
        meaning that column i is not matched and otherwise that it is matched to
        row match_results[i].
    """
        # Convert the similarity matrix to a distance matrix, since
        # image_ops.bipartite_match finds minimum-distance matches.
        distance_matrix = -1 * similarity_matrix
        _, match_results = image_ops.bipartite_match(distance_matrix,
                                                     num_valid_rows)
        match_results = np.reshape(match_results, [-1])
        match_results = np.cast[np.int32](match_results)
        return match_results
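Since image_ops.bipartite_match is a TensorFlow-internal op, a quick sanity check of the result format can be done with scipy.optimize.linear_sum_assignment instead (a hedged stand-in: it solves the assignment optimally rather than greedily, so ties may resolve differently):

import numpy as np
from scipy.optimize import linear_sum_assignment

similarity = np.array([[0.9, 0.2, 0.4],
                       [0.1, 0.8, 0.3]])               # 2 rows, 3 columns
row_ind, col_ind = linear_sum_assignment(-similarity)  # minimise distance

match_results = np.full(similarity.shape[1], -1, dtype=np.int32)
match_results[col_ind] = row_ind
print(match_results)  # [ 0  1 -1]: column 2 stays unmatched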
Example #5
def preprocess_image(image_path):
    image = Image.open(image_path)
    image = np.array(image)
    image = np.cast[np.float32](image)
    # image = image - mean_pixel
    image = np.expand_dims(image, axis=0)
    return image
def dense_image_warp(image, flow):
    # batch_size, height, width, channels = (array_ops.shape(image)[0],
    #                                        array_ops.shape(image)[1],
    #                                        array_ops.shape(image)[2],
    #                                        array_ops.shape(image)[3])
    batch_size, height, width, channels = (np.shape(image)[0],
                                           np.shape(image)[1],
                                           np.shape(image)[2],
                                           np.shape(image)[3])

    # The flow is defined on the image grid. Turn the flow into a list of query
    # points in the grid space.
    # grid_x, grid_y = array_ops.meshgrid(
    #     math_ops.range(width), math_ops.range(height))
    # stacked_grid = math_ops.cast(
    #     array_ops.stack([grid_y, grid_x], axis=2), flow.dtype)
    # batched_grid = array_ops.expand_dims(stacked_grid, axis=0)
    # query_points_on_grid = batched_grid - flow
    # query_points_flattened = array_ops.reshape(query_points_on_grid,
    #                                            [batch_size, height * width, 2])
    grid_x, grid_y = np.meshgrid(np.arange(width), np.arange(height))
    stacked_grid = np.stack([grid_y, grid_x], axis=2).astype(flow.dtype)
    batched_grid = np.expand_dims(stacked_grid, axis=0)
    query_points_on_grid = batched_grid - flow
    query_points_flattened = np.reshape(query_points_on_grid,
                                        [batch_size, height * width, 2])
    # Compute values at the query points, then reshape the result back to the
    # image grid.
    interpolated = interp2d(image, query_points_flattened)
    interpolated = np.reshape(interpolated,
                              [batch_size, height, width, channels])
    return interpolated
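The port above assumes an interp2d(image, query_points) helper that is not shown. A minimal bilinear sketch of what it would have to do (hypothetical; image of shape (B, H, W, C), query points in (y, x) order as produced by the stacked grid):

import numpy as np

def interp2d(image, query_points):
    # Bilinear sampling: image (B, H, W, C), query_points (B, N, 2) as (y, x).
    B, H, W, C = image.shape
    y = np.clip(query_points[..., 0], 0.0, H - 1.0)
    x = np.clip(query_points[..., 1], 0.0, W - 1.0)
    y0 = np.floor(y).astype(np.int32)
    x0 = np.floor(x).astype(np.int32)
    y1 = np.minimum(y0 + 1, H - 1)
    x1 = np.minimum(x0 + 1, W - 1)
    wy = (y - y0)[..., None]
    wx = (x - x0)[..., None]
    b = np.arange(B)[:, None]
    top = image[b, y0, x0] * (1 - wx) + image[b, y0, x1] * wx
    bottom = image[b, y1, x0] * (1 - wx) + image[b, y1, x1] * wx
    return top * (1 - wy) + bottom * wy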
Example #7
def test():
    dataset = tfds.load(name=dataset_name, split=tfds.Split.TRAIN)
    dataset = dataset.shuffle(1024).batch(1).prefetch(tf.data.experimental.AUTOTUNE)
    it = dataset.make_one_shot_iterator()
    next_op = it.get_next()

    saver = tf.train.import_meta_graph('models/' + dataset_name + '.meta')
    sess = tf.Session()
    saver.restore(sess, tf.train.latest_checkpoint('models'))

    inputs = sess.graph.get_tensor_by_name('input:0')
    outputs = sess.graph.get_tensor_by_name('pred_labels:0')
    total = 0
    correct = 0
    while True:
        try:
            features = sess.run(next_op)
            images, labels = features['image']/255., features['label']
            pred_labels = sess.run(outputs, feed_dict={inputs: images})
            total += labels.shape[0]
            correct += np.sum(np.cast[np.float32](pred_labels == labels))

        except tf.errors.OutOfRangeError:  # dataset exhausted
            print('accuracy:', correct / total)
            break
def warp_points(points, homography):
    """
    Warp a list of points with the INVERSE of the given homography.
    The inverse is used to be coherent with tf.contrib.image.transform
    Arguments:
        points: list of N points, shape (N, 2).
        homography: batched or not (shapes (B, 8) and (8,) respectively).
    Returns: a Tensor of shape (N, 2) or (B, N, 2) (depending on whether the homography
            is batched) containing the new coordinates of the warped points.
    """
    H = np.expand_dims(homography, axis=0) if len(
        homography.shape) == 1 else homography

    # Get the points to the homogeneous format
    num_points = np.shape(points)[0]
    points = np.cast[np.float32](points)[:, ::-1]
    points = np.concatenate(
        [points, np.ones([num_points, 1]).astype(np.float32)], -1)

    # Apply the homography
    H_inv = np.transpose(flat2mat(invert_homography(H)))
    warped_points = np.tensordot(points, H_inv, [[1], [0]])
    warped_points = warped_points[:, :2, :] / warped_points[:, 2:, :]
    warped_points = np.transpose(warped_points, [2, 0, 1])[:, :, ::-1]

    return warped_points[0] if len(homography.shape) == 1 else warped_points
Example #9
def adjust_dynamic_range(images, range_in, range_out, out_dtype):
    scale = (range_out[1] - range_out[0]) / (range_in[1] - range_in[0])
    bias = range_out[0] - range_in[0] * scale
    images = np.array(images).astype('float32')
    images = images * scale + bias

    images = np.clip(images, range_out[0], range_out[1])
    images = np.cast[out_dtype](images)
    return images
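A quick usage sketch with made-up values, mapping an 8-bit image from [0, 255] to [-1, 1]:

import numpy as np

img_uint8 = np.array([[0, 128, 255]], dtype=np.uint8)
img = adjust_dynamic_range(img_uint8, range_in=(0, 255), range_out=(-1.0, 1.0),
                           out_dtype=np.float32)
print(img)  # approximately [[-1.  0.0039216  1.]]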
Example #10
 def log_likelihood(self, xs, dist_info):
     probs = dist_info["probs"]
     # Assume layout is N * T * A
     a_dim = probs.shape[-1]
     probs = probs.reshape((-1, a_dim))
     xs = xs.reshape((-1, 1))
     xs_oh = np.zeros((xs.shape[0], self.dim))
     xs_oh[np.arange(xs_oh.shape[0]), xs.astype(np.int32)[:, 0]] = 1.0
     xs = xs_oh
     flat_logli = np.log(np.sum(probs * np.cast['float32'](xs), axis=-1) + TINY)
     return flat_logli.reshape(dist_info["probs"].shape[:2])
Example #11
	def injectFaultCast(a, b = None):
		"Inject a fault into a Cast instruction"
		logging.debug("Calling Operator Cast " + getArgs(a, b))
		# If we're given 2 parameters, treat it as the default case
		if b is not None:
			res = np.cast[b](a)
		else:
			# Call the function for this type with 'a'
			res = castInto(a)
		res = condPerturb(Ops.CAST, res)
		if logReturn: logging.debug("\tReturning " + str(res) )
		return res
Example #12
    def _set_values_using_indicator(self, x, indicator, val):
        """Set the indicated fields of x to val.

    Args:
      x: tensor.
      indicator: boolean with same shape as x.
      val: scalar with value to set.

    Returns:
      modified tensor.
    """
        indicator = np.cast[x.dtype](indicator)
        return np.add(np.multiply(x, 1 - indicator), val * indicator)
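A quick standalone check of that masking identity on made-up values: entries where the indicator is set are replaced by val, the rest are left unchanged.

import numpy as np

x = np.array([3., 7., 1., 9.], dtype=np.float32)
indicator = np.array([0., 1., 0., 1.], dtype=np.float32)
val = -1.0
print(np.add(np.multiply(x, 1 - indicator), val * indicator))  # [ 3. -1.  1. -1.]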
Example #13
def calculate_accuracy(output_predictions, labels):
    std_SAME_labels = []
    for i in range(len(labels)):
        std_SAME_labels.append([0, 1.])
    std_SAME_labels = np.array(std_SAME_labels)

    # SAME accuracy
    SAME_labels = np.equal(np.argmax(std_SAME_labels, 1),
                           np.argmax(labels, 1))  # True where the label is 1
    SAME_prediction = np.equal(np.argmax(std_SAME_labels, 1),
                               np.argmax(output_predictions,
                                         1))  # True where the output is 1
    accuracy_rate = np.sum(
        np.cast[np.float32](np.logical_and(SAME_labels, SAME_prediction))) / (
            1.0 * np.sum(np.cast[np.float32](SAME_labels)))

    # Recall rate (wrong ones counted as correct)
    DIFF_labels = np.equal(np.argmin(std_SAME_labels, 1),
                           np.argmax(labels, 1))  # True where the label is 0
    callback_rate = np.sum(
        np.cast[np.float32](np.logical_and(DIFF_labels, SAME_prediction))) / (
            1.0 * np.sum(np.cast[np.float32](DIFF_labels)))

    return accuracy_rate, callback_rate
Example #14
        def _match_when_rows_are_non_empty():
            """Performs matching when the rows of similarity matrix are non empty.

      Returns:
        matches:  int32 tensor indicating the row each column matches to.
      """
            # Matches for each column
            matches = np.argmax(similarity_matrix, 0).astype(np.int32)

            # Deal with matched and unmatched threshold
            if self._matched_threshold is not None:
                # Get logical indices of ignored and unmatched columns as np.int64
                matched_vals = np.max(similarity_matrix, 0)
                below_unmatched_threshold = np.greater(
                    self._unmatched_threshold, matched_vals)
                between_thresholds = np.logical_and(
                    np.greater_equal(matched_vals, self._unmatched_threshold),
                    np.greater(self._matched_threshold, matched_vals))

                if self._negatives_lower_than_unmatched:
                    matches = self._set_values_using_indicator(
                        matches, below_unmatched_threshold, -1)
                    matches = self._set_values_using_indicator(
                        matches, between_thresholds, -2)
                else:
                    matches = self._set_values_using_indicator(
                        matches, below_unmatched_threshold, -2)
                    matches = self._set_values_using_indicator(
                        matches, between_thresholds, -1)

            if self._force_match_for_each_row:
                similarity_matrix_shape = shape_utils.combined_static_and_dynamic_shape(
                    similarity_matrix)
                force_match_column_ids = np.argmax(similarity_matrix, 1)

                # NumPy has no one_hot; build the one-hot indicators with np.eye.
                force_match_column_indicators = np.eye(
                    similarity_matrix_shape[1])[force_match_column_ids]

                force_match_row_ids = np.argmax(force_match_column_indicators,
                                                0)

                force_match_column_mask = np.cast[bool](
                    np.max(force_match_column_indicators, 0))
                final_matches = np.where(force_match_column_mask,
                                         force_match_row_ids, matches)
                return final_matches
            else:
                return matches
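A toy walk-through of the force-match step on an assumed 3x4 similarity matrix: every row claims its best column, and those columns are overwritten with the id of the row that forced them.

import numpy as np

similarity_matrix = np.array([[0.9, 0.1, 0.3, 0.2],
                              [0.2, 0.8, 0.1, 0.1],
                              [0.1, 0.2, 0.7, 0.6]])
matches = np.argmax(similarity_matrix, 0).astype(np.int32)  # column -> row
force_match_column_ids = np.argmax(similarity_matrix, 1)    # row -> column
indicators = np.eye(similarity_matrix.shape[1])[force_match_column_ids]
force_match_row_ids = np.argmax(indicators, 0)
force_match_column_mask = np.max(indicators, 0).astype(bool)
final_matches = np.where(force_match_column_mask, force_match_row_ids, matches)
print(final_matches)  # [0 1 2 2]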
Example #15
def preprocessing(sample):

    image = sample['image']

    bbox = swap_xy(sample["object"]["bbox"])

    class_id = np.cast[np.int32](sample["object"]["label"])

    image_flip = flip_horizontal(image, bbox)

    image, image_shape, _ = resize_and_pad(image)

    bbox = np.stack([
        bbox[:,0] * image_shape[1],
        bbox[:,1] * image_shape[0],
        bbox[:,2] * image_shape[1], 
        bbox[:,3] * image_shape[0]],axis = -1)

    bbox = convert_to_xywh(bbox)

    return bbox, image, class_id
Example #16
def segmentation_overlay_summary(name,
                                 img,
                                 segmentation,
                                 alpha=0.5,
                                 gamma_factor=2.2,
                                 color=[1.0, 0.0, 0.0]):
    with tf.name_scope(name):
        minv = tf.reduce_min(img, axis=[1, 2, 3], keepdims=True)
        maxv = tf.reduce_max(img, axis=[1, 2, 3], keepdims=True)
        img = (img - minv) / (maxv - minv)
        img = tf.concat(3 * [img], axis=-1)
        color = np.cast['float32'](color)
        color /= np.sum(color)
        color = np.reshape(color, [1, 1, 1, 3])
        color = tf.convert_to_tensor(color)

        img_rgb_pow = img**gamma_factor

        out_rgb_pow = color * alpha * segmentation + img_rgb_pow * (
            1. - alpha * segmentation)
        out_rgb = out_rgb_pow**(1. / gamma_factor)
        tf.summary.image(name, out_rgb)
Example #17
 def __array__(self, dtype=None):
     if dtype:
         return np.asarray(self.cropped, dtype=dtype)
     else:
         return self.cropped
Example #18
def compute_mean_embedding(inputs):
    not_pad = tf.math.count_nonzero(inputs, axis=-1)
    n_words = tf.math.count_nonzero(not_pad, axis=-1, keepdims=True)
    sqrt_n_words = tf.math.sqrt(tf.cast(n_words, tf.float32))
    return tf.reduce_mean(inputs, axis=1) * sqrt_n_words
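A hedged usage sketch (made-up embeddings, one all-zero padding row): the mean is taken over the full padded axis and then rescaled by the square root of the number of non-pad tokens.

import tensorflow as tf

inputs = tf.constant([[[1.0, 2.0],
                       [3.0, 4.0],
                       [5.0, 6.0],
                       [0.0, 0.0]]])    # (batch=1, tokens=4, dim=2)
print(compute_mean_embedding(inputs))   # mean over the 4 rows, scaled by sqrt(3)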
Example #19
 def _reshape_and_cast(self, t):
     return np.cast[np.int32](np.reshape(t, [-1]))
Example #20
 def random_choose_axis(self, X):
     random = tf.random.uniform(X.shape)
     random2 = tf.random.uniform(X.shape)
     maximum = tf.reduce_max(random, axis=1, keepdims=True)
     return tf.cast(tf.logical_or(random == maximum, random2 < self.CR),
                    tf.dtypes.float32)
Example #21
def cast32(x):
    return np.cast['float32'](x)
Example #22
def _fit_ridge_alpha(trn_fs,trn_data,val_fs,val_data,alphas=DEFAULT_ALPHAS,
    chunk_sz=5000,is_efficient=True,dtype=np.single, is_verbose=False, pthr=0.005,
    square_alpha=False,return_resids=False):
    """Get prediction correlations for a set of alphas on val_data, without ever computing weights on trn_fs

    Uses ridge regression to find a linear transformation of `trn_fs` that approximates `trn_data`.
    Then tests by comparing the transformation of `val_fs` to `val_data`. This procedure is repeated
    for each regularization parameter (alpha) in `alphas`. The correlation between each prediction and
    each response for each alpha is returned. Note that the regression weights are NOT returned.
    
    This is more efficient than full ridge regression (with weight computation); it is meant to be 
    used inside other ridge functions (after data has been split into bootstrap / cross-validation 
    splits) to find optimal alpha values. 
    

    Parameters
    ----------
    trn_fs : array_like, shape (TR, N)
        Training stimuli with TR time points and N features. Each feature should be Z-scored across time.
    trn_data : array_like, shape (TR, M)
        Training responses with TR time points and M responses (voxels, neurons, what-have-you).
        Each response should be Z-scored across time.
    val_fs : array_like, shape (TP, N)
        Test stimuli with TP time points and N features. Each feature should be Z-scored across time.
    val_data : array_like, shape (TP, M)
        Test responses with TP time points and M responses.
    alphas : list or array_like, shape (A,)
        Ridge parameters to be tested. Should probably be log-spaced. np.logspace(0, 3, 20) works well.
    normalpha : boolean
        Whether ridge parameters should be normalized by the Frobenius norm of trn_fs. Good for rigorously
        comparing models with different numbers of parameters.
    dtype : np.dtype
        All data will be cast as this dtype for computation. np.single is used by default for memory
        efficiency.
    singcutoff : float [WIP: not implemented yet]
        The first step in ridge regression is computing the singular value decomposition (SVD) of the
        stimulus trn_fs. If trn_fs is not full rank, some singular values will be approximately equal
        to zero and the corresponding singular vectors will be noise. These singular values/vectors
        should be removed both for speed (the fewer multiplications the better!) and accuracy. Any
        singular values less than singcutoff will be removed.

    Returns
    -------
    trn_corrs : array_like, shape (A, M)
        The correlation between each predicted response and each column of val_data for each alpha.
    
    """    
    n_tps,n_voxels = trn_data.shape
    n_chunks = np.ceil(n_voxels/float(chunk_sz)).astype(np.int32)
    cc = np.zeros((n_voxels,len(alphas)),dtype=dtype)
    if return_resids:
        resids = np.zeros((n_tps,n_voxels,len(alphas)),dtype=dtype)
    pred_A = []
    if is_efficient:
        # Efficient Ridge regression from A. Huth, Part (1):
        # Full multiplication for validation (here, random split of
        # training data) prediction is: 
        # pred = (Xval*Vx) * Dx * (pinv(Ux)*Ychunk)   # NOTE: pinv(Ux) = Ux'
        # We will pre-compute the first and third terms in parentheses:
        # pred =   XvalVx  * Dx *  UxYchunk
        if is_verbose: 
            print('->Doing SVD of stimulus design matrix')
            t0 = time.time()
            #time.sleep(.01); # To ensure printing?
        m,n = trn_fs.shape
        if m>n:
            Ux,Sx,Vx = _utils._svd(trn_fs,full_matrices=False)
        else:
            Vx,Sx,Ux = _utils._svd(trn_fs.T,full_matrices=False)
            # Switcheroo of Vx and Ux due to transpose of input matrix
            Ux = Ux.T
            Vx = Vx.T

        if is_verbose:
            t1 = time.time()
            print('->Done with SVD in %0.2f sec'%(t1-t0))
        # For more efficient computation:
        #k = len(Sx) 
        ## OR: 
        ## singcutoff = (XX);
        ## k = sum(sx > singcutoff);
        ## sx = sx(1:k);
        XvalVx = val_fs.dot(Vx.T) # NOTE: IN MATLAB, No Vx', because Matlab leaves V in transposed form!
    else:
        raise NotImplementedError("Sorry, not done yet!")

    for iChunk in range(n_chunks):
        print('Running chunk %d of %d...\n'%(iChunk+1,n_chunks))
        ChIdx = np.arange(chunk_sz) + chunk_sz*iChunk
        ChIdx = ChIdx[ChIdx<n_voxels] # clip extra voxels in last run.
        Ychunk = trn_data[:,ChIdx]

        # Fit model with all lambdas (for subset of voxels)
        if not is_efficient:
            raise Exception('LAME! no slow reliable ridge implemented.')
            #[Wt L] = ridgemulti(X,Ychunk,params.lambdas);
        else:
            # Efficient Ridge regression from A. Huth, part (2)
            # NOTE: weights are never explicitly computed!
            UxYchunk = Ux.T.dot(Ychunk)
        
        if is_verbose:
            print('Checking model predictions...')
        for iA,A in enumerate(alphas):
            if not is_efficient:
                pred = np.cast[np.single](Xval.dot(Wt[:,:,iA]))
            else:
                # Efficient Ridge regression from A. Huth, part (3)
                # Normalize lambda by Frobenius norm for stim matrix
                aX = A # * norm(X,'fro'); # ... or not
                # Need to decide for final whether aX**2 or not
                if square_alpha:
                    Dx = Sx/(Sx**2 + aX**2) 
                else:
                    Dx = Sx/(Sx**2 + aX) 
                # Compute predictions (XvalVx and UxYchunk computed above)
                # (mult diag is slightly faster than matrix multiplication in timing tests)
                pred = _utils.mult_diag(Dx, XvalVx, left=False).dot(UxYchunk) 
            # Compute prediction accuracy (correlations)
            cc[ChIdx,iA]=_sutils.column_corr(pred,val_data[:,ChIdx])
            if return_resids:
                resids[:,ChIdx,iA] = val_data[:,ChIdx]-pred
    if return_resids:
        return cc,resids
    else:
        return cc
Example #23
def _fit_ridge_alpha(trn_fs,
                     trn_data,
                     val_fs,
                     val_data,
                     alphas=DEFAULT_ALPHAS,
                     chunk_sz=5000,
                     is_efficient=True,
                     dtype=np.single,
                     is_verbose=False,
                     pthr=0.005,
                     square_alpha=False,
                     return_resids=False):
    """Get prediction correlations for a set of alphas on val_data, without ever computing weights on trn_fs

    Uses ridge regression to find a linear transformation of `trn_fs` that approximates `trn_data`.
    Then tests by comparing the transformation of `val_fs` to `val_data`. This procedure is repeated
    for each regularization parameter (alpha) in `alphas`. The correlation between each prediction and
    each response for each alpha is returned. Note that the regression weights are NOT returned.
    
    This is more efficient than full ridge regression (with weight computation); it is meant to be 
    used inside other ridge functions (after data has been split into bootstrap / cross-validation 
    splits) to find optimal alpha values. 
    

    Parameters
    ----------
    trn_fs : array_like, shape (TR, N)
        Training stimuli with TR time points and N features. Each feature should be Z-scored across time.
    trn_data : array_like, shape (TR, M)
        Training responses with TR time points and M responses (voxels, neurons, what-have-you).
        Each response should be Z-scored across time.
    val_fs : array_like, shape (TP, N)
        Test stimuli with TP time points and N features. Each feature should be Z-scored across time.
    val_data : array_like, shape (TP, M)
        Test responses with TP time points and M responses.
    alphas : list or array_like, shape (A,)
        Ridge parameters to be tested. Should probably be log-spaced. np.logspace(0, 3, 20) works well.
    normalpha : boolean
        Whether ridge parameters should be normalized by the Frobenius norm of trn_fs. Good for rigorously
        comparing models with different numbers of parameters.
    dtype : np.dtype
        All data will be cast as this dtype for computation. np.single is used by default for memory
        efficiency.
    singcutoff : float [WIP: not implemented yet]
        The first step in ridge regression is computing the singular value decomposition (SVD) of the
        stimulus trn_fs. If trn_fs is not full rank, some singular values will be approximately equal
        to zero and the corresponding singular vectors will be noise. These singular values/vectors
        should be removed both for speed (the fewer multiplications the better!) and accuracy. Any
        singular values less than singcutoff will be removed.

    Returns
    -------
    trn_corrs : array_like, shape (A, M)
        The correlation between each predicted response and each column of val_data for each alpha.
    
    """
    n_tps, n_voxels = trn_data.shape
    n_chunks = np.ceil(n_voxels / float(chunk_sz)).astype(np.int32)
    cc = np.zeros((n_voxels, len(alphas)), dtype=dtype)
    if return_resids:
        resids = np.zeros((n_tps, n_voxels, len(alphas)), dtype=dtype)
    pred_A = []
    if is_efficient:
        # Efficient Ridge regression from A. Huth, Part (1):
        # Full multiplication for validation (here, random split of
        # training data) prediction is:
        # pred = (Xval*Vx) * Dx * (pinv(Ux)*Ychunk)   # NOTE: pinv(Ux) = Ux'
        # We will pre-compute the first and third terms in parentheses:
        # pred =   XvalVx  * Dx *  UxYchunk
        if is_verbose:
            print('->Doing SVD of stimulus design matrix')
            t0 = time.time()
            #time.sleep(.01); # To ensure printing?
        m, n = trn_fs.shape
        if m > n:
            Ux, Sx, Vx = _utils._svd(trn_fs, full_matrices=False)
        else:
            Vx, Sx, Ux = _utils._svd(trn_fs.T, full_matrices=False)
            # Switcheroo of Vx and Ux due to transpose of input matrix
            Ux = Ux.T
            Vx = Vx.T

        if is_verbose:
            t1 = time.time()
            print('->Done with SVD in %0.2f sec' % (t1 - t0))
        # For more efficient computation:
        #k = len(Sx)
        ## OR:
        ## singcutoff = (XX);
        ## k = sum(sx > singcutoff);
        ## sx = sx(1:k);
        XvalVx = val_fs.dot(
            Vx.T
        )  # NOTE: IN MATLAB, No Vx', because Matlab leaves V in transposed form!
    else:
        raise NotImplementedError("Sorry, not done yet!")

    for iChunk in range(n_chunks):
        print('Running chunk %d of %d...\n' % (iChunk + 1, n_chunks))
        ChIdx = np.arange(chunk_sz) + chunk_sz * iChunk
        ChIdx = ChIdx[ChIdx < n_voxels]  # clip extra voxels in last run.
        Ychunk = trn_data[:, ChIdx]

        # Fit model with all lambdas (for subset of voxels)
        if not is_efficient:
            raise Exception('LAME! no slow reliable ridge implemented.')
            #[Wt L] = ridgemulti(X,Ychunk,params.lambdas);
        else:
            # Efficient Ridge regression from A. Huth, part (2)
            # NOTE: weights are never explicitly computed!
            UxYchunk = Ux.T.dot(Ychunk)

        if is_verbose:
            print('Checking model predictions...')
        for iA, A in enumerate(alphas):
            if not is_efficient:
                pred = np.cast[np.single](Xval.dot(Wt[:, :, iA]))
            else:
                # Efficient Ridge regression from A. Huth, part (3)
                # Normalize lambda by Frobenius norm for stim matrix
                aX = A  # * norm(X,'fro'); # ... or not
                # Need to decide for final whether aX**2 or not
                if square_alpha:
                    Dx = Sx / (Sx**2 + aX**2)
                else:
                    Dx = Sx / (Sx**2 + aX)
                # Compute predictions (XvalVx and UxYchunk computed above)
                # (mult diag is slightly faster than matrix multiplication in timing tests)
                pred = _utils.mult_diag(Dx, XvalVx, left=False).dot(UxYchunk)
            # Compute prediction accuracy (correlations)
            cc[ChIdx, iA] = _sutils.column_corr(pred, val_data[:, ChIdx])
            if return_resids:
                resids[:, ChIdx, iA] = val_data[:, ChIdx] - pred
    if return_resids:
        return cc, resids
    else:
        return cc
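Because _utils._svd and _sutils.column_corr are project helpers not shown here, a self-contained NumPy check (synthetic data, assumed shapes) of the identity the efficient path relies on may be more useful: ridge predictions can be formed without ever materialising the weights, since W = V diag(s / (s^2 + alpha)) U^T Y when X = U diag(s) V^T.

import numpy as np

rng = np.random.default_rng(0)
X = rng.standard_normal((100, 20))    # training features (TR, N)
Y = rng.standard_normal((100, 5))     # training responses (TR, M)
Xval = rng.standard_normal((30, 20))  # validation features (TP, N)
alpha = 10.0

# Closed-form ridge weights and predictions.
W = np.linalg.solve(X.T @ X + alpha * np.eye(20), X.T @ Y)
pred_direct = Xval @ W

# SVD route used above: the weights are never formed explicitly.
U, s, Vt = np.linalg.svd(X, full_matrices=False)
D = s / (s ** 2 + alpha)
pred_svd = ((Xval @ Vt.T) * D) @ (U.T @ Y)

print(np.allclose(pred_direct, pred_svd))  # True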
Example #24
 def error(self):
     mistakes = tf.not_equal(tf.argmax(self.targets, 1), tf.argmax(self.predictions, 1))
     mistakes = tf.reduce_mean(tf.cast(mistakes, tf.float32))
     return mistakes
Example #25
    def _synchronous_act(self, actions):
        """
        Action map:
        0: up
        1: right
        2: down
        3: left

        Args:
            actions (Optional[int,Dict[str,int]]):
                For "udlr": An integer 0-3 that describes the next action.
                For "ftj": A dict with keys: "turn" (0 (turn left), 1 (no turn), 2 (turn right)), "forward"
                    (0 (backward), 1(stay), 2 (forward)) and "jump" (0/False (no jump) and 1/True (jump)).

            #set_discrete_pos (Optional[int]): An integer to set the current discrete position to before acting.

        Returns:
            tuple: State Space (Space), reward (float), is_terminal (bool), info (usually None).
        """
        # Process possible manual setter instruction.
        #if set_discrete_pos is not None:
        #    assert isinstance(set_discrete_pos, int) and 0 <= set_discrete_pos < self.state_space.flat_dim
        #    self.discrete_pos = set_discrete_pos

        # Forward, turn, jump container action.
        moves = None
        # Up, down, left, right actions.
        if self.action_type == "udlr":
            moves = actions
        else:
            actions = self._translate_action(actions)
            # Turn around (0 (left turn), 1 (no turn), 2 (right turn)).
            if "turn" in actions:
                self.orientations += (actions["turn"] - 1) * 90
                self.orientations %= 360  # re-normalize orientation

            # Forward (0=move back, 1=don't move, 2=move forward).
            if "forward" in actions:
                moves = []
                # Translate into classic grid world action (0=up, 1=right, 2=down, 3=left).
                # We are actually moving in some direction.
                for slot in actions["forward"].shape[0]:
                    forward = actions["forward"][slot]
                    if forward != 1:
                        if self.orientations[slot] == 0 and forward == 2 or self.orientations[slot] == 180 and \
                                forward == 0:
                            moves.append(0)  # up
                        elif self.orientations[slot] == 90 and forward == 2 or self.orientations[slot] == 270 and \
                                forward == 0:
                            moves.append(1)  # right
                        elif self.orientations[slot] == 180 and forward == 2 or self.orientations[slot] == 0 and \
                                forward == 0:
                            moves.append(2)  # down
                        else:
                            moves.append(3)  # left

        if moves is not None:
            moves = np.array(moves)
            # determine the next state based on the transition function
            next_positions, next_positions_probs = self._get_possible_next_positions(
                self.discrete_pos, moves)
            next_state_indices = np.array([
                np.random.choice(len(c), p=p)
                for c, p in zip(next_positions, next_positions_probs)
            ])
            # Update our pos.
            self.discrete_pos = next_positions[
                np.arange(len(next_state_indices)), next_state_indices]

        # Jump? -> Move two fields forward (over walls/fires/holes w/o any damage).
        if self.action_type == "ftj" and "jump" in actions:
            assert actions["jump"] == 0 or actions["jump"] == 1 or actions[
                "jump"] is True or actions["jump"] is False
            if actions["jump"]:  # 1 or True
                # Translate into "classic" grid world action (0=up, ..., 3=left) and execute that action twice.
                actions = np.cast[np.int32](self.orientations / 90)
                for i in range(2):
                    # Determine the next state based on the transition function.
                    next_positions, next_positions_probs = self._get_possible_next_positions(
                        self.discrete_pos, actions, in_air=(i == 1))
                    next_state_idx = np.random.choice(len(next_positions),
                                                      p=next_positions_probs)
                    # Update our pos.
                    self.discrete_pos = next_positions[next_state_idx][0]

        next_x, next_y = self._get_x_y(self.discrete_pos)

        # Determine reward and done flag.
        next_state_types = self.worlds[np.arange(len(next_x)), next_y, next_x]
        states, rewards, terminals = [], [], []
        for i, next_state_type in enumerate(next_state_types):
            if next_state_type == "H":
                terminals.append(True)
                rewards.append(-5 if self.reward_function == "sparse" else -10)
                states.append(self._single_reset(i))
            elif next_state_type == "F":
                terminals.append(False)
                rewards.append(-3 if self.reward_function == "sparse" else -10)
                states.append(self._refresh_state(i))
            elif next_state_type in [" ", "S"]:
                terminals.append(False)
                rewards.append(-0.1)
                states.append(self._refresh_state(i))
            elif next_state_type == "G":
                terminals.append(True)
                rewards.append(1 if self.reward_function == "sparse" else 50)
                states.append(self._single_reset(i))
            else:
                raise NotImplementedError

        return states, rewards, terminals
Example #26
def linear(z):
    return np.cast[np.float32](np.argsort(np.array(z))) / float(len(z))
def compute_metrics_with_RandomForest(latents,
                                      factors,
                                      err_fn=nrmse,
                                      params={
                                          "n_estimators": 10,
                                          "max_depth": 8
                                      },
                                      cont_mask=None):
    """
    :param latents: (N, z_dim). They use E_q(z|x)[z]
    :param factors: (N, K)
    :param err_fn: Error function
    :param params: Parameters of the RandomForestRegressor
    :return:
    """

    assert len(latents.shape) == len(factors.shape) == 2, \
        "'latents' and 'factors' must be 2D arrays!"
    assert len(latents) == len(
        factors), "'latents' and 'factors' must have the same length!"

    num_factors = factors.shape[1]

    R = []
    train_errors = []

    if not cont_mask:
        cont_mask = [True] * num_factors
    else:
        assert len(cont_mask) == num_factors, "len(cont_mask)={}".format(
            len(cont_mask))

    print(
        "Training Random Forest regressor for {} factors!".format(num_factors))
    for k in tqdm(range(num_factors)):
        if cont_mask[k]:
            print("Factor {} is continuous. Process it!".format(k))

            # (N, )
            factors_k = factors[:, k]
            model = RandomForestRegressor(**params)
            model.fit(latents, factors_k)

            # (N, )
            factors_k_pred = model.predict(latents)

            # Scalar
            train_errors.append(err_fn(factors_k_pred, factors_k))

            # Feature importances of the random forest, shape (num_latents, 1)
            R.append(np.abs(model.feature_importances_[:, None]))
        else:
            print("Factor {} is not continuous. Do not process it!".format(k))

    # (num_latents, num_factors)
    R = np.concatenate(R, axis=1)
    assert R.shape[1] == np.sum(np.cast[np.int32](cont_mask)), \
        "R.shape={} while #cont={}".format(
            R.shape[1], np.sum(np.cast[np.int32](cont_mask)))

    # Disentanglement: (num_latents,)
    disentanglement_scores = entropic_scores(R.T)
    c_rel_importance = np.sum(R, axis=1) / np.sum(
        R)  # relative importance of each code variable
    assert 1 - 1e-4 < np.sum(c_rel_importance) < 1 + 1e-4, \
        "c_rel_importance: {}".format(c_rel_importance)
    disentanglement = np.sum(disentanglement_scores * c_rel_importance)

    # Completeness
    completeness_scores = entropic_scores(R)
    completeness = np.mean(completeness_scores)

    # Informativeness
    train_avg_error = np.mean(train_errors)

    results = {
        'importance_matrix': R,
        'disentanglement_scores': disentanglement_scores,
        'disentanglement': disentanglement,
        'completeness_scores': completeness_scores,
        'completeness': completeness,
        'train_errors': train_errors,
        'train_avg_error': train_avg_error,
    }

    return results
Example #28
 def get_acc(self, label):
     return np.mean(
         np.cast[np.float32](
             np.equal(np.argmax(label, 1), np.argmax(self.predict, 1))))