Code example #1
File: test_rollimg.py Project: rfdougherty/nipy
def test_modify():
    shape = (3, 5, 7, 12)
    x = np.random.standard_normal(shape)
    affine = np.eye(5)
    affine[:3, :3] = np.random.standard_normal((3, 3))
    affine[:4, 4] = np.random.standard_normal((4,))
    im = Image(x, AT(CS('ijkq'), MNI4, affine))

    def nullmodify(d):
        pass

    def meanmodify(d):
        d[:] = d.mean()

    for i, o, n in zip('ijkq', MNI3.coord_names + ('q',), range(4)):
        for a in i, o, n:
            nullim = image_modify(im, nullmodify, a)
            meanim = image_modify(im, meanmodify, a)
            assert_array_equal(nullim.get_data(), im.get_data())
            assert_array_equal(xyz_affine(im), xyz_affine(nullim))
            assert_equal(nullim.axes, im.axes)
            # yield assert_equal, nullim, im
            assert_array_equal(xyz_affine(im), xyz_affine(meanim))
            assert_equal(meanim.axes, im.axes)
        # Make sure that meanmodify works as expected
        d = im.get_data()
        d = np.rollaxis(d, n)
        meand = meanim.get_data()
        meand = np.rollaxis(meand, n)
        for i in range(d.shape[0]):
            assert_almost_equal(meand[i], d[i].mean())
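A quick reminder, not part of the test above: np.rollaxis(a, axis, start) moves `axis` so that it ends up just before `start`. Every example on this page relies on these semantics.

import numpy as np

a = np.ones((3, 4, 5, 6))
print(np.rollaxis(a, 3, 1).shape)  # (3, 6, 4, 5): axis 3 moved before position 1
print(np.rollaxis(a, 2).shape)     # (5, 3, 4, 6): axis 2 moved to the front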
Code example #2
File: image.py Project: antonmbk/keras
def apply_transform(x,
                    transform_matrix,
                    channel_axis=0,
                    fill_mode='nearest',
                    cval=0.):
    """Apply the image transformation specified by a matrix.

    # Arguments
        x: 2D numpy array, single image.
        transform_matrix: Numpy array specifying the geometric transformation.
        channel_axis: Index of axis for channels in the input tensor.
        fill_mode: Points outside the boundaries of the input
            are filled according to the given mode
            (one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
        cval: Value used for points outside the boundaries
            of the input if `mode='constant'`.

    # Returns
        The transformed version of the input.
    """
    x = np.rollaxis(x, channel_axis, 0)
    final_affine_matrix = transform_matrix[:2, :2]
    final_offset = transform_matrix[:2, 2]
    channel_images = [ndi.interpolation.affine_transform(
        x_channel,
        final_affine_matrix,
        final_offset,
        order=0,
        mode=fill_mode,
        cval=cval) for x_channel in x]
    x = np.stack(channel_images, axis=0)
    x = np.rollaxis(x, 0, channel_axis + 1)
    return x
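A minimal usage sketch, not from the Keras sources: applying a 45-degree rotation to a random channels-first image with the function above. The 3x3 matrix and the image shape are illustrative, and `ndi` is assumed to be scipy.ndimage as imported in the original module.

import numpy as np

theta = np.deg2rad(45.0)
rotation = np.array([[np.cos(theta), -np.sin(theta), 0.],
                     [np.sin(theta),  np.cos(theta), 0.],
                     [0.,             0.,            1.]])
img = np.random.rand(3, 32, 32)  # (channels, rows, cols)
rotated = apply_transform(img, rotation, channel_axis=0)
print(rotated.shape)  # (3, 32, 32)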
Code example #3
File: image.py Project: ArchaeoPY/ArchaeoPY
def lum2png(comp,filename):
    L = 255
    A = np.zeros(np.shape(comp))
    A.fill(255)

    masked = np.ma.masked_array(comp,np.isnan(comp))
    masked2 = np.ma.masked_array(A, mask=np.isnan(comp), fill_value=0)
    masked2 = masked2.filled(0)
    #masked2 = np.flipud(masked2)

    np.array(masked)
    #comp = np.flipud(comp)
    Min = np.nanmin(comp)
    Max = np.nanmax(comp)
    print(Min, Max)
    Range = float(Max - Min)
    scale = 255.0/Range
    comp = comp - Min
    comp = comp*scale
    comp = comp + (255 - 2*comp)

    l  = comp

    new_comp = np.array([l,masked2])
    new_comp = np.rollaxis(new_comp,-1)
    new_comp = np.rollaxis(new_comp,-1)

    #print new_comp.shape
    im = Image.fromarray(np.uint8(new_comp), "LA" )
    
    png_info = im.info

    im.save(filename, quality=100, **png_info)
Code example #4
File: array.py Project: StevenLOL/sharedmem
    def call(self, args, axis=0, out=None, chunksize=1024 * 1024, **kwargs):
        """ axis is the axis to chop it off.
            if self.altreduce is set, the results will
            be reduced with altreduce and returned
            otherwise will be saved to out, then return out.
        """
        if self.altreduce is not None:
            ret = [None]
        else:
            if out is None :
                if self.outdtype is not None:
                    dtype = self.outdtype
                else:
                    try:
                        dtype = numpy.result_type(*[args[i] for i in self.ins] * 2)
                    except:
                        dtype = None
                out = sharedmem.empty(
                        numpy.broadcast(*[args[i] for i in self.ins] * 2).shape,
                        dtype=dtype)
        if axis != 0:
            for i in self.ins:
                args[i] = numpy.rollaxis(args[i], axis)
            out = numpy.rollaxis(out, axis)
        size = numpy.max([len(args[i]) for i in self.ins])
        with sharedmem.MapReduce() as pool:
            def work(i):
                sl = slice(i, i+chunksize)
                myargs = args[:]
                for j in self.ins:
                    try: 
                        tmp = myargs[j][sl]
                        a, b, c = sl.indices(len(args[j]))
                        myargs[j] = tmp
                    except Exception as e:
                        print(tmp)
                        print(j, e)
                        pass
                if b == a: return None
                rt = self.ufunc(*myargs, **kwargs)
                if self.altreduce is not None:
                    return rt
                else:
                    out[sl] = rt
            def reduce(rt):
                if self.altreduce is None:
                    return
                if ret[0] is None:
                    ret[0] = rt
                elif rt is not None:
                    ret[0] = self.altreduce(ret[0], rt)

            pool.map(work, range(0, size, chunksize), reduce=reduce)

        if self.altreduce is None:
            if axis != 0:
                out = numpy.rollaxis(out, 0, axis + 1)
            return out                
        else:
            return ret[0]
Code example #5
File: ripple.py Project: csb60/hyperspy
def write_raw(filename, signal, record_by):
    """Writes the raw file object

    Parameters:
    -----------
    filename : string
        the filename, either with the extension or without it
    record_by : string
     'vector' or 'image'

        """
    filename = os.path.splitext(filename)[0] + '.raw'
    dshape = signal.data.shape
    data = signal.data
    if len(dshape) == 3:
        if record_by == 'vector':
            np.rollaxis(
                data, signal.axes_manager._slicing_axes[0].index_in_array, 3
                        ).ravel().tofile(filename)
        elif record_by == 'image':
            np.rollaxis(
                data, signal.axes_manager._non_slicing_axes[0].index_in_array, 0
                        ).ravel().tofile(filename)
    elif len(dshape) == 2:
        if record_by == 'vector':
            np.rollaxis(
                data, signal.axes_manager._slicing_axes[0].index_in_array, 2
                        ).ravel().tofile(filename)
        elif record_by in ('image', 'dont-care'):
            data.ravel().tofile(filename)
    elif len(dshape) == 1:
        data.ravel().tofile(filename)
Code example #6
File: projectable.py Project: pytroll/satpy
    def resample(self, destination_area, **kwargs):
        """Resample the current projectable and return the resampled one.

        Args:
            destination_area: The destination onto which to project the data, either a full blown area definition or
                a string corresponding to the name of the area as defined in the area file.
            **kwargs: The extra parameters to pass to the resampling functions.

        Returns:
            A resampled projectable, with updated .info["area"] field
        """
        # avoid circular imports, this is just a convenience function anyway
        from satpy.resample import resample, get_area_def
        # call the projection stuff here
        source_area = self.info["area"]

        if isinstance(source_area, (str, six.text_type)):
            source_area = get_area_def(source_area)
        if isinstance(destination_area, (str, six.text_type)):
            destination_area = get_area_def(destination_area)

        if self.ndim == 3:
            data = np.rollaxis(self, 0, 3)
        else:
            data = self
        new_data = resample(source_area, data, destination_area, **kwargs)

        if new_data.ndim == 3:
            new_data = np.rollaxis(new_data, 2)

        # FIXME: is this necessary with the ndarray subclass ?
        res = Projectable(new_data, **self.info)
        res.info["area"] = destination_area
        return res
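Not from satpy itself, just the axis shuffle the method above performs on 3D data: bands are moved last before resampling and back to the front afterwards.

import numpy as np

data = np.zeros((3, 100, 200))   # (bands, y, x)
hwc = np.rollaxis(data, 0, 3)    # (100, 200, 3)
chw = np.rollaxis(hwc, 2)        # (3, 100, 200) again
print(hwc.shape, chw.shape)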
Code example #7
File: frame_align.py Project: losonczylab/sima
def shifted_corr(reference, image, displacement):
    """Calculate the correlation between the reference and the image shifted
    by the given displacement.

    Parameters
    ----------
    reference : np.ndarray
    image : np.ndarray
    displacement : np.ndarray

    Returns
    -------
    correlation : float

    """

    ref_cuts = np.maximum(0, displacement)
    ref = reference[ref_cuts[0]:, ref_cuts[1]:, ref_cuts[2]:]
    im_cuts = np.maximum(0, -displacement)
    im = image[im_cuts[0]:, im_cuts[1]:, im_cuts[2]:]
    s = np.minimum(im.shape, ref.shape)
    ref = ref[:s[0], :s[1], :s[2]]
    im = im[:s[0], :s[1], :s[2]]
    ref -= nanmean(ref.reshape(-1, ref.shape[-1]), axis=0)
    ref = np.nan_to_num(ref)
    im -= nanmean(im.reshape(-1, im.shape[-1]), axis=0)
    im = np.nan_to_num(im)
    assert np.all(np.isfinite(ref)) and np.all(np.isfinite(im))
    corr = nanmean(
        [old_div(np.sum(i * r), np.sqrt(np.sum(i * i) * np.sum(r * r))) for
         i, r in zip(np.rollaxis(im, -1), np.rollaxis(ref, -1))])
    return corr
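A hedged usage sketch, not from sima: if `image` really is `reference` shifted by `displacement` (in z, y, x order), the correlation should be close to 1.0. It assumes the module's own imports (nanmean, old_div) are in scope; the copies protect the inputs, since the function demeans `ref` and `im` in place through views of the arrays passed in.

import numpy as np

reference = np.random.rand(10, 20, 30)
image = np.roll(reference, (-1, -2, 0), axis=(0, 1, 2))  # shifted by (1, 2, 0)
print(shifted_corr(reference.copy(), image.copy(), np.array([1, 2, 0])))  # ~1.0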
Code example #8
    def test_exceptions(self):
        # test axis must be in bounds
        for ndim in [1, 2, 3]:
            a = np.ones((1,)*ndim)
            np.concatenate((a, a), axis=0)  # OK
            assert_raises(IndexError, np.concatenate, (a, a), axis=ndim)
            assert_raises(IndexError, np.concatenate, (a, a), axis=-(ndim + 1))

        # Scalars cannot be concatenated
        assert_raises(ValueError, concatenate, (0,))
        assert_raises(ValueError, concatenate, (np.array(0),))

        # test shapes must match except for concatenation axis
        a = np.ones((1, 2, 3))
        b = np.ones((2, 2, 3))
        axis = list(range(3))
        for i in range(3):
            np.concatenate((a, b), axis=axis[0])  # OK
            assert_raises(ValueError, np.concatenate, (a, b), axis=axis[1])
            assert_raises(ValueError, np.concatenate, (a, b), axis=axis[2])
            a = np.rollaxis(a, -1)
            b = np.rollaxis(b, -1)
            axis.append(axis.pop(0))

        # No arrays to concatenate raises ValueError
        assert_raises(ValueError, concatenate, ())
Code example #9
File: misc.py Project: cdla/nipype
 def _run_interface(self, runtime):
     img = nb.load(self.inputs.in_file[0])
     header = img.get_header().copy()
     vollist = [nb.load(filename) for filename in self.inputs.in_file]
     data = np.concatenate([vol.get_data().reshape(
         vol.get_shape()[:3] + (-1,)) for vol in vollist], axis=3)
     if data.dtype.kind == 'i':
         header.set_data_dtype(np.float32)
         data = data.astype(np.float32)
     if isdefined(self.inputs.regress_poly):
         timepoints = img.get_shape()[-1]
         X = np.ones((timepoints, 1))
         for i in range(self.inputs.regress_poly):
             X = np.hstack((X, legendre(
                 i + 1)(np.linspace(-1, 1, timepoints))[:, None]))
         betas = np.dot(np.linalg.pinv(X), np.rollaxis(data, 3, 2))
         datahat = np.rollaxis(np.dot(X[:, 1:],
                                      np.rollaxis(
                                          betas[1:, :, :, :], 0, 3)),
                               0, 4)
         data = data - datahat
         img = nb.Nifti1Image(data, img.get_affine(), header)
         nb.save(img, self._gen_output_file_name('detrended'))
     meanimg = np.mean(data, axis=3)
     stddevimg = np.std(data, axis=3)
     tsnr = meanimg / stddevimg
     img = nb.Nifti1Image(tsnr, img.get_affine(), header)
     nb.save(img, self._gen_output_file_name())
     img = nb.Nifti1Image(meanimg, img.get_affine(), header)
     nb.save(img, self._gen_output_file_name('mean'))
     img = nb.Nifti1Image(stddevimg, img.get_affine(), header)
     nb.save(img, self._gen_output_file_name('stddev'))
     return runtime
Code example #10
File: deepflow2.py Project: CansenJIANG/deepFlowTrk
def deepflow2( im1=None, im2=None, match=None, options=""):
    """
    flow = deepflow2.deepflow2(image1, image2, match=None, options='')
    Compute the flow between two images, optionally using given matches.
    Images must be HxWx3 numpy arrays (converted to float32 if needed).
    Match is an optional numpy array argument (None by default, i.e. no input match), where each row starts with x1 y1 x2 y2.
    Options is an optional string argument ('' by default), to set the options. Type deepflow2() to see the list of available options.
    The function returns the optical flow as a HxWx2 numpy array."""
#convert images
    if im1 is None or im2 is None:
        usage_python()
        return
    assert im1.shape == im2.shape, "images must have the same shape"
    if im1.dtype != float32:
        im1 = im1.astype(float32)
    if im2.dtype != float32:
        im2 = im2.astype(float32)
    h, w, nchannels = im1.shape
    assert nchannels==3, "images must have 3 channels"
    stride = 4*((w+3)//4)
    im1 = pad( rollaxis(im1,2), ((0,0),(0,0),(0, stride-w)), 'constant')
    im2 = pad( rollaxis(im2,2), ((0,0),(0,0),(0, stride-w)), 'constant')
# allocate flow
    flowx = empty((h,stride), dtype=float32)
    flowy = empty((h,stride), dtype=float32)
# compute flow
    if match is not None:
        assert match.shape[1]>=4
        match = ascontiguousarray(match[:,:4], dtype=float32)
    deepflow2_numpy( w, flowx, flowy, im1, im2, match, options)
    return concatenate ( (flowx[:,:w,None], flowy[:,:w,None]), axis=2)
Code example #11
File: ckn.py Project: jiajunshen/MultipleDetection
    def extract(self, X, batch_size = 500):
        assert self._z is not None, "Must be trained before calling extract"
        X_size, X_w, X_h, X_channel = X.shape
        total_dimension = np.prod(self._part_shape)
        
        coded_result = np.zeros((X_size, X_w - self._part_shape[0] + 1, X_h - self._part_shape[1] + 1, self._num_features))
        
        X_input = tf.placeholder("float32", [None, total_dimension + 1], name = "X_input")
        fc_1 = tf.matmul(X_input, tf.transpose(self._z, [1, 0], name = "transpose"))
        
        code_function = tf.exp(fc_1)
        
        for i in range(X_size // batch_size):
            end = min((i + 1) * batch_size, X_size)
            start = i * batch_size
            X_select = X[start: end]
            code_x = np.ones((end-start, coded_result.shape[1], coded_result.shape[2], total_dimension + 1))
            norm_x = np.zeros((end-start, coded_result.shape[1], coded_result.shape[2]))
            for m in range(coded_result.shape[1]):
                for n in range(coded_result.shape[2]):
                    selected_patches = X_select[:,m:m+self._part_shape[0], n:n+self._part_shape[1], :].reshape(-1, total_dimension)
                    patches_norm = np.sqrt(np.sum(selected_patches ** 2, axis = 1))
                    patches_norm = np.clip(patches_norm, a_min = 0.00001, a_max = 10)
                    code_x[:, m, n, :total_dimension] = np.array([selected_patches[k] / patches_norm[k] for k in range(end-start)])
                    norm_x[:, m, n] = patches_norm
            code_x = code_x.reshape(-1, total_dimension + 1)
            feed_dict = {}
            feed_dict[X_input] = code_x
            tmp_coded_result = self._sess.run(code_function, feed_dict = feed_dict)
            reshape_result = tmp_coded_result.reshape(end-start, coded_result.shape[1], coded_result.shape[2], self._num_features)
            coded_result[start:end] = np.rollaxis(np.rollaxis(reshape_result, 3, 0) / norm_x, 0, 4)
            print(norm_x)

        return coded_result
Code example #12
File: test_decomposition.py Project: woozey/hyperspy
 def setup_method(self, method):
     # Create three signals with dimensions:
     # s1 : <BaseSignal, title: , dimensions: (4, 3, 2|2, 3)>
     # s2 : <BaseSignal, title: , dimensions: (2, 3|4, 3, 2)>
     # s12 : <BaseSignal, title: , dimensions: (2, 3|4, 3, 2)>
     # Where s12 data is transposed with respect to s2
     dc1 = np.random.random((2, 3, 4, 3, 2))
     dc2 = np.rollaxis(np.rollaxis(dc1, -1), -1)
     s1 = signals.BaseSignal(dc1.copy())
     s2 = signals.BaseSignal(dc2)
     s12 = signals.BaseSignal(dc1.copy())
     for i, axis in enumerate(s1.axes_manager._axes):
         if i < 3:
             axis.navigate = True
         else:
             axis.navigate = False
     for i, axis in enumerate(s2.axes_manager._axes):
         if i < 2:
             axis.navigate = True
         else:
             axis.navigate = False
     for i, axis in enumerate(s12.axes_manager._axes):
         if i < 3:
             axis.navigate = False
         else:
             axis.navigate = True
     self.s1 = s1
     self.s2 = s2
     self.s12 = s12
Code example #13
File: trainSimple.py Project: TAAdSM/evolveFlyNet
def loadData(folder):
	data = dp.DataProcessor()
	#plotData(data)
	nsp = data.normalizedSequencesPerCell()
	'''Select last 5 genes as target values'''
	oSamples = np.array(nsp[:,1:,3:7], dtype='float32')
	'''Bring array into shape (n_steps, n_samples, n_genes)'''
	oSamples = np.rollaxis(oSamples, 0, 2)
	'''Select first time point of last 5 genes as initial condition'''
	c0Samples = np.array(nsp[:,0,3:7], dtype='float32')
	
	stepsPerUnit = 1/dt
	numUnits = oSamples.shape[1]
	totalSteps = numUnits*stepsPerUnit
	
	'''Create input genes array'''
	interpGenes = []
	for g in range(3):
		interpGenes.append(interpolateSingleGene(nsp[:,:,g], totalSteps))
	
	iSamples = np.array(interpGenes, dtype='float32')
	
	'''Bring array into shape (n_steps, n_samples, n_genes)'''
	iSamples = np.rollaxis(iSamples, 0, 3).clip(min = 0)
	iSamples = np.rollaxis(iSamples, 0, 2)
	
	np.save(folder+"data/simpleInputSequence.npy", iSamples)
	np.save(folder+"data/simpleOutputSequence.npy", oSamples)
	np.save(folder+"data/simpleStartSequence.npy", c0Samples)
	
	return iSamples, oSamples, c0Samples
Code example #14
File: ein.py Project: wilywampa/vimconfig
def mtimesm(a, b, axisa=0, axisb=0, axisc=0,
            transposea=False, transposeb=False, transposec=False, **kwargs):
    """Matrix/matrix multiplication along specified axes of ndarrays."""
    if not hasattr(_np, 'einsum'):
        import nein
        return nein.mtimesm(a, b, axisa, axisb, axisc,
                            transposea, transposeb, transposec, **kwargs)
    a, b = _asarray(a, b)
    axisa, axisb = _normalize_indices(a, b, axisa, axisb)
    if a.shape[axisa + 1] != b.shape[axisb]:
        raise ValueError(_error(a, b, axisa, axisb))
    transposea = kwargs.get('ta', transposea)
    transposeb = kwargs.get('tb', transposeb)
    transposec = kwargs.get('tc', transposec)
    stra = '%s%s%s' % (_LS[:axisa], 'ji' if transposea else 'ij',
                       _LS[axisa:len(a.shape) - 2])
    strb = '%s%s%s' % (_LS[:axisb], 'kj' if transposeb else 'jk',
                       _LS[axisb:len(b.shape) - 2])
    series = ''.join(sorted(x for x in _LS if x in stra or x in strb))
    if axisc < 0:
        axisc += len(series) + 2
    strc = '%s%s%s' % (series[:axisc], 'ki' if transposec else 'ik',
                       series[axisc:])
    mask = None
    for x, ax in (a, axisa), (b, axisb):
        if isinstance(x, _np.ma.MaskedArray):
            if x.ndim > 2:
                x = _np.rollaxis(_np.rollaxis(x, ax), ax + 1, 1)
                for row in x:
                    mask = _collapse_mask(row, mask)
            elif x.mask.any():
                mask = True
            elif mask is None:
                mask = False
    return _ein(a, b, stra, strb, strc, mask=mask, mask_axes=(axisc, axisc + 1))
Code example #15
def load_data(trainingData, trainingLabel,
              testingData, testingLabel,
              resize = False, size = 100, dataset = "IKEA_PAIR"):
    trainingData = os.environ[dataset] + trainingData
    trainingLabel = os.environ[dataset] + trainingLabel
    testingData = os.environ[dataset] + testingData
    testingLabel = os.environ[dataset] + testingLabel

    X_train = np.array(np.load(trainingData),
                       dtype = np.float32)
    Y_train = np.array(np.load(trainingLabel),
                       dtype = np.uint8)
    X_test = np.array(np.load(testingData),
                      dtype = np.float32)
    Y_test = np.array(np.load(testingLabel),
                      dtype = np.uint8)

    print("resizing....")
    if resize:
        X_train = np.array([misc.imresize(X_train[i],
                                          size = (size, size, 3)) /255.0
                            for i in range(X_train.shape[0])], dtype=np.float32)
        X_test = np.array([misc.imresize(X_test[i],
                                         size = (size, size, 3)) /255.0
                           for i in range(X_test.shape[0])], dtype=np.float32)
        np.save(trainingData + "_100.npy", X_train)
        np.save(testingData + "_100.npy", X_test)

    X_train = np.rollaxis(X_train, 3, 1)
    X_test = np.rollaxis(X_test, 3, 1)

    print("downresizing....")


    return X_train, Y_train, X_test, Y_test
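Not from the original project, just the layout move the loader performs: np.rollaxis(X, 3, 1) turns a batch from channels-last (NHWC) into channels-first (NCHW).

import numpy as np

batch = np.zeros((8, 100, 100, 3))     # NHWC
print(np.rollaxis(batch, 3, 1).shape)  # (8, 3, 100, 100), i.e. NCHW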
Code example #16
File: helper.py Project: drufat/dec
def interweave(a, b, axis=-1):
    ''' Interweave two arrays.
    >>> interweave([0, 1, 2], [3, 4, 5])
    array([0, 3, 1, 4, 2, 5])
    >>> interweave([0, 1, 2], [3, 4])
    array([0, 3, 1, 4, 2])
    >>> interweave([[0,1],[2,3]],[[4,5],[6,7]])
    array([[0, 4, 1, 5],
           [2, 6, 3, 7]])
    >>> interweave([[0,1],[2,3]],[[4,5],[6,7]], axis=0)
    array([[0, 1],
           [4, 5],
           [2, 3],
           [6, 7]])
    '''
    a = np.asarray(a)
    b = np.asarray(b)

    a = np.rollaxis(a, axis, len(a.shape))
    b = np.rollaxis(b, axis, len(b.shape))

    shape = np.array(a.shape)
    shape[-1] = a.shape[-1] + b.shape[-1]

    c = np.empty(shape, dtype=b.dtype).reshape(-1)
    c[0::2] = a.reshape(-1)
    c[1::2] = b.reshape(-1)

    c = c.reshape(shape)
    c = np.rollaxis(c, len(c.shape) - 1, axis)

    return c
Code example #17
File: ein.py Project: wilywampa/vimconfig
def cross(a, b, axisa=0, axisb=0, axisc=0):
    """Vector cross product along specified axes of ndarrays."""
    if not hasattr(_np, 'einsum'):
        import nein
        return nein.cross(a, b, axisa, axisb, axisc)
    a, b = _asarray(a, b)
    if (a.ndim != b.ndim and
            a.shape not in [(2,), (3,)] and
            b.shape not in [(2,), (3,)]):
        return _np.cross(a, b, axisa=axisa, axisb=axisb, axisc=axisc)
    axisa, axisb = _normalize_indices(a, b, axisa, axisb)
    n = a.shape[axisa]
    if n not in [2, 3]:
        raise NotImplementedError(
            "Only 2D and 3D cross products are implemented")
    if n != b.shape[axisb]:
        raise ValueError(_error(a, b, axisa, axisb))
    if n == 2:
        return _cross2d(a, b, axisa, axisb, axisc)
    strb = '%sj%s' % (_LS[:axisa], _LS[axisa:len(a.shape) - 1])
    strc = 'ik%s' % strb.replace('j', '')
    a = _ein(eijk, a, 'ijk', strb, strc)
    stra = strc
    strb = '%sk%s' % (_LS[:axisb], _LS[axisb:len(b.shape) - 1])
    series = ''.join(sorted(x for x in _LS if x in stra or x in strb))
    if axisc < 0:
        axisc += len(series) + 1
    strc = '%si%s' % (series[:axisc], series[axisc:])
    mask = _collapse_mask(_np.rollaxis(a, axisa),
                          _collapse_mask(_np.rollaxis(b, axisb)))
    return _ein(a, b, stra, strb, strc, mask=mask, mask_axes=(axisc,))
Code example #18
File: deepmatching.py Project: BoAdBo/AlphaPose
def deepmatching( im1=None, im2=None, options=""):
    """
    matches = deepmatching.deepmatching(image1, image2, options='')
    Compute the 'DeepMatching' between two images.
    Images must be HxWx3 numpy arrays (converted to float32).
    Options is an optional string argument ('' by default), to set the options.
    The function returns a numpy array with 6 columns, each row being x1 y1 x2 y2 score index.
     (index refers to the local maximum from which the match was retrieved)
    Version 1.2"""
    if im1 is None or im2 is None:
      usage_python()
      return

# convert images
    if im1.dtype != float32:
        im1 = im1.astype(float32)
    if im2.dtype != float32:
        im2 = im2.astype(float32)
    assert len(im1.shape)==3 and len(im2.shape)==3, "images must have 3 dimensions"
    h, w, nchannels = im1.shape
    assert nchannels==3, "images must have 3 channels"
    im1 = ascontiguousarray(rollaxis(im1,2))
    im2 = ascontiguousarray(rollaxis(im2,2))
    corres = deepmatching_numpy( im1, im2, options)
    return corres
Code example #19
File: mpinew.py Project: uqngibbo/cfd
def V(U):
    """ Spatial Viscous Fluxes at cell centres """
    rho, momx, momy, E = rollaxis(U, U.ndim-1) # shape to (neq,nx,ny)
    rho = rho.copy() # GASMODEL needs contiguous arrays
    u = momx/rho 
    v = momy/rho
    e = E/rho - 0.5*(u**2+v**2) # Gas internal energy (J/kg)
    T,Xi = GASMODEL.decode_conserved(rho, e)

    mu = 8.6412e-6*(T/288.16)**1.5*(398.16)/(T+110) # Sutherland viscosity law
    k = mu*14320.0/0.70 # This thing is important and hard to calculate

    dudx,dudy = gradient(u,dx,dy)
    dvdx,dvdy = gradient(v,dx,dy)
    dTdx,dTdy = gradient(T,dx,dy)
    tauxx = 2.0/3.0*mu*(2*dudx-dvdy)
    tauxy = mu*(dudy + dvdx)
    qx = -k*dTdx

    nx,ny,neq = U.shape
    C = zeros((nx,ny,neq)) # Input is cells, output is faces
    Q = rollaxis(C,C.ndim-1) # View of C with nicer indexing
    Q[1] = -tauxx
    Q[2] = -tauxy
    Q[3] = -u*tauxx -v*tauxy + qx
    return C
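A minimal demonstration, not from the original CFD code, of the view trick used above: np.rollaxis returns a view, so writing through Q fills the underlying C.

import numpy as np

C = np.zeros((2, 2, 4))
Q = np.rollaxis(C, C.ndim - 1)  # shape (4, 2, 2), shares memory with C
Q[1] = 7.0
print(C[..., 1])                # all 7.0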
Code example #20
def cirs_to_icrs(cirs_coo, icrs_frame):
    srepr = cirs_coo.represent_as(UnitSphericalRepresentation)
    cirs_ra = srepr.lon.to(u.radian).value
    cirs_dec = srepr.lat.to(u.radian).value

    # set up the astrometry context for ICRS<->cirs and then convert to
    # astrometric coordinate direction
    astrom, eo = erfa.apci13(*get_jd12(cirs_coo.obstime, 'tdb'))
    i_ra, i_dec = aticq(cirs_ra, cirs_dec, astrom)

    if cirs_coo.data.get_name() == 'unitspherical' or cirs_coo.data.to_cartesian().x.unit == u.one:
        # if no distance, just use the coordinate direction to yield the
        # infinite-distance/no parallax answer
        newrep = UnitSphericalRepresentation(lat=u.Quantity(i_dec, u.radian, copy=False),
                                             lon=u.Quantity(i_ra, u.radian, copy=False),
                                             copy=False)
    else:
        # When there is a distance, apply the parallax/offset to the SSB as the
        # last step - ensures round-tripping with the icrs_to_cirs transform

        # the distance in intermedrep is *not* a real distance as it does not
        # include the offset back to the SSB
        intermedrep = SphericalRepresentation(lat=u.Quantity(i_dec, u.radian, copy=False),
                                              lon=u.Quantity(i_ra, u.radian, copy=False),
                                              distance=cirs_coo.distance,
                                              copy=False)

        newxyz = intermedrep.to_cartesian().xyz
        # roll xyz to last axis and add the barycentre position
        newxyz = np.rollaxis(newxyz, 0, newxyz.ndim) + astrom['eb'] * u.au
        # roll xyz back to the first axis
        newxyz = np.rollaxis(newxyz, -1, 0)
        newrep = CartesianRepresentation(newxyz).represent_as(SphericalRepresentation)

    return icrs_frame.realize_frame(newrep)
Code example #21
def getCropBoundaries(labelNode):
    sitkLabelNode = su.PullFromSlicer(labelNode.GetName())
    labelArray = sitk.GetArrayFromImage(sitkLabelNode)
    
    zmin = 0
    zmax = labelArray.shape[0]
    zmin = minfinder(labelArray)
    zmax = maxfinder(labelArray)

    Xmat = np.rollaxis(labelArray,2)
    xmin = 0
    xmax = Xmat.shape[0]
    xmin = minfinder( Xmat )
    xmax = maxfinder( Xmat )
    
    Ymat = np.rollaxis(labelArray,1)
    ymin = 0
    ymax = Ymat.shape[0]
    ymin = minfinder( Ymat)
    ymax = maxfinder( Ymat )
    
    cube = (200.00,200.00,200.00) # lung
    # use (100.00,100.00,100.00) pad cube for brain tumors
    
    dims = tuple(map( lambda x: x-1, labelNode.GetImageData().GetDimensions() ))
    spacing = labelNode.GetSpacing()
    
    minCoordinates = (xmin, ymin, zmin)
    maxCoordinates = (xmax, ymax, zmax)

    minCoordinates, maxCoordinates = padXYZ(dims, spacing, minCoordinates, maxCoordinates, cube=cube)
    lbound = minCoordinates
    hbound = tuple(x - y for x, y in zip(dims, maxCoordinates))

    return lbound, hbound
Code example #22
File: svhn.py Project: victor-estrade/DANN
def load_svhn_src(roll=True, batchsize=600):
    """
    TODO : read again and rework it !
    """
    data = io.loadmat(os.path.join(data_dir,'train_32x32.mat'))
    X = data['X']
    y = data['y']
    X = np.rollaxis(X, 3)
    if roll:
        X = np.rollaxis(X, 3, 1)

    s1 = 50000
    s2 = 60000
    X_train, X_val, X_test = X[:s1], X[s1:s2], X[s2:]
    y_train, y_val, y_test = y[:s1], y[s1:s2], y[s2:]

    data = {
            'X_train': X_train,
            'y_train': y_train,
            'X_val': X_val,
            'y_val': y_val,
            'X_test': X_test,
            'y_test': y_test,
            'batchsize': batchsize,
            }
    return data
Code example #23
File: display.py Project: soft-matter/pims
def _to_rgb_uint8(image, autoscale):
    if autoscale is None:
        autoscale = image.dtype != np.uint8

    if autoscale:
        image = (normalize(image) * 255).astype(np.uint8)
    elif image.dtype != np.uint8:
        if np.issubdtype(image.dtype, np.integer):
            max_value = np.iinfo(image.dtype).max
            # sometimes 12-bit images are stored as unsigned 16-bit
            if max_value == 2**16 - 1 and image.max() < 2**12:
                max_value = 2**12 - 1
            image = (image / max_value * 255).astype(np.uint8)
        else:
            image = (image * 255).astype(np.uint8)

    ndim = image.ndim
    shape = image.shape
    if ndim == 3 and shape.count(3) == 1:
        # This is a color image. Ensure that the color axis is axis 2.
        color_axis = shape.index(3)
        image = np.rollaxis(image, color_axis, 3)
    elif image.ndim == 3 and shape.count(4) == 1:
        # This is an RGBA image. Ensure that the color axis is axis 2, and 
        # drop the A values.
        color_axis = shape.index(4)
        image = np.rollaxis(image, color_axis, 3)[:, :, :3]
    elif ndim == 2:
        # Expand into color to satisfy moviepy's expectation
        image = np.repeat(image[:, :, np.newaxis], 3, axis=2)
    else:
        raise ValueError("Images have the wrong shape.")

    return np.asarray(image)
Code example #24
File: utils.py Project: khairy/scikits.fitting
 def vec_func(p, x):
     # Move last dimension to front (becomes sequence of column arrays)
     column_seq_x = np.rollaxis(np.asarray(x), -1)
     # Get corresponding sequence of output column arrays
     column_seq_y = np.array([func(p, xx) for xx in column_seq_x])
     # Move column dimension back to the end
     return np.rollaxis(column_seq_y, 0, len(column_seq_y.shape))
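Not from scikits.fitting: the round-trip used above (rolling the last axis to the front and then back to the end) restores the original layout exactly.

import numpy as np

x = np.random.rand(2, 3, 4)
cols = np.rollaxis(x, -1)                     # (4, 2, 3)
back = np.rollaxis(cols, 0, len(cols.shape))  # (2, 3, 4) again
print(np.array_equal(back, x))                # True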
Code example #25
File: helper.py Project: adlyons/AWOT
def time_subset_awot_dict(time, data, start_time, end_time, time_axis=0):
    '''
    Get the variable from the fields dictionary.
    Subset the time when in time series format.

    Parameters
    ----------
    time : dict
        AWOT time dictionary
    data : dict
        AWOT data dictionary.
    start_time : str
        UTC time to use as start time for subsetting in datetime format.
        (e.g. 2014-08-20 12:30:00)
    end_time : str
        UTC time to use as an end time for subsetting in datetime format.
        (e.g. 2014-08-20 16:30:00)
    time_axis : int
        Axis of the data array that corresponds to time.
    '''
    # Check to see if time is subsetted
    dt_start = _get_start_datetime(time, start_time)
    dt_end = _get_end_datetime(time, end_time)
    datasub = data.copy()
    if time_axis > 0:
        datasub['data'] = np.rollaxis(datasub['data'], time_axis)
    datasub['data'] = datasub['data'][(time['data'] >= dt_start) &
                                      (time['data'] <= dt_end), ...]
    return datasub
Code example #26
File: delta_e.py Project: TheArindham/scikit-image
def get_dH2(lab1, lab2):
    """squared hue difference term occurring in deltaE_cmc and deltaE_ciede94

    Despite its name, "dH" is not a simple difference of hue values.  We avoid
    working directly with the hue value, since differencing angles is
    troublesome.  The hue term is usually written as:
        c1 = sqrt(a1**2 + b1**2)
        c2 = sqrt(a2**2 + b2**2)
        term = (a1-a2)**2 + (b1-b2)**2 - (c1-c2)**2
        dH = sqrt(term)

    However, this has poor roundoff properties when a or b is dominant.
    Instead, ab is a vector with elements a and b.  The same dH term can be
    re-written as:
        |ab1-ab2|**2 - (|ab1| - |ab2|)**2
    and then simplified to:
        2*|ab1|*|ab2| - 2*dot(ab1, ab2)
    """
    lab1 = np.asarray(lab1)
    lab2 = np.asarray(lab2)
    a1, b1 = np.rollaxis(lab1, -1)[1:3]
    a2, b2 = np.rollaxis(lab2, -1)[1:3]

    # magnitude of (a, b) is the chroma
    C1 = np.hypot(a1, b1)
    C2 = np.hypot(a2, b2)

    term = (C1 * C2) - (a1 * a2 + b1 * b2)
    return 2 * term
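A quick numerical check, not part of scikit-image, that the rewritten term 2*(|ab1|*|ab2| - dot(ab1, ab2)) matches the textbook expression from the docstring; the Lab values are made up.

import numpy as np

lab1 = np.array([50., 10., -20.])
lab2 = np.array([60., -5., 15.])
a1, b1 = lab1[1:]
a2, b2 = lab2[1:]
c1, c2 = np.hypot(a1, b1), np.hypot(a2, b2)
textbook = (a1 - a2) ** 2 + (b1 - b2) ** 2 - (c1 - c2) ** 2
print(np.allclose(get_dH2(lab1, lab2), textbook))  # True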
Code example #27
File: bedges.py Project: kolchinski/amitgroup
def bspread(X, spread='box', radius=1, first_axis=False):
    """
    Spread binary edges.

    Parameters
    ----------
    X : ndarray  (3D or 4D)
        Binary edges to spread. Shape should be ``(rows, cols, A)`` or ``(N, rows, cols, A)``, where `A` is the number of edge features.
    spread : 'box', 'orthogonal', None
        If set to `'box'` and `radius` is set to 1, then an edge will appear if any of the 8 neighboring pixels detected an edge. This is equivalent to inflating the edges area with 1 pixel. The size of the box is dictated by `radius`.
        If `'orthogonal'`, then the features will be extended by `radius` perpendicular to the direction of the edge feature (i.e. along the gradient).
    radius : int
        Controls the extent of the inflation, see above.
    first_axis : bool
        If True, the images will be assumed to be ``(A, rows, cols)`` or ``(N, A, rows, cols)``.
    """
    single = X.ndim == 3
    if single:
        X = X.reshape((1,) + X.shape) 
    if not first_axis:
        X = np.rollaxis(X, 3, start=1)

    Xnew = array_bspread(X, spread, radius)

    if not first_axis:
        Xnew = np.rollaxis(Xnew, 1, start=4)

    if single:
        Xnew = Xnew.reshape(Xnew.shape[1:]) 

    return Xnew
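Not from amitgroup, just the feature-axis moves performed above: the edge-feature axis A goes from last to first before processing and back again afterwards.

import numpy as np

X = np.zeros((10, 32, 32, 8))             # (N, rows, cols, A)
Xf = np.rollaxis(X, 3, start=1)           # (10, 8, 32, 32)
print(np.rollaxis(Xf, 1, start=4).shape)  # back to (10, 32, 32, 8)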
Code example #28
def read_data_sets(data_dir, distortion=True, dtype=np.float32, training_num=18000):
    global NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN
    NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = training_num

    train_image = np.array(np.load(os.path.join(data_dir, "real_background_10_class_train.npy")).reshape(-1, 32, 32, 3), dtype=dtype)
    train_image = np.rollaxis(train_image, 3, 1)
    train_labels = np.array(np.load(os.path.join(data_dir, "10_class_train_label.npy")),dtype=dtype)
    total_num_train = train_image.shape[0]
    random_index = np.arange(total_num_train)
    np.random.shuffle(random_index)
    train_image = train_image[random_index]
    train_labels = train_labels[random_index]
    
    train_image = train_image[:NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN]
    train_labels = train_labels[:NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN]
    print(train_image.shape, train_labels.shape)

    test_image = np.array(np.load(os.path.join(data_dir, "real_background_10_class_test.npy")).reshape(-1, 32, 32, 3), dtype=dtype)
    test_image = np.rollaxis(test_image, 3, 1)
    test_labels = np.array(np.load(os.path.join(data_dir, "10_class_test_label.npy")), dtype=dtype)

    print(test_image.shape, test_labels.shape)

    train = DataSet(train_image, train_labels, distortion=distortion)
    test = DataSet(test_image, test_labels, test=True)

    Datasets = collections.namedtuple('Datasets', ['train', 'test'])

    return Datasets(train = train, test = test)
Code example #29
File: delta_e.py Project: TheArindham/scikit-image
def deltaE_cie76(lab1, lab2):
    """Euclidean distance between two points in Lab color space

    Parameters
    ----------
    lab1 : array_like
        reference color (Lab colorspace)
    lab2 : array_like
        comparison color (Lab colorspace)

    Returns
    -------
    dE : array_like
        distance between colors `lab1` and `lab2`

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Color_difference
    .. [2] A. R. Robertson, "The CIE 1976 color-difference formulae,"
           Color Res. Appl. 2, 7-11 (1977).
    """
    lab1 = np.asarray(lab1)
    lab2 = np.asarray(lab2)
    L1, a1, b1 = np.rollaxis(lab1, -1)[:3]
    L2, a2, b2 = np.rollaxis(lab2, -1)[:3]
    return np.sqrt((L2 - L1) ** 2 + (a2 - a1) ** 2 + (b2 - b1) ** 2)
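A hedged usage example with made-up Lab values; the result is the plain Euclidean distance sqrt(10**2 + 15**2 + 35**2).

print(deltaE_cie76([50., 10., -20.], [60., -5., 15.]))  # ~39.37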
Code example #30
def icrs_to_cirs(icrs_coo, cirs_frame):
    # first set up the astrometry context for ICRS<->CIRS
    astrom, eo = erfa.apci13(*get_jd12(cirs_frame.obstime, 'tdb'))

    if icrs_coo.data.get_name() == 'unitspherical' or icrs_coo.data.to_cartesian().x.unit == u.one:
        # if no distance, just do the infinite-distance/no parallax calculation
        usrepr = icrs_coo.represent_as(UnitSphericalRepresentation)
        i_ra = usrepr.lon.to(u.radian).value
        i_dec = usrepr.lat.to(u.radian).value
        cirs_ra, cirs_dec = atciqz(i_ra, i_dec, astrom)

        newrep = UnitSphericalRepresentation(lat=u.Quantity(cirs_dec, u.radian, copy=False),
                                             lon=u.Quantity(cirs_ra, u.radian, copy=False),
                                             copy=False)
    else:
        # When there is a distance,  we first offset for parallax to get the
        # astrometric coordinate direction and *then* run the ERFA transform for
        # no parallax/PM. This ensures reversibility and is more sensible for
        # inside solar system objects
        newxyz = icrs_coo.cartesian.xyz
        newxyz = np.rollaxis(newxyz, 0, newxyz.ndim) - astrom['eb'] * u.au
        # roll xyz back to the first axis
        newxyz = np.rollaxis(newxyz, -1, 0)
        newcart = CartesianRepresentation(newxyz)

        srepr = newcart.represent_as(SphericalRepresentation)
        i_ra = srepr.lon.to(u.radian).value
        i_dec = srepr.lat.to(u.radian).value
        cirs_ra, cirs_dec = atciqz(i_ra, i_dec, astrom)

        newrep = SphericalRepresentation(lat=u.Quantity(cirs_dec, u.radian, copy=False),
                                         lon=u.Quantity(cirs_ra, u.radian, copy=False),
                                         distance=srepr.distance, copy=False)

    return cirs_frame.realize_frame(newrep)
Code example #31
File: plot.py Project: tammojan/losoto
def run(soltab, axesInPlot, axisInTable='', axisInCol='', axisDiff='', NColFig=0, figSize=[0,0], minmax=[0,0], log='', \
               plotFlag=False, doUnwrap=False, refAnt='', soltabsToAdd='', makeAntPlot=False, makeMovie=False, prefix='', ncpu=0):
    """
    This operation for LoSoTo implements basic plotting
    WEIGHT: flag-only compliant, no need for weight

    Parameters
    ----------
    axesInPlot : array of str
        1- or 2-element array which says the coordinates to plot (2 for 3D plots).

    axisInTable : str, optional
        the axis to plot on a page - e.g. 'ant' to get all antennas in one file. By default ''.

    axisInCol : str, optional
        The axis to plot in different colours - e.g. pol to get correlations with different colors. By default ''.

    axisDiff : str, optional
        This must be a len=2 axis and the plot will have the differential value - e.g. 'pol' to plot XX-YY. By default ''.

    NColFig : int, optional
        Number of columns in a multi-table image. By default is automatically chosen.

    figSize : array of int, optional
        Size of the image [x,y], if one of the values is 0, then it is automatically chosen. By default automatic set.

    minmax : array of float, optional
        Min max value for the independent variable (0 means automatic). By default 0.

    log : str, optional
        Use log='XYZ' to set which axes to put in log scale. By default ''.

    plotFlag : bool, optional
        Whether to plot also flags as red points in 2D plots. By default False.
    
    doUnwrap : bool, optional
        Unwrap phases. By default False.
    
    refAnt : str, optional
        Reference antenna for phases. By default None.
    
    soltabsToAdd : str, optional
        Tables to "add" (e.g. 'sol000/tec000'), it works only for tec and clock to be added to phases. By default None.
    
    makeAntPlot : bool, optional
        Make a plot containing antenna coordinates in x,y and in color the value to plot, axesInPlot must be [ant]. By default False.
    
    makeMovie : bool, optional
        Make a movie summing up all the produced plots, by default False.
    
    prefix : str, optional
        Prefix to add before the self-generated filename, by default None.
    
    ncpu : int, optional
        Number of cpus, by default all available.
    """
    import os, random
    import numpy as np
    from losoto.lib_unwrap import unwrap, unwrap_2d

    logging.info("Plotting soltab: "+soltab.name)

    # input check

    # str2list
    if axisInTable == '': axisInTable = []
    else: axisInTable = [axisInTable]
    if axisInCol == '': axisInCol = []
    else: axisInCol = [axisInCol]
    if axisDiff == '': axisDiff = []
    else: axisDiff = [axisDiff]

    if len(set(axisInTable+axesInPlot+axisInCol+axisDiff)) != len(axisInTable+axesInPlot+axisInCol+axisDiff):
        logging.error('Axis defined multiple times.')
        return 1

    # just because we use lists, check that they are 1-d
    if len(axisInTable) > 1 or len(axisInCol) > 1 or len(axisDiff) > 1:
        logging.error('Too many TableAxis/ColAxis/DiffAxis, they must be at most one each.')
        return 1

    for axis in axesInPlot:
        if axis not in soltab.getAxesNames():
            logging.error('Axis \"'+axis+'\" not found.')
            return 1

    if makeMovie: 
        prefix = prefix+'__tmp__'

    if os.path.dirname(prefix) != '' and not os.path.exists(os.path.dirname(prefix)):
        logging.debug('Creating '+os.path.dirname(prefix)+'.')
        os.makedirs(os.path.dirname(prefix))

    if refAnt == '': refAnt = None
    elif not refAnt in soltab.getAxisValues('ant'):
        logging.error('Reference antenna '+refAnt+' not found. Using: '+soltab.getAxisValues('ant')[1])
        refAnt = soltab.getAxisValues('ant')[1]

    minZ, maxZ = minmax

    solset = soltab.getSolset()
    soltabsToAdd = [ solset.getSoltab(soltabName) for soltabName in soltabsToAdd ]

    if ncpu == 0:
        import multiprocessing
        ncpu = multiprocessing.cpu_count()

    cmesh = False
    if len(axesInPlot) == 2:
        cmesh = True
        # not color possible in 3D
        axisInCol = []
    elif len(axesInPlot) != 1:
        logging.error('Axes must be a len 1 or 2 array.')
        return 1
    # end input check

    # all axes that are not iterated by anything else
    axesInFile = soltab.getAxesNames()
    for axis in axisInTable+axesInPlot+axisInCol+axisDiff:
        axesInFile.remove(axis)

    # set subplots scheme
    if axisInTable != []:
        Nplots = soltab.getAxisLen(axisInTable[0])
    else:
        Nplots = 1

    # prepare antennas coord in makeAntPlot case
    if makeAntPlot:
        if axesInPlot != ['ant']:
            logging.error('If makeAntPlot is selected the "Axes" values must be "ant"')
            return 1
        antCoords = [[],[]]
        for ant in soltab.getAxisValues('ant'): # select only user-selected antenna in proper order
            antCoords[0].append(+1*soltab.getSolset().getAnt()[ant][1])
            antCoords[1].append(-1*soltab.getSolset().getAnt()[ant][0])

    else:
        antCoords = []
        
    datatype = soltab.getType()

    # start processes for multi-thread
    mpm = multiprocManager(ncpu, _plot)

    # cycle on files
    if makeMovie: pngs = [] # store png filenames
    for vals, coord, selection in soltab.getValuesIter(returnAxes=axisDiff+axisInTable+axisInCol+axesInPlot):
       
        # set filename
        filename = ''
        for axis in axesInFile:
            filename += axis+str(coord[axis])+'_'
        filename = filename[:-1] # remove last _
        if prefix+filename == '': filename = 'plot'

        # axis vals (they are always the same, regular arrays)
        xvals = coord[axesInPlot[0]]
        # if plotting antenna - convert to number
        if axesInPlot[0] == 'ant':
            xvals = np.arange(len(xvals))
        
        # if plotting time - convert in h/min/s
        xlabelunit=''
        if axesInPlot[0] == 'time':
            if xvals[-1] - xvals[0] > 3600:
                xvals = (xvals-xvals[0])/3600.  # hrs
                xlabelunit = ' [hr]'
            elif xvals[-1] - xvals[0] > 60:
                xvals = (xvals-xvals[0])/60.   # mins
                xlabelunit = ' [min]'
            else:
                xvals = (xvals-xvals[0])  # sec
                xlabelunit = ' [s]'
        # if plotting freq convert in MHz
        elif axesInPlot[0] == 'freq': 
            xvals = xvals/1.e6 # MHz
            xlabelunit = ' [MHz]'

        if cmesh:
            # axis vals (they are always the same, regular arrays)
            yvals = coord[axesInPlot[1]]
            # same as above but for y-axis
            if axesInPlot[1] == 'ant':
                yvals = np.arange(len(yvals))

            if len(xvals) <= 1 or len(yvals) <= 1:
                logging.error('3D plot must have more than one value per axis.')
                mpm.wait()
                return 1

            ylabelunit=''
            if axesInPlot[1] == 'time':
                if yvals[-1] - yvals[0] > 3600:
                    yvals = (yvals-yvals[0])/3600.  # hrs
                    ylabelunit = ' [hr]'
                elif yvals[-1] - yvals[0] > 60:
                    yvals = (yvals-yvals[0])/60.   # mins
                    ylabelunit = ' [min]'
                else:
                    yvals = (yvals-yvals[0])  # sec
                    ylabelunit = ' [s]'
            elif axesInPlot[1] == 'freq':  # Mhz
                yvals = yvals/1.e6
                ylabelunit = ' [MHz]'
        else: 
            yvals = None
            if datatype == 'clock':
                datatype = 'Clock'
                ylabelunit = ' (s)'
            elif datatype == 'tec':
                datatype = 'dTEC'
                ylabelunit = ' (TECU)'
            elif datatype == 'rotationmeasure':
                datatype = 'dRM'
                ylabelunit = r' (rad m$^{-2}$)'
            elif datatype == 'tec3rd':
                datatype = r'dTEC$_3$'
                ylabelunit = r' (rad m$^{-3}$)'
            else:
                ylabelunit = ''

        # cycle on tables
        soltab1Selection = soltab.selection # save global selection and subselect only axes to iterate
        soltab.selection = selection
        titles = []
        dataCube = []
        weightCube = []
        for Ntab, (vals, coord, selection) in enumerate(soltab.getValuesIter(returnAxes=axisDiff+axisInCol+axesInPlot)):
            dataCube.append([])
            weightCube.append([])

            # set title
            titles.append('')
            for axis in coord:
                if axis in axesInFile+axesInPlot+axisInCol: continue
                titles[Ntab] += axis+':'+str(coord[axis])+' '
            titles[Ntab] = titles[Ntab][:-1] # remove last ' '

            # cycle on colors
            soltab2Selection = soltab.selection
            soltab.selection = selection
            for Ncol, (vals, weight, coord, selection) in enumerate(soltab.getValuesIter(returnAxes=axisDiff+axesInPlot, weight=True, reference=refAnt)):
                dataCube[Ntab].append([])
                weightCube[Ntab].append([])

                # differential plot
                if axisDiff != []:
                    # find ordered list of axis
                    names = [axis for axis in soltab.getAxesNames() if axis in axisDiff+axesInPlot]
                    if axisDiff[0] not in names:
                        logging.error("Axis to differentiate (%s) not found." % axisDiff[0])
                        mpm.wait()
                        return 1
                    if len(coord[axisDiff[0]]) != 2:
                        logging.error("Axis to differentiate (%s) has too many values, only 2 is allowed." % axisDiff[0])
                        mpm.wait()
                        return 1

                    # find position of interesting axis
                    diff_idx = names.index(axisDiff[0])
                    # roll to first place
                    vals = np.rollaxis(vals,diff_idx,0)
                    vals = vals[0] - vals[1]
                    weight = np.rollaxis(weight,diff_idx,0)
                    weight[0][ weight[1]==0 ] = 0
                    weight = weight[0]
                    del coord[axisDiff[0]]

                # add tables if required (e.g. phase/tec)
                for soltabToAdd in soltabsToAdd:
                    newCoord = {}
                    for axisName in coord.keys():
                        if axisName in soltabToAdd.getAxesNames():
                            if type(coord[axisName]) is np.ndarray:
                                newCoord[axisName] = coord[axisName]
                            else:
                                newCoord[axisName] = [coord[axisName]] # avoid being interpreted as regexp, faster
                    soltabToAdd.setSelection(**newCoord)
                    valsAdd = np.squeeze(soltabToAdd.getValues(retAxesVals=False, weight=False, reference=refAnt))
                    if soltabToAdd.getType() == 'clock':
                        valsAdd = 2. * np.pi * valsAdd * newCoord['freq']
                    elif soltabToAdd.getType() == 'tec':
                        valsAdd = -8.44797245e9 * valsAdd / newCoord['freq']
                    else:
                        logging.warning('Only Clock or TEC can be added to solutions. Ignoring: '+soltabToAdd.getType()+'.')
                        continue

                    # If clock/tec are single pol then duplicate it (TODO)
                    # There is still a problem with commonscalarphase and pol-dependent clock/tec,
                    # but there's no easy way to combine them
                    if not 'pol' in soltabToAdd.getAxesNames() and 'pol' in soltab.getAxesNames():
                        # find pol axis position
                        polAxisPos = soltab.getAxesNames().index('pol')
                        # create a new axis for the table to add and duplicate the values
                        valsAdd = np.repeat(np.expand_dims(valsAdd, polAxisPos),
                                            soltab.getAxisLen('pol'), axis=polAxisPos)

                    if valsAdd.shape != vals.shape:
                        logging.error('Cannot combine the table '+soltabToAdd.getType()+' with '+soltab.getType()+'. Wrong shape.')
                        mpm.wait()
                        return 1

                    vals += valsAdd

                # normalize
                if (soltab.getType() == 'phase' or soltab.getType() == 'scalarphase'):
                    vals = normalize_phase(vals)
                if (soltab.getType() == 'rotation'):
                    vals = np.mod(vals + np.pi/2., np.pi) - np.pi/2.

                # if the user requested the axes in an order different from the h5parm one, we need to transpose
                if len(axesInPlot) == 2:
                    if soltab.getAxesNames().index(axesInPlot[0]) < soltab.getAxesNames().index(axesInPlot[1]):
                        vals = vals.T
                        weight = weight.T

                # unwrap if required
                if (soltab.getType() == 'phase' or soltab.getType() == 'scalarphase') and doUnwrap:
                    if len(axesInPlot) == 1:
                        vals = unwrap(vals)
                    else:
                        flags = np.array((weight == 0), dtype=bool)
                        if not (flags == True).all():
                            vals = unwrap_2d(vals, flags, coord[axesInPlot[0]], coord[axesInPlot[1]])
                
                dataCube[Ntab][Ncol] = np.ma.masked_array(vals, mask=(weight == 0.))
            
            soltab.selection = soltab2Selection
            ### end cycle on colors

        # if dataCube too large (> 500 MB) do not go parallel
        if np.array(dataCube).nbytes > 1024*1024*500: 
            logging.debug('Big plot, parallel not possible.')
            _plot(Nplots, NColFig, figSize, cmesh, axesInPlot, axisInTable, xvals, yvals, xlabelunit, ylabelunit, datatype, prefix+filename, titles, log, dataCube, minZ, maxZ, plotFlag, makeMovie, antCoords, None)
        else:
            mpm.put([Nplots, NColFig, figSize, cmesh, axesInPlot, axisInTable, xvals, yvals, xlabelunit, ylabelunit, datatype, prefix+filename, titles, log, dataCube, minZ, maxZ, plotFlag, makeMovie, antCoords])
        if makeMovie: pngs.append(prefix+filename+'.png')

        soltab.selection = soltab1Selection
        ### end cycle on tables
    mpm.wait()

    if makeMovie:
        def long_substr(strings):
            """
            Find longest common substring
            """
            substr = ''
            if len(strings) > 1 and len(strings[0]) > 0:
                for i in range(len(strings[0])):
                    for j in range(len(strings[0])-i+1):
                        if j > len(substr) and all(strings[0][i:i+j] in x for x in strings):
                            substr = strings[0][i:i+j]
            return substr
        movieName = long_substr(pngs)
        assert movieName != '' # need a common prefix, use prefix keyword in case
        logging.info('Making movie: '+movieName)
        # make every movie last 20 sec, min one second per slide
        fps = np.ceil(len(pngs)/200.)
        ss="mencoder -ovc lavc -lavcopts vcodec=mpeg4:vpass=1:vbitrate=6160000:mbd=2:keyint=132:v4mv:vqmin=3:lumi_mask=0.07:dark_mask=0.2:"+\
                "mpeg_quant:scplx_mask=0.1:tcplx_mask=0.1:naq -mf type=png:fps="+str(fps)+" -nosound -o "+movieName.replace('__tmp__','')+".mpg mf://"+movieName+"*  > mencoder.log 2>&1"
        os.system(ss)
        print(ss)
        #for png in pngs: os.system('rm '+png)

    return 0
Code example #32
def to_plot(img):
    if K.image_dim_ordering() == 'tf':
        return np.rollaxis(img, 0, 1).astype(np.uint8)
    else:
        return np.rollaxis(img, 0, 3).astype(np.uint8)
Code example #33
def gray(img):
    if K.image_dim_ordering() == 'tf':
        return np.rollaxis(img, 0, 1).dot(to_bw)
    else:
        return np.rollaxis(img, 0, 3).dot(to_bw)
Code example #34
File: utils.py Project: ali-design/gan_workspace
def to_json(output_path, *layers):
  with open(output_path, "w") as layer_f:
    lines = ""
    for w, b, bn in layers:
      layer_idx = w.name.split('/')[0].split('h')[1]

      B = b.eval()

      if "lin/" in w.name:
        W = w.eval()
        depth = W.shape[1]
      else:
        W = np.rollaxis(w.eval(), 2, 0)
        depth = W.shape[0]

      biases = {"sy": 1, "sx": 1, "depth": depth, "w": ['%.2f' % elem for elem in list(B)]}
      if bn != None:
        gamma = bn.gamma.eval()
        beta = bn.beta.eval()

        gamma = {"sy": 1, "sx": 1, "depth": depth, "w": ['%.2f' % elem for elem in list(gamma)]}
        beta = {"sy": 1, "sx": 1, "depth": depth, "w": ['%.2f' % elem for elem in list(beta)]}
      else:
        gamma = {"sy": 1, "sx": 1, "depth": 0, "w": []}
        beta = {"sy": 1, "sx": 1, "depth": 0, "w": []}

      if "lin/" in w.name:
        fs = []
        for w in W.T:
          fs.append({"sy": 1, "sx": 1, "depth": W.shape[0], "w": ['%.2f' % elem for elem in list(w)]})

        lines += """
          var layer_%s = {
            "layer_type": "fc", 
            "sy": 1, "sx": 1, 
            "out_sx": 1, "out_sy": 1,
            "stride": 1, "pad": 0,
            "out_depth": %s, "in_depth": %s,
            "biases": %s,
            "gamma": %s,
            "beta": %s,
            "filters": %s
          };""" % (layer_idx.split('_')[0], W.shape[1], W.shape[0], biases, gamma, beta, fs)
      else:
        fs = []
        for w_ in W:
          fs.append({"sy": 5, "sx": 5, "depth": W.shape[3], "w": ['%.2f' % elem for elem in list(w_.flatten())]})

        lines += """
          var layer_%s = {
            "layer_type": "deconv", 
            "sy": 5, "sx": 5,
            "out_sx": %s, "out_sy": %s,
            "stride": 2, "pad": 1,
            "out_depth": %s, "in_depth": %s,
            "biases": %s,
            "gamma": %s,
            "beta": %s,
            "filters": %s
          };""" % (layer_idx, 2**(int(layer_idx)+2), 2**(int(layer_idx)+2),
               W.shape[0], W.shape[3], biases, gamma, beta, fs)
    layer_f.write(" ".join(lines.replace("'","").split()))
Code example #35
File: test_adapt_rgb.py Project: fisher1981/hpr
def test_each_channel_with_filter_argument():
    filtered = smooth_each(COLOR_IMAGE, SIGMA)
    for i, channel in enumerate(np.rollaxis(filtered, axis=-1)):
        assert_allclose(channel, smooth(COLOR_IMAGE[:, :, i]))
Code example #36
File: test_adapt_rgb.py Project: fisher1981/hpr
def test_each_channel():
    filtered = edges_each(COLOR_IMAGE)
    for i, channel in enumerate(np.rollaxis(filtered, axis=-1)):
        expected = img_as_float(filters.sobel(COLOR_IMAGE[:, :, i]))
        assert_allclose(channel, expected)
Code example #37
def create_test_train_data(data_dir_list, data_path, labelmap):
    train_data = []
    test_data = []
    train_label = []
    test_label = []
    SEED = 2

    for dataset in data_dir_list:
        if dataset.startswith('.DS'):
            continue

        img_data_list = []
        img_list = os.listdir(data_path + '/' + dataset)
        labels = np.zeros(len(img_list))


        print('Loading the images of dataset-{}\n'.format(dataset))
        idx = 0
        for img in img_list:
            if img.startswith('.DS'):
                continue
            img_path = data_path + '/' + dataset + '/' + img
            img = image.load_img(img_path, target_size=(224, 224))
            x = image.img_to_array(img)
            x = np.expand_dims(x, axis=0)
            x = preprocess_input(x)

            img_data_list.append(x)
            labels[idx] = labelmap[dataset]
            idx += 1

        #--- Split to get the test and train for this folder
        img_data = np.array(img_data_list)
        print(img_data.shape)
        img_data = np.rollaxis(img_data, 1, 0)
        print(img_data.shape)
        img_data = img_data[0]
        print(img_data.shape)

        # convert class labels to one-hot encoding
        num_classes = len(list(set(labelmap.values())))
        Y = np_utils.to_categorical(labels, num_classes)

        # Shuffle the dataset
        x, y = shuffle(img_data, Y, random_state=SEED)
        # Split the dataset
        X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=SEED)

        train_data.append(X_train)
        test_data.append(X_test)
        train_label.append(y_train)
        test_label.append(y_test)

        # pickle the mean value by category
        Xtrain_mean = np.mean(X_train, axis=0)
        np.save('Avg/{}_mean'.format(dataset), Xtrain_mean)

    #--- train data
    trainX_final = np.vstack(train_data)
    print(trainX_final.shape)
    trainY_final = np.vstack(train_label)
    print(trainY_final.shape)

    #--- test data
    testX_final = np.vstack(test_data)
    print(testX_final.shape)
    testY_final = np.vstack(test_label)
    print(testY_final.shape)

    # Shuffle Training Set
    trainX_final_s, trainY_final_s = shuffle(trainX_final, trainY_final, random_state=SEED)

    #---pickle the average of all training data
    trainX_final_mean = np.mean(trainX_final_s, axis=0)
    np.save('Avg/trainX_final_mean', trainX_final_mean)

    return trainX_final_s, trainY_final_s, testX_final, testY_final
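
# Added sketch (assumes each x from preprocess_input is (1, 224, 224, 3)):
# the np.array / rollaxis / [0] squeeze above is equivalent to a concatenate.
import numpy as np
img_data_list = [np.zeros((1, 224, 224, 3)) for _ in range(4)]
a = np.rollaxis(np.array(img_data_list), 1, 0)[0]  # (4, 224, 224, 3)
b = np.concatenate(img_data_list, axis=0)          # same shape and contents
assert a.shape == b.shape == (4, 224, 224, 3)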
Code example #38
File: mri.py Project: Soletmons/PyMVPA
def _get_xyzt_shaped(arr):
    # we get the data as [t,]x,y,z but we want to have the time axis last
    # if any
    if len(arr.shape) == 4:
        arr = np.rollaxis(arr, 0, 4)
    return arr
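
# Quick check (added; not from PyMVPA): rollaxis(arr, 0, 4) moves the leading
# time axis to the end, so (t, x, y, z) becomes (x, y, z, t).
import numpy as np
assert np.rollaxis(np.zeros((10, 4, 5, 6)), 0, 4).shape == (4, 5, 6, 10)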
Code example #39
if dtype == 'int16':
    dtype = rio.int16
if dtype == 'uint32':
    dtype = rio.uint32
if dtype == 'int32':
    dtype = rio.int32
if dtype == 'float32':
    dtype = rio.float32

if len(np.shape(non_georef_img)) > 2:
    profile.update(dtype=dtype, count=3)

    new_non_georef_img = np.zeros(
        (non_georef_img.shape[0], non_georef_img.shape[1], 3))
    new_non_georef_img.fill(255)
    new_non_georef_img[:, :, :] = non_georef_img[:, :, :]

else:
    profile.update(dtype=dtype, count=1)

    #non_georef_img[non_georef_img < 1] = 0
    #non_georef_img[non_georef_img > 1] = 1

    new_non_georef_img = np.zeros(
        (non_georef_img.shape[0], non_georef_img.shape[1], 1))
    new_non_georef_img.fill(255)
    new_non_georef_img[:, :, 0] = non_georef_img[:, :]

with rio.Env():
    with rio.open('ex.tif', 'w', **profile) as dst:
        new_non_georef_img = np.rollaxis(new_non_georef_img, 2)
        dst.write(new_non_georef_img.astype(dtype))
Code example #40
def c3(s):  # Turn an image into a (1, 3, H, W) array, promoting grayscale to three channels.
    if s.ndim == 2:
        s3 = np.dstack([s, s, s])
    else:
        s3 = s
    return np.rollaxis(s3, 2, 0)[None, :, :, :]
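
# Added shape sketch for c3, assuming s is a grayscale (H, W) array: dstack
# gives (H, W, 3), rollaxis moves channels first, and [None] adds a batch axis.
import numpy as np
assert c3(np.zeros((8, 12))).shape == (1, 3, 8, 12)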
Code example #41
videolist = os.listdir('/home/dhaval/piyush/ViIDWIN/Datasets_VIDWIN/Videosonbasisofmotion')

motion_data = []

for video in videolist:
    print('video:', video)
    motion_data.append(np.array(extract_motion('/home/dhaval/piyush/ViIDWIN/Datasets_VIDWIN/Videosonbasisofmotion/'+video)))

#plotgraph(motion_data)

data = [data.reshape((data.size,1)) for data in motion_data]

newarray = np.dstack(data)
print(newarray.shape)
# To get the shape to be Nx10x10, you could use rollaxis:
newarray = np.rollaxis(newarray,-1)
print(newarray.shape)
seed = 0
# Keep only the first 280 time series
X_train = TimeSeriesScalerMeanVariance().fit_transform(newarray[:280])
# Make time series shorter
#X_train = TimeSeriesResampler(sz=40).fit_transform(X_train)
sz = X_train.shape[1]


# Euclidean k-means
print("Euclidean k-means")
km = TimeSeriesKMeans(n_clusters=4, verbose=True, random_state=seed)
y_pred = km.fit_predict(X_train)

plt.figure()
Code example #42
File: mri.py Project: Soletmons/PyMVPA
def _get_txyz_shaped(arr):
    # we get the data as x,y,z[,t] but we want to have the time axis first
    # if any
    if len(arr.shape) == 4:
        arr = np.rollaxis(arr, -1)
    return arr
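
# Added check: this is the inverse move of the xyzt helper above --
# rollaxis(arr, -1) brings the trailing time axis to the front.
import numpy as np
assert np.rollaxis(np.zeros((4, 5, 6, 10)), -1).shape == (10, 4, 5, 6)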
Code example #43
    def build_dense(self, layer):
        """

        Parameters
        ----------
        layer : keras.layers.Dense

        Returns
        -------

        """

        if layer.activation.__name__ == 'softmax':
            warnings.warn(
                "Activation 'softmax' not implemented. Using 'relu' "
                "activation instead.", RuntimeWarning)
        all_weights = layer.get_weights()
        if len(all_weights) == 2:
            weights, biases = all_weights
        elif len(all_weights) == 3:
            weights, biases, masks = all_weights
            weights = weights * masks
            print("Building a Sparse layer having", np.count_nonzero(masks),
                  "non-zero entries in its mask")
        else:
            raise ValueError("Layer {} was expected to contain "
                             "weights, biases and, in rare cases,"
                             "masks.".format(layer.name))
        weights = self.scale_weights(weights)
        print(weights.shape)
        n = int(np.prod(layer.output_shape[1:]) / len(biases))
        biases = np.repeat(biases, n).astype('float64')

        self.set_biases(np.array(biases, 'float64'))
        delay = self.config.getfloat('cell', 'delay')
        if len(self.flatten_shapes) == 1:
            flatten_name, shape = self.flatten_shapes.pop()
            y_in = 1
            if self.data_format == 'channels_last':
                print("Not swapping data_format of Flatten layer.")
                if len(shape) == 2:
                    x_in, f_in = shape
                    #weights = weights.flatten()
                else:
                    y_in, x_in, f_in = shape
                '''output_neurons = weights.shape[1]
                weights = weights.reshape((x_in, y_in, f_in, output_neurons), order ='C')
                weights = np.rollaxis(weights, 1, 0)
                weights = weights.reshape((y_in*x_in*f_in, output_neurons), order ='C')
                '''
            else:
                print("Swapping data_format of Flatten layer.")
                if len(shape) == 3:
                    f_in, y_in, x_in = shape
                    output_neurons = weights.shape[1]
                    weights = weights.reshape(
                        (y_in, x_in, f_in, output_neurons), order='F')
                    weights = np.rollaxis(weights, 2, 0)
                    weights = weights.reshape(
                        (y_in * x_in * f_in, output_neurons), order='F')
                elif len(shape) == 2:
                    f_in, x_in = shape
                    weights = np.rollaxis(weights, 1, 0)
                    #weights = np.flatten(weights)
                else:
                    print(
                        "The input weight matrix did not have the expected dimensions"
                    )
            exc_connections = []
            inh_connections = []
            for i in range(weights.shape[0]):  # Input neurons
                # Sweep across channel axis of feature map. Assumes that each
                # consecutive input neuron lies in a different channel. This is
                # the case for channels_last, but not for channels_first.
                f = i % f_in
                # Sweep across height of feature map. Increase y by one if all
                # rows along the channel axis were seen.
                y = i // (f_in * x_in)
                # Sweep across width of feature map.
                x = (i // f_in) % x_in
                new_i = f * x_in * y_in + x_in * y + x
                for j in range(weights.shape[1]):  # Output neurons
                    c = (new_i, j, weights[i, j], delay)
                    if c[2] > 0.0:
                        exc_connections.append(c)
                    elif c[2] < 0.0:
                        inh_connections.append(c)
        elif len(self.flatten_shapes) > 1:
            raise RuntimeWarning("Not all Flatten layers have been consumed.")
        else:
            exc_connections = [(i, j, weights[i, j], delay)
                               for i, j in zip(*np.nonzero(weights > 0))]
            inh_connections = [(i, j, weights[i, j], delay)
                               for i, j in zip(*np.nonzero(weights < 0))]

        if self.config.getboolean('tools', 'simulate'):
            self.connections.append(
                self.sim.Projection(
                    self.layers[-2],
                    self.layers[-1],
                    self.sim.FromListConnector(exc_connections,
                                               ['weight', 'delay']),
                    receptor_type='excitatory',
                    label=self.layers[-1].label + '_excitatory'))

            self.connections.append(
                self.sim.Projection(
                    self.layers[-2],
                    self.layers[-1],
                    self.sim.FromListConnector(inh_connections,
                                               ['weight', 'delay']),
                    receptor_type='inhibitory',
                    label=self.layers[-1].label + '_inhibitory'))
        else:
            # The spinnaker implementation of Projection.save() is not working
            # yet, so we save the connections manually here.
            filepath = os.path.join(self.config.get('paths', 'path_wd'),
                                    self.layers[-1].label)
            # noinspection PyTypeChecker
            np.savetxt(filepath + '_excitatory',
                       np.array(exc_connections),
                       ['%d', '%d', '%.18f', '%.3f'],
                       header="columns = ['i', 'j', 'weight', 'delay']")
            # noinspection PyTypeChecker
            np.savetxt(filepath + '_inhibitory',
                       np.array(inh_connections),
                       ['%d', '%d', '%.18f', '%.3f'],
                       header="columns = ['i', 'j', 'weight', 'delay']")
Code example #44
File: util.py Project: sealhuang/brainCodingToolbox
def nifti4pycortex(nifti_file):
    """Load nifti file for pycortex visualization."""
    data = nib.load(nifti_file).get_data()
    ndata = np.rollaxis(data, 0, 3)
    ndata = np.rollaxis(ndata, 0, 2)
    return ndata
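
# Added note: the two rollaxis calls compose to a full axis reversal, i.e.
# the result equals data.transpose(2, 1, 0) for a 3-D volume.
import numpy as np
data = np.arange(60).reshape(3, 4, 5)
ndata = np.rollaxis(np.rollaxis(data, 0, 3), 0, 2)
assert np.array_equal(ndata, data.transpose(2, 1, 0))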
Code example #45
    if i ==0 :
        temp1 = []
        for img_file in image_files :
            hist = histogram_calculation(fpath,img_file)
            temp1.append(hist)
        temp1 = np.array(temp1)
        temp1 = np.transpose(temp1)
    else :
        temp = []
        for img_file in image_files :
            hist = histogram_calculation(fpath,img_file)
            temp.append(hist)
        temp = np.array(temp)
        temp = np.transpose(temp)
        temp1 = np.dstack((temp1,temp))
        final = np.rollaxis(temp1,2)
        
final_tensor = np.rollaxis(final,1)


# NDVI calculation

def NDVI_calc(b04,b08):
    sum_mat = b04 + b08
    #sum_mat[sum_mat==0] = 1    
    diff_mat = b08 - b04
    #diff_mat[diff_mat==0] = 1
    sum_mat = sum_mat.astype('float32')
    diff_mat = diff_mat.astype('float32')
    final_mat = np.divide(diff_mat,sum_mat)
    return final_mat
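
# Added usage sketch with toy reflectance values; in this script b04 (red) and
# b08 (near-infrared) would be the band rasters loaded above.
import numpy as np
b04 = np.array([[0.2, 0.3]], dtype='float32')
b08 = np.array([[0.6, 0.5]], dtype='float32')
ndvi = NDVI_calc(b04, b08)  # (b08 - b04) / (b08 + b04) -> [[0.5, 0.25]]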
Code example #46
File: subcube.py Project: vlas-sokolov/multicube
    def best_guess(self, model_grid=None, sn_cut=None, memory_limit=None,
                   from_file=None, pbar_inc=1000, **kwargs):
        """
        For a grid of initial guesses, determine the optimal one based 
        on the preliminary residual of the specified spectral model.

        Parameters
        ----------
        model_grid : numpy.array; A model grid to choose from.

        use_cube : boolean; If true, every xy-slice of a cube will
                   be compared to every model from the model_grid.
                   sn_cut (see below) is still applied.

        sn_cut : float; do not consider model selection for pixels
                 below this signal-to-noise ratio cutoff.

        memory_limit : float; How many gigabytes of RAM could be used for
                       broadcasting. If estimated usage goes over this
                       number, best_guess switches to a slower method.

        from_file : string; if not None then the model grid will be
                    read from a file using np.load, with additional
                    arguments, like mmap_mode, passed along to it

        pbar_inc : int; Number of steps in which the progress bar is
                   updated. The default should be sensible for modern
                   machines. Prevents the progress bar from consuming
                   too much computational power.

        Output
        ------
        best_guesses : a cube of best models corresponding to xy-grid
                       (saved as a SubCube attribute)

        best_guess : a most commonly found best guess

        best_snr_guess : the model for the least residual at peak SNR
                         (saved as a SubCube attribute)

        """
        if model_grid is None:
            if from_file is not None:
                model_grid = np.load(from_file, **kwargs)
            elif self.model_grid is None:
                raise TypeError('the model_grid is empty, '
                                'did you run generate_model()?')
            else:
                model_grid = self.model_grid

        # TODO: allow for all the possible outputs from generate_model()
        if model_grid.shape[-1] != self.cube.shape[0]:
            raise ValueError("Invalid shape for the guess_grid, "
                             "check the docstring for details.")
        if len(model_grid.shape) > 2:
            raise NotImplementedError("Complex model grids aren't supported.")

        log.info("Calculating residuals for generated models . . .")

        try: # TODO: move this out into an astro_toolbox function
            import psutil
            mem = psutil.virtual_memory().available
        except ImportError:
            import os
            try:
                memgb = os.popen("free -g").readlines()[1].split()[3]
            except IndexError: # would happen on Macs/Windows
                memgb = 8
                log.warn("Can't get the free RAM "
                         "size, assuming %i GB" % memgb)
            memgb = memory_limit or memgb
            mem = int(memgb) * 2**30

        # allow for 50% computational overhead
        threshold = self.cube.nbytes*model_grid.shape[0]*2
        if mem < threshold:
            log.warn("The available free memory might not be enough for "
                     "broadcasting model grid to the spectral cube. Will "
                     "iterate over all the XY pairs instead. Coffee time!")

            try:
                if type(model_grid) is not np.ndarray: # assume memmap type
                    raise MemoryError("This will take ages, skipping to "
                                      "the no-broadcasting scenario.")
                residual_rms = np.empty(shape=((model_grid.shape[0],)+
                                                self.cube.shape[1:]   ))
                with ProgressBar(np.prod(self.cube.shape[1:])) as bar:
                    for (y,x) in np.ndindex(self.cube.shape[1:]):
                        residual_rms[:,y,x] = (self.cube[None,:,y,x] -
                                                    model_grid).std(axis=1)
                        bar.update()
            except MemoryError: # catching memory errors could be really bad!
                log.warn("Not enough memory to broadcast model grid to the "
                         "XY grid. This is bad for a number of reasons, the "
                         "formost of which: the running time just went "
                         "through the roof. Leave it overnight maybe?")
                best_map = np.empty(shape=(self.cube.shape[1:]))
                rmsmin_map = np.empty(shape=(self.cube.shape[1:]))
                if sn_cut:
                    snr_mask = self.snr_map > sn_cut
                # TODO: this takes ages! refactor this through hdf5
                # "chunks" of acceptable size, and then broadcast them!
                with ProgressBar(np.prod((model_grid.shape[0],)+
                                          self.cube.shape[1:]  )) as bar:
                    for (y,x) in np.ndindex(self.cube.shape[1:]):
                        if sn_cut:
                            if not snr_mask[y,x]:
                                best_map[y,x],rmsmin_map[y,x] = np.nan,np.nan
                                bar.update(bar._current_value+model_grid.shape[0])
                                continue
                        resid_rms_xy = np.empty(shape=model_grid.shape[0])
                        for model_id in np.ndindex(model_grid.shape[0]):
                            resid_rms_xy[model_id] = (self.cube[:,y,x] -
                                                  model_grid[model_id]).std()
                            if not model_id[0] % pbar_inc:
                                bar.update(bar._current_value+pbar_inc)
                        best_map[y,x] = np.argmin(resid_rms_xy)
                        rmsmin_map[y,x] = np.nanmin(resid_rms_xy)
        else:
            # NOTE: broadcasting below is a much faster way to compute
            #       cube - model residuals. But for big model sizes this
            #       will cause memory overflows.
            #       The code above tried to catch this before it happens
            #       and run things in a slower fashion.
            residual_rms = (self.cube[None,:,:,:]-
                                model_grid[:,:,None,None]).std(axis=1)

        if sn_cut:
            snr_mask = self.snr_map > sn_cut
            residual_rms[self.get_slice_mask(snr_mask)] = np.inf

        best_map   = np.argmin(residual_rms, axis=0)
        rmsmin_map = residual_rms.min(axis=0)
        self._best_map    = best_map
        self._best_rmsmap = rmsmin_map
        self.best_guesses = np.rollaxis(self.guess_grid[best_map],-1)

        from scipy.stats import mode
        model_mode = mode(best_map)
        best_model_num = model_mode[0][0,0]
        best_model_freq = model_mode[1][0,0]
        best_model_frac = (float(best_model_freq) /
                            np.prod(self.cube.shape[1:]))
        if best_model_frac < .05:
            log.warn("Selected model is best only for less than %5 "
                     "of the cube, consider using the map of guesses.")
        self._best_model = best_model_num
        self.best_guess  = self.guess_grid[best_model_num]
        log.info("Overall best model: selected #%i %s" % (best_model_num,
                 self.guess_grid[best_model_num].round(2)))

        try:
            best_snr = np.argmax(self.snr_map)
            best_snr = np.unravel_index(best_snr, self.snr_map.shape)
            self.best_snr_guess = self.guess_grid[best_map[best_snr]]
            log.info("Best model @ highest SNR: #%i %s" %
                     (best_map[best_snr], self.best_snr_guess.round(2)))
        except AttributeError:
            log.warn("Can't find the SNR map, best guess at "
                     "highest SNR pixel will not be stored.")
Code example #47
def preprocess(net, img):
    #print np.float32(img).shape
    return np.float32(np.rollaxis(img, 2)[::-1]) - net.transformer.mean['data']
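
# Added shape/channel sketch (net.transformer.mean['data'] is assumed to be a
# Caffe-style (3, H, W) BGR mean): rollaxis makes the image channels-first and
# [::-1] reverses RGB to BGR before the mean subtraction.
import numpy as np
img = np.zeros((8, 8, 3), dtype=np.float32)  # (H, W, C), RGB
bgr_chw = np.rollaxis(img, 2)[::-1]          # (3, H, W), channel order reversed
assert bgr_chw.shape == (3, 8, 8)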
Code example #48
File: test_screen.py Project: Raniac/NEURO-LEARN
def test_screen():
    img = ni.load_image(funcfile)
    # rename third axis to slice to match default of screen
    # This avoids warnings about future change in default; see the tests for
    # slice axis below
    img = img.renamed_axes(k='slice')
    res = screen(img)
    assert_equal(res['mean'].ndim, 3)
    assert_equal(res['pca'].ndim, 4)
    assert_equal(sorted(res.keys()),
                 ['max', 'mean', 'min',
                  'pca', 'pca_res',
                  'std', 'ts_res'])
    data = img.get_data()
    # Check summary images
    assert_array_equal(np.max(data, axis=-1), res['max'].get_data())
    assert_array_equal(np.mean(data, axis=-1), res['mean'].get_data())
    assert_array_equal(np.min(data, axis=-1), res['min'].get_data())
    assert_array_equal(np.std(data, axis=-1), res['std'].get_data())
    pca_res = pca(data, axis=-1, standardize=False, ncomp=10)
    # On windows, there seems to be some randomness in the PCA output vector
    # signs; this routine sets the basis vectors to have first value positive,
    # and therefore standardizes the signs
    pca_res = res2pos1(pca_res)
    _check_pca(res, pca_res)
    _check_ts(res, data, 3, 2)
    # Test that screens accepts and uses time axis
    data_mean = data.mean(axis=-1)
    res = screen(img, time_axis='t')
    assert_array_equal(data_mean, res['mean'].get_data())
    _check_pca(res, pca_res)
    _check_ts(res, data, 3, 2)
    res = screen(img, time_axis=-1)
    assert_array_equal(data_mean, res['mean'].get_data())
    _check_pca(res, pca_res)
    _check_ts(res, data, 3, 2)
    t0_img = rollimg(img, 't')
    t0_data = np.rollaxis(data, -1)
    res = screen(t0_img, time_axis='t')
    t0_pca_res = pca(t0_data, axis=0, standardize=False, ncomp=10)
    t0_pca_res = res2pos1(t0_pca_res)
    assert_array_equal(data_mean, res['mean'].get_data())
    _check_pca(res, t0_pca_res)
    _check_ts(res, t0_data, 0, 3)
    res = screen(t0_img, time_axis=0)
    assert_array_equal(data_mean, res['mean'].get_data())
    _check_pca(res, t0_pca_res)
    _check_ts(res, t0_data, 0, 3)
    # Check screens uses slice axis
    s0_img = rollimg(img, 2, 0)
    s0_data = np.rollaxis(data, 2, 0)
    res = screen(s0_img, slice_axis=0)
    _check_ts(res, s0_data, 3, 0)
    # And defaults to named slice axis
    # First re-show that when we don't specify, we get the default
    res = screen(img)
    _check_ts(res, data, 3, 2)
    assert_raises(AssertionError, _check_ts, res, data, 3, 0)
    # Then specify, get non-default
    slicey_img = img.renamed_axes(slice='k', i='slice')
    res = screen(slicey_img)
    _check_ts(res, data, 3, 0)
    assert_raises(AssertionError, _check_ts, res, data, 3, 2)
Code example #49
def main():
    seeding()
    # number of parallel agents
    parallel_envs = 4
    # number of training episodes.
    # change this to higher number to experiment. say 30000.
    number_of_episodes = 1000
    episode_length = 80
    batchsize = 1000
    # how many episodes to save policy and gif
    save_interval = 1000
    t = 0
    
    # amplitude of OU noise
    # this slowly decreases to 0
    noise = 2
    noise_reduction = 0.9999

    # how many episodes before update
    episode_per_update = 2 * parallel_envs

    log_path = os.getcwd()+"/log"
    model_dir= os.getcwd()+"/model_dir"
    
    os.makedirs(model_dir, exist_ok=True)

    torch.set_num_threads(parallel_envs)
    env = envs.make_parallel_env(parallel_envs)
    
    # keep 5000 episodes worth of replay
    buffer = ReplayBuffer(int(5000*episode_length))
    
    # initialize policy and critic
    maddpg = MADDPG()
    logger = SummaryWriter(log_dir=log_path)
    agent0_reward = []
    agent1_reward = []
    agent2_reward = []

    # training loop
    # show progressbar
    import progressbar as pb
    widget = ['episode: ', pb.Counter(),'/',str(number_of_episodes),' ', 
              pb.Percentage(), ' ', pb.ETA(), ' ', pb.Bar(marker=pb.RotatingMarker()), ' ' ]
    
    timer = pb.ProgressBar(widgets=widget, maxval=number_of_episodes).start()

    # loop over episodes in steps of parallel_envs (each iteration runs parallel_envs episodes)
    for episode in range(0, number_of_episodes, parallel_envs):

        timer.update(episode)


        reward_this_episode = np.zeros((parallel_envs, 3))
        all_obs = env.reset()
        obs, obs_full = transpose_list(all_obs)

        #for calculating rewards for this particular episode - addition of all time steps

        # save info or not
        save_info = ((episode) % save_interval < parallel_envs or episode==number_of_episodes-parallel_envs)
        frames = []
        tmax = 0
        
        if save_info:
            frames.append(env.render('rgb_array'))


        
        for episode_t in range(episode_length):

            t += parallel_envs
            

            # explore = only explore for a certain number of episodes
            # action input needs to be transposed
            actions = maddpg.act(transpose_to_tensor(obs), noise=noise)
            noise *= noise_reduction
            
            actions_array = torch.stack(actions).detach().numpy()

            # transpose the list of list
            # flip the first two indices
            # input to step requires the first index to correspond to number of parallel agents
            actions_for_env = np.rollaxis(actions_array,1)
            
            # step forward one frame
            next_obs, next_obs_full, rewards, dones, info = env.step(actions_for_env)
            print(next_obs.shape)
            print(next_obs_full.shape)
            
            # add data to buffer
            transition = (obs, obs_full, actions_for_env, rewards, next_obs, next_obs_full, dones)
            
            buffer.push(transition)
            
            reward_this_episode += rewards

            obs, obs_full = next_obs, next_obs_full
            
            # save gif frame
            if save_info:
                frames.append(env.render('rgb_array'))
                tmax+=1
        
        # update once after every episode_per_update
        if len(buffer) > batchsize and episode % episode_per_update < parallel_envs:
            for a_i in range(3):
                samples = buffer.sample(batchsize)
                maddpg.update(samples, a_i, logger)
            maddpg.update_targets() #soft update the target network towards the actual networks

        
        
        for i in range(parallel_envs):
            agent0_reward.append(reward_this_episode[i,0])
            agent1_reward.append(reward_this_episode[i,1])
            agent2_reward.append(reward_this_episode[i,2])

        if episode % 100 == 0 or episode == number_of_episodes-1:
            avg_rewards = [np.mean(agent0_reward), np.mean(agent1_reward), np.mean(agent2_reward)]
            agent0_reward = []
            agent1_reward = []
            agent2_reward = []
            for a_i, avg_rew in enumerate(avg_rewards):
                logger.add_scalar('agent%i/mean_episode_rewards' % a_i, avg_rew, episode)

        #saving model
        save_dict_list =[]
        if save_info:
            for i in range(3):

                save_dict = {'actor_params' : maddpg.maddpg_agent[i].actor.state_dict(),
                             'actor_optim_params': maddpg.maddpg_agent[i].actor_optimizer.state_dict(),
                             'critic_params' : maddpg.maddpg_agent[i].critic.state_dict(),
                             'critic_optim_params' : maddpg.maddpg_agent[i].critic_optimizer.state_dict()}
                save_dict_list.append(save_dict)

            # save once, after all three agents' state dicts have been collected
            torch.save(save_dict_list,
                       os.path.join(model_dir, 'episode-{}.pt'.format(episode)))
                
            # save gif files
            imageio.mimsave(os.path.join(model_dir, 'episode-{}.gif'.format(episode)), 
                            frames, duration=.04)

    env.close()
    logger.close()
    timer.finish()
Code example #50
File: zmass_estimator.py Project: peterrwi/LensPop
    def fastMCMC(self, niter, nburn, nthin=1):
        from Sampler import SimpleSample as sample
        from scipy import interpolate
        import pymc, numpy, time
        import ndinterp

        models = self.model.models
        data = self.data
        filters = list(data.keys())
        t = time.time()
        T1 = models[filters[0]] * 0.
        T2 = 0.
        for f in filters:
            T1 += (models[f] - data[f]['mag']) / data[f]['sigma']**2
            T2 += 2.5 / self.data[f]['sigma']**2
        M = T1 / T2
        logp = 0.
        for f in filters:
            logp += -0.5 * (-2.5 * M + models[f] -
                            data[f]['mag'])**2 / data[f]['sigma']**2
        t = time.time()
        axes = {}
        i = 0
        ax = {}
        ind = numpy.unravel_index(logp.argmax(), logp.shape)
        best = []
        for key in self.model.axes_names:
            a = self.model.axes[key]['points']
            axes[i] = interpolate.splrep(a, numpy.arange(a.size), k=1, s=0)
            ax[key] = i
            best.append(a[ind[i]])
            i += 1

        print(logp.max())
        logpmodel = ndinterp.ndInterp(axes, logp, order=1)
        massmodel = ndinterp.ndInterp(axes, M, order=1)

        pars = [self.priors[key] for key in self.names]

        doExp = []
        cube2par = []
        i = 0
        for key in self.names:
            if key.find('log') == 0:
                pntkey = key.split('log')[1]
                #self.priors[key].value = numpy.log10(best[ax[pntkey]])
                doExp.append(True)
            else:
                pntkey = key
                doExp.append(False)
                #self.priors[key].value = best[ax[pntkey]]
            cube2par.append(ax[pntkey])
        doExp = numpy.array(doExp) == True
        par2cube = numpy.argsort(cube2par)

        logp -= logp.max()
        p = numpy.exp(logp)
        p /= p.sum()
        i = 0
        wmean = numpy.empty(p.ndim)
        axarr = []
        for key in self.model.axes_names:
            a = self.model.axes[key]['points']
            p0 = numpy.rollaxis(p, i, p.ndim)
            wmean[i] = (a * p0).sum()
            axarr.append(numpy.rollaxis(a + p0 * 0, p.ndim - 1, i))
            i += 1
        cov = numpy.empty((p.ndim, p.ndim))
        #for i in range(p.ndim):
        #    for j in range(i,p.ndim):
        #        cov[i,j] = (p*(axarr[i]-wmean[i])*(axarr[j]-wmean[j])).sum()
        #        cov[j,i] = cov[i,j]
        for i in range(p.ndim):
            k = cube2par[i]
            for j in range(i, p.ndim):
                l = cube2par[j]
                cov[i, j] = (p * (axarr[k] - wmean[k]) *
                             (axarr[l] - wmean[l])).sum()
                cov[j, i] = cov[i, j]
        cov /= 1. - (p**2).sum()
        #for key in self.names:
        #    if key.find('log')==0:
        #        pntkey = key.split('log')[1]
        #        self.priors[key].value = numpy.log10(wmean[ax[pntkey]])
        #    else:
        #        self.priors[key].value = wmean[ax[key]]

        #self.priors['redshift'].value = 0.1
        pnt = numpy.empty((len(self.priors), 1))

        @pymc.deterministic
        def mass_and_logp(value=0., pars=pars):
            p = numpy.array(pars)
            p[doExp] = 10**p[doExp]
            p = numpy.atleast_2d(p[par2cube])
            mass = massmodel.eval(p)
            if mass == 0.:
                return [0., -1e200]
            logp = logpmodel.eval(p)
            return [mass, logp]

        @pymc.observed
        def loglikelihood(value=0., lp=mass_and_logp):
            return lp[1]

        """
        logp -= logp.max()
        p = numpy.exp(logp)
        p /= p.sum()
        i = 0
        wmean = numpy.empty(p.ndim)
        for key in self.model.axes_names:
            a = self.model.axes[key]['points']
            p0 = numpy.rollaxis(p,i,p.ndim)
            wmean[i] = (a*p0).sum()
            i += 1

        

        cov = []
        for key in self.names:
            if key=='age':
                cov.append(0.5)
            elif key=='logage':
                cov.append(0.03)
            elif key=='tau':
                cov.append(0.1)
            elif key=='logtau':
                cov.append(0.03)
            elif key=='tau_V':
                cov.append(self.priors[key]['prior'].value/20.)
            elif key=='logtau_V':
                cov.append(0.1)
            elif key=='Z':
                cov.append(self.priors[key]['prior'].value/20.)
            elif key=='logZ':
                cov.append(0.03)
            elif key=='redshift':
                cov.append(0.1)
        cov = numpy.array(cov)
        """

        from SampleOpt import Sampler, AMAOpt
        S = AMAOpt(pars, [loglikelihood], [mass_and_logp], cov=cov)
        S.sample(nburn)
        logps, trace, dets = S.result()
        print(logps.max())

        S = Sampler(pars, [loglikelihood], [mass_and_logp])
        S.setCov(cov)
        S.sample(nburn / 2)

        logps, trace, dets = S.result()
        cov = numpy.cov(trace.T)

        S = Sampler(pars, [loglikelihood], [mass_and_logp])
        S.setCov(cov)
        S.sample(niter)

        logps, trace, dets = S.result()
        mass, logL = dets['mass_and_logp'][:, :, 0].T
        o = {'logP': logps, 'logL': logL, 'logmass': mass}
        cnt = 0
        for key in self.names:
            o[key] = trace[:, cnt].copy()
            cnt += 1
        return o

        # NOTE: the code below is unreachable (it follows the return above); kept from the source.
        arg = logp.argmax()
        logp -= logp.max()
        p = numpy.exp(logp)
        p /= p.sum()
        print(p.max())
        i = 0
        for key in self.model.axes_names:
            a = self.model.axes[key]['points']
            if key == 'redshift':
                a = a[::5]
            p0 = numpy.rollaxis(p, i, p.ndim)
            print(key, (a * p0).sum())
            i += 1

        print(numpy.unravel_index(arg, logp.shape))
        logp -= logp.max()
        print((M * numpy.exp(logp)).sum() / numpy.exp(logp).sum())
        z = (M * 0. + 1) * self.model.axes['redshift']['points'][::5]
        print((z * numpy.exp(logp)).sum() / numpy.exp(logp).sum())
        f = open('check', 'wb')
        import cPickle
        cPickle.dump([M, logp], f, 2)
        f.close()
        mod = ndinterp.ndInterp(self.models.axes, logp)
Code example #51
        skybg_wvs = skybg_arr[:, 0] / 1000.
        skybg_spec = skybg_arr[:, 1]
        selec_skybg = np.where((skybg_wvs > wvs[0] - (wvs[-1] - wvs[0]) / 2) *
                               (skybg_wvs < wvs[-1] + (wvs[-1] - wvs[0]) / 2))
        skybg_wvs = skybg_wvs[selec_skybg]
        skybg_spec = skybg_spec[selec_skybg]
        # skybg_spec = convolve_spectrum(skybg_wvs,skybg_spec,R)

    ccf_arr_list = []
    wvshift_arr_list = []
    for filename in filelist:
        print(filename)
        # continue
        hdulist = pyfits.open(filename)
        prihdr = hdulist[0].header
        skycube = np.rollaxis(np.rollaxis(hdulist[0].data, 2), 2, 1)
        skycube_badpix = np.rollaxis(np.rollaxis(hdulist[2].data, 2), 2, 1)
        nz, ny, nx = skycube.shape
        print(skycube.shape)

        if 1:
            import ctypes
            dtype = ctypes.c_float
            original_imgs = mp.Array(dtype, np.size(skycube))
            original_imgs_shape = skycube.shape
            original_imgs_np = _arraytonumpy(original_imgs,
                                             original_imgs_shape,
                                             dtype=dtype)
            original_imgs_np[:] = skycube
            badpix_imgs = mp.Array(dtype, np.size(skycube_badpix))
            badpix_imgs_shape = skycube_badpix.shape
Code example #52
def test(iter_num,
         gpu_id,
         vol_size=(160, 192, 224),
         nf_enc=[16, 32, 32, 32],
         nf_dec=[32, 32, 32, 32, 32, 16, 16, 3]):
    gpu = '/gpu:' + str(gpu_id)

    # Anatomical labels we want to evaluate
    labels = sio.loadmat('../data/labels.mat')['labels'][0]

    # read atlas
    atlas_vol1, atlas_seg1 = datagenerators.load_example_by_name(
        '/home/ys895/resize256/resize256-crop_x32/FromEugenio_prep/vols/990114_vc722.npz',
        '/home/ys895/resize256/resize256-crop_x32/FromEugenio_prep/labels/990114_vc722.npz'
    )  # [1,160,192,224,1]
    atlas_seg1 = atlas_seg1[0, :, :, :,
                            0]  # reduce the dimension to [160,192,224]

    atlas_vol2, atlas_seg2 = datagenerators.load_example_by_name(
        '/home/ys895/resize256/resize256-crop_x32/FromEugenio_prep/vols/990210_vc792.npz',
        '/home/ys895/resize256/resize256-crop_x32/FromEugenio_prep/labels/990210_vc792.npz'
    )
    atlas_seg2 = atlas_seg2[0, :, :, :, 0]

    #gpu = '/gpu:' + str(gpu_id)
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    set_session(tf.Session(config=config))

    # load weights of model
    with tf.device(gpu):
        net = networks.unet(vol_size, nf_enc, nf_dec)
        net.load_weights('/home/ys895/MAS2_Models/' + str(iter_num) + '.h5')
        #net.load_weights('../models/' + model_name + '/' + str(iter_num) + '.h5')

    xx = np.arange(vol_size[1])
    yy = np.arange(vol_size[0])
    zz = np.arange(vol_size[2])
    grid = np.rollaxis(np.array(np.meshgrid(xx, yy, zz)), 0,
                       4)  # (160, 192, 224, 3)
    #X_vol, X_seg = datagenerators.load_example_by_name('../data/test_vol.npz', '../data/test_seg.npz')
    X_vol1, X_seg1 = datagenerators.load_example_by_name(
        '/home/ys895/resize256/resize256-crop_x32/FromEugenio_prep/vols/981216_vc681.npz',
        '/home/ys895/resize256/resize256-crop_x32/FromEugenio_prep/labels/981216_vc681.npz'
    )

    X_vol2, X_seg2 = datagenerators.load_example_by_name(
        '/home/ys895/resize256/resize256-crop_x32/FromEugenio_prep/vols/990205_vc783.npz',
        '/home/ys895/resize256/resize256-crop_x32/FromEugenio_prep/labels/990205_vc783.npz'
    )

    X_vol3, X_seg3 = datagenerators.load_example_by_name(
        '/home/ys895/resize256/resize256-crop_x32/FromEugenio_prep/vols/990525_vc1024.npz',
        '/home/ys895/resize256/resize256-crop_x32/FromEugenio_prep/labels/990525_vc1024.npz'
    )

    X_vol4, X_seg4 = datagenerators.load_example_by_name(
        '/home/ys895/resize256/resize256-crop_x32/FromEugenio_prep/vols/991025_vc1379.npz',
        '/home/ys895/resize256/resize256-crop_x32/FromEugenio_prep/labels/991025_vc1379.npz'
    )

    X_vol5, X_seg5 = datagenerators.load_example_by_name(
        '/home/ys895/resize256/resize256-crop_x32/FromEugenio_prep/vols/991122_vc1463.npz',
        '/home/ys895/resize256/resize256-crop_x32/FromEugenio_prep/labels/991122_vc1463.npz'
    )

    # change the direction of the atlas data and volume data
    # pred[0].shape (1, 160, 192, 224, 1)
    # pred[1].shape (1, 160, 192, 224, 3)
    # X1
    with tf.device(gpu):
        pred1 = net.predict([atlas_vol1, X_vol1])
        pred2 = net.predict([atlas_vol2, X_vol1])
        #pred3 = net.predict([atlas_vol3, X_vol1])
        #pred4 = net.predict([atlas_vol4, X_vol1])
        #pred5 = net.predict([atlas_vol5, X_vol1])
    # Warp segments with flow
    flow1 = pred1[1][0, :, :, :, :]  # (1, 160, 192, 224, 3)
    flow2 = pred2[1][0, :, :, :, :]
    #flow3 = pred3[1][0, :, :, :, :]
    #flow4 = pred4[1][0, :, :, :, :]
    #flow5 = pred5[1][0, :, :, :, :]

    sample1 = flow1 + grid
    sample1 = np.stack(
        (sample1[:, :, :, 1], sample1[:, :, :, 0], sample1[:, :, :, 2]), 3)
    sample2 = flow2 + grid
    sample2 = np.stack(
        (sample2[:, :, :, 1], sample2[:, :, :, 0], sample2[:, :, :, 2]), 3)
    #sample3 = flow3+grid
    #sample3 = np.stack((sample3[:, :, :, 1], sample3[:, :, :, 0], sample3[:, :, :, 2]), 3)
    #sample4 = flow4+grid
    #sample4 = np.stack((sample4[:, :, :, 1], sample4[:, :, :, 0], sample4[:, :, :, 2]), 3)
    #sample5 = flow5+grid
    #sample5 = np.stack((sample5[:, :, :, 1], sample5[:, :, :, 0], sample5[:, :, :, 2]), 3)

    warp_seg1 = interpn((yy, xx, zz),
                        atlas_seg1[:, :, :],
                        sample1,
                        method='nearest',
                        bounds_error=False,
                        fill_value=0)  # (160, 192, 224)
    warp_seg2 = interpn((yy, xx, zz),
                        atlas_seg2[:, :, :],
                        sample2,
                        method='nearest',
                        bounds_error=False,
                        fill_value=0)
    #warp_seg3 = interpn((yy, xx, zz), atlas_seg3[:, :, :], sample3, method='nearest', bounds_error=False, fill_value=0)
    #warp_seg4 = interpn((yy, xx, zz), atlas_seg4[:, :, :], sample4, method='nearest', bounds_error=False, fill_value=0)
    #warp_seg5 = interpn((yy, xx, zz), atlas_seg5[:, :, :], sample5, method='nearest', bounds_error=False, fill_value=0)

    # label fusion: get the final warp_seg
    warp_seg = np.empty((160, 192, 224))
    for x in range(0, 160):
        for y in range(0, 192):
            for z in range(0, 224):
                warp_arr = np.array([warp_seg1[x, y, z], warp_seg2[x, y, z]])
                #print(warp_arr)
                warp_seg[x, y, z] = stats.mode(warp_arr)[0]

    vals, _ = dice(warp_seg, X_seg1[0, :, :, :, 0], labels=labels, nargout=2)
    mean1 = np.mean(vals)

    # X2
    with tf.device(gpu):
        pred1 = net.predict([atlas_vol1, X_vol2])
        pred2 = net.predict([atlas_vol2, X_vol2])
        #pred3 = net.predict([atlas_vol3, X_vol2])
        #pred4 = net.predict([atlas_vol4, X_vol2])
        #pred5 = net.predict([atlas_vol5, X_vol2])
    # Warp segments with flow
    flow1 = pred1[1][0, :, :, :, :]  # (1, 160, 192, 224, 3)
    flow2 = pred2[1][0, :, :, :, :]
    #flow3 = pred3[1][0, :, :, :, :]
    #flow4 = pred4[1][0, :, :, :, :]
    #flow5 = pred5[1][0, :, :, :, :]

    sample1 = flow1 + grid
    sample1 = np.stack(
        (sample1[:, :, :, 1], sample1[:, :, :, 0], sample1[:, :, :, 2]), 3)
    sample2 = flow2 + grid
    sample2 = np.stack(
        (sample2[:, :, :, 1], sample2[:, :, :, 0], sample2[:, :, :, 2]), 3)
    #sample3 = flow3+grid
    #sample3 = np.stack((sample3[:, :, :, 1], sample3[:, :, :, 0], sample3[:, :, :, 2]), 3)
    #sample4 = flow4+grid
    #sample4 = np.stack((sample4[:, :, :, 1], sample4[:, :, :, 0], sample4[:, :, :, 2]), 3)
    #sample5 = flow5+grid
    #sample5 = np.stack((sample5[:, :, :, 1], sample5[:, :, :, 0], sample5[:, :, :, 2]), 3)

    warp_seg1 = interpn((yy, xx, zz),
                        atlas_seg1[:, :, :],
                        sample1,
                        method='nearest',
                        bounds_error=False,
                        fill_value=0)  # (160, 192, 224)
    warp_seg2 = interpn((yy, xx, zz),
                        atlas_seg2[:, :, :],
                        sample2,
                        method='nearest',
                        bounds_error=False,
                        fill_value=0)
    #warp_seg3 = interpn((yy, xx, zz), atlas_seg3[:, :, :], sample3, method='nearest', bounds_error=False, fill_value=0)
    #warp_seg4 = interpn((yy, xx, zz), atlas_seg4[:, :, :], sample4, method='nearest', bounds_error=False, fill_value=0)
    #warp_seg5 = interpn((yy, xx, zz), atlas_seg5[:, :, :], sample5, method='nearest', bounds_error=False, fill_value=0)

    # label fusion: get the final warp_seg
    warp_seg = np.empty((160, 192, 224))
    for x in range(0, 160):
        for y in range(0, 192):
            for z in range(0, 224):
                warp_arr = np.array([warp_seg1[x, y, z], warp_seg2[x, y, z]])
                #print(warp_arr)
                warp_seg[x, y, z] = stats.mode(warp_arr)[0]

    vals, _ = dice(warp_seg, X_seg2[0, :, :, :, 0], labels=labels, nargout=2)
    mean2 = np.mean(vals)
    #print(np.mean(vals), np.std(vals))

    # X3
    with tf.device(gpu):
        pred1 = net.predict([atlas_vol1, X_vol3])
        pred2 = net.predict([atlas_vol2, X_vol3])
        #pred3 = net.predict([atlas_vol3, X_vol1])
        #pred4 = net.predict([atlas_vol4, X_vol1])
        #pred5 = net.predict([atlas_vol5, X_vol1])
    # Warp segments with flow
    flow1 = pred1[1][0, :, :, :, :]  # (1, 160, 192, 224, 3)
    flow2 = pred2[1][0, :, :, :, :]
    #flow3 = pred3[1][0, :, :, :, :]
    #flow4 = pred4[1][0, :, :, :, :]
    #flow5 = pred5[1][0, :, :, :, :]

    sample1 = flow1 + grid
    sample1 = np.stack(
        (sample1[:, :, :, 1], sample1[:, :, :, 0], sample1[:, :, :, 2]), 3)
    sample2 = flow2 + grid
    sample2 = np.stack(
        (sample2[:, :, :, 1], sample2[:, :, :, 0], sample2[:, :, :, 2]), 3)
    #sample3 = flow3+grid
    #sample3 = np.stack((sample3[:, :, :, 1], sample3[:, :, :, 0], sample3[:, :, :, 2]), 3)
    #sample4 = flow4+grid
    #sample4 = np.stack((sample4[:, :, :, 1], sample4[:, :, :, 0], sample4[:, :, :, 2]), 3)
    #sample5 = flow5+grid
    #sample5 = np.stack((sample5[:, :, :, 1], sample5[:, :, :, 0], sample5[:, :, :, 2]), 3)

    warp_seg1 = interpn((yy, xx, zz),
                        atlas_seg1[:, :, :],
                        sample1,
                        method='nearest',
                        bounds_error=False,
                        fill_value=0)  # (160, 192, 224)
    warp_seg2 = interpn((yy, xx, zz),
                        atlas_seg2[:, :, :],
                        sample2,
                        method='nearest',
                        bounds_error=False,
                        fill_value=0)
    #warp_seg3 = interpn((yy, xx, zz), atlas_seg3[:, :, :], sample3, method='nearest', bounds_error=False, fill_value=0)
    #warp_seg4 = interpn((yy, xx, zz), atlas_seg4[:, :, :], sample4, method='nearest', bounds_error=False, fill_value=0)
    #warp_seg5 = interpn((yy, xx, zz), atlas_seg5[:, :, :], sample5, method='nearest', bounds_error=False, fill_value=0)

    # label fusion: get the final warp_seg
    warp_seg = np.empty((160, 192, 224))
    for x in range(0, 160):
        for y in range(0, 192):
            for z in range(0, 224):
                warp_arr = np.array([warp_seg1[x, y, z], warp_seg2[x, y, z]])
                #print(warp_arr)
                warp_seg[x, y, z] = stats.mode(warp_arr)[0]

    vals, _ = dice(warp_seg, X_seg3[0, :, :, :, 0], labels=labels, nargout=2)
    mean3 = np.mean(vals)

    # X4
    with tf.device(gpu):
        pred1 = net.predict([atlas_vol1, X_vol4])
        pred2 = net.predict([atlas_vol2, X_vol4])
        #pred3 = net.predict([atlas_vol3, X_vol1])
        #pred4 = net.predict([atlas_vol4, X_vol1])
        #pred5 = net.predict([atlas_vol5, X_vol1])
    # Warp segments with flow
    flow1 = pred1[1][0, :, :, :, :]  # (1, 160, 192, 224, 3)
    flow2 = pred2[1][0, :, :, :, :]
    #flow3 = pred3[1][0, :, :, :, :]
    #flow4 = pred4[1][0, :, :, :, :]
    #flow5 = pred5[1][0, :, :, :, :]

    sample1 = flow1 + grid
    sample1 = np.stack(
        (sample1[:, :, :, 1], sample1[:, :, :, 0], sample1[:, :, :, 2]), 3)
    sample2 = flow2 + grid
    sample2 = np.stack(
        (sample2[:, :, :, 1], sample2[:, :, :, 0], sample2[:, :, :, 2]), 3)
    #sample3 = flow3+grid
    #sample3 = np.stack((sample3[:, :, :, 1], sample3[:, :, :, 0], sample3[:, :, :, 2]), 3)
    #sample4 = flow4+grid
    #sample4 = np.stack((sample4[:, :, :, 1], sample4[:, :, :, 0], sample4[:, :, :, 2]), 3)
    #sample5 = flow5+grid
    #sample5 = np.stack((sample5[:, :, :, 1], sample5[:, :, :, 0], sample5[:, :, :, 2]), 3)

    warp_seg1 = interpn((yy, xx, zz),
                        atlas_seg1[:, :, :],
                        sample1,
                        method='nearest',
                        bounds_error=False,
                        fill_value=0)  # (160, 192, 224)
    warp_seg2 = interpn((yy, xx, zz),
                        atlas_seg2[:, :, :],
                        sample2,
                        method='nearest',
                        bounds_error=False,
                        fill_value=0)
    #warp_seg3 = interpn((yy, xx, zz), atlas_seg3[:, :, :], sample3, method='nearest', bounds_error=False, fill_value=0)
    #warp_seg4 = interpn((yy, xx, zz), atlas_seg4[:, :, :], sample4, method='nearest', bounds_error=False, fill_value=0)
    #warp_seg5 = interpn((yy, xx, zz), atlas_seg5[:, :, :], sample5, method='nearest', bounds_error=False, fill_value=0)

    # label fusion: get the final warp_seg
    warp_seg = np.empty((160, 192, 224))
    for x in range(0, 160):
        for y in range(0, 192):
            for z in range(0, 224):
                warp_arr = np.array([warp_seg1[x, y, z], warp_seg2[x, y, z]])
                #print(warp_arr)
                warp_seg[x, y, z] = stats.mode(warp_arr)[0]

    vals, _ = dice(warp_seg, X_seg4[0, :, :, :, 0], labels=labels, nargout=2)
    mean4 = np.mean(vals)

    # X5
    with tf.device(gpu):
        pred1 = net.predict([atlas_vol1, X_vol5])
        pred2 = net.predict([atlas_vol2, X_vol5])
        #pred3 = net.predict([atlas_vol3, X_vol1])
        #pred4 = net.predict([atlas_vol4, X_vol1])
        #pred5 = net.predict([atlas_vol5, X_vol1])
    # Warp segments with flow
    flow1 = pred1[1][0, :, :, :, :]  # (1, 160, 192, 224, 3)
    flow2 = pred2[1][0, :, :, :, :]
    #flow3 = pred3[1][0, :, :, :, :]
    #flow4 = pred4[1][0, :, :, :, :]
    #flow5 = pred5[1][0, :, :, :, :]

    sample1 = flow1 + grid
    sample1 = np.stack(
        (sample1[:, :, :, 1], sample1[:, :, :, 0], sample1[:, :, :, 2]), 3)
    sample2 = flow2 + grid
    sample2 = np.stack(
        (sample2[:, :, :, 1], sample2[:, :, :, 0], sample2[:, :, :, 2]), 3)
    #sample3 = flow3+grid
    #sample3 = np.stack((sample3[:, :, :, 1], sample3[:, :, :, 0], sample3[:, :, :, 2]), 3)
    #sample4 = flow4+grid
    #sample4 = np.stack((sample4[:, :, :, 1], sample4[:, :, :, 0], sample4[:, :, :, 2]), 3)
    #sample5 = flow5+grid
    #sample5 = np.stack((sample5[:, :, :, 1], sample5[:, :, :, 0], sample5[:, :, :, 2]), 3)

    warp_seg1 = interpn((yy, xx, zz),
                        atlas_seg1[:, :, :],
                        sample1,
                        method='nearest',
                        bounds_error=False,
                        fill_value=0)  # (160, 192, 224)
    warp_seg2 = interpn((yy, xx, zz),
                        atlas_seg2[:, :, :],
                        sample2,
                        method='nearest',
                        bounds_error=False,
                        fill_value=0)
    #warp_seg3 = interpn((yy, xx, zz), atlas_seg3[:, :, :], sample3, method='nearest', bounds_error=False, fill_value=0)
    #warp_seg4 = interpn((yy, xx, zz), atlas_seg4[:, :, :], sample4, method='nearest', bounds_error=False, fill_value=0)
    #warp_seg5 = interpn((yy, xx, zz), atlas_seg5[:, :, :], sample5, method='nearest', bounds_error=False, fill_value=0)

    # label fusion: get the final warp_seg
    warp_seg = np.empty((160, 192, 224))
    for x in range(0, 160):
        for y in range(0, 192):
            for z in range(0, 224):
                warp_arr = np.array([warp_seg1[x, y, z], warp_seg2[x, y, z]])
                #print(warp_arr)
                warp_seg[x, y, z] = stats.mode(warp_arr)[0]

    vals, _ = dice(warp_seg, X_seg5[0, :, :, :, 0], labels=labels, nargout=2)
    mean5 = np.mean(vals)

    # compute mean of dice score
    total = mean1 + mean2 + mean3 + mean4 + mean5  # avoid shadowing the builtin sum
    mean_dice = total / 5
    print(mean_dice)
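
# Added aside: the per-voxel label-fusion loops above could be vectorized with
# a single scipy.stats.mode call over a stacked array (np.squeeze covers both
# old and new scipy return shapes); a toy-shaped sketch:
import numpy as np
from scipy import stats
seg1 = np.random.randint(0, 3, (4, 5, 6))
seg2 = np.random.randint(0, 3, (4, 5, 6))
fused = np.squeeze(stats.mode(np.stack([seg1, seg2]), axis=0).mode)
assert fused.shape == (4, 5, 6)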
Code example #53
def predict_id(id,
               model,
               trs,
               dims,
               size=1600,
               mins=None,
               maxs=None,
               use_sample_weights=False,
               raw=False,
               means=None):
    """
    Predicts a single test image by predicting for all 160x160 crops in the larger image.
    """
    x = M(id, dims=dims, size=size)
    h, w = x.shape[0], x.shape[1]

    def min_max_normalize(bands, mins, maxs):
        out = np.zeros_like(bands).astype(np.float32)
        n = bands.shape[2]
        for i in range(n):
            a = 0  # np.min(band)
            b = 1  # np.max(band)
            c = mins[i]
            d = maxs[i]
            t = a + (bands[:, :, i] - c) * (b - a) / (d - c)
            t[t < a] = a
            t[t > b] = b
            out[:, :, i] = t
        return out.astype(np.float32)

    # Normalization: Scale with Min/Max
    x = min_max_normalize(x, mins, maxs)

    pixels = size
    rows = int(size / 160)
    cnv = np.zeros((pixels, pixels, dims)).astype(np.float32)
    prd = np.zeros((10, pixels, pixels)).astype(np.float32)
    cnv[:h, :w, :] = x

    line = []
    for i in range(0, rows):
        # we slide through 160x160 crops and append them for prediction after
        for j in range(0, rows):
            line.append(cnv[i * CROP_SIZE:(i + 1) * CROP_SIZE,
                            j * CROP_SIZE:(j + 1) * CROP_SIZE])

    x = np.transpose(line, (0, 3, 1, 2))
    if means is not None:
        for k in range(dims):
            x[:, k] -= means[k]
    tmp = model.predict(x, batch_size=16)
    if use_sample_weights:
        # Output is (None, 160*160, 10), reshape to (None, 10, 160, 160)
        tmp = np.rollaxis(tmp, 2, 1)
        tmp = tmp.reshape(tmp.shape[0], 10, 160, 160)
    k = 0
    for i in range(rows):
        for j in range(rows):
            prd[:, i * CROP_SIZE:(i + 1) * CROP_SIZE,
                j * CROP_SIZE:(j + 1) * CROP_SIZE] = tmp[k]
            k += 1
    if raw:
        pass
    else:
        for i in range(10):
            prd[i] = prd[i] >= trs[i]
    return prd
Code example #54
    def getData(inp_path, img_size, dataAug, testing, num_channel=1):
        if inp_path is None:
            PATH = os.getcwd()
            data_path = PATH + '/data'
        else:
            # keep data_path defined for the imread calls below
            data_path = inp_path
        data_dir_list = os.listdir(data_path)

        num_samples = len(data_dir_list)
        print(num_samples)
        img_data_list = []
        for img in data_dir_list:
            input_img = cv2.imread(data_path + '/' + img)
            input_img = cv2.cvtColor(input_img, cv2.COLOR_BGR2GRAY)
            input_img_resize = cv2.resize(input_img, img_size)
            img_data_list.append(input_img_resize)
        label_list = np.ones((num_samples, ), dtype=int)
        label_list[0:59] = 0
        label_list[59:122] = 1
        label_list[122:194] = 2
        label_list[194:267] = 3
        label_list[267:323] = 4
        label_list[323:385] = 5
        label_list[385:437] = 6
        label_list[437:496] = 7
        label_list[496:551] = 8
        label_list[551:616] = 9
        label_list[616:666] = 10
        label_list[666:729] = 11
        label_list[729:781] = 12
        label_list[781:846] = 13
        label_list[846:906] = 14
        label_list[906:962] = 15
        label_list[962:1039] = 16
        label_list[1039:1101] = 17
        label_list[1101:1162] = 18
        label_list[1162:1228] = 19
        label_list[1228:1288] = 20
        label_list[1288:1343] = 21
        label_list[1343:1398] = 22
        label_list[1398:1463] = 23
        label_list[1463:1517] = 24
        label_list[1517:1569] = 25
        label_list[1569:1622] = 26
        label_list[1622:1677] = 27
        label_list[1677:1734] = 28
        label_list[1734:1798] = 29
        label_list[1798:1851] = 30
        label_list[1851:1907] = 31

        img_data = np.array(img_data_list)
        img_data = img_data.astype('float32')
        img_data /= 255
        Y = np_utils.to_categorical(label_list, 32)
        if dataAug:
            datagen = ImageDataGenerator(featurewise_center=True,
                                         featurewise_std_normalization=True,
                                         rotation_range=20,
                                         width_shift_range=0.2,
                                         height_shift_range=0.2,
                                         horizontal_flip=True)
        else:
            datagen = None
        if num_channel == 1:
            if K.image_dim_ordering() == 'th':
                img_data = np.expand_dims(img_data, axis=1)
                print(img_data.shape)
            else:
                # Channels-last: append the channel axis with axis=-1
                # (axis=4 is out of range for a 3-D array in recent NumPy).
                img_data = np.expand_dims(img_data, axis=-1)
                print(img_data.shape)

        else:
            if K.image_dim_ordering() == 'th':
                img_data = np.rollaxis(img_data, 3, 1)
                print(img_data.shape)

        images, labels = shuffle(img_data, Y)
        if testing:
            X_test, y_test = images, labels
            X_train, y_train = None, None
        else:
            X_train, X_test, y_train, y_test = train_test_split(images,
                                                                labels,
                                                                test_size=0.2)

        return datagen, X_train, X_test, y_train, y_test, 32
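A hedged usage sketch for the function above ('./data' is a placeholder path). Note that with featurewise_center/featurewise_std_normalization enabled, the returned generator must still be fitted on the training data before it can produce normalized batches:

datagen, X_train, X_test, y_train, y_test, num_classes = getData(
    inp_path='./data', img_size=(128, 128), dataAug=True,
    testing=False, num_channel=1)

if datagen is not None:
    datagen.fit(X_train)  # computes the featurewise mean and std
    batches = datagen.flow(X_train, y_train, batch_size=32)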
Code example #55
File: utils.py Project: getterk96/ppnn
def split_and_scale(raw_data,
                    train_dates_idxs,
                    test_dates_idxs,
                    verbose=1,
                    seq_len=None,
                    fill_value=None,
                    full_ensemble_t=False,
                    add_current_error=False,
                    fclt=48,
                    current_error_len=1):
    """
    """

    # Unpack raw_data
    targets, features, dates, station_id, feature_names = raw_data

    if add_current_error:
        feature_names.extend(['curr_t2m_fc_obs', 'curr_err'])
        if current_error_len > 1:
            for i in range(1, current_error_len, 1):
                feature_names.extend(
                    ['curr_t2m_fc_obs_m%i' % i,
                     'curr_err_m%i' % i])
        assert full_ensemble_t is False, 'Current error not compatible with full ensemble.'

    data_sets = []
    for set_name, dates_idxs in zip(['train', 'test'],
                                    [train_dates_idxs, test_dates_idxs]):

        # Split data set:
        if verbose == 1:
            print('%s set contains %i days' %
                  (set_name, dates_idxs[1] - dates_idxs[0]))

        if seq_len is None:
            t = targets[dates_idxs[0]:dates_idxs[1]]  # [date, station]
            f = features[:, dates_idxs[0]:
                         dates_idxs[1]]  # [feature, date, station]

            if add_current_error:
                didx = int(fclt / 24)
                new_f_list = []
                for i in range(current_error_len):
                    d = didx + i
                    curr_obs = targets[dates_idxs[0] - d:dates_idxs[1] -
                                       d].copy()
                    curr_fc = features[0, dates_idxs[0] - d:dates_idxs[1] - d]
                    # Replace missing observations with forecast values
                    # [date_shifted, station]
                    curr_obs[np.isnan(curr_obs)] = curr_fc[np.isnan(curr_obs)]
                    curr_err = curr_obs - curr_fc
                    new_f_list.extend([curr_obs, curr_err])

                new_f = np.stack(new_f_list, axis=0)
                # [new features, date_shifted, station]
                f = np.concatenate((f, new_f), axis=0)

            # Ravel arrays, combine dates and stations --> instances
            t = np.reshape(t, (-1))  # [instances]
            f = np.reshape(f, (f.shape[0], -1))  # [features, instances]

            # Swap feature axes
            f = np.rollaxis(f, 1, 0)  # [instances, features]

            # Get nan mask from target
            nan_mask = np.isfinite(t)
        else:
            assert add_current_error is False, 'Current error not compatible with sequence'
            t = targets[dates_idxs[0] - seq_len +
                        1:dates_idxs[1]]  # [date, station]
            f = features[:, dates_idxs[0] - seq_len +
                         1:dates_idxs[1]]  # [feature, date, station]

            # Stack time steps for sequences
            # [time_step, feature, day, station]
            t = np.stack(
                [t[i:-(seq_len - i - 1) or None] for i in range(seq_len)])
            # [time_step, day, station]
            f = np.stack(
                [f[:, i:-(seq_len - i - 1) or None] for i in range(seq_len)])

            # Ravel arrays [seq, feature, instance]
            t = np.reshape(t, (seq_len, -1))
            f = np.reshape(f, (seq_len, f.shape[1], -1))

            # Roll arrays to [sample, time_step, feature]
            t = np.rollaxis(t, 1, 0)
            f = np.rollaxis(f, 2, 0)
            t = np.atleast_3d(t)

            # Get nan mask from last entry of target
            nan_mask = np.isfinite(t[:, -1, 0])

        # Apply NaN mask
        f = f[nan_mask]
        t = t[nan_mask]

        # Scale features
        if set_name == 'train':  # get the training-set feature maxima
            if seq_len is None:
                features_max = np.nanmax(f, axis=0)
            else:
                features_max = np.nanmax(f, axis=(0, 1))
        if full_ensemble_t:
            # Scale all temperature members with the same max
            n_ens = 51  # ATTENTION: hard-coded
            features_max[:n_ens] = np.max(features_max[:n_ens])
        f /= features_max

        # Replace NaNs with the fill value, if requested
        if fill_value is not None:
            assert seq_len is not None, 'fill value only implemented for sequences.'
            weights = np.array(np.isfinite(t[:, :, 0]), dtype=np.float32)
            t[np.isnan(t)] = fill_value
        else:
            weights = None

        # Get additional data
        cont_ids = get_cont_ids(features, nan_mask, dates_idxs, seq_len)
        station_ids = get_station_ids(features, station_id, nan_mask,
                                      dates_idxs)
        date_strs = get_date_strs(features, dates, nan_mask, dates_idxs)

        # Put in data container
        data_sets.append(
            DataContainer(t, f, cont_ids, station_ids, date_strs,
                          feature_names, weights, features_max))

    return data_sets
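The sequence-stacking slices above are easy to misread; a minimal worked example (toy series, one station, seq_len = 3):

import numpy as np

t = np.arange(6)  # six consecutive dates
seq_len = 3

# `-(seq_len - i - 1) or None` avoids the empty t[i:-0] slice when
# i == seq_len - 1.
seqs = np.stack([t[i:-(seq_len - i - 1) or None] for i in range(seq_len)])
print(seqs)
# [[0 1 2 3]
#  [1 2 3 4]
#  [2 3 4 5]]

# As in the function: roll to [sample, time_step].
print(np.rollaxis(seqs, 1, 0).shape)  # (4, 3)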
Code example #56
File: test_image.py Project: xuesn/nilearn
def test__smooth_array():
    """Test smoothing of images: _smooth_array()"""
    # Impulse in 3D
    data = np.zeros((40, 41, 42))
    data[20, 20, 20] = 1

    # fwhm divided by the voxel size of each test affine must be odd,
    # otherwise the assertion below fails (e.g. 9 / 0.6 = 15 is fine).
    fwhm = 9
    test_affines = (np.eye(4), np.diag((1, 1, -1, 1)),
                    np.diag((.6, 1, .6, 1)))
    for affine in test_affines:
        filtered = image._smooth_array(data, affine,
                                       fwhm=fwhm, copy=True)
        assert not np.may_share_memory(filtered, data)

        # We are expecting a full-width at half maximum of
        # fwhm / voxel_size:
        vmax = filtered.max()
        above_half_max = filtered > .5 * vmax
        for axis in (0, 1, 2):
            proj = np.any(np.any(np.rollaxis(above_half_max,
                          axis=axis), axis=-1), axis=-1)
            np.testing.assert_equal(proj.sum(),
                                    fwhm / np.abs(affine[axis, axis]))

    # Check that NaNs in the data do not propagate
    data[10, 10, 10] = np.nan  # lowercase alias; np.NaN was removed in NumPy 2.0
    filtered = image._smooth_array(data, affine, fwhm=fwhm,
                                   ensure_finite=True, copy=True)
    assert np.all(np.isfinite(filtered))

    # Check copy=False.
    for affine in test_affines:
        data = np.zeros((40, 41, 42))
        data[20, 20, 20] = 1
        image._smooth_array(data, affine, fwhm=fwhm, copy=False)

        # We are expecting a full-width at half maximum of
        # fwhm / voxel_size:
        vmax = data.max()
        above_half_max = data > .5 * vmax
        for axis in (0, 1, 2):
            proj = np.any(np.any(np.rollaxis(above_half_max,
                          axis=axis), axis=-1), axis=-1)
            np.testing.assert_equal(proj.sum(),
                                    fwhm / np.abs(affine[axis, axis]))

    # Check fwhm='fast'
    for affine in test_affines:
        np.testing.assert_equal(image._smooth_array(data, affine, fwhm='fast'),
                                image._fast_smooth_array(data))

    # Check corner case when fwhm=0. See #1537
    # Test whether function _smooth_array raises a warning when fwhm=0.
    with pytest.warns(UserWarning):
        image._smooth_array(data, affine, fwhm=0.)

    # Test output equal when fwhm=None and fwhm=0
    out_fwhm_none = image._smooth_array(data, affine, fwhm=None)
    out_fwhm_zero = image._smooth_array(data, affine, fwhm=0.)
    assert_array_equal(out_fwhm_none, out_fwhm_zero)
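A one-dimensional sketch of the projection trick the test relies on (scipy's gaussian_filter1d stands in for _smooth_array; the odd-FWHM requirement mirrors the comment above):

import numpy as np
from scipy.ndimage import gaussian_filter1d

fwhm_vox = 9  # must be odd for an exact voxel count
sigma = fwhm_vox / np.sqrt(8 * np.log(2))  # FWHM = 2*sqrt(2*ln 2) * sigma

signal = np.zeros(41)
signal[20] = 1
smoothed = gaussian_filter1d(signal, sigma)
print((smoothed > 0.5 * smoothed.max()).sum())  # 9: the FWHM in voxels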
Code example #57
img_data = np.array(img_data_list)
img_data = img_data.astype('float32')
img_data /= 255
print(img_data.shape)
if num_channel == 1:
    if K.image_dim_ordering() == 'th':
        img_data = np.expand_dims(img_data, axis=1)
        print(img_data.shape)
    else:
        # Channels-last: axis=-1 appends the channel axis safely
        # (axis=4 is out of range for a 3-D array in recent NumPy).
        img_data = np.expand_dims(img_data, axis=-1)
        print(img_data.shape)

else:
    if K.image_dim_ordering() == 'th':
        img_data = np.rollaxis(img_data, 3, 1)
        print(img_data.shape)
USE_SKLEARN_PREPROCESSING = False

if USE_SKLEARN_PREPROCESSING:
    # using sklearn for preprocessing
    from sklearn import preprocessing

    def image_to_feature_vector(image, size=(128, 128)):
        # resize the image to a fixed size, then flatten the image into
        # a list of raw pixel intensities
        return cv2.resize(image, size).flatten()

    img_data_list = []
    for dataset in data_dir_list:
        img_list = os.listdir(data_path + '/' + dataset)
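The fragment breaks off mid-loop; a hedged sketch of how image_to_feature_vector might feed sklearn preprocessing (the StandardScaler choice and the dummy images are assumptions, not part of the snippet):

import numpy as np
from sklearn import preprocessing

# Dummy images stand in for files read from data_path.
images = [np.random.randint(0, 256, (200, 300, 3), dtype=np.uint8)
          for _ in range(2)]
features = np.array([image_to_feature_vector(im) for im in images],
                    dtype='float32')  # (2, 128*128*3)
features = preprocessing.StandardScaler().fit_transform(features)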
Code example #58
File: ft_ao.py Project: xlzan/pyscf
def _ft_aopair_kpts(cell,
                    Gv,
                    shls_slice=None,
                    aosym='s1',
                    b=None,
                    gxyz=None,
                    Gvbase=None,
                    q=numpy.zeros(3),
                    kptjs=numpy.zeros((1, 3)),
                    intor='GTO_ft_ovlp_sph',
                    comp=1,
                    out=None):
    r'''
    Fourier transform of the AO pair:
    \sum_T exp(-i k_j * T) \int exp(-i(G+q)r) i(r) j(r-T) dr^3

    The returned array holds the AO pairs
    corresponding to the k-points given by kptjs.
    '''
    q = numpy.reshape(q, 3)
    kptjs = numpy.asarray(kptjs, order='C').reshape(-1, 3)
    nGv = Gv.shape[0]
    GvT = numpy.asarray(Gv.T, order='C')
    GvT += q.reshape(-1, 1)

    if (gxyz is None or b is None or Gvbase is None or (abs(q).sum() > 1e-9)
            # backward compatibility for pyscf-1.2, in which the argument Gvbase is gs
            or (Gvbase is not None and isinstance(Gvbase[0],
                                                  (int, numpy.integer)))):
        p_gxyzT = lib.c_null_ptr()
        p_gs = (ctypes.c_int * 3)(0, 0, 0)
        p_b = (ctypes.c_double * 1)(0)
        eval_gz = 'GTO_Gv_general'
    else:
        if abs(b - numpy.diag(b.diagonal())).sum() < 1e-8:
            eval_gz = 'GTO_Gv_orth'
        else:
            eval_gz = 'GTO_Gv_nonorth'
        gxyzT = numpy.asarray(gxyz.T, order='C', dtype=numpy.int32)
        p_gxyzT = gxyzT.ctypes.data_as(ctypes.c_void_p)
        b = numpy.hstack((b.ravel(), q) + Gvbase)
        p_b = b.ctypes.data_as(ctypes.c_void_p)
        p_gs = (ctypes.c_int * 3)(*[len(x) for x in Gvbase])

    Ls = cell.get_lattice_Ls()
    expkL = numpy.exp(1j * numpy.dot(kptjs, Ls.T))

    atm, bas, env = gto.conc_env(cell._atm, cell._bas, cell._env, cell._atm,
                                 cell._bas, cell._env)
    ao_loc = gto.moleintor.make_loc(bas, intor)
    if shls_slice is None:
        shls_slice = (0, cell.nbas, cell.nbas, cell.nbas * 2)
    else:
        shls_slice = (shls_slice[0], shls_slice[1], cell.nbas + shls_slice[2],
                      cell.nbas + shls_slice[3])
    ni = ao_loc[shls_slice[1]] - ao_loc[shls_slice[0]]
    nj = ao_loc[shls_slice[3]] - ao_loc[shls_slice[2]]
    nkpts = len(kptjs)
    nimgs = len(Ls)
    shape = (nkpts, comp, ni, nj, nGv)

    # Theoretically, hermitian symmetry can also be found for kpti == kptj:
    #       f_ji(G) = \int f_ji exp(-iGr) = \int f_ij^* exp(-iGr) = [f_ij(-G)]^*
    # but the hermi operation requires reordering axis 0, which is inefficient.
    if aosym == 's1hermi':  # Symmetry for Gamma point
        assert (is_zero(q) and is_zero(kptjs) and ni == nj)
    elif aosym == 's2':
        i0 = ao_loc[shls_slice[0]]
        i1 = ao_loc[shls_slice[1]]
        nij = i1 * (i1 + 1) // 2 - i0 * (i0 + 1) // 2
        shape = (nkpts, comp, nij, nGv)

    drv = libpbc.PBC_ft_latsum_drv
    intor = getattr(libpbc, intor)
    eval_gz = getattr(libpbc, eval_gz)
    if nkpts == 1:
        fill = getattr(libpbc, 'PBC_ft_fill_nk1' + aosym)
    else:
        fill = getattr(libpbc, 'PBC_ft_fill_k' + aosym)
    out = numpy.ndarray(shape, dtype=numpy.complex128, buffer=out)

    drv(intor, eval_gz, fill, out.ctypes.data_as(ctypes.c_void_p),
        ctypes.c_int(nkpts), ctypes.c_int(comp), ctypes.c_int(nimgs),
        Ls.ctypes.data_as(ctypes.c_void_p),
        expkL.ctypes.data_as(ctypes.c_void_p), (ctypes.c_int * 4)(*shls_slice),
        ao_loc.ctypes.data_as(ctypes.c_void_p),
        GvT.ctypes.data_as(ctypes.c_void_p), p_b, p_gxyzT, p_gs,
        ctypes.c_int(nGv), atm.ctypes.data_as(ctypes.c_void_p),
        ctypes.c_int(cell.natm), bas.ctypes.data_as(ctypes.c_void_p),
        ctypes.c_int(cell.nbas), env.ctypes.data_as(ctypes.c_void_p))

    if aosym == 's1hermi':
        for i in range(1, ni):
            out[:, :, :i, i] = out[:, :, i, :i]
    out = numpy.rollaxis(out, -1, 2)
    if comp == 1:
        out = out[:, 0]
    return out
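The closing rollaxis moves the G-vector axis in front of the AO indices; a toy shape check (the dimensions are illustrative only):

import numpy

nkpts, comp, ni, nj, nGv = 2, 1, 3, 4, 5
out = numpy.zeros((nkpts, comp, ni, nj, nGv), dtype=numpy.complex128)

# (nkpts, comp, ni, nj, nGv) -> (nkpts, comp, nGv, ni, nj)
print(numpy.rollaxis(out, -1, 2).shape)  # (2, 1, 5, 3, 4)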
Code example #59
  i = 0    # rows processed successfully
  j = 0    # rows skipped because of errors
  gft = 0  # progress counter
  for row in csvreader:
    try:
      print(gft)
      gft += 1
      image = np.zeros(shape=(1, 224, 224, 3))
      # CSV rows hold Windows-style 'dir\\file' paths; rebuild a POSIX path.
      parts = row[0].split('\\')
      ty = parts[0] + '/' + parts[1]
      image[0] = img.imread('/content/images/' + ty)
      # NHWC -> NCHW for the torch model.
      image = np.rollaxis(image, 3, 1)
      a = torch.tensor(image, dtype=torch.float32)
      imdata[i] = model(a).detach().numpy()
      i += 1
    except Exception:
      # Skip unreadable or malformed rows, but count them.
      j += 1
  print(i)
  print(j)
print(imdata.shape)

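np.rollaxis(image, 3, 1) is what converts the NHWC array into the NCHW layout the torch model expects; a minimal equivalence check:

import numpy as np

image = np.zeros((1, 224, 224, 3))
nchw = np.rollaxis(image, 3, 1)
assert nchw.shape == (1, 3, 224, 224)
assert np.array_equal(nchw, np.transpose(image, (0, 3, 1, 2)))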
Code example #60
	img_list = os.listdir(data_path + '/' + dataset)
	print('Loaded the images of dataset-{}\n'.format(dataset))
	for img in img_list:
		img_path = data_path + '/' + dataset + '/' + img
		img = image.load_img(img_path, target_size=(224, 224))
		x = image.img_to_array(img)
		x = np.expand_dims(x, axis=0)
		x = preprocess_input(x)
#		x = x/255
		print('Input image shape:', x.shape)
		img_data_list.append(x)

img_data = np.array(img_data_list)  # (num_images, 1, 224, 224, 3)
#img_data = img_data.astype('float32')
print(img_data.shape)
# Each element kept the singleton batch axis from expand_dims; move it to
# the front and drop it to get (num_images, 224, 224, 3).
img_data = np.rollaxis(img_data, 1, 0)
print(img_data.shape)
img_data = img_data[0]
print(img_data.shape)


# Define the number of classes
num_classes = 4
num_of_samples = img_data.shape[0]
labels = np.ones((num_of_samples,), dtype='int64')

labels[0:202] = 0
labels[202:404] = 1
labels[404:606] = 2
labels[606:] = 3
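A hedged continuation in the spirit of the earlier snippets (the one-hot encoding and the 80/20 split are assumptions, not part of the fragment):

from keras.utils import np_utils
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle

Y = np_utils.to_categorical(labels, num_classes)
x, y = shuffle(img_data, Y, random_state=2)
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.2)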