def setUp(self):
     self.a = np.arange(np.prod([3,1,5,6])).reshape(3,1,5,6)
     self.b = np.empty((3,0,5,6))
     self.complex_indices = ['skip', Ellipsis,
         0,
         # Boolean indices, up to 3-d for some special cases of eating up
         # dimensions, also need to test all False
         np.array(False),
         np.array([True, False, False]),
         np.array([[True, False], [False, True]]),
         np.array([[[False, False], [False, False]]]),
         # Some slices:
         slice(-5, 5, 2),
         slice(1, 1, 100),
         slice(4, -1, -2),
         slice(None,None,-3),
         # Some Fancy indexes:
         np.empty((0,1,1), dtype=np.intp), # empty broadcastable
         np.array([0,1,-2]),
         np.array([[2],[0],[1]]),
         np.array([[0,-1], [0,1]]),
         np.array([2,-1]),
         np.zeros([1]*31, dtype=int), # trigger too large array.
         np.array([0., 1.])] # invalid datatype
     # Some simpler indices that still cover a bit more
     self.simple_indices = [Ellipsis, None, -1, [1], np.array([True]), 'skip']
     # Very simple ones to fill the rest:
     self.fill_indices = [slice(None,None), 0]
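A minimal sketch of how fixtures like this are typically driven (the loop below is illustrative, not part of the original test class): each candidate index is applied to a fresh array, and indices that are invalid on purpose are expected to raise.

import numpy as np

a = np.arange(np.prod([3, 1, 5, 6])).reshape(3, 1, 5, 6)
for index in [Ellipsis, None, -1, [1], np.array([True])]:
    try:
        print(type(index).__name__, '->', a[index].shape)
    except IndexError as e:
        print(type(index).__name__, 'raised:', e)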
Example 2
    def tobsr(self, blocksize=None, copy=True):
        from .bsr import bsr_matrix

        if blocksize is None:
            from .spfuncs import estimate_blocksize
            return self.tobsr(blocksize=estimate_blocksize(self))

        elif blocksize == (1,1):
            arg1 = (self.data.reshape(-1,1,1),self.indices,self.indptr)
            return bsr_matrix(arg1, shape=self.shape, copy=copy)

        else:
            R,C = blocksize
            M,N = self.shape

            if R < 1 or C < 1 or M % R != 0 or N % C != 0:
                raise ValueError('invalid blocksize %s' % blocksize)

            blks = csr_count_blocks(M,N,R,C,self.indptr,self.indices)

            indptr = np.empty(M//R + 1, dtype=np.intc)
            indices = np.empty(blks, dtype=np.intc)
            data = np.zeros((blks,R,C), dtype=self.dtype)

            csr_tobsr(M, N, R, C, self.indptr, self.indices, self.data,
                    indptr, indices, data.ravel())

            return bsr_matrix((data,indices,indptr), shape=self.shape)
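For context, this method is reached through the public tobsr on a CSR matrix; a small usage sketch (matrix values arbitrary):

import numpy as np
from scipy.sparse import csr_matrix

A = csr_matrix(np.arange(16.0).reshape(4, 4))
B = A.tobsr(blocksize=(2, 2))  # both dimensions must divide evenly by the blocksize
print(B.blocksize, B.shape)    # (2, 2) (4, 4)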
Example 3
    def testNormalizeLike(self):
        a = np.empty((10, 3))
        a[:, 0] = np.random.random(10)
        a[:, 1] = np.random.random(10)
        a[:, 2] = np.random.random(10)

        b = np.empty((10, 3))
        b[:, 0] = np.random.random(10)
        b[:, 1] = np.random.random(10)
        b[:, 2] = np.random.random(10)
        b = b * 2

        c = normalizeArrayLike(b, a)

        # Should be normalized like a
        mean = []
        std = []
        mean.append(np.mean(a[:, 0]))
        mean.append(np.mean(a[:, 1]))
        mean.append(np.mean(a[:, 2]))
        std.append(np.std(a[:, 0]))
        std.append(np.std(a[:, 1]))
        std.append(np.std(a[:, 2]))

        # Check all values
        for col in range(b.shape[1]):
            for bval, cval in zip(b[:, col].flat, c[:, col].flat):
                print(cval, (bval - mean[col]) / std[col])
                print(cval, bval)
                assert cval == (bval - mean[col]) / std[col]
        print("TestNormalizeLike success")
Example 4
    def _binopt(self, other, op):
        """apply the binary operation fn to two sparse matrices."""
        other = self.__class__(other)

        # e.g. csr_plus_csr, csr_minus_csr, etc.
        fn = getattr(_sparsetools, self.format + op + self.format)

        maxnnz = self.nnz + other.nnz
        idx_dtype = get_index_dtype((self.indptr, self.indices,
                                     other.indptr, other.indices),
                                    maxval=maxnnz)
        indptr = np.empty(self.indptr.shape, dtype=idx_dtype)
        indices = np.empty(maxnnz, dtype=idx_dtype)

        bool_ops = ['_ne_', '_lt_', '_gt_', '_le_', '_ge_']
        if op in bool_ops:
            data = np.empty(maxnnz, dtype=np.bool_)
        else:
            data = np.empty(maxnnz, dtype=upcast(self.dtype, other.dtype))

        fn(self.shape[0], self.shape[1],
           np.asarray(self.indptr, dtype=idx_dtype),
           np.asarray(self.indices, dtype=idx_dtype),
           self.data,
           np.asarray(other.indptr, dtype=idx_dtype),
           np.asarray(other.indices, dtype=idx_dtype),
           other.data,
           indptr, indices, data)

        A = self.__class__((data, indices, indptr), shape=self.shape)
        A.prune()

        return A
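_binopt is internal, but it is what the ordinary operators dispatch to; a sketch showing both dtype paths (np.bool_ for comparison ops, upcast for arithmetic):

import numpy as np
from scipy.sparse import csr_matrix

A = csr_matrix(np.eye(3))
B = csr_matrix(np.ones((3, 3)))
print((A != B).dtype)  # bool: comparison ops allocate boolean data
print((A + B).dtype)   # float64: arithmetic ops upcast the operand dtypes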
Example 5
def theta_limiter(r,cfl,theta=0.95):
    r"""
    Theta limiter
    
    Additional Input:
     - *theta* = 0.95
    """
    a = np.empty((2,len(r)))
    b = np.empty((3,len(r)))
    
    a[0,:] = 0.001
    a[1,:] = cfl
    cfmod1 = np.max(a,axis=0)
    a[0,:] = 0.999
    cfmod2 = np.min(a,axis=0)
    s1 = 2.0 / cfmod1
    s2 = (1.0 + cfl) / 3.0
    phimax = 2.0 / (1.0 - cfmod2)
    
    a[0,:] = (1.0 - theta) * s1
    a[1,:] = 1.0 + s2 * (r - 1.0)
    left = np.max(a,axis=0)
    a[0,:] = (1.0 - theta) * phimax * r
    a[1,:] = theta * s1 * r
    middle = np.max(a,axis=0)
    
    b[0,:] = left
    b[1,:] = middle
    b[2,:] = theta*phimax
    
    return np.min(b,axis=0)
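A quick usage sketch, assuming the function above is in scope (values are illustrative): r holds the flux ratios and cfl the local Courant numbers.

import numpy as np

r = np.linspace(-1.0, 3.0, 9)
cfl = np.full_like(r, 0.5)
phi = theta_limiter(r, cfl, theta=0.95)
print(phi.shape)  # (9,)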
Example 6
	def evaluate(self, mu_eval, pix_index=None):
		'''
		Evaluate Ar at the given distance modulus, or list of distance
		moduli, mu_eval.
		'''
		if type(mu_eval) not in [list, np.ndarray]:
			mu_eval = [mu_eval]
		
		query_map = self.Ar
		if pix_index is not None:
			query_map = self.Ar[:,pix_index]
		
		# Create an empty map for each value of mu
		Ar_map = None
		if (pix_index is None) or (type(pix_index) in [list, np.ndarray]):
			Ar_map = np.empty((len(mu_eval), query_map.shape[1]), dtype=np.float64)
		else:
			Ar_map = np.empty(len(mu_eval), dtype=np.float64)
		Ar_map.fill(np.nan)
		
		for k,m in enumerate(mu_eval):
			if (m >= self.mu[0]) and (m <= self.mu[-1]):
				for i,mu_anchor in enumerate(self.mu[1:]):
					if mu_anchor >= m:
						slope = (query_map[i+1] - query_map[i]) / (self.mu[i+1] - self.mu[i])
						Ar_map[k] = query_map[i] + slope * (m - self.mu[i])
						break
		
		return Ar_map
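The inner loop is a hand-rolled piecewise-linear interpolation over the self.mu anchors; in the 1-D case it agrees with np.interp (anchor values below are made up):

import numpy as np

mu = np.array([4.0, 6.0, 8.0])   # distance-modulus anchors
Ar = np.array([0.1, 0.5, 1.2])   # reddening at each anchor
print(np.interp([5.0, 7.0], mu, Ar))  # [0.3  0.85]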
Example 7
File: plca.py Project: EQ4/msaf-gpl
    def __init__(self, V, rank, win=1, circular=False, **kwargs):
        """
        Parameters
        ----------
        V : array, shape (`F`, `T`)
            Matrix to analyze.
        rank : int
            Rank of the decomposition (i.e. number of columns of `W`
            and rows of `H`).
        win : int
            Length of each of the convolutive bases.  Defaults to 1,
            i.e. the model is identical to PLCA.
        circular : boolean
            If True, data shifted past `T` will wrap around to
            0. Defaults to False.
        alphaW, alphaZ, alphaH : float or appropriately shaped array
            Sparsity prior parameters for `W`, `Z`, and `H`.  Negative
            values lead to sparser distributions, positive values
            makes the distributions more uniform.  Defaults to 0 (no
            prior).

            **Note** that the prior is not parametrized in the
            standard way where the uninformative prior has alpha=1.
        """
        PLCA.__init__(self, V, rank, **kwargs)

        self.win = win
        self.circular = circular

        self.VRW = np.empty((self.F, self.rank, self.win))
        self.VRH = np.empty((self.T, self.rank))
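A hypothetical instantiation, assuming this __init__ belongs to the SIPLCA class in plca.py (the PLCA base class is not shown here, so this is left as a commented sketch):

import numpy as np

# V = np.abs(np.random.randn(513, 200))  # stand-in F x T magnitude spectrogram
# model = SIPLCA(V, rank=8, win=4, circular=False, alphaW=-0.1)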
Example 8
    def get_new_values(self):
        values = self.values

        # place the values
        length, width = self.full_shape
        stride = values.shape[1]
        result_width = width * stride
        result_shape = (length, result_width)

        # if our mask is all True, then we can use our existing dtype
        if self.mask.all():
            dtype = values.dtype
            new_values = np.empty(result_shape, dtype=dtype)
        else:
            dtype, fill_value = _maybe_promote(values.dtype)
            new_values = np.empty(result_shape, dtype=dtype)
            new_values.fill(fill_value)

        new_mask = np.zeros(result_shape, dtype=bool)

        # is there a simpler / faster way of doing this?
        for i in range(values.shape[1]):
            chunk = new_values[:, i * width: (i + 1) * width]
            mask_chunk = new_mask[:, i * width: (i + 1) * width]

            chunk.flat[self.mask] = self.sorted_values[:, i]
            mask_chunk.flat[self.mask] = True

        return new_values, new_mask
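This is pandas-internal unstacking machinery; the dtype-promotion branch is visible through the public unstack, where a missing label forces a fill value (sketch):

import pandas as pd

idx = pd.MultiIndex.from_tuples([('a', 'x'), ('a', 'y'), ('b', 'x')])
s = pd.Series([1, 2, 3], index=idx)  # int64 values
print(s.unstack().dtypes)            # promoted to float64 because ('b', 'y') is missing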
Example 9
def block2d_to_blocknd(values, items, shape, labels, ref_items=None):
    """ pivot to the labels shape """
    from pandas.core.internals import make_block
    panel_shape = (len(items),) + shape

    # TODO: lexsort depth needs to be 2!!

    # Create observation selection vector using major and minor
    # labels, for converting to panel format.
    selector = factor_indexer(shape[1:], labels)
    mask = np.zeros(np.prod(shape), dtype=bool)
    mask.put(selector, True)

    if mask.all():
        pvalues = np.empty(panel_shape, dtype=values.dtype)
    else:
        dtype, fill_value = _maybe_promote(values.dtype)
        pvalues = np.empty(panel_shape, dtype=dtype)
        pvalues.fill(fill_value)

    for i in range(len(items)):
        pvalues[i].flat[mask] = values[:, i]

    if ref_items is None:
        ref_items = items

    return make_block(pvalues, items, ref_items)
Example 10
    def components(self, component_indices, ind=None):
        assert self.check_ind(ind)
        assert (
            isinstance(component_indices, list)
            and (len(component_indices) == 0 or min(component_indices) >= 0)
            or (
                isinstance(component_indices, np.ndarray)
                and component_indices.ndim == 1
                and (len(component_indices) == 0 or np.min(component_indices) >= 0)
            )
        )

        if ind is None:
            ind = range(len(self._list))
        elif isinstance(ind, Number):
            ind = [ind]

        if len(ind) == 0:
            assert (
                len(component_indices) == 0
                or isinstance(component_indices, list)
                and max(component_indices) < self.dim
                or isinstance(component_indices, np.ndarray)
                and np.max(component_indices) < self.dim
            )
            return np.empty((0, len(component_indices)))

        R = np.empty((len(ind), len(component_indices)))
        for k, i in enumerate(ind):
            R[k] = self._list[i].components(component_indices)

        return R
Example 11
def test_uniform_targets():
    enet = ElasticNetCV(fit_intercept=True, n_alphas=3)
    m_enet = MultiTaskElasticNetCV(fit_intercept=True, n_alphas=3)
    lasso = LassoCV(fit_intercept=True, n_alphas=3)
    m_lasso = MultiTaskLassoCV(fit_intercept=True, n_alphas=3)

    models_single_task = (enet, lasso)
    models_multi_task = (m_enet, m_lasso)

    rng = np.random.RandomState(0)

    X_train = rng.random_sample(size=(10, 3))
    X_test = rng.random_sample(size=(10, 3))

    y1 = np.empty(10)
    y2 = np.empty((10, 2))

    for model in models_single_task:
        for y_values in (0, 5):
            y1.fill(y_values)
            assert_array_equal(model.fit(X_train, y1).predict(X_test), y1)
            assert_array_equal(model.alphas_, [np.finfo(float).resolution]*3)

    for model in models_multi_task:
        for y_values in (0, 5):
            y2[:, 0].fill(y_values)
            y2[:, 1].fill(2 * y_values)
            assert_array_equal(model.fit(X_train, y2).predict(X_test), y2)
            assert_array_equal(model.alphas_, [np.finfo(float).resolution]*3)
Example 12
    def interpolate(self, points, diff=False):
        """Interpolate splines at manu points."""

        import time

        if points.ndim == 1:
            raise Exception('Expected 2d array. Received {}d array'.format(points.ndim))
        if points.shape[1] != self.d:
            raise Exception('Second dimension should be {}. Received : {}.'.format(self.d, points.shape[1]))
        if not np.all( np.isfinite(points)):
            raise Exception('Spline interpolator evaluated at non-finite points.')

        n_sp = self.__mcoeffs__.shape[-1]

        N = points.shape[0]
        d = points.shape[1]

        if not diff:
            from .eval_cubic import vec_eval_cubic_splines
            values = np.empty((N,n_sp), dtype=float)
            vec_eval_cubic_splines(self.a, self.b, self.orders, self.__mcoeffs__, points, values)
            return values
        else:
            from .eval_cubic import vec_eval_cubic_splines_G
            values = np.empty((N,n_sp), dtype=float)
            dvalues = np.empty((N,d,n_sp), dtype=float)
            vec_eval_cubic_splines_G(self.a, self.b, self.orders, self.__mcoeffs__, points, values, dvalues)
            return [values, dvalues]
Example 13
        def _test_dtype(dtype, can_hold_na):
            data = np.random.randint(0, 2, (5, 3)).astype(dtype)

            indexer = [2, 1, 0, 1]
            out0 = np.empty((4, 3), dtype=dtype)
            out1 = np.empty((5, 4), dtype=dtype)
            com.take_nd(data, indexer, out=out0, axis=0)
            com.take_nd(data, indexer, out=out1, axis=1)
            expected0 = data.take(indexer, axis=0)
            expected1 = data.take(indexer, axis=1)
            tm.assert_almost_equal(out0, expected0)
            tm.assert_almost_equal(out1, expected1)

            indexer = [2, 1, 0, -1]
            out0 = np.empty((4, 3), dtype=dtype)
            out1 = np.empty((5, 4), dtype=dtype)
            if can_hold_na:
                com.take_nd(data, indexer, out=out0, axis=0)
                com.take_nd(data, indexer, out=out1, axis=1)
                expected0 = data.take(indexer, axis=0)
                expected1 = data.take(indexer, axis=1)
                expected0[3, :] = np.nan
                expected1[:, 3] = np.nan
                tm.assert_almost_equal(out0, expected0)
                tm.assert_almost_equal(out1, expected1)
            else:
                for i, out in enumerate([out0, out1]):
                    with tm.assertRaisesRegexp(TypeError, self.fill_error):
                        com.take_nd(data, indexer, out=out, axis=i)
                    # no exception o/w
                    data.take(indexer, out=out, axis=i)
Example 14
    def setconstants(self, samplefac, colors):
        self.NCYCLES = 100 # Number of learning cycles
        self.NETSIZE = colors # Number of colours used
        self.SPECIALS = 3 # Number of reserved colours used
        self.BGCOLOR = self.SPECIALS-1 # Reserved background colour
        self.CUTNETSIZE = self.NETSIZE - self.SPECIALS
        self.MAXNETPOS = self.NETSIZE - 1

        self.INITRAD = self.NETSIZE//8 # For 256 colours, radius starts at 32
        self.RADIUSBIASSHIFT = 6
        self.RADIUSBIAS = 1 << self.RADIUSBIASSHIFT
        self.INITBIASRADIUS = self.INITRAD * self.RADIUSBIAS
        self.RADIUSDEC = 30 # Factor of 1/30 each cycle

        self.ALPHABIASSHIFT = 10 # Alpha starts at 1
        self.INITALPHA = 1 << self.ALPHABIASSHIFT # biased by 10 bits

        self.GAMMA = 1024.0
        self.BETA = 1.0/1024.0
        self.BETAGAMMA = self.BETA * self.GAMMA

        self.network = np.empty((self.NETSIZE, 3), dtype='float64') # The network itself
        self.colormap = np.empty((self.NETSIZE, 4), dtype='int32') # The network itself

        self.netindex = np.empty(256, dtype='int32') # For network lookup - really 256

        self.bias = np.empty(self.NETSIZE, dtype='float64') # Bias and freq arrays for learning
        self.freq = np.empty(self.NETSIZE, dtype='float64')

        self.pixels = None
        self.samplefac = samplefac

        self.a_s = {}
Example 15
    def __init__(self, ale, agent, resized_width, resized_height,
                 resize_method, num_epochs, epoch_length, test_length,
                 frame_skip, death_ends_episode, max_start_nullops):
        self.ale = ale
        self.agent = agent
        self.num_epochs = num_epochs
        self.epoch_length = epoch_length
        self.test_length = test_length
        self.frame_skip = frame_skip
        self.death_ends_episode = death_ends_episode
        self.min_action_set = ale.getMinimalActionSet()
        self.resized_width = resized_width
        self.resized_height = resized_height
        self.resize_method = resize_method
        self.width, self.height = ale.getScreenDims()

        self.buffer_length = 2
        self.buffer_count = 0
        self.screen_rgb = np.empty((self.height, self.width, 3),
                                   dtype=np.uint8)
        self.screen_buffer = np.empty((self.buffer_length,
                                       self.height, self.width),
                                      dtype=np.uint8)

        self.terminal_lol = False # Most recent episode ended on a loss of life
        self.max_start_nullops = max_start_nullops
Example 16
    def _reset_field(self, tm_posess=0):
        n = self.teams[0].n
        self.x  = [0, 0]
        self.y  = [0, 0]
        self.vx = [np.zeros(n), np.zeros(n)]
        self.vy = [np.zeros(n), np.zeros(n)]

        x_rows = np.empty(n)
        y_rows = np.empty(n)
        rows = 3
        for row in range(rows):
            i2 = (n * (row + 1)) // rows
            i1 = (n * (  row  )) // rows

            x_rows[i1:i2] = (row+1)/(rows+2) * self.p.X * np.ones(i2 - i1)
            y_rows[i1:i2] = np.arange(i2 - i1)
            
        # team with starting possession should start closer to ball
        dx = 0.5 * 1/(rows+2) * self.p.X

        self.x[     tm_posess     ] = x_rows.copy() - dx
        self.x[(tm_posess + 1) % 2] = x_rows.copy() + dx
        self.y[     tm_posess     ] = y_rows.copy()
        self.y[(tm_posess + 1) % 2] = y_rows.copy() # should it be (-)

        self.x[1] = -self.x[1]

        # ball starts in center
        self.bx  = 0.0
        self.by  = 0.0
        self.bvx = 0.0
        self.bvy = 0.0
Example 17
def get_rpn_batch(roidb):
    """
    prototype for rpn batch: data, im_info, gt_boxes
    :param roidb: ['image', 'flipped'] + ['gt_boxes', 'boxes', 'gt_classes']
    :return: data, label
    """
    assert len(roidb) == 1, 'Single batch only'
    imgs, roidb = get_image(roidb)
    im_array = imgs[0]
    im_info = np.array([roidb[0]['im_info']], dtype=np.float32)

    # gt boxes: (x1, y1, x2, y2, cls)
    if roidb[0]['gt_classes'].size > 0:
        gt_inds = np.where(roidb[0]['gt_classes'] != 0)[0]
        gt_boxes = np.empty((roidb[0]['boxes'].shape[0], 5), dtype=np.float32)
        gt_boxes[:, 0:4] = roidb[0]['boxes'][gt_inds, :]
        gt_boxes[:, 4] = roidb[0]['gt_classes'][gt_inds]
    else:
        gt_boxes = np.empty((0, 5), dtype=np.float32)

    data = {'data': im_array,
            'im_info': im_info}
    label = {'gt_boxes': gt_boxes}

    return data, label
Example 18
    def _convert_to_array(self, values, name=None, other=None):
        """converts values to ndarray"""
        from pandas.tseries.timedeltas import to_timedelta

        coerce = True
        if not is_list_like(values):
            values = np.array([values])
        inferred_type = lib.infer_dtype(values)

        if inferred_type in ('datetime64', 'datetime', 'date', 'time'):
            # if we have an other of timedelta but use pd.NaT here,
            # we are on the wrong path
            if (other is not None and other.dtype == 'timedelta64[ns]' and
                    all(isnull(v) for v in values)):
                values = np.empty(values.shape, dtype=other.dtype)
                values[:] = iNaT

            # a datelike
            elif isinstance(values, pd.DatetimeIndex):
                values = values.to_series()
            elif not (isinstance(values, (np.ndarray, pd.Series)) and
                      is_datetime64_dtype(values)):
                values = tslib.array_to_datetime(values)
        elif inferred_type in ('timedelta', 'timedelta64'):
            # have a timedelta, convert to ns here
            values = to_timedelta(values, coerce=coerce)
        elif inferred_type == 'integer':
            # py3 compat where dtype is 'm' but is an integer
            if values.dtype.kind == 'm':
                values = values.astype('timedelta64[ns]')
            elif isinstance(values, pd.PeriodIndex):
                values = values.to_timestamp().to_series()
            elif name not in ('__truediv__', '__div__', '__mul__'):
                raise TypeError("incompatible type for a datetime/timedelta "
                                "operation [{0}]".format(name))
        elif isinstance(values[0], pd.DateOffset):
            # handle DateOffsets
            os = np.array([getattr(v, 'delta', None) for v in values])
            mask = isnull(os)
            if mask.any():
                raise TypeError("cannot use a non-absolute DateOffset in "
                                "datetime/timedelta operations [{0}]".format(
                                    ', '.join([com.pprint_thing(v)
                                               for v in values[mask]])))
            values = to_timedelta(os, coerce=coerce)
        elif inferred_type == 'floating':

            # all nan, so ok, use the other dtype (e.g. timedelta or datetime)
            if isnull(values).all():
                values = np.empty(values.shape, dtype=other.dtype)
                values[:] = iNaT
            else:
                raise TypeError(
                    'incompatible type [{0}] for a datetime/timedelta '
                    'operation'.format(np.array(values).dtype))
        else:
            raise TypeError("incompatible type [{0}] for a datetime/timedelta"
                            " operation".format(np.array(values).dtype))

        return values
Example 19
    def na_op(x, y):
        try:
            result = expressions.evaluate(
                op, str_rep, x, y, raise_on_error=True, **eval_kwargs)
        except TypeError:
            xrav = x.ravel()
            if isinstance(y, (np.ndarray, pd.Series)):
                dtype = np.find_common_type([x.dtype, y.dtype], [])
                result = np.empty(x.size, dtype=dtype)
                yrav = y.ravel()
                mask = notnull(xrav) & notnull(yrav)
                xrav = xrav[mask]
                yrav = yrav[mask]
                if np.prod(xrav.shape) and np.prod(yrav.shape):
                    result[mask] = op(xrav, yrav)
            elif hasattr(x,'size'):
                result = np.empty(x.size, dtype=x.dtype)
                mask = notnull(xrav)
                xrav = xrav[mask]
                if np.prod(xrav.shape):
                    result[mask] = op(xrav, y)
            else:
                raise TypeError("cannot perform operation {op} between objects "
                                "of type {x} and {y}".format(op=name,x=type(x),y=type(y)))

            result, changed = com._maybe_upcast_putmask(result, ~mask, np.nan)
            result = result.reshape(x.shape)

        result = com._fill_zeros(result, x, y, name, fill_zeros)

        return result
Example 20
def convert_yuv420_to_rgb_image(y_plane, u_plane, v_plane,
                                w, h,
                                ccm_yuv_to_rgb=DEFAULT_YUV_TO_RGB_CCM,
                                yuv_off=DEFAULT_YUV_OFFSETS):
    """Convert a YUV420 8-bit planar image to an RGB image.

    Args:
        y_plane: The packed 8-bit Y plane.
        u_plane: The packed 8-bit U plane.
        v_plane: The packed 8-bit V plane.
        w: The width of the image.
        h: The height of the image.
        ccm_yuv_to_rgb: (Optional) the 3x3 CCM to convert from YUV to RGB.
        yuv_off: (Optional) offsets to subtract from each of Y,U,V values.

    Returns:
        RGB float-3 image array, with pixel values in [0.0, 1.0].
    """
    y = numpy.subtract(y_plane, yuv_off[0])
    u = numpy.subtract(u_plane, yuv_off[1]).view(numpy.int8)
    v = numpy.subtract(v_plane, yuv_off[2]).view(numpy.int8)
    u = u.reshape(h//2, w//2).repeat(2, axis=1).repeat(2, axis=0)
    v = v.reshape(h//2, w//2).repeat(2, axis=1).repeat(2, axis=0)
    yuv = numpy.dstack([y, u.reshape(w*h), v.reshape(w*h)])
    flt = numpy.empty([h, w, 3], dtype=numpy.float32)
    flt.reshape(w*h*3)[:] = yuv.reshape(h*w*3)[:]
    flt = numpy.dot(flt.reshape(w*h,3), ccm_yuv_to_rgb.T).clip(0, 255)
    rgb = numpy.empty([h, w, 3], dtype=numpy.uint8)
    rgb.reshape(w*h*3)[:] = flt.reshape(w*h*3)[:]
    return rgb.astype(numpy.float32) / 255.0
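A hypothetical call, assuming the function above and its module-level DEFAULT_YUV_TO_RGB_CCM / DEFAULT_YUV_OFFSETS are in scope (a flat gray 2x2 frame):

import numpy as np

w, h = 2, 2
y_plane = np.full(w * h, 128, dtype=np.uint8)                # packed luma
u_plane = np.full((w // 2) * (h // 2), 128, dtype=np.uint8)  # packed chroma
v_plane = np.full((w // 2) * (h // 2), 128, dtype=np.uint8)
rgb = convert_yuv420_to_rgb_image(y_plane, u_plane, v_plane, w, h)
print(rgb.shape, rgb.dtype)  # (2, 2, 3) float32, values in [0.0, 1.0]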
Example 21
    def eval_complex(self, shape, fargs, mode='eval', term_mode=None,
                     diff_var=None, **kwargs):
        rout = nm.empty(shape, dtype=nm.float64)

        fargsd = split_complex_args(fargs)

        # Assuming linear forms. Then the matrix is the
        # same both for real and imaginary part.
        rstatus = self.call_function(rout, fargsd['r'])
        if (diff_var is None) and len(fargsd) >= 2:
            iout = nm.empty(shape, dtype=nm.float64)
            istatus = self.call_function(iout, fargsd['i'])

            if mode == 'eval' and len(fargsd) >= 4:
                irout = nm.empty(shape, dtype=nm.float64)
                irstatus = self.call_function(irout, fargsd['ir'])
                riout = nm.empty(shape, dtype=nm.float64)
                ristatus = self.call_function(riout, fargsd['ri'])

                out = (rout - iout) + (riout + irout) * 1j
                status = rstatus or istatus or ristatus or irstatus

            else:
                out = rout + 1j * iout
                status = rstatus or istatus

        else:
            out, status = rout + 0j, rstatus

        if mode == 'eval':
            out1 = nm.sum(out, 0).squeeze()
            return out1, status

        else:
            return out, status
Example 22
    def test_empty_with_mangled_column_pass_dtype_by_indexes(self):
        data = 'one,one'
        result = self.read_csv(StringIO(data), dtype={0: 'u1', 1: 'f'})

        expected = DataFrame(
            {'one': np.empty(0, dtype='u1'), 'one.1': np.empty(0, dtype='f')})
        tm.assert_frame_equal(result, expected, check_index_type=False)
Example 23
    def __init__(self, parent=None, width=5, height=4, dpi=60):
        """
        Descript. :
        """
        self.mouse_position = [0, 0]
        self.max_plot_points = None

        self.fig = Figure(figsize=(width, height), dpi=dpi)
        self.axes = self.fig.add_subplot(111)

        FigureCanvas.__init__(self, self.fig)
        self.setParent(parent)
        FigureCanvas.setSizePolicy(
            self, QtImport.QSizePolicy.Expanding, QtImport.QSizePolicy.Expanding
        )
        FigureCanvas.updateGeometry(self)

        self.single_curve = None
        self.real_time = None
        self._axis_x_array = np.empty(0)
        self._axis_y_array = np.empty(0)
        self._axis_x_limits = [None, None]
        self._axis_y_limits = [None, None]

        self._curves_dict = {}
        self.setMaximumSize(2000, 2000)
Example 24
	def sweep_lo_dirfile(self, Npackets_per=10, channels=None, center_freq=750.0e6, span=2.0e6, bb_freqs=None, save_path='/mnt/iqstream/lo_sweeps'):
		N = Npackets_per
		start = center_freq - (span/2.)
		stop = center_freq + (span/2.)
		step = 2.5e3
		sweep_freqs = np.arange(start, stop, step)
		sweep_freqs = np.round(sweep_freqs/step)*step
		print('Sweep freqs =', sweep_freqs/1.0e6)
		f = np.empty((len(channels), sweep_freqs.size))
		i = np.empty((len(channels), sweep_freqs.size))
		q = np.empty((len(channels), sweep_freqs.size))
		for count,freq in enumerate(sweep_freqs):
			print('Sweep freq =', freq/1.0e6)
			if self.v1.set_frequency(0, freq/1.0e6, 0.01): 
				time.sleep(0.1)
				i_buffer,q_buffer = self.get_UDP(N, freq, skip_packets=2, channels = channels)
				f[:,count]=freq+bb_freqs
				i[:,count]=np.mean(i_buffer,axis=0)
				q[:,count]=np.mean(q_buffer,axis=0)
			else:
				time.sleep(0.1)
				f[:,count]=freq+bb_freqs
				i[:,count]=np.nan
				q[:,count]=np.nan
		self.v1.set_frequency(0,center_freq / (1.0e6), 0.01) # LO
		
		return f, i, q
Example 25
	def get_UDP(self, Npackets, LO_freq, skip_packets=2, channels=None):
		#Npackets = np.int(time_interval * self.accum_freq)
		I_buffer = np.empty((Npackets + skip_packets, len(channels)))
		Q_buffer = np.empty((Npackets + skip_packets, len(channels)))
		self.fpga.write_int('pps_start', 1)
		count = 0
		while count < Npackets + skip_packets:
			packet = self.s.recv(8192) 
			data = np.frombuffer(packet, dtype='<i').astype('float')
			data /= 2.0**17
			data /= (self.accum_len/512.)
			ts = (np.frombuffer(packet[-4:], dtype='<i').astype('float')/ self.fpga_samp_freq)*1.0e3 # ts in ms
			odd_chan = channels[1::2]
			even_chan = channels[0::2]
			I_odd = data[1024 + ((odd_chan - 1) // 2)]
			Q_odd = data[1536 + ((odd_chan - 1) // 2)]
			I_even = data[0 + (even_chan // 2)]
			Q_even = data[512 + (even_chan // 2)]
			even_phase = np.arctan2(Q_even,I_even)
			odd_phase = np.arctan2(Q_odd,I_odd)
			if len(channels) % 2 > 0:
				I = np.hstack(list(zip(I_even[:len(I_odd)], I_odd)))
				Q = np.hstack(list(zip(Q_even[:len(Q_odd)], Q_odd)))
				I = np.hstack((I, I_even[-1]))	
				Q = np.hstack((Q, Q_even[-1]))	
				I_buffer[count] = I
				Q_buffer[count] = Q
			else:
				I = np.hstack(list(zip(I_even, I_odd)))
				Q = np.hstack(list(zip(Q_even, Q_odd)))
				I_buffer[count] = I
				Q_buffer[count] = Q
				
			count += 1
		return I_buffer[skip_packets:],Q_buffer[skip_packets:]
Example 26
    def _TO_DELETE_initialize_drifters(self, driftersPerOceanModel):
        """
        Initialize drifters and attach them for each particle.
        """
        self.driftersPerOceanModel = np.int32(driftersPerOceanModel)
        
        # Define mid-points for the different drifters 
        # Decompose the domain, so that we spread the drifters as much as possible
        sub_domains_y = int(np.round(np.sqrt(self.driftersPerOceanModel)))
        sub_domains_x = int(np.ceil(1.0*self.driftersPerOceanModel/sub_domains_y))
        self.midPoints = np.empty((driftersPerOceanModel, 2))
        for sub_y in range(sub_domains_y):
            for sub_x in range(sub_domains_x):
                drifter_id = sub_y*sub_domains_x + sub_x
                if drifter_id >= self.driftersPerOceanModel:
                    break
                self.midPoints[drifter_id, 0]  = (sub_x + 0.5)*self.nx*self.dx/sub_domains_x
                self.midPoints[drifter_id, 1]  = (sub_y + 0.5)*self.ny*self.dy/sub_domains_y
              
        # Loop over particles, sample drifters, and attach them
        for i in range(self.numParticles+1):
            drifters = GPUDrifterCollection.GPUDrifterCollection(self.gpu_ctx, self.driftersPerOceanModel,
                                                 observation_variance=self.observation_variance,
                                                 boundaryConditions=self.boundaryConditions,
                                                 domain_size_x=self.nx*self.dx, domain_size_y=self.ny*self.dy)

            initPos = np.empty((self.driftersPerOceanModel, 2))
            for d in range(self.driftersPerOceanModel):
                initPos[d,:] = np.random.multivariate_normal(self.midPoints[d,:], self.initialization_cov_drifters)
            drifters.setDrifterPositions(initPos)
            self.particles[i].attachDrifters(drifters)
Example 27
	def get_stream(self, chan, time_interval):
		self.fpga.write_int('pps_start', 1)
		#self.phases = np.empty((len(self.freqs),Npackets))
		Npackets = int(time_interval * self.accum_freq)
		Is = np.empty(Npackets)
		Qs = np.empty(Npackets)
		phases = np.empty(Npackets)
		count = 0
		while count < Npackets:
			packet = self.s.recv(8192 + 42) # total number of bytes including 42 byte header
			header = np.frombuffer(packet[:42], dtype='<B')
			roach_mac = header[6:12]
			filter_on = np.array([2, 68, 1, 2, 13, 33])
			if np.array_equal(roach_mac,filter_on):
				data = np.frombuffer(packet[42:], dtype='<i').astype('float')
				data /= 2.0**17
				data /= (self.accum_len/512.)
				ts = (np.frombuffer(packet[-4:], dtype='<i').astype('float')/ self.fpga_samp_freq)*1.0e3 # ts in ms
				# To stream one channel, make chan an argument
				if (chan % 2) > 0:
					I = data[1024 + ((chan - 1) // 2)]
					Q = data[1536 + ((chan - 1) // 2)]
				else:
					I = data[0 + (chan // 2)]
					Q = data[512 + (chan // 2)]
				phase = np.arctan2([Q],[I])
				Is[count]=I
				Qs[count]=Q
				phases[count]=phase
			else:
				continue
			count += 1
		return Is, Qs, phases
Example 28
    def test_empty_pass_dtype(self):
        data = 'one,two'
        result = self.read_csv(StringIO(data), dtype={'one': 'u1'})

        expected = DataFrame({'one': np.empty(0, dtype='u1'),
                              'two': np.empty(0, dtype=object)})
        tm.assert_frame_equal(result, expected, check_index_type=False)
Example 29
    def test_buffer_mode(self):
    
        
        # allocate something manifestly too short, should raise a value error
        buff = np.empty(0, dtype=np.int64)
        self.assertRaises(ValueError,
                          query_disc,
                          self.NSIDE, self.vec, self.radius, inclusive=True, buff=buff)
        

        # allocate something of wrong type, should raise a value error
        buff = np.empty(nside2npix(self.NSIDE), dtype=np.float64)
        self.assertRaises(ValueError,
                          query_disc,
                          self.NSIDE, self.vec, self.radius, inclusive=True, buff=buff)
       
        # allocate something acceptable, should succeed and return a subview
        buff = np.empty(nside2npix(self.NSIDE), dtype=np.int64)
        result = query_disc(self.NSIDE, self.vec, self.radius, inclusive=True, buff=buff)
        
        assert result.base is buff
        
        np.testing.assert_array_equal(
            result,
            np.array([ 0, 3, 4, 5, 11, 12, 13, 23 ])
            )
Example 30
def cart2sph(z, y, x):
    """Convert from cartesian coordinates (x,y,z) to spherical (elevation,
    azimuth, radius). Output is in degrees.

    usage:
        array3xN[el,az,rad] = cart2sph(array3xN[x,y,z])
        OR
        elevation, azimuth, radius = cart2sph(x,y,z)

        If working in DKL space, z = Luminance, y = S and x = LM"""
    width = len(z)

    elevation = numpy.empty([width,width])
    radius = numpy.empty([width,width])
    azimuth = numpy.empty([width,width])

    radius = numpy.sqrt(x**2 + y**2 + z**2)
    azimuth = numpy.arctan2(y, x)
    #Calculating the elevation from x,y up
    elevation = numpy.arctan2(z, numpy.sqrt(x**2+y**2))

    # Convert azimuth and elevation angles into degrees
    azimuth *= (180.0/numpy.pi)
    elevation *= (180.0/numpy.pi)

    sphere = numpy.array([elevation, azimuth, radius])
    sphere = numpy.rollaxis(sphere, 0, 3)

    return sphere
Example 31
"""
Created on Fri Jan 11 13:34:54 2019

@author: gabriel
"""

import numpy as np
from sklearn import datasets


base = datasets.load_breast_cancer()

data = base.data # Inputs

outputValues = base.target # Output values as an array, to be converted into a matrix

outputs = np.empty([569, 1], dtype=int) # Allocating space for the outputs

# Converting the array into a matrix
for i in range(569):
    outputs[i] = outputValues[i]

synapsesToHiddenLayer = 2 * np.random.random((30, 15)) - 1 # Weights from the input to the hidden layer

synapsesToOutput = 2 * np.random.random((15, 1)) - 1 # Weights from the hidden layer to the output layer

trainingTime = 100000 # Epochs (weight-update rounds)

learning_rate = 0.3 # Learning rate

time = 1 # Momentum
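The snippet stops after allocating the weights. A minimal forward pass in the same style (the sigmoid helper and the pass itself are an assumed continuation, not part of the original):

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

# One forward pass through the two weight matrices allocated above
hiddenLayer = sigmoid(np.dot(data, synapsesToHiddenLayer))   # shape (569, 15)
outputLayer = sigmoid(np.dot(hiddenLayer, synapsesToOutput)) # shape (569, 1)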
Example 32
# Array creation using numpy methods:
# numpy.empty(shape, dtype=float, order='C')

import numpy as np
a = np.empty(2, dtype=int)
print('Matrix a: \n', a)

b = np.empty([2, 2], dtype=int)
print('Matrix b: \n',b )

c = np.empty([3, 3])
print('\n Matrix c: \n', c)



# numpy.zeros method

a = np.zeros(2, dtype=int)
print('Matrix a: \n',a)

b = np.zeros([2, 3], dtype=int)
print('Matrix b: \n', b)




# numpy.reshape() method

array = np.arange(8)
print('Original Array: \n', array)
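The reshape demo is cut off here; a plausible continuation (an assumption) is:

reshaped = array.reshape(2, 4)
print('Reshaped Array: \n', reshaped)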
Example 33
 def __init__(self, n_nodes):
     self.N = n_nodes
     self.W = np.empty([n_nodes, 3])
     self.C = np.eye(n_nodes, n_nodes)
     # test
     self.V = np.array([])
Example 34
    def __init__(self, stream, cutoff):
        # Number of columns, rows, and sections (3 words, 12 bytes, 1-12)
        self.NC = st.unpack('<L', stream.read(4))[0]
        self.NR = st.unpack('<L', stream.read(4))[0]
        self.NS = st.unpack('<L', stream.read(4))[0]
        self.Ntot = self.NC * self.NR * self.NS

        # Mode (1 word, 4 bytes, 13-16)
        self.mode = st.unpack('<L', stream.read(4))[0]

        # Number of first column, row, section (3 words, 12 bytes, 17-28)
        self.ncstart = st.unpack('<l', stream.read(4))[0]
        self.nrstart = st.unpack('<l', stream.read(4))[0]
        self.nsstart = st.unpack('<l', stream.read(4))[0]
        # Number of intervals along x, y, z (3 words, 12 bytes, 29-40)
        self.Nx = st.unpack('<L', stream.read(4))[0]
        self.Ny = st.unpack('<L', stream.read(4))[0]
        self.Nz = st.unpack('<L', stream.read(4))[0]

        # Cell dimensions (Angstroms) (3 words, 12 bytes, 41-52)
        self.Lx = st.unpack('<f', stream.read(4))[0]
        self.Ly = st.unpack('<f', stream.read(4))[0]
        self.Lz = st.unpack('<f', stream.read(4))[0]

        # Cell angles (Degrees) (3 words, 12 bytes, 53-64)
        self.a = st.unpack('<f', stream.read(4))[0]
        self.b = st.unpack('<f', stream.read(4))[0]
        self.c = st.unpack('<f', stream.read(4))[0]

        # Which axis corresponds to column, row, and sections (1, 2, 3 for x, y ,z)
        # (3 words, 12 bytes, 65-76)
        self.mapc = st.unpack('<L', stream.read(4))[0]
        self.mapr = st.unpack('<L', stream.read(4))[0]
        self.maps = st.unpack('<L', stream.read(4))[0]

        # Density values (min, max, mean) (3 words, 12 bytes, 77-88)
        self.dmin = st.unpack('<f', stream.read(4))[0]
        self.dmax = st.unpack('<f', stream.read(4))[0]
        self.dmean = st.unpack('<f', stream.read(4))[0]

        # Space group number (1 word, 4 bytes, 89-92)
        # For EM/ET, this encodes the type of data:
        # 0 for 2D images and image stacks, 1 for 3D volumes, 401 for volume stacks
        self.ispg = st.unpack('<L', stream.read(4))[0]

        # size of extended header (1 word, 4 bytes, 93-96)
        # contained symmetry records in original format definition
        self.nsymbt = st.unpack('<L', stream.read(4))[0]

        # we treat this all as extra stuff like MRC2014 format (25 word, 100 bytes, 97-196)
        self.extra = st.unpack('<100s', stream.read(100))[0]

        # origins for x, y, z (3 words, 12 bytes, 197-208)
        self.x0 = st.unpack('<f', stream.read(4))[0]
        self.y0 = st.unpack('<f', stream.read(4))[0]
        self.z0 = st.unpack('<f', stream.read(4))[0]

        # the character string 'MAP' to identify file type (1 word, 4 bytes, 209-212)
        self.wordMAP = st.unpack('<4s', stream.read(4))[0]

        # machine stamp encoding byte ordering of data (1 word, 4 bytes, 213-216)
        self.machst = st.unpack('<4s', stream.read(4))[0]

        # rms deviation of map from mean density (1 word, 4 bytes, 217-220)
        self.rms = st.unpack('<f', stream.read(4))[0]

        # number of labels being used (1 word, 4 bytes, 221-224)
        self.nlabels = st.unpack('<L', stream.read(4))[0]

        # 10 80-character text labels, which we leave concatenated (200 words, 800 bytes, 225-1024)
        self.labels = st.unpack('<800s', stream.read(800))[0]

        # Data blocks (1024-end)
        self.density = np.empty([self.NS, self.NR, self.NC])
        for s in range(0, self.NS):
            for r in range(0, self.NR):
                for c in range(0, self.NC):
                    d = st.unpack('<f', stream.read(4))[0]
                    if cutoff is not None and d < cutoff:
                        d = 0
                    self.density[s, r, c] = d

        self.sampled = False
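Reading the density block one float at a time is slow; the same result can be had with a single bulk read, demonstrated here on a fake 2x2x2 block (assumes the same little-endian float32 layout as the loop above):

import numpy as np

NS, NR, NC = 2, 2, 2
blob = np.arange(NS * NR * NC, dtype='<f4').tobytes()  # stand-in for stream.read(4 * Ntot)
density = np.frombuffer(blob, dtype='<f4').reshape(NS, NR, NC).astype(np.float64)
cutoff = 3.0
density[density < cutoff] = 0  # apply the cutoff vectorized
print(density)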
Example 35
    def write_h5_file(self, photonfile):
        """
        Write the :class:`~pyxsim.photon_list.PhotonList` to the HDF5 
        file *photonfile*.
        """

        if parallel_capable:

            mpi_long = get_mpi_type("int64")
            mpi_double = get_mpi_type("float64")

            local_num_cells = len(self.photons["x"])
            sizes_c = comm.comm.gather(local_num_cells, root=0)

            local_num_photons = np.sum(self.photons["num_photons"])
            sizes_p = comm.comm.gather(local_num_photons, root=0)

            if comm.rank == 0:
                num_cells = sum(sizes_c)
                num_photons = sum(sizes_p)
                disps_c = [sum(sizes_c[:i]) for i in range(len(sizes_c))]
                disps_p = [sum(sizes_p[:i]) for i in range(len(sizes_p))]
                x = np.zeros(num_cells)
                y = np.zeros(num_cells)
                z = np.zeros(num_cells)
                vx = np.zeros(num_cells)
                vy = np.zeros(num_cells)
                vz = np.zeros(num_cells)
                dx = np.zeros(num_cells)
                n_ph = np.zeros(num_cells, dtype="int64")
                e = np.zeros(num_photons)
            else:
                sizes_c = []
                sizes_p = []
                disps_c = []
                disps_p = []
                x = np.empty([])
                y = np.empty([])
                z = np.empty([])
                vx = np.empty([])
                vy = np.empty([])
                vz = np.empty([])
                dx = np.empty([])
                n_ph = np.empty([])
                e = np.empty([])

            comm.comm.Gatherv(
                [self.photons["pos"][:, 0].d, local_num_cells, mpi_double],
                [x, (sizes_c, disps_c), mpi_double],
                root=0)
            comm.comm.Gatherv(
                [self.photons["pos"][:, 1].d, local_num_cells, mpi_double],
                [y, (sizes_c, disps_c), mpi_double],
                root=0)
            comm.comm.Gatherv(
                [self.photons["pos"][:, 2].d, local_num_cells, mpi_double],
                [z, (sizes_c, disps_c), mpi_double],
                root=0)
            comm.comm.Gatherv(
                [self.photons["vel"][:, 0].d, local_num_cells, mpi_double],
                [vx, (sizes_c, disps_c), mpi_double],
                root=0)
            comm.comm.Gatherv(
                [self.photons["vel"][:, 1].d, local_num_cells, mpi_double],
                [vy, (sizes_c, disps_c), mpi_double],
                root=0)
            comm.comm.Gatherv(
                [self.photons["vel"][:, 2].d, local_num_cells, mpi_double],
                [vz, (sizes_c, disps_c), mpi_double],
                root=0)
            comm.comm.Gatherv(
                [self.photons["dx"].d, local_num_cells, mpi_double],
                [dx, (sizes_c, disps_c), mpi_double],
                root=0)
            comm.comm.Gatherv(
                [self.photons["num_photons"], local_num_cells, mpi_long],
                [n_ph, (sizes_c, disps_c), mpi_long],
                root=0)
            comm.comm.Gatherv(
                [self.photons["energy"].d, local_num_photons, mpi_double],
                [e, (sizes_p, disps_p), mpi_double],
                root=0)

        else:

            x = self.photons["pos"][:, 0].d
            y = self.photons["pos"][:, 1].d
            z = self.photons["pos"][:, 2].d
            vx = self.photons["vel"][:, 0].d
            vy = self.photons["vel"][:, 1].d
            vz = self.photons["vel"][:, 2].d
            dx = self.photons["dx"].d
            n_ph = self.photons["num_photons"]
            e = self.photons["energy"].d

        if comm.rank == 0:

            f = h5py.File(photonfile, "w")

            # Parameters

            p = f.create_group("parameters")
            p.create_dataset("fid_area",
                             data=float(self.parameters["fid_area"]))
            p.create_dataset("fid_exp_time",
                             data=float(self.parameters["fid_exp_time"]))
            p.create_dataset("fid_redshift",
                             data=self.parameters["fid_redshift"])
            p.create_dataset("hubble", data=self.parameters["hubble"])
            p.create_dataset("omega_matter",
                             data=self.parameters["omega_matter"])
            p.create_dataset("omega_lambda",
                             data=self.parameters["omega_lambda"])
            p.create_dataset("fid_d_a", data=float(self.parameters["fid_d_a"]))
            p.create_dataset("data_type", data=self.parameters["data_type"])

            # Data

            d = f.create_group("data")
            d.create_dataset("x", data=x)
            d.create_dataset("y", data=y)
            d.create_dataset("z", data=z)
            d.create_dataset("vx", data=vx)
            d.create_dataset("vy", data=vy)
            d.create_dataset("vz", data=vz)
            d.create_dataset("dx", data=dx)
            d.create_dataset("num_photons", data=n_ph)
            d.create_dataset("energy", data=e)

            f.close()

        comm.barrier()
Example 36
def crossVal(prob, k=1, kernels=['Linear','Gaussian'], numPC=2, KSError=False, d=0, mu=1, normPCA=True, lams=None, linCon=True, covCon=True, dualize=True, outputFlag=True):
    # Given problem object, conducts k-fold cross-validation with the parameters defined,
    # and returns the mean error (or fairness), the average variance of the dataset
    # explained by the principal components found, the total variance of the datasets,
    # the average top eigenvalues of the optimal solution to the SDP, and the average
    # correlation of the PC's found with the true PC's
    
    idx = np.arange(prob.numPoints); np.random.shuffle(idx)
    
    if KSError: errList = np.empty(k)
    else: errList = np.empty((k,len(kernels)))
    varExpList = np.empty(k)
    totVarList = np.empty(k)
    eigList = [[]]*k
    eigVecList = [[]]*k
    corrList = [[]]*k
    #train_test = np.array([train_test_split((idx),test_size=0.3) for i in range(k)])
    #trainFolds, testFolds = [list(train_test[:,0]),list(train_test[:,1])]
    testFolds = [idx[int(i*prob.numPoints/k):int((i+1)*prob.numPoints/k)] for i in range(k)]
    trainFolds = [np.concatenate([testFolds[j] for j in range(k) if j!=i]) for i in range(k)]
    dat = copy.deepcopy(prob.data)
    srsp = copy.deepcopy(prob.sideResp)
    if outputFlag: print('#################################################################')
    for iteration,(train,test) in enumerate(zip(trainFolds,testFolds)):
        if outputFlag: print('Iteration',iteration)
        if outputFlag: print('Fair PCA Parameters: numPC=%s, d=%s, mu=%s'%(numPC,d,mu))
        if normPCA: prob.data, means, stdevs = normalize(dat[train])
        else: prob.data = dat[train]
        prob.sideResp = srsp[train]
        if linCon and not covCon: B,m = prob.zpca(dimPCA=numPC,d=d)
        elif covCon: B,m = prob.spca(dimPCA=numPC,addLinear=linCon,mu=mu,d=d,dualize=dualize)
        else: B,m = prob.pca(dimPCA=numPC)
        eigVecs = la.eigh(m.m.dat.T.dot(m.m.dat))[1][:,-numPC:]
        varExp = np.trace(B.T.dot(m.m.dat.T).dot(m.m.dat.dot(B)))
        totVar = np.trace(m.m.dat.T.dot(m.m.dat))
        eig = np.round(la.eigvalsh(m.m.X)[-numPC:],4)
        corr = [round(la.norm(b,2),2) for b in B.T.dot(eigVecs)]
        
        varExpList[iteration] = varExp
        totVarList[iteration] = totVar
        eigList[iteration] = eig
        eigVecList[iteration] = eigVecs
        corrList[iteration] = corr
    
        if outputFlag: print(m.m.m.getprosta(mosek.soltype.itr))
        if m.m.m.getprosta(mosek.soltype.itr) not in [mosek.prosta.prim_and_dual_feas,mosek.prosta.unknown]: return None, None, None, None, None
        if outputFlag:
            print('Top eigenvalues of solution:',eig)
            print('Correlation with top eigenvectors:',corr)
            print('Proportion of variance explained:', varExp/totVar)
            print('Proportion of deviation explained:', np.sqrt(varExp/totVar))
            if linCon or covCon:
                if np.max(np.abs((m.m.B.T.dot(m.getZCon(prob.sideResp).reshape((prob.numFields,1))))))>1e-7:
                    print('Linear constraint unsatisfied')
                else:
                    print('Linear constraint satisfied')
        
        if normPCA: prob.data = normalize(((dat[test]-means[:len(test)])/stdevs[:len(test)]+means[:len(test)]).dot(B))[0]
        else: prob.data = normalize(dat[test].dot(B))[0]
        prob.sideResp = srsp[test]
        if KSError: errList[iteration] = np.max(np.abs(multiDimCDF(prob.data,prob.sideResp)))
        else:
            for kernum,kernel in enumerate(kernels):
                if outputFlag: print(kernel,'SVM:')
                if kernel=='Linear': svm, err = prob.svm(useSRSP=True,outputFlag=outputFlag,lams=lams)[1:3]
                elif kernel=='Gaussian': svm, err = prob.svm(useSRSP=True,dual=True,kernel=lambda x,y: math.exp(-la.norm(x-y)**2/2),outputFlag=outputFlag,lams=lams)[1:3]
                elif kernel=='Polynomial': svm, err = prob.svm(useSRSP=True,dual=True,conic=False,kernel=lambda x,y: (x.T.dot(y)+1)**2,outputFlag=outputFlag,lams=lams)[1:3]
                else:
                    if outputFlag: print('\tIncorrect kernel name')
                    continue
                errList[iteration,kernum] = err
    if outputFlag:
        print('-----------------------------------------------------------------')
        print('Average variation explained:',np.round(100*np.mean(varExpList/totVarList),2))
        print('Average deviation explained:',np.round(100*np.mean(np.sqrt(varExpList/totVarList)),2))
        print('Average errors',np.round(np.mean(errList,axis=0),4))
        # winsound.PlaySound("SystemExit", winsound.SND_ALIAS)
    prob.data = dat
    prob.sideResp = srsp
    return errList, varExpList, totVarList, eigList, eigVecList, corrList
Example 37
def plot_wigner_function(state, res=100, filename=None):
    """Plot the equal angle slice spin Wigner function of an arbitrary
    quantum state.

    Args:
        state (np.matrix[[complex]]):
            - Matrix of 2**n x 2**n complex numbers
            - State Vector of 2**n x 1 complex numbers
        res (int) : number of theta and phi values in meshgrid
            on sphere (creates a res x res grid of points)
        filename (str): the output file to save the plot as. If specified it
            will save and exit and not open up the plot in a new window.

    References:
        [1] T. Tilma, M. J. Everitt, J. H. Samson, W. J. Munro,
        and K. Nemoto, Phys. Rev. Lett. 117, 180401 (2016).
        [2] R. P. Rundle, P. W. Mills, T. Tilma, J. H. Samson, and
        M. J. Everitt, Phys. Rev. A 96, 022117 (2017).
    """
    state = np.array(state)
    if state.ndim == 1:
        state = np.outer(state,
                         state)  # turns state vector to a density matrix
    state = np.matrix(state)
    num = int(np.log2(len(state)))  # number of qubits
    phi_vals = np.linspace(0, np.pi, num=res, dtype=complex)
    theta_vals = np.linspace(0, 0.5 * np.pi, num=res,
                             dtype=complex)  # phi and theta values for WF
    w = np.empty([res, res])
    harr = np.sqrt(3)
    delta_su2 = np.zeros((2, 2), dtype=complex)

    # create the spin Wigner function
    for theta in range(res):
        costheta = harr * np.cos(2 * theta_vals[theta])
        sintheta = harr * np.sin(2 * theta_vals[theta])

        for phi in range(res):
            delta_su2[0, 0] = 0.5 * (1 + costheta)
            delta_su2[0, 1] = -0.5 * (np.exp(2j * phi_vals[phi]) * sintheta)
            delta_su2[1, 0] = -0.5 * (np.exp(-2j * phi_vals[phi]) * sintheta)
            delta_su2[1, 1] = 0.5 * (1 - costheta)
            kernel = 1
            for _ in range(num):
                kernel = np.kron(kernel,
                                 delta_su2)  # creates phase point kernel

            w[phi,
              theta] = np.real(np.trace(state * kernel))  # Wigner function

    # Plot a sphere (x,y,z) with Wigner function facecolor data stored in Wc
    fig = plt.figure(figsize=(11, 9))
    ax = fig.add_subplot(projection='3d')
    w_max = np.amax(w)
    # Color data for plotting
    w_c = cm.seismic_r((w + w_max) / (2 * w_max))  # color data for sphere
    w_c2 = cm.seismic_r(
        (w[0:res, int(res / 2):res] + w_max) / (2 * w_max))  # bottom
    w_c3 = cm.seismic_r((w[int(res / 4):int(3 * res / 4), 0:res] + w_max) /
                        (2 * w_max))  # side
    w_c4 = cm.seismic_r(
        (w[int(res / 2):res, 0:res] + w_max) / (2 * w_max))  # back

    u = np.linspace(0, 2 * np.pi, res)
    v = np.linspace(0, np.pi, res)
    x = np.outer(np.cos(u), np.sin(v))
    y = np.outer(np.sin(u), np.sin(v))
    z = np.outer(np.ones(np.size(u)), np.cos(v))  # creates a sphere mesh

    ax.plot_surface(x,
                    y,
                    z,
                    facecolors=w_c,
                    vmin=-w_max,
                    vmax=w_max,
                    rcount=res,
                    ccount=res,
                    linewidth=0,
                    zorder=0.5,
                    antialiased=False)  # plots Wigner Bloch sphere

    ax.plot_surface(x[0:res, int(res / 2):res],
                    y[0:res, int(res / 2):res],
                    -1.5 * np.ones((res, int(res / 2))),
                    facecolors=w_c2,
                    vmin=-w_max,
                    vmax=w_max,
                    rcount=res / 2,
                    ccount=res / 2,
                    linewidth=0,
                    zorder=0.5,
                    antialiased=False)  # plots bottom reflection

    ax.plot_surface(-1.5 * np.ones((int(res / 2), res)),
                    y[int(res / 4):int(3 * res / 4), 0:res],
                    z[int(res / 4):int(3 * res / 4), 0:res],
                    facecolors=w_c3,
                    vmin=-w_max,
                    vmax=w_max,
                    rcount=res / 2,
                    ccount=res / 2,
                    linewidth=0,
                    zorder=0.5,
                    antialiased=False)  # plots side reflection

    ax.plot_surface(x[int(res / 2):res, 0:res],
                    1.5 * np.ones((int(res / 2), res)),
                    z[int(res / 2):res, 0:res],
                    facecolors=w_c4,
                    vmin=-w_max,
                    vmax=w_max,
                    rcount=res / 2,
                    ccount=res / 2,
                    linewidth=0,
                    zorder=0.5,
                    antialiased=False)  # plots back reflection

    ax.w_xaxis.set_pane_color((0.4, 0.4, 0.4, 1.0))
    ax.w_yaxis.set_pane_color((0.4, 0.4, 0.4, 1.0))
    ax.w_zaxis.set_pane_color((0.4, 0.4, 0.4, 1.0))
    ax.set_xticks([], [])
    ax.set_yticks([], [])
    ax.set_zticks([], [])
    ax.grid(False)
    ax.xaxis.pane.set_edgecolor('black')
    ax.yaxis.pane.set_edgecolor('black')
    ax.zaxis.pane.set_edgecolor('black')
    ax.set_xlim(-1.5, 1.5)
    ax.set_ylim(-1.5, 1.5)
    ax.set_zlim(-1.5, 1.5)
    m = cm.ScalarMappable(cmap=cm.seismic_r)
    m.set_array([-w_max, w_max])
    plt.colorbar(m, shrink=0.5, aspect=10)
    if filename:
        plt.savefig(filename)
    else:
        plt.show()
Example 38
 def Viterbi(self):
     from pyhsmm.internals.hmm_messages_interface import viterbi
     self.stateseq = viterbi(self.trans_matrix,self.aBl,self.pi_0,
             np.empty(self.aBl.shape[0],dtype='int32'))
Example 39
 def _sample_forwards_log(betal,trans_matrix,init_state_distn,log_likelihoods):
     from pyhsmm.internals.hmm_messages_interface import sample_forwards_log
     return sample_forwards_log(trans_matrix,log_likelihoods,
             init_state_distn,betal,np.empty(log_likelihoods.shape[0],dtype='int32'))
Example 40
File: gpc.py Project: mugenop/Beads
    def log_marginal_likelihood(self, theta=None, eval_gradient=False):
        """Returns log-marginal likelihood of theta for training data.

        Parameters
        ----------
        theta : array-like, shape = (n_kernel_params,) or None
            Kernel hyperparameters for which the log-marginal likelihood is
            evaluated. If None, the precomputed log_marginal_likelihood
            of ``self.kernel_.theta`` is returned.

        eval_gradient : bool, default: False
            If True, the gradient of the log-marginal likelihood with respect
            to the kernel hyperparameters at position theta is returned
            additionally. If True, theta must not be None.

        Returns
        -------
        log_likelihood : float
            Log-marginal likelihood of theta for training data.

        log_likelihood_gradient : array, shape = (n_kernel_params,), optional
            Gradient of the log-marginal likelihood with respect to the kernel
            hyperparameters at position theta.
            Only returned when eval_gradient is True.
        """
        if theta is None:
            if eval_gradient:
                raise ValueError(
                    "Gradient can only be evaluated for theta!=None")
            return self.log_marginal_likelihood_value_

        kernel = self.kernel_.clone_with_theta(theta)

        if eval_gradient:
            K, K_gradient = kernel(self.X_train_, eval_gradient=True)
        else:
            K = kernel(self.X_train_)

        # Compute log-marginal-likelihood Z and also store some temporaries
        # which can be reused for computing Z's gradient
        Z, (pi, W_sr, L, b, a) = \
            self._posterior_mode(K, return_temporaries=True)

        if not eval_gradient:
            return Z

        # Compute gradient based on Algorithm 5.1 of GPML
        d_Z = np.empty(theta.shape[0])
        # XXX: Get rid of the np.diag() in the next line
        R = W_sr[:, np.newaxis] * cho_solve((L, True), np.diag(W_sr))  # Line 7
        C = solve(L, W_sr[:, np.newaxis] * K)  # Line 8
        # Line 9: (use einsum to compute np.diag(C.T.dot(C))))
        s_2 = -0.5 * (np.diag(K) - np.einsum('ij, ij -> j', C, C)) \
            * (pi * (1 - pi) * (1 - 2 * pi))  # third derivative

        for j in range(d_Z.shape[0]):
            C = K_gradient[:, :, j]   # Line 11
            # Line 12: (R.T.ravel().dot(C.ravel()) = np.trace(R.dot(C)))
            s_1 = .5 * a.T.dot(C).dot(a) - .5 * R.T.ravel().dot(C.ravel())

            b = C.dot(self.y_train_ - pi)  # Line 13
            s_3 = b - K.dot(R.dot(b))  # Line 14

            d_Z[j] = s_1 + s_2.T.dot(s_3)  # Line 15

        return Z, d_Z
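A minimal usage sketch of this method on a fitted scikit-learn GaussianProcessClassifier; the data here is synthetic and purely illustrative:

import numpy as np
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF

rng = np.random.RandomState(0)
X = rng.rand(20, 2)
y = (X[:, 0] > 0.5).astype(int)
gpc = GaussianProcessClassifier(kernel=1.0 * RBF(1.0)).fit(X, y)

print(gpc.log_marginal_likelihood())  # theta=None: precomputed value for the fitted kernel
lml, grad = gpc.log_marginal_likelihood(gpc.kernel_.theta, eval_gradient=True)  # (Z, d_Z) as above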
Example 41
def multi_modal_network_fp(dim_input=27, dim_output=7, batch_size=25, network_config=None):
    """
    An example a network in tf that has both state and image inputs, with the feature
    point architecture (spatial softmax + expectation).
    Args:
        dim_input: Dimensionality of input.
        dim_output: Dimensionality of the output.
        batch_size: Batch size.
        network_config: dictionary of network structure parameters
    Returns:
        A tfMap object that stores inputs, outputs, and scalar loss.
    """
    n_layers = 3
    layer_size = 20
    dim_hidden = (n_layers - 1)*[layer_size]
    dim_hidden.append(dim_output)
    pool_size = 2
    filter_size = 5

    # List of indices for state (vector) data and image (tensor) data in observation.
    x_idx, img_idx, i = [], [], 0
    for sensor in network_config['obs_include']:
        dim = network_config['sensor_dims'][sensor]
        if sensor in network_config['obs_image_data']:
            img_idx = img_idx + list(range(i, i+dim))
        else:
            x_idx = x_idx + list(range(i, i+dim))
        i += dim

    nn_input, action, precision = get_input_layer(dim_input, dim_output)

    state_input = nn_input[:, 0:x_idx[-1]+1]
    image_input = nn_input[:, x_idx[-1]+1:img_idx[-1]+1]

    # image goes through 3 convnet layers
    num_filters = network_config['num_filters']

    im_height = network_config['image_height']
    im_width = network_config['image_width']
    num_channels = network_config['image_channels']
    image_input = tf.reshape(image_input, [-1, num_channels, im_width, im_height])
    image_input = tf.transpose(image_input, perm=[0,3,2,1])

    # we pool twice, each time reducing the image size by a factor of 2.
    conv_out_size = int(im_width/(2.0*pool_size)*im_height/(2.0*pool_size)*num_filters[1])
    first_dense_size = conv_out_size + len(x_idx)

    # Store layers weight & bias
    with tf.variable_scope('conv_params'):
        weights = {
            'wc1': init_weights([filter_size, filter_size, num_channels, num_filters[0]], name='wc1'),  # 5x5 conv, num_channels inputs, num_filters[0] outputs
            'wc2': init_weights([filter_size, filter_size, num_filters[0], num_filters[1]], name='wc2'),  # 5x5 conv, num_filters[0] inputs, num_filters[1] outputs
            'wc3': init_weights([filter_size, filter_size, num_filters[1], num_filters[2]], name='wc3'),  # 5x5 conv, num_filters[1] inputs, num_filters[2] outputs
        }

        biases = {
            'bc1': init_bias([num_filters[0]], name='bc1'),
            'bc2': init_bias([num_filters[1]], name='bc2'),
            'bc3': init_bias([num_filters[2]], name='bc3'),
        }

    conv_layer_0 = conv2d(img=image_input, w=weights['wc1'], b=biases['bc1'], strides=[1,2,2,1])
    conv_layer_1 = conv2d(img=conv_layer_0, w=weights['wc2'], b=biases['bc2'])
    conv_layer_2 = conv2d(img=conv_layer_1, w=weights['wc3'], b=biases['bc3'])

    _, num_rows, num_cols, num_fp = conv_layer_2.get_shape()
    num_rows, num_cols, num_fp = [int(x) for x in [num_rows, num_cols, num_fp]]
    x_map = np.empty([num_rows, num_cols], np.float32)
    y_map = np.empty([num_rows, num_cols], np.float32)

    for i in range(num_rows):
        for j in range(num_cols):
            x_map[i, j] = (i - num_rows / 2.0) / num_rows
            y_map[i, j] = (j - num_cols / 2.0) / num_cols

    x_map = tf.convert_to_tensor(x_map)
    y_map = tf.convert_to_tensor(y_map)

    x_map = tf.reshape(x_map, [num_rows * num_cols])
    y_map = tf.reshape(y_map, [num_rows * num_cols])

    # rearrange features to be [batch_size, num_fp, num_rows, num_cols]
    features = tf.reshape(tf.transpose(conv_layer_2, [0,3,1,2]),
                          [-1, num_rows*num_cols])
    softmax = tf.nn.softmax(features)

    fp_x = tf.reduce_sum(tf.multiply(x_map, softmax), [1], keep_dims=True)
    fp_y = tf.reduce_sum(tf.multiply(y_map, softmax), [1], keep_dims=True)

    fp = tf.reshape(tf.concat(axis=1, values=[fp_x, fp_y]), [-1, num_fp*2])

    fc_input = tf.concat(axis=1, values=[fp, state_input])

    fc_output, weights_FC, biases_FC = get_mlp_layers(fc_input, n_layers, dim_hidden)
    fc_vars = weights_FC + biases_FC

    loss = euclidean_loss_layer(a=action, b=fc_output, precision=precision, batch_size=batch_size)
    nnet = TfMap.init_from_lists([nn_input, action, precision], [fc_output], [loss], fp=fp)
    last_conv_vars = fc_input

    return nnet, fc_vars, last_conv_vars
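To make the spatial-softmax "feature point" step above concrete, here is a hedged, self-contained numpy sketch of the same computation; the shapes and the coordinate convention mirror the x_map/y_map loop above, but this is an illustration, not the project's code:

import numpy as np

def spatial_softmax_fp(features):
    # features: (batch, rows, cols, channels), e.g. a conv activation map
    b, r, c, ch = features.shape
    flat = features.transpose(0, 3, 1, 2).reshape(b * ch, r * c)
    flat = flat - flat.max(axis=1, keepdims=True)  # numerical stability
    sm = np.exp(flat) / np.exp(flat).sum(axis=1, keepdims=True)
    xs = (np.arange(r) - r / 2.0) / r  # row coordinates, same convention as x_map
    ys = (np.arange(c) - c / 2.0) / c  # col coordinates, same convention as y_map
    x_map = np.repeat(xs, c)           # row coordinate of each flattened pixel
    y_map = np.tile(ys, r)             # col coordinate of each flattened pixel
    fp_x = (sm * x_map).sum(axis=1)    # expected row position per channel
    fp_y = (sm * y_map).sum(axis=1)    # expected col position per channel
    return np.stack([fp_x, fp_y], axis=1).reshape(b, ch * 2)

# e.g. spatial_softmax_fp(np.random.rand(25, 8, 8, 16)).shape == (25, 32)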
Example 42
 def _sample_backwards_normalized(alphan,trans_matrix_transpose):
     from pyhsmm.internals.hmm_messages_interface import sample_backwards_normalized
     return sample_backwards_normalized(trans_matrix_transpose,alphan,
             np.empty(alphan.shape[0],dtype='int32'))
Example 43
 def test_fieldless_structured(self):
     # gh-10366
     no_fields = np.dtype([])
     arr_no_fields = np.empty(4, dtype=no_fields)
     assert_equal(repr(arr_no_fields), "array([(), (), (), ()], dtype=[])")
Example 44
def main():

    weight_file = "../pre-trained/wiki/ssrnet_3_3_3_64_1.0_1.0/ssrnet_3_3_3_64_1.0_1.0.h5"

    # for face detection
    # detector = dlib.get_frontal_face_detector()
    detector = MTCNN()
    try:
        os.mkdir('./img')
    except OSError:
        pass
    # load model and weights
    img_size = 64
    stage_num = [3, 3, 3]
    lambda_local = 1
    lambda_d = 1
    model = SSR_net(img_size, stage_num, lambda_local, lambda_d)()
    model.load_weights(weight_file)

    clip = VideoFileClip(sys.argv[1])  # can be gif or movie

    #python version
    pyFlag = ''
    if len(sys.argv) < 3:
        pyFlag = '2'  # default: use moviepy to show; works on python2.7 and python3.5
    elif len(sys.argv) == 3:
        pyFlag = sys.argv[2]  #python version
    else:
        print('Wrong input!')
        sys.exit()

    img_idx = 0
    detected = ''  # make this not a local variable
    time_detection = 0
    time_network = 0
    time_plot = 0
    ad = 0.4
    skip_frame = 5  # every 5 frame do 1 detection and network forward propagation
    for img in clip.iter_frames():
        img_idx = img_idx + 1

        input_img = img  # using python2.7 with moviepy to show the image without channel flip

        if pyFlag == '3':
            input_img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

        img_h, img_w, _ = np.shape(input_img)
        input_img = cv2.resize(input_img, (1024, int(1024 * img_h / img_w)))
        img_h, img_w, _ = np.shape(input_img)

        if img_idx == 1 or img_idx % skip_frame == 0:

            # detect faces using the MTCNN detector
            detected = detector.detect_faces(input_img)
            faces = np.empty((len(detected), img_size, img_size, 3))

            start_time = timeit.default_timer()
            for i, d in enumerate(detected):
                print(i)
                print(d['confidence'])
                if d['confidence'] > 0.95:
                    x1, y1, w, h = d['box']
                    x2 = x1 + w
                    y2 = y1 + h
                    xw1 = max(int(x1 - ad * w), 0)
                    yw1 = max(int(y1 - ad * h), 0)
                    xw2 = min(int(x2 + ad * w), img_w - 1)
                    yw2 = min(int(y2 + ad * h), img_h - 1)
                    cv2.rectangle(input_img, (x1, y1), (x2, y2), (255, 0, 0),
                                  2)
                    # cv2.rectangle(img, (xw1, yw1), (xw2, yw2), (255, 0, 0), 2)
                    faces[i, :, :, :] = cv2.resize(
                        input_img[yw1:yw2 + 1, xw1:xw2 + 1, :],
                        (img_size, img_size))
            elapsed_time = timeit.default_timer() - start_time
            time_detection = time_detection + elapsed_time

            start_time = timeit.default_timer()
            if len(detected) > 0:
                # predict ages and genders of the detected faces
                results = model.predict(faces)
                predicted_ages = results

            # draw results
            for i, d in enumerate(detected):
                if d['confidence'] > 0.95:
                    x1, y1, w, h = d['box']
                    label = "{}".format(int(predicted_ages[i]))
                    draw_label(input_img, (x1, y1), label)
            elapsed_time = timeit.default_timer() - start_time
            time_network = time_network + elapsed_time

            start_time = timeit.default_timer()

            if pyFlag == '2':
                img_clip = ImageClip(input_img)
                img_clip.show()
                cv2.imwrite('img/' + str(img_idx) + '.png',
                            cv2.cvtColor(input_img, cv2.COLOR_RGB2BGR))
            elif pyFlag == '3':
                cv2.imshow("result", input_img)
                cv2.imwrite('img/' + str(img_idx) + '.png',
                            cv2.cvtColor(input_img, cv2.COLOR_RGB2BGR))

            elapsed_time = timeit.default_timer() - start_time
            time_plot = time_plot + elapsed_time

        else:
            for i, d in enumerate(detected):
                if d['confidence'] > 0.95:
                    x1, y1, w, h = d['box']
                    x2 = x1 + w
                    y2 = y1 + h
                    xw1 = max(int(x1 - ad * w), 0)
                    yw1 = max(int(y1 - ad * h), 0)
                    xw2 = min(int(x2 + ad * w), img_w - 1)
                    yw2 = min(int(y2 + ad * h), img_h - 1)
                    cv2.rectangle(input_img, (x1, y1), (x2, y2), (255, 0, 0),
                                  2)
                    # cv2.rectangle(img, (xw1, yw1), (xw2, yw2), (255, 0, 0), 2)
                    faces[i, :, :, :] = cv2.resize(
                        input_img[yw1:yw2 + 1, xw1:xw2 + 1, :],
                        (img_size, img_size))

            # draw results
            for i, d in enumerate(detected):
                if d['confidence'] > 0.95:
                    x1, y1, w, h = d['box']
                    label = "{}".format(int(predicted_ages[i]))
                    draw_label(input_img, (x1, y1), label)

            start_time = timeit.default_timer()
            if pyFlag == '2':
                img_clip = ImageClip(input_img)
                img_clip.show()
            elif pyFlag == '3':
                cv2.imshow("result", input_img)
            elapsed_time = timeit.default_timer() - start_time
            time_plot = time_plot + elapsed_time

        #Show the time cost (fps)
        print('avefps_time_detection:', img_idx / time_detection)
        print('avefps_time_network:', img_idx / time_network)
        print('avefps_time_plot:', img_idx / time_plot)
        print('===============================')
        if pyFlag == '3':
            key = cv2.waitKey(30)
            if key == 27:
                break
Example 45
def initDict(ConfigOptions,GeoMetaWrfHydro):
    """
    Initial function to create an input forcing dictionary, which
    will contain an abstract class for each input forcing product.
    This gets called one time by the parent calling program.
    :param ConfigOptions: Configuration options object.
    :param GeoMetaWrfHydro: Geospatial metadata describing the WRF-Hydro grid.
    :return: InputDict - A dictionary defining our inputs.
    """
    # Initialize an empty dictionary
    InputDict = {}

    # Loop through and initialize the empty class for each product.
    custom_count = 0
    for force_tmp in range(0,ConfigOptions.number_inputs):
        force_key = ConfigOptions.input_forcings[force_tmp]
        InputDict[force_key] = input_forcings()
        InputDict[force_key].keyValue = force_key
        InputDict[force_key].regridOpt = ConfigOptions.regrid_opt[force_tmp]
        InputDict[force_key].timeInterpOpt = ConfigOptions.forceTemoralInterp[force_tmp]

        InputDict[force_key].q2dDownscaleOpt = ConfigOptions.q2dDownscaleOpt[force_tmp]
        InputDict[force_key].t2dDownscaleOpt = ConfigOptions.t2dDownscaleOpt[force_tmp]
        InputDict[force_key].precipDownscaleOpt = ConfigOptions.precipDownscaleOpt[force_tmp]
        InputDict[force_key].swDowscaleOpt = ConfigOptions.swDownscaleOpt[force_tmp]
        InputDict[force_key].psfcDownscaleOpt = ConfigOptions.psfcDownscaleOpt[force_tmp]
        # Check to make sure the necessary input files for downscaling are present.
        #if InputDict[force_key].t2dDownscaleOpt == 2:
        #    # We are using a pre-calculated lapse rate on the WRF-Hydro grid.
        #    pathCheck = ConfigOptions.downscaleParamDir = "/T2M_Lapse_Rate_" + \
        #        InputDict[force_key].productName + ".nc"
        #    if not os.path.isfile(pathCheck):
        #        ConfigOptions.errMsg = "Expected temperature lapse rate grid: " + \
        #            pathCheck + " not found."
        #        raise Exception

        InputDict[force_key].t2dBiasCorrectOpt = ConfigOptions.t2BiasCorrectOpt[force_tmp]
        InputDict[force_key].q2dBiasCorrectOpt = ConfigOptions.q2BiasCorrectOpt[force_tmp]
        InputDict[force_key].precipBiasCorrectOpt = ConfigOptions.precipBiasCorrectOpt[force_tmp]
        InputDict[force_key].swBiasCorrectOpt = ConfigOptions.swBiasCorrectOpt[force_tmp]
        InputDict[force_key].lwBiasCorrectOpt = ConfigOptions.lwBiasCorrectOpt[force_tmp]
        InputDict[force_key].windBiasCorrectOpt = ConfigOptions.windBiasCorrect[force_tmp]
        InputDict[force_key].psfcBiasCorrectOpt = ConfigOptions.psfcBiasCorrectOpt[force_tmp]

        InputDict[force_key].inDir = ConfigOptions.input_force_dirs[force_tmp]
        InputDict[force_key].paramDir = ConfigOptions.dScaleParamDirs[force_tmp]
        InputDict[force_key].define_product()
        InputDict[force_key].userFcstHorizon = ConfigOptions.fcst_input_horizons[force_tmp]
        InputDict[force_key].userCycleOffset = ConfigOptions.fcst_input_offsets[force_tmp]

        # If we have specified specific humidity downscaling, establish arrays to hold
        # temporary temperature arrays that are un-downscaled.
        if InputDict[force_key].q2dDownscaleOpt > 0:
            InputDict[force_key].t2dTmp = np.empty([GeoMetaWrfHydro.ny_local,
                                                    GeoMetaWrfHydro.nx_local],
                                                   np.float32)
            InputDict[force_key].psfcTmp = np.empty([GeoMetaWrfHydro.ny_local,
                                                    GeoMetaWrfHydro.nx_local],
                                                   np.float32)

        # Initialize the local final grid of values. This is representative
        # of the local grid for this forcing, for a specific output timestep.
        # This grid will be updated from one output timestep to another, and
        # also through downscaling and bias correction.
        InputDict[force_key].final_forcings = np.empty([8,GeoMetaWrfHydro.ny_local,
                                                        GeoMetaWrfHydro.nx_local],
                                                       np.float64)
        InputDict[force_key].height = np.empty([GeoMetaWrfHydro.ny_local,
                                                GeoMetaWrfHydro.nx_local],np.float32)
        InputDict[force_key].regridded_mask = np.empty([GeoMetaWrfHydro.ny_local,
                                                GeoMetaWrfHydro.nx_local],np.float32)

        # Obtain custom input cycle frequencies
        if force_key == 10 or force_key == 11 or force_key == 12:
            InputDict[force_key].cycleFreq = ConfigOptions.customFcstFreq[custom_count]
            custom_count = custom_count + 1

    return InputDict
Example 46
pd = 0.05

nu = 0.6666
rhocore = 300.
rhoshell = 400.

rhoav = rhoshell*(1 - nu**3) + rhocore*nu**3
print('average density =', rhoav)

# R (the particle radius) is assumed to be defined earlier in the script.
parameters = [R, pd*R/2.355, nu, rhocore, rhoshell]

q = np.logspace(-2, 0, 1000)

d = ST.Gaussian_Distribution(1, 2)  # R, sigma
f = ST.FF_Core_Shell_Solvent(3, 0, 4, 5)  # nu, rho_solv, rho_core, rho_shell
s = ST.SAXScurve(f, d)

for rho_solvent in np.linspace(280, 420, 11):
    Rho_solvent = (rho_solvent,)
    I = np.array(s.compute(q, Rho_solvent + tuple(parameters)))

    print(rho_solvent - rhoav)

    result = np.empty(shape=(len(I), 2))
    result[:, 0] = q
    result[:, 1] = I
    np.savetxt('coreshell_solv_%.1f.dat' % (rho_solvent - rhoav), result)
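A quick worked check of the contrast-match point implied by this loop: with nu = 0.6666, rhocore = 300 and rhoshell = 400, rhoav = 400*(1 - 0.6666**3) + 300*0.6666**3 ≈ 370.4, so the rho_solvent values in np.linspace(280, 420, 11) bracket the match point where the printed contrast rho_solvent - rhoav changes sign.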
Example 47
 def __init__(self, signal=None, num_signals=None, sample_rate=None):
     ############### buffer ########################
     self.buffer = buffer(signal, sample_rate, num_signals)
     self.allData = np.empty((0, self.buffer.channels))
     ###### mutex lock
     self.mutexBuffer = Lock()
Example 48
    return fourier


folder0 = "/home/pi/New/number0/"
paths = glob.glob(os.path.join(folder0, '*.jpg'))
raw_data0 = []
for path in paths:

    img = cv2.imread(path)

    fd = image_process(img)
    raw_data0.append(fd)

raw_data0 = np.array(raw_data0)
np.save("/home/pi/New/train/data0", raw_data0)
y0 = np.empty(len(raw_data0))
y0[:] = 0
np.save("/home/pi/New/train/y0", y0)

folder1 = "/home/pi/New/number1/"
paths = glob.glob(os.path.join(folder1, '*.jpg'))
raw_data1 = []
for path in paths:
    img = cv2.imread(path)
    fd = image_process(img)
    raw_data1.append(fd)
raw_data1 = np.array(raw_data1)
np.save("/home/pi/New/train/data1", raw_data1)
y1 = np.empty(len(raw_data1))
y1[:] = 1
np.save("/home/pi/New/train/y1", y1)
Example 49
    def __init__(self,
                 pos,
                 bins=100,
                 pos0Range=None,
                 pos1Range=None,
                 mask=None,
                 mask_checksum=None,
                 allow_pos0_neg=False,
                 unit="undefined",
                 workgroup_size=256,
                 devicetype="all",
                 platformid=None,
                 deviceid=None,
                 profile=False):


        self.bins = bins
        self.lut_size = 0
        self.allow_pos0_neg = allow_pos0_neg

        if len(pos.shape) == 3:
            assert pos.shape[1] == 4
            assert pos.shape[2] == 2
        elif len(pos.shape) == 4:
            assert pos.shape[2] == 4
            assert pos.shape[3] == 2
        else:
            raise ValueError("Pos array dimentions are wrong")
        self.pos_size = pos.size
        self.size = self.pos_size/8
        self.pos = numpy.ascontiguousarray(pos.ravel(), dtype=numpy.float32)
        self.pos0Range = numpy.empty(2,dtype=numpy.float32)
        self.pos1Range = numpy.empty(2,dtype=numpy.float32)

        if (pos0Range is not None) and (len(pos0Range) == 2):
            self.pos0Range[0] = min(pos0Range) # do it on GPU?
            self.pos0Range[1] = max(pos0Range)
            if (not self.allow_pos0_neg) and (self.pos0Range[0] < 0):
                self.pos0Range[0] = 0.0
                if self.pos0Range[1] < 0:
                    print("Warning: Invalid 0-dim range! Using the data derived range instead")
                    self.pos0Range[1] = 0.0
            #self.pos0Range[0] = pos0Range[0]
            #self.pos0Range[1] = pos0Range[1]
        else:
            self.pos0Range[0] = 0.0
            self.pos0Range[1] = 0.0
        if (pos1Range is not None) and (len(pos1Range) == 2):
            self.pos1Range[0] = min(pos1Range) # do it on GPU?
            self.pos1Range[1] = max(pos1Range)
            #self.pos1Range[0] = pos1Range[0]
            #self.pos1Range[1] = pos1Range[1]
        else:
            self.pos1Range[0] = 0.0
            self.pos1Range[1] = 0.0

        if  mask is not None:
            assert mask.size == self.size
            self.check_mask = True
            self.cmask = numpy.ascontiguousarray(mask.ravel(), dtype=numpy.int8)
            if mask_checksum:
                self.mask_checksum = mask_checksum
            else:
                self.mask_checksum = crc32(mask)
        else:
            self.check_mask = False
            self.mask_checksum = None

        self._sem = threading.Semaphore()
        self.profile = profile
        self._cl_kernel_args = {}
        self._cl_mem = {}
        self.events = []
        self.workgroup_size = workgroup_size
        if self.size < self.workgroup_size:
            raise RuntimeError("Fatal error in workgroup size selection. Size (%d) must be >= workgroup size (%d)\n", self.size, self.workgroup_size)
        if (platformid is None) and (deviceid is None):
            platformid, deviceid = ocl.select_device(devicetype)
        elif platformid is None:
            platformid = 0
        elif deviceid is None:
            deviceid = 0
        self.platform = ocl.platforms[platformid]
        self.device = self.platform.devices[deviceid]
        self.device_type = self.device.type

        if (self.device_type == "CPU") and (self.platform.vendor == "Apple"):
            logger.warning("This is a workaround for Apple's OpenCL on CPU: enforce BLOCK_SIZE=1")
            self.workgroup_size = 1
        try:
            self._ctx = pyopencl.Context(devices=[pyopencl.get_platforms()[platformid].get_devices()[deviceid]])
            if self.profile:
                self._queue = pyopencl.CommandQueue(self._ctx, properties=pyopencl.command_queue_properties.PROFILING_ENABLE)
            else:
                self._queue = pyopencl.CommandQueue(self._ctx)
            self._compile_kernels()
            self._calc_boundaries()
            self._calc_LUT()
        except pyopencl.MemoryError as error:
            raise MemoryError(error)
Example 50
 def reset_data_store(self):
     self.allData = np.empty((0, self.buffer.channels))
     print('reset alldata E4 signal' + self.buffer.SIGNAL)
Example 51
File: fgcr.py Project: krox/gpt
        def inv(psi, src):
            self.history = []
            # verbosity
            verbose = g.default.is_verbose("fgcr")

            # timing
            t = g.timer("fgcr")
            t("setup")

            # parameters
            rlen = self.restartlen

            # tensors
            dtype_r, dtype_c = g.double.real_dtype, g.double.complex_dtype
            alpha = np.empty((rlen), dtype_c)
            beta = np.empty((rlen, rlen), dtype_c)
            gamma = np.empty((rlen), dtype_r)
            chi = np.empty((rlen), dtype_c)

            # fields
            r, mmpsi = g.copy(src), g.copy(src)
            p = [g.lattice(src) for i in range(rlen)]
            z = [g.lattice(src) for i in range(rlen)]

            # initial residual
            r2 = self.restart(mat, psi, mmpsi, src, r, p)

            # source
            ssq = g.norm2(src)
            if ssq == 0.0:
                assert r2 != 0.0  # need either source or psi to not be zero
                ssq = r2

            # target residual
            rsq = self.eps ** 2.0 * ssq

            for k in range(self.maxiter):
                # iteration within current krylov space
                i = k % rlen

                # iteration criteria
                reached_maxiter = k + 1 == self.maxiter
                need_restart = i + 1 == rlen

                t("prec")
                if prec is not None:
                    prec(p[i], r)
                else:
                    p[i] @= r

                t("mat")
                mat(z[i], p[i])

                t("ortho")
                g.default.push_verbose("orthogonalize", False)
                g.orthogonalize(z[i], z[0:i], beta[:, i])
                g.default.pop_verbose()

                t("linalg")
                ip, z2 = g.inner_product_norm2(z[i], r)
                gamma[i] = z2 ** 0.5
                if gamma[i] == 0.0:
                    g.message("fgcr: breakdown, gamma[%d] = 0" % (i))
                    break
                z[i] /= gamma[i]
                alpha[i] = ip / gamma[i]
                r2 = g.axpy_norm2(r, -alpha[i], z[i], r)

                t("other")
                self.history.append(r2)

                if verbose:
                    g.message(
                        "fgcr: res^2[ %d, %d ] = %g, target = %g" % (k, i, r2, rsq)
                    )

                if r2 <= rsq or need_restart or reached_maxiter:
                    t("update_psi")
                    self.update_psi(psi, alpha, beta, gamma, chi, p, i)
                    comp_res = r2 / ssq

                    if r2 <= rsq:
                        if verbose:
                            t()
                            g.message(
                                "fgcr: converged in %d iterations, took %g s"
                                % (k + 1, t.dt["total"])
                            )
                            g.message(t)
                            if self.checkres:
                                res = self.calc_res(mat, psi, mmpsi, src, r) / ssq
                                g.message(
                                    "fgcr: computed res = %g, true res = %g, target = %g"
                                    % (comp_res ** 0.5, res ** 0.5, self.eps)
                                )
                            else:
                                g.message(
                                    "fgcr: computed res = %g, target = %g"
                                    % (comp_res ** 0.5, self.eps)
                                )
                        break

                    if reached_maxiter:
                        if verbose:
                            t()
                            g.message(
                                "fgcr: did NOT converge in %d iterations, took %g s"
                                % (k + 1, t.dt["total"])
                            )
                            g.message(t)
                            if self.checkres:
                                res = self.calc_res(mat, psi, mmpsi, src, r) / ssq
                                g.message(
                                    "fgcr: computed res = %g, true res = %g, target = %g"
                                    % (comp_res ** 0.5, res ** 0.5, self.eps)
                                )
                            else:
                                g.message(
                                    "fgcr: computed res = %g, target = %g"
                                    % (comp_res ** 0.5, self.eps)
                                )
                        break

                    if need_restart:
                        t("restart")
                        r2 = self.restart(mat, psi, mmpsi, src, r, p)
                        if verbose:
                            g.message("fgcr: performed restart")
Example 52
def main(args):

    t = time.perf_counter()

    # Constants:
    resolution = 3500.  # lambda/dlambda
    fiber_diameter = 0.8  # Arcsec
    rn = 2.  # Detector readnoise (e-)
    dark = 0.0  # Detector dark-current (e-/s)

    # Temporary numbers that assume a given spectrograph PSF and LSF.
    # Assume 3 pixels per spectral and spatial FWHM.
    spatial_fwhm = 3.0
    spectral_fwhm = 3.0

    mags = numpy.arange(args.mag[0], args.mag[1] + args.mag[2], args.mag[2])
    times = numpy.arange(args.time[0], args.time[1] + args.time[2],
                         args.time[2])

    # Get source spectrum in 1e-17 erg/s/cm^2/angstrom. Currently, the
    # source spectrum is assumed to be
    #   - normalized by the total integral of the source flux
    #   - independent of position within the source
    wave = get_wavelength_vector(args.wavelengths[0], args.wavelengths[1],
                                 args.wavelengths[2])
    spec = get_spectrum(wave, mags[0], resolution=resolution)

    # Get the source distribution.  If the source is uniform, onsky is None.
    onsky = None

    # Get the sky spectrum
    sky_spectrum = spectrum.MaunakeaSkySpectrum()

    # Overplot the source and sky spectrum
    #    ax = spec.plot()
    #    ax = sky_spectrum.plot(ax=ax, show=True)

    # Get the atmospheric throughput
    atmospheric_throughput = efficiency.AtmosphericThroughput(
        airmass=args.airmass)

    # Set the telescope. Defines the aperture area and throughput
    # (nominally 3 aluminum reflections for Keck)
    telescope = telescopes.KeckTelescope()

    # Define the observing aperture; fiber diameter is in arcseconds,
    # center is 0,0 to put the fiber on the target center. "resolution"
    # sets the resolution of the fiber rendering; it has nothing to do
    # with spatial or spectral resolution of the instrument
    fiber = aperture.FiberAperture(0, 0, fiber_diameter, resolution=100)

    # Get the spectrograph throughput (circa June 2018; TODO: needs to
    # be updated). Includes fibers + foreoptics + FRD + spectrograph +
    # detector QE (not sure about ADC). Because this is the total
    # throughput, define a generic efficiency object.
    thru_db = numpy.genfromtxt(
        os.path.join(os.environ['SYNOSPEC_DIR'], 'data/efficiency',
                     'fobos_throughput.db'))
    spectrograph_throughput = efficiency.Efficiency(thru_db[:, 1],
                                                    wave=thru_db[:, 0])

    # System efficiency combines the spectrograph and the telescope
    system_throughput = efficiency.SystemThroughput(
        wave=spec.wave,
        spectrograph=spectrograph_throughput,
        telescope=telescope.throughput)

    # Instantiate the detector; really just a container for the rn and
    # dark current for now. QE is included in fobos_throughput.db file,
    # so I set it to 1 here.
    det = detector.Detector(rn=rn, dark=dark, qe=1.0)

    # Extraction: makes simple assumptions about the detector PSF for
    # each fiber spectrum and mimics a "perfect" extraction, including
    # an assumption of no cross-talk between fibers. Ignore the
    # "spectral extraction".
    extraction = extract.Extraction(det,
                                    spatial_fwhm=spatial_fwhm,
                                    spatial_width=1.5 * spatial_fwhm,
                                    spectral_fwhm=spectral_fwhm,
                                    spectral_width=spectral_fwhm)

    snr_label = 'S/N per {0}'.format('R element' if args.snr_units ==
                                     'resolution' else args.snr_units)

    # SNR
    g = efficiency.FilterResponse()
    r = efficiency.FilterResponse(band='r')
    snr_g = numpy.empty((mags.size, times.size), dtype=float)
    snr_r = numpy.empty((mags.size, times.size), dtype=float)
    for i in range(mags.size):
        spec.rescale_magnitude(mags[i], band=g)
        for j in range(times.size):
            print('{0}/{1} ; {2}/{3}'.format(i + 1, mags.size, j + 1,
                                             times.size),
                  end='\r')
            # Perform the observation
            obs = Observation(telescope,
                              sky_spectrum,
                              fiber,
                              times[j],
                              det,
                              system_throughput=system_throughput,
                              atmospheric_throughput=atmospheric_throughput,
                              airmass=args.airmass,
                              onsky_source_distribution=onsky,
                              source_spectrum=spec,
                              extraction=extraction,
                              snr_units=args.snr_units)

            # Construct the S/N spectrum
            snr_spec = obs.snr(sky_sub=True)
            snr_g[i,
                  j] = numpy.sum(g(snr_spec.wave) * snr_spec.flux) / numpy.sum(
                      g(snr_spec.wave))
            snr_r[i,
                  j] = numpy.sum(r(snr_spec.wave) * snr_spec.flux) / numpy.sum(
                      r(snr_spec.wave))
    print('{0}/{1} ; {2}/{3}'.format(i + 1, mags.size, j + 1, times.size))

    extent = [
        args.time[0] - args.time[2] / 2, args.time[1] + args.time[2] / 2,
        args.mag[0] - args.mag[2] / 2, args.mag[1] + args.mag[2] / 2
    ]

    w, h = pyplot.figaspect(1)
    fig = pyplot.figure(figsize=(1.5 * w, 1.5 * h))
    ax = fig.add_axes([0.15, 0.2, 0.7, 0.7])
    img = ax.imshow(snr_g,
                    origin='lower',
                    interpolation='nearest',
                    extent=extent,
                    aspect='auto',
                    norm=colors.LogNorm(vmin=snr_g.min(), vmax=snr_g.max()))
    cax = fig.add_axes([0.86, 0.2, 0.02, 0.7])
    pyplot.colorbar(img, cax=cax)
    cax.text(4,
             0.5,
             snr_label,
             ha='center',
             va='center',
             transform=cax.transAxes,
             rotation='vertical')
    ax.text(0.5,
            -0.08,
            'Exposure Time [s]',
            ha='center',
            va='center',
            transform=ax.transAxes)
    ax.text(-0.12,
            0.5,
            r'Surface Brightness [AB mag/arcsec$^2$]',
            ha='center',
            va='center',
            transform=ax.transAxes,
            rotation='vertical')
    ax.text(0.5,
            1.03,
            r'$g$-band S/N',
            ha='center',
            va='center',
            transform=ax.transAxes,
            fontsize=12)
    pyplot.show()
Example 53
        val_err += err
        val_acc += acc
        val_batches += 1

    # Then we print the results for this epoch:
    print("{:.4f}\t\t{:.4f}\t\t{:.4f}\t\t{}/{}\t\t{:.3f}".format(
        train_err / train_batches, val_err / val_batches,
        val_acc / val_batches, epoch + 1, num_epochs,
        time.time() - start_time))
    sys.stdout.flush()

# After training, we compute and print the test error:
test_err = 0
test_acc = 0
test_batches = 0
P = np.empty(shape=(0))
T = np.empty(shape=(0))
for batch in iterate_minibatches(X_test, y_test, batchsize, shuffle=False):
    inputs, targets = batch
    err, acc, pred = val_fn(inputs, targets)
    P = np.concatenate((P, np.array(pred)))
    T = np.concatenate((T, np.array(targets)))
    test_err += err
    test_acc += acc
    test_batches += 1
print("Final results:")
print("  test loss:\t\t\t{:.6f}".format(test_err / test_batches))
print("  test accuracy:\t\t{:.2f} %".format(test_acc / test_batches * 100))
sys.stdout.flush()

print("Confusion matrix:")
def _train(path_to_train_lmdb_dir, path_to_val_lmdb_dir, path_to_log_dir,
           path_to_restore_checkpoint_file, training_options, max_steps):
    batch_size = training_options['batch_size']
    initial_learning_rate = training_options['learning_rate']
    initial_patience = training_options['patience']
    num_steps_to_show_loss = 100
    num_steps_to_check = training_options["validation_interval"]

    step = 0
    patience = initial_patience
    best_accuracy = 0.0
    duration = 0.0

    model = Model()
    model.cuda()

    transform = transforms.Compose([
        transforms.RandomCrop([54, 54]),
        transforms.ToTensor(),
        transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
    ])
    train_loader = torch.utils.data.DataLoader(Dataset(path_to_train_lmdb_dir, transform),
                                               batch_size=batch_size, shuffle=True,
                                               num_workers=0, pin_memory=True)
    evaluator = Evaluator(path_to_val_lmdb_dir)
    optimizer = optim.SGD(model.parameters(), lr=initial_learning_rate, momentum=0.9, weight_decay=0.0005)
    scheduler = StepLR(optimizer, step_size=training_options['decay_steps'], gamma=training_options['decay_rate'])

    if path_to_restore_checkpoint_file is not None:
        assert os.path.isfile(path_to_restore_checkpoint_file), '%s not found' % path_to_restore_checkpoint_file
        step = model.restore(path_to_restore_checkpoint_file)
        scheduler.last_epoch = step
        print('Model restored from file: %s' % path_to_restore_checkpoint_file)

    path_to_losses_npy_file = os.path.join(path_to_log_dir, 'losses.npy')
    if os.path.isfile(path_to_losses_npy_file):
        losses = np.load(path_to_losses_npy_file)
    else:
        losses = np.empty([0], dtype=np.float32)

    path_to_test_losses_npy_file = os.path.join(path_to_log_dir, 'test_losses.npy')
    if os.path.isfile(path_to_test_losses_npy_file):
        test_losses = np.load(path_to_test_losses_npy_file)
    else:
        test_losses = np.empty([0], dtype=np.float32)

    train_loss_array = []
    val_loss_array = []
    model_checkpoints = []
    model_saved = False

    # Used to save model (checkpoint) every 2 epochs
    model_save_counter = 0

    while True:
        for batch_idx, (images, length_labels, digits_labels, _) in enumerate(train_loader):
            start_time = time.time()
            images, length_labels, digits_labels = images.cuda(), length_labels.cuda(), [digit_labels.cuda() for digit_labels in digits_labels]
            length_logits, digit1_logits, digit2_logits, digit3_logits, digit4_logits, digit5_logits = model.train()(images)
            loss = _loss(length_logits, digit1_logits, digit2_logits, digit3_logits, digit4_logits, digit5_logits, length_labels, digits_labels)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            scheduler.step()
            step += 1
            duration += time.time() - start_time

            if step % num_steps_to_show_loss == 0:
                examples_per_sec = batch_size * num_steps_to_show_loss / duration
                duration = 0.0
                print('=> %s: step %d, loss = %f, learning_rate = %f (%.1f examples/sec)' % (
                    datetime.now(), step, loss.item(), scheduler.get_lr()[0], examples_per_sec))

            if step % num_steps_to_check != 0:
                continue

            model_save_counter += 1
            losses = np.append(losses, loss.item())
            np.save(path_to_losses_npy_file, losses)
            train_loss_array.append((step, loss.item()))

            print('=> Evaluating on validation dataset...')
            accuracy, test_loss_args = evaluator.evaluate(model)
            test_loss = _loss(*test_loss_args)
            val_loss_array.append((step, test_loss.item()))

            print('==> accuracy = %f, best accuracy %f' % (accuracy, best_accuracy))
            # print(f'==> loss = {test_loss}')

            # Save model every 2 epochs
            if model_save_counter >= 2 or step in  [1000, 2000, 3000, 4000, 5000]:
                path_to_checkpoint_file = model.store(path_to_log_dir, step=step)
                print('=> Model saved to file: %s' % path_to_checkpoint_file)
                model_save_counter = 0
                model_saved = True
                model_checkpoints.append((step, f"model-{step}.pth"))

            if accuracy > best_accuracy:
                patience = initial_patience
                best_accuracy = accuracy
            else:
                patience -= 1

            print("Train losses: ", train_loss_array)
            print("Saved Model Checkpoints: ", model_checkpoints)

            print('=> patience = %d' % patience)
            if patience == 0 or step >= max_steps:
                if not model_saved:
                    path_to_checkpoint_file = model.store(path_to_log_dir, step=step)
                    print('=> Model MANUALLY saved to file: %s' % path_to_checkpoint_file)
                    model_checkpoints.append((step, f"model-{step}.pth"))

                training_output = {
                    "model_checkpoints": model_checkpoints,
                    "train_loss": train_loss_array,
                    "val_loss": val_loss_array,
                }
                print("TRAINING OUTPUT -----------------------------")
                print(training_output)
                return training_output
Example 55
 def __init__(self, ntiling, memory_size=1e6):
     self.ntiling = ntiling
     self.memory_size = int(memory_size)
     self.tiles = np.empty((ntiling, ), dtype=np.int32)
Example 56
import matplotlib.pyplot as plt

#####################
# process data

# process train data
raw_data = np.genfromtxt('data/train.csv', delimiter=',')
data = raw_data[1:, 3:]
data[np.isnan(data)] = 0  # process nan

# Dictionary: key:month value:month data
month_data = {}

# make data timeline continuous
for month in range(12):
    temp = np.empty(shape=(18, 20 * 24))
    for day in range(20):
        temp[:, day * 24:(day + 1) *
             24] = data[(month * 20 + day) * 18:(month * 20 + day + 1) * 18, :]
    month_data[month] = temp

# x_data v1: only consider PM2.5
x_data = np.empty(shape=(12 * 471, 9))
y_data = np.empty(shape=(12 * 471, 1))
for month in range(12):
    for i in range(471):
        x_data[month * 471 + i][:] = month_data[month][9][i:i + 9]
        y_data[month * 471 + i] = month_data[month][9][i + 9]
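As a hedged aside, the explicit windowing loop above can also be written with numpy's sliding_window_view (available in numpy >= 1.20); an equivalent sketch for one month:

# from numpy.lib.stride_tricks import sliding_window_view
# pm25 = month_data[month][9]              # 480 hourly PM2.5 values
# windows = sliding_window_view(pm25, 10)  # shape (471, 10)
# x_data[month * 471:(month + 1) * 471] = windows[:, :9]
# y_data[month * 471:(month + 1) * 471, 0] = windows[:, 9]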

# process test data
test_raw_data = np.genfromtxt('data/test.csv', delimiter=',')
Example 57
def sem_generator(
    graph: nx.DiGraph,
    schema: Optional[Dict] = None,
    default_type: str = "continuous",
    noise_std: float = 1.0,
    n_samples: int = 1000,
    distributions: Dict[str, str] = None,
    intercept: bool = True,
    seed: int = None,
) -> pd.DataFrame:
    """
    Generator for tabular data with mixed variable types from a DAG.

    Supported variable types: `'binary', 'categorical', 'continuous'`. The number
    of categories can be determined using a colon, e.g. `'categorical:5'`
    specifies a categorical feature with 5 categories.

    Notation: For binary and continuous variables, a ``variable'' refers to a
    ``node'', a ``feature'' refers to the one-hot column for categorical
    variables and is equivalent to a binary or continuous variable.

    Args:
        graph: A DAG in form of a networkx or StructureModel.
        schema: Dictionary with schema for a node/variable, if a node is missing
            uses ``default_type``. Format, {node_name: variable type}.
        default_type: The default data type for a node/variable not listed
            in the schema, or when the schema is empty.
        noise_std: The standard deviation of the noise. The binary and
            categorical features are created using a latent variable approach.
            The noise standard deviation determines how much weight the "mean"
            estimate has on the feature value.
        n_samples: The number of rows/observations to sample.
        distributions:
            ``continuous'': The type of distribution to use for the noise
                of a continuous variable. Options: 'gaussian'/'normal' (alias)
                (default), 'student-t', 'exponential', 'gumbel'.
            ``binary'': The type of distribution to use for the noise
                of the latent binary variable. Options: 'probit'/'normal' (alias),
                'logit' (default).
            ``categorical'': The type of distribution to use for the noise
                of a latent continuous feature. Options: 'probit'/'normal' (alias),
                'logit'/'gumbel' (alias) (default).
            ``weight'': The type of distribution to use for the linear coefficients.
                Options: 'gaussian'/'normal' (alias), 'uniform' (default).
            ``intercept'': The type of distribution to use for the intercept. For
                binary/categorical: this is the mean in the latent space.
                Options: 'gaussian'/'normal' (alias), 'uniform' (default).
        intercept: Whether to use an intercept for each feature. The intercept
            is sampled once and held constant for all rows. For binary or
            categorical the intercept determines the class imbalance.
        seed: Random State

    Returns:
        DataFrame with generated features, uses a one-hot coding for
        categorical features.

    Raises:
        ValueError: if the graph is not a DAG.
        ValueError: if schema variable type is not in `'binary', 'categorical',
            'categorical:X', 'continuous'` (with X the number of categories).
        ValueError: if distributions['continuous'] is not 'gaussian', 'normal', 'student-t',
            'exponential', 'gumbel'.
        ValueError: if distributions['binary'] is not 'probit', 'normal', 'logit'.
        ValueError: if distributions['categorical'] is not 'probit', 'normal', 'logit', 'gumbel'.
        ValueError: if distributions['weight'] is not 'normal' / 'gaussian' (alias), 'uniform'.
        ValueError: if distributions['intercept'] is not 'normal' / 'gaussian' (alias), 'uniform'.


    Example:
        sm = StructureModel()

        sm.add_edges_from([('A', 'C'), ('D', 'C'), ('E', 'D')])

        sm.add_nodes_from(['B', 'F'])

        schema = {'B': 'binary', 'C': 'categorical:5',
                  'E': 'binary', 'F': 'continuous'}

        df = sem_generator(sm, schema, noise_std=1,
                           n_samples=10000,
                           intercept=True,
                           )
    """

    np.random.seed(seed)

    if not nx.algorithms.is_directed_acyclic_graph(graph):
        raise ValueError("Provided graph is not a DAG.")

    distributions = _set_default_distributions(distributions=distributions)
    validated_schema = validate_schema(
        nodes=graph.nodes(), schema=schema, default_type=default_type
    )
    var_fte_mapper = VariableFeatureMapper(validated_schema)

    n_columns = var_fte_mapper.n_features

    # get dependence based on edges in graph (not via adjacency matrix)
    w_mat = _create_weight_matrix(
        edges_w_weights=graph.edges(data="weight"),
        variable_to_indices_dict=var_fte_mapper.var_indices_dict,
        weight_distribution=distributions["weight"],
        intercept_distribution=distributions["intercept"],
        intercept=intercept,
    )

    # pre-allocate array
    x_mat = np.empty([n_samples, n_columns + 1 if intercept else n_columns])
    # intercept, append ones to the feature matrix
    if intercept:
        x_mat[:, -1] = 1

    # loop over sorted features according to ancestry (no parents first)
    for j_node in nx.topological_sort(graph):
        # all feature indices corresponding to the node/variable
        j_idx_list = var_fte_mapper.get_indices(j_node)

        # get all parent feature indices for the variable/node
        parents_idx = var_fte_mapper.get_indices(list(graph.predecessors(j_node)))
        if intercept:
            parents_idx += [n_columns]

        # continuous variable
        if var_fte_mapper.is_var_of_type(j_node, "continuous"):
            x_mat[:, j_idx_list[0]] = _add_continuous_noise(
                mean=x_mat[:, parents_idx].dot(w_mat[parents_idx, j_idx_list[0]]),
                distribution=distributions["continuous"],
                noise_std=noise_std,
            )

        # binary variable
        elif var_fte_mapper.is_var_of_type(j_node, "binary"):
            x_mat[:, j_idx_list[0]] = _sample_binary_from_latent(
                latent_mean=x_mat[:, parents_idx].dot(
                    w_mat[parents_idx, j_idx_list[0]]
                ),
                distribution=distributions["binary"],
                noise_std=noise_std,
            )

        # categorical variable
        elif var_fte_mapper.is_var_of_type(j_node, "categorical"):
            x_mat[:, j_idx_list] = _sample_categories_from_latent(
                latent_mean=x_mat[:, parents_idx].dot(
                    w_mat[np.ix_(parents_idx, j_idx_list)]
                ),
                distribution=distributions["categorical"],
                noise_std=noise_std,
            )

    return pd.DataFrame(
        x_mat[:, :-1] if intercept else x_mat, columns=var_fte_mapper.feature_list
    )
Example 58
    def __init__(
        self,
        data=None,
        *,
        symbol='o',
        size=10,
        edge_width=1,
        edge_color='black',
        face_color='white',
        n_dimensional=False,
        name=None,
        metadata=None,
        scale=None,
        translate=None,
        opacity=1,
        blending='translucent',
        visible=True,
    ):
        if data is None:
            data = np.empty((0, 2))
        ndim = data.shape[1]
        super().__init__(
            ndim,
            name=name,
            metadata=metadata,
            scale=scale,
            translate=translate,
            opacity=opacity,
            blending=blending,
            visible=visible,
        )

        self.events.add(
            mode=Event,
            size=Event,
            edge_width=Event,
            face_color=Event,
            edge_color=Event,
            symbol=Event,
            n_dimensional=Event,
            highlight=Event,
        )
        self._colors = get_color_names()

        # Save the point coordinates
        self._data = data
        self.dims.clip = False

        # Save the point style params
        self.symbol = symbol
        self._n_dimensional = n_dimensional
        self.edge_width = edge_width

        # The following point properties are for the new points that will
        # be added. For any given property, if a list is passed to the
        # constructor so each point gets its own value then the default
        # value is used when adding new points
        if np.isscalar(size):
            self._size = size
        else:
            self._size = 10

        if type(edge_color) is str:
            self._edge_color = edge_color
        else:
            self._edge_color = 'black'

        if type(face_color) is str:
            self._face_color = face_color
        else:
            self._face_color = 'white'

        # Indices of selected points
        self._selected_data = []
        self._selected_data_stored = []
        self._selected_data_history = []
        # Indices of selected points within the currently viewed slice
        self._selected_view = []
        # Index of hovered point
        self._value = None
        self._value_stored = None
        self._selected_box = None
        self._mode = Mode.PAN_ZOOM
        self._mode_history = self._mode
        self._status = self.mode

        self._drag_start = None

        # Nx2 array of points in the currently viewed slice
        self._data_view = np.empty((0, 2))
        # Sizes of points in the currently viewed slice
        self._sizes_view = 0
        # Full data indices of points located in the currently viewed slice
        self._indices_view = []

        self._drag_box = None
        self._drag_box_stored = None
        self._is_selecting = False
        self._clipboard = {}

        self.edge_colors = list(
            itertools.islice(ensure_iterable(edge_color, color=True), 0,
                             len(self.data)))
        self.face_colors = list(
            itertools.islice(ensure_iterable(face_color, color=True), 0,
                             len(self.data)))
        self.sizes = size

        # Trigger generation of view slice and thumbnail
        self._update_dims()
Example 59
def get_pixeldata(ds):
    """Return a :class:`numpy.ndarray` of the pixel data.

    Parameters
    ----------
    ds : Dataset
        The :class:`Dataset` containing an Image Pixel, Floating Point Image
        Pixel or Double Floating Point Image Pixel module and the
        *Pixel Data*, *Float Pixel Data* or *Double Float Pixel Data* to be
        converted. If (0028,0004) *Photometric Interpretation* is
        `'YBR_FULL_422'` then the pixel data will be
        resampled to 3 channel data as per Part 3, :dcm:`Annex C.7.6.3.1.2
        <part03/sect_C.7.6.3.html#sect_C.7.6.3.1.2>` of the DICOM Standard.

    Returns
    -------
    np.ndarray
        The contents of (7FE0,0010) *Pixel Data* as a 1D array.
    """
    tsyntax = ds.file_meta.TransferSyntaxUID
    # The check of transfer syntax must be first
    if tsyntax not in SUPPORTED_TRANSFER_SYNTAXES:
        raise NotImplementedError(
            "Unable to convert the pixel data as the transfer syntax "
            "is not supported by the pylibjpeg pixel data handler."
        )

    # Check required elements
    required_elements = [
        'BitsAllocated', 'Rows', 'Columns', 'PixelRepresentation',
        'SamplesPerPixel', 'PhotometricInterpretation', 'PixelData',
    ]
    missing = [elem for elem in required_elements if elem not in ds]
    if missing:
        raise AttributeError(
            "Unable to convert the pixel data as the following required "
            "elements are missing from the dataset: " + ", ".join(missing)
        )

    # Calculate the expected length of the pixel data (in bytes)
    #   Note: this does NOT include the trailing null byte for odd length data
    expected_len = get_expected_length(ds)
    if ds.PhotometricInterpretation == 'YBR_FULL_422':
        # libjpeg has already resampled the pixel data, see PS3.3 C.7.6.3.1.2
        expected_len = expected_len // 2 * 3

    p_interp = ds.PhotometricInterpretation

    # How long each frame is in bytes
    nr_frames = getattr(ds, 'NumberOfFrames', 1)
    frame_len = expected_len // nr_frames

    # The decoded data will be placed here
    arr = np.empty(expected_len, np.uint8)

    # Generators for the encoded JPG image frame(s) and insertion offsets
    generate_frames = generate_pixel_data_frame(ds.PixelData, nr_frames)
    generate_offsets = range(0, expected_len, frame_len)
    for frame, offset in zip(generate_frames, generate_offsets):
        # Encoded JPG data to be sent to the decoder
        frame = np.frombuffer(frame, np.uint8)
        arr[offset:offset + frame_len] = decode_pixel_data(
            frame, ds.group_dataset(0x0028)
        )

    return arr.view(pixel_dtype(ds))
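A hedged usage sketch (the file name is hypothetical): read a dataset with pydicom, decode, and reshape the returned 1D array to the stated image geometry.

from pydicom import dcmread

ds = dcmread('compressed_ct.dcm')  # hypothetical input file
arr = get_pixeldata(ds)            # 1D array, dtype given by pixel_dtype(ds)
frame = arr[:ds.Rows * ds.Columns].reshape(ds.Rows, ds.Columns)  # first frame, single-sample data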
Example 60
def test_pulls(mock_draw):
    bestfit = np.asarray([0.8, 1.0, 1.05, 1.1])
    uncertainty = np.asarray([0.9, 1.0, 0.03, 0.7])
    labels = ["a", "b", "staterror_region[bin_0]", "c"]
    exclude_list = ["a"]
    folder_path = "tmp"
    fit_results = fit.FitResults(bestfit, uncertainty, labels, np.empty(0), 1.0)

    filtered_bestfit = np.asarray([1.0, 1.1])
    filtered_uncertainty = np.asarray([1.0, 0.7])
    filtered_labels = np.asarray(["b", "c"])
    figure_path = pathlib.Path(folder_path) / "pulls.pdf"

    # with filtering
    visualize.pulls(
        fit_results,
        figure_folder=folder_path,
        exclude_list=exclude_list,
        method="matplotlib",
    )

    mock_draw.assert_called_once()
    assert np.allclose(mock_draw.call_args[0][0], filtered_bestfit)
    assert np.allclose(mock_draw.call_args[0][1], filtered_uncertainty)
    assert np.any(
        [
            mock_draw.call_args[0][2][i] == filtered_labels[i]
            for i in range(len(filtered_labels))
        ]
    )
    assert mock_draw.call_args[0][3] == figure_path
    assert mock_draw.call_args[1] == {}

    # without filtering via list, but with staterror removal
    # and fixed parameter removal
    fit_results.uncertainty[0] = 0.0

    bestfit_expected = np.asarray([1.0, 1.1])
    uncertainty_expected = np.asarray([1.0, 0.7])
    labels_expected = ["b", "c"]
    visualize.pulls(fit_results, figure_folder=folder_path, method="matplotlib")

    assert np.allclose(mock_draw.call_args[0][0], bestfit_expected)
    assert np.allclose(mock_draw.call_args[0][1], uncertainty_expected)
    assert np.any(
        [
            mock_draw.call_args[0][2][i] == labels_expected[i]
            for i in range(len(labels_expected))
        ]
    )
    assert mock_draw.call_args[0][3] == figure_path
    assert mock_draw.call_args[1] == {}

    # unknown plotting method
    with pytest.raises(NotImplementedError, match="unknown backend: unknown"):
        visualize.pulls(
            fit_results,
            figure_folder=folder_path,
            exclude_list=exclude_list,
            method="unknown",
        )