Example 1
def test_operations():
    a = np.ones((2, 3))
    b = np.ones((2, 3))
    c = a + b
    d = -c
    print(d)
    e = np.sin(c**2).T
    print(e)
    f = np.maximum(a, c)
    print(f)
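    # Expected output: d is a (2, 3) array of -2.0; e is sin(4) everywhere
    # with transposed shape (3, 2); f is the elementwise maximum, all 2.0.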
Example 2
def test_creation():
    a = np.array([1, 2, 3])
    print(a)
    b = np.array([[1, 2, 3], [2, 3, 4]])
    print(b)
    a = np.zeros((2, 3))
    print(a)
    b = np.ones((2, 3))
    print(b)
    c = np.full((2, 3), 7)
    print(c)
    d = np.empty((2, 3))
    print(d)
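    # Note: np.empty returns uninitialized memory, so the values printed for
    # d are arbitrary and will differ from run to run.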
Example 3
def gaussian_cluster_generator(num_samples=10000,
                               num_features=500,
                               num_classes=5):
    mu = np.random.rand(num_classes, num_features)
    sigma = np.ones((num_classes, num_features)) * 0.1
    num_cls_samples = num_samples // num_classes
    x = np.zeros((num_samples, num_features))
    y = np.zeros((num_samples, num_classes))
    for i in range(num_classes):
        cls_samples = np.random.normal(mu[i, :], sigma[i, :],
                                       (num_cls_samples, num_features))
        x[i * num_cls_samples:(i + 1) * num_cls_samples] = cls_samples
        y[i * num_cls_samples:(i + 1) * num_cls_samples, i] = 1
    return x, y
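
A quick shape check for the generator above (a minimal sketch; assumes numpy is imported as np):

x, y = gaussian_cluster_generator(num_samples=100, num_features=5, num_classes=5)
print(x.shape)  # (100, 5): 20 samples drawn around each of the 5 class means
print(y.shape)  # (100, 5): one-hot class labels, one row per sample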
Example 4
File: init.py Project: wddabc/minpy
def constant(shape, config):
    """Initialize weights with constant value.

    Parameters
    ----------
    shape : tuple
        Shape of the array to be initialized.
    config : dict
        Configuration dict; key 'value' gives the constant used to
        initialize the array (defaults to 0.0).

    Returns
    -------
    Array
        Initialized array of shape `shape` filled with `config['value']`.

    """
    config.setdefault('value', 0.0)
    val = config['value']
    return np.ones(shape) * val
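
A minimal usage sketch for this initializer (hypothetical values):

w = constant((2, 3), {'value': 7.0})   # (2, 3) array filled with 7.0
b = constant((4,), {})                 # missing 'value' defaults to zeros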
Example 5
def test_NDArrayIter():
    # check minpy.ndarray as input
    datas = np.ones([1000, 2, 2])
    labels = np.ones([1000, 1])
    for i in range(1000):
        datas[i] = i / 100
        labels[i] = i / 100
    dataiter = io.NDArrayIter(datas,
                              labels,
                              batch_size=100,
                              shuffle=True,
                              last_batch_handle='pad')
    batchidx = 0
    for batch in dataiter:
        batchidx += 1
    assert (batchidx == 10)
    dataiter = io.NDArrayIter(datas,
                              labels,
                              batch_size=100,
                              shuffle=False,
                              last_batch_handle='pad')
    batchidx = 0
    labelcount = [0 for i in range(10)]
    for batch in dataiter:
        label = batch.label[0].asnumpy().flatten()
        assert ((batch.data[0].asnumpy()[:, 0, 0] == label).all())
        for i in range(label.shape[0]):
            labelcount[int(label[i])] += 1
    for i in range(10):
        assert (labelcount[i] == 100)

    # check numpy.ndarray as input
    datas = ori_np.ones([1000, 2, 2])
    labels = ori_np.ones([1000, 1])
    for i in range(1000):
        datas[i] = i / 100
        labels[i] = i / 100
    dataiter = io.NDArrayIter(datas,
                              labels,
                              batch_size=100,
                              shuffle=True,
                              last_batch_handle='pad')
    batchidx = 0
    for batch in dataiter:
        batchidx += 1
    assert (batchidx == 10)
    dataiter = io.NDArrayIter(datas,
                              labels,
                              batch_size=100,
                              shuffle=False,
                              last_batch_handle='pad')
    batchidx = 0
    labelcount = [0 for i in range(10)]
    for batch in dataiter:
        label = batch.label[0].asnumpy().flatten()
        assert ((batch.data[0][:, 0, 0].asnumpy() == label).all())
        for i in range(label.shape[0]):
            labelcount[int(label[i])] += 1
    for i in range(10):
        assert (labelcount[i] == 100)

    # check padding
    datas = np.ones([1000, 2, 2])
    labels = np.ones([1000, 1])
    for i in range(1000):
        datas[i] = i / 100
        labels[i] = i / 100
    dataiter = io.NDArrayIter(datas,
                              labels,
                              batch_size=128,
                              shuffle=True,
                              last_batch_handle='pad')
    batchidx = 0
    for batch in dataiter:
        batchidx += 1
    assert (batchidx == 8)
    dataiter = io.NDArrayIter(datas,
                              labels,
                              batch_size=128,
                              shuffle=False,
                              last_batch_handle='pad')
    batchidx = 0
    labelcount = [0 for i in range(10)]
    for batch in dataiter:
        label = batch.label[0].asnumpy().flatten()
        assert ((batch.data[0].asnumpy()[:, 0, 0] == label).all())
        for i in range(label.shape[0]):
            labelcount[int(label[i])] += 1
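
    # 1000 samples with batch_size=128 yield 8 batches; the last one is padded
    # by wrapping around to the start, so 24 extra samples land in label
    # bucket 0, giving 100 + 24 = 124 there and 100 everywhere else.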

    for i in range(10):
        if i == 0:
            assert (labelcount[i] == 124)
        else:
            assert (labelcount[i] == 100)

    # check padding
    datas = ori_np.ones([1000, 2, 2])
    labels = ori_np.ones([1000, 1])
    for i in range(1000):
        datas[i] = i / 100
        labels[i] = i / 100
    dataiter = io.NDArrayIter(datas,
                              labels,
                              batch_size=128,
                              shuffle=True,
                              last_batch_handle='pad')
    batchidx = 0
    for batch in dataiter:
        batchidx += 1
    assert (batchidx == 8)
    dataiter = io.NDArrayIter(datas,
                              labels,
                              batch_size=128,
                              shuffle=False,
                              last_batch_handle='pad')
    batchidx = 0
    labelcount = [0 for i in range(10)]
    for batch in dataiter:
        label = batch.label[0].asnumpy().flatten()
        assert ((batch.data[0].asnumpy()[:, 0, 0] == label).all())
        for i in range(label.shape[0]):
            labelcount[int(label[i])] += 1

    for i in range(10):
        if i == 0:
            assert (labelcount[i] == 124)
        else:
            assert (labelcount[i] == 100)
Example 6
def constant(shape, config):
    config.setdefault('value', 0.0)
    val = config['value']
    return np.ones(shape) * val
Example 7
  def sample(self, features, max_length=30):
    """
    Run a test-time forward pass for the model, sampling captions for input
    feature vectors.

    At each timestep, we embed the current word, pass it and the previous hidden
    state to the RNN to get the next hidden state, use the hidden state to get
    scores for all vocab words, and choose the word with the highest score as
    the next word. The initial hidden state is computed by applying an affine
    transform to the input image features, and the initial word is the <START>
    token.

    For LSTMs you will also have to keep track of the cell state; in that case
    the initial cell state should be zero.

    Inputs:
    - features: Array of input image features of shape (N, D).
    - max_length: Maximum length T of generated captions.

    Returns:
    - captions: Array of shape (N, max_length) giving sampled captions,
      where each element is an integer in the range [0, V). The first element
      of captions should be the first sampled word, not the <START> token.
    """
    N = features.shape[0]
    #captions = self._null * np.ones((N, max_length), dtype=np.int32)
    captions = self._null * np.ones((N, max_length), dtype=int)

    # Unpack parameters
    W_proj, b_proj = self.params['W_proj'], self.params['b_proj']
    W_embed = self.params['W_embed']
    Wx, Wh, b = self.params['Wx'], self.params['Wh'], self.params['b']
    W_vocab, b_vocab = self.params['W_vocab'], self.params['b_vocab']
    
    h = affine_forward(features, W_proj, b_proj)

    if self.cell_type == 'lstm':
      c = np.zeros(h.shape)

    embed = self._start * np.ones(N, dtype=int)
    
    
    for t in range(max_length):
      # (1) Embed the previous word by indexing into the embedding matrix
      #     (equivalent to a word-embedding forward lookup).
      word_vec = W_embed[embed]

      # (2) Make an RNN / LSTM step using the previous hidden state and the
      #     embedded current word to get the next hidden state.
      if self.cell_type == 'rnn':
        h = rnn_step_forward(word_vec, h, Wx, Wh, b)
      else:
        h, c = lstm_step_forward(word_vec, h, c, Wx, Wh, b)

      # (3) Apply the learned affine transformation to the next hidden state
      #     to get scores for all words in the vocabulary.
      scores = affine_forward(h, W_vocab, b_vocab)

      # (4) Select the word with the highest score as the next word, writing
      #     it to the appropriate slot in the captions variable.
      embed = np.argmax(scores, axis=1)
      captions[:, t] = embed
    return captions
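
As an aside, steps (1)-(4) of the greedy decode above can be sketched in plain
NumPy with toy sizes and random (hypothetical) weights:

import numpy as np

rng = np.random.default_rng(0)
N, V, D, H, T = 2, 10, 4, 8, 5      # batch, vocab, embed dim, hidden dim, steps
W_embed = rng.standard_normal((V, D))
Wx, Wh, b = rng.standard_normal((D, H)), rng.standard_normal((H, H)), np.zeros(H)
W_vocab, b_vocab = rng.standard_normal((H, V)), np.zeros(V)

h = np.zeros((N, H))
word = np.zeros(N, dtype=int)       # pretend the <START> token has index 0
captions = np.zeros((N, T), dtype=int)
for t in range(T):
    x = W_embed[word]                        # (1) embed the previous word
    h = np.tanh(x @ Wx + h @ Wh + b)         # (2) vanilla RNN step
    scores = h @ W_vocab + b_vocab           # (3) scores over the vocabulary
    word = np.argmax(scores, axis=1)         # (4) greedy pick
    captions[:, t] = word
print(captions)                              # (2, 5) array of word indices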
Example 8
    def MTNE(self):
        # dictionary
        D = np.random.rand(self.p, self.m)

        lambda_ff_list = []

        # Author
        Aprime_list = []
        # weight
        W_list = []
        # dense vector
        F_list = []
        # similarity local
        X_list = []
        X_mask_list = []

        # all sparse embeddings across all timestamps

        # F_big=np.zeros((self.q,self.k))
        X_big = np.zeros((self.q, self.q))
        X_mask_big = np.zeros((self.q, self.q))

        S = np.random.rand(self.q, self.q)

        indexDict_local2global = collections.OrderedDict()
        indexDict_global2local = dict()
        globalIndex = 0

        for key in self.edgeDict:

            A = self.edgeDict[key]

            X = self.initX(A, self.theta)
            X = X / (np.amax(X) - np.amin(X))
            X_list.append(X)

            X_mask = np.zeros((self.q, self.q))
            # number of nodes in the current time
            n = A.shape[0]

            Aprime = np.random.rand(n, self.p)
            Aprime_list.append(Aprime)

            indexDict = dict()
            for i in range(n):
                indexDict[i] = globalIndex + i
                indexDict_global2local[globalIndex + i] = (key, i)
            indexDict_local2global[key] = indexDict

            for i in range(n):
                i_big = indexDict[i]
                for j in range(n):
                    j_big = indexDict[j]
                    X_big[i_big, j_big] = X[i, j]
                    X_mask[i_big, j_big] = 1.
                    X_mask_big[i_big, j_big] = 1.
            X_mask_list.append(X_mask)

            globalIndex += n

            W = np.random.rand(n, self.p)
            W_list.append(W)

            F = np.random.rand(n, self.m)
            F_list.append(F)

            lambda_ff_list.append(random.random())

        F_big = self.concatenateMatrixInList(F_list, self.m, 0)

        loss_t1 = 1000000000.0

        print(loss_t1)
        loss_t = self.epsilon + loss_t1 + 1
        while abs(loss_t - loss_t1) >= self.epsilon:

            # optimize each element in randomized sequence
            nita = 1. / math.sqrt(self.t)
            self.t = self.t + 1
            loss_t = loss_t1
            loss_t1 = 0.0

            counter = 0

            for key in self.edgeDict:

                X = X_list[counter]
                W = W_list[counter]
                F = F_list[counter]
                Aprime = Aprime_list[counter]
                indexDict = indexDict_local2global[key]
                lambda_ff = lambda_ff_list[counter]
                X_mask = X_mask_list[counter]

                P = self.getP(X_mask, F_big, X_big)

                n = X.shape[0]

                for i in range(n):
                    # for A: proximal (ISTA) step with L1 soft-thresholding
                    z = Aprime[i] - nita * (
                        np.dot(np.dot(Aprime[i], W.T) - X[i], W) +
                        self.beta * np.dot(np.dot(Aprime[i], D) - F[i], D.T))
                    update = np.maximum(np.zeros(np.shape(z)),
                                        np.abs(z) - nita * self.lamda_pgd)
                    Aprime[i] = np.sign(z) * update

                    # for F
                    lf_part1 = self.beta * F[i] - np.dot(Aprime[i], D)
                    lf_part2 = np.zeros(self.m)
                    i_big_index = indexDict[i]
                    for j in range(self.q):
                        lf_part2 += self.rho * (F[i] -
                                                F_big[j]) * S[i_big_index, j]

                    val1 = np.dot(F[i], F_big.T)
                    val2 = val1 - np.ones(self.q)
                    val3 = np.dot(val2, F_big)
                    lf_part3 = 0.01 * val3
                    F[i] = F[i] - nita * (lf_part1 + lf_part2 + lf_part3)
                    F_big[i_big_index] = F[i]

                    # vec=np.dot(F[i],F_big.T)-np.ones(self.q)
                    # # print vec.shape
                    # lambda_ff=lambda_ff-nita*np.linalg.norm(vec)

                    # for S
                    ls = (S[i_big_index] -
                          P[i_big_index]) - self.epsilon * np.ones(
                              self.q) - self.alpha * S[i_big_index]
                    S[i_big_index] = S[i_big_index] - nita * ls

                Aprime = self.chechnegtive(Aprime, None, None)

                LW = np.dot((np.dot(W, Aprime.T) - X), Aprime) + self.lamda * W
                W = W - nita * LW
                W = self.chechnegtive(W, None, None)

                p1 = np.dot(Aprime, D) - F
                p2 = self.beta * np.dot(Aprime.T, p1)
                LD = p2 + self.gamma * D
                D = D - nita * LD

                W_list[counter] = W
                F_list[counter] = F
                Aprime_list[counter] = Aprime
                lambda_ff_list[counter] = lambda_ff

                loss_t1_part = self.lossfuction(X, W, Aprime, F, D)
                loss_t1 += loss_t1_part
                counter += 1

            # loss_last=self.laplacianLoss(simM,F)
            simMD, simML = self.getLaplacian(S)
            trval = np.trace(np.dot(np.dot(F_big.T, simML), F_big))
            gapval = self.norm(X_mask_big * (S - X_big))

            loss_t1 += self.rho * trval + self.eta * gapval

            if loss_t < loss_t1 and loss_t != 0:
                break
            # print(loss_t)
            print(loss_t1)
        return [Aprime_list, F_list, S]
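
The Aprime update inside the loop above is the standard soft-thresholding
(proximal) operator for an L1 penalty; a standalone sketch:

import numpy as np

def soft_threshold(z, t):
    # Prox of t*||.||_1: shrink magnitudes by t, clip at zero, keep the sign.
    return np.sign(z) * np.maximum(np.abs(z) - t, 0.0)

print(soft_threshold(np.array([-2.0, -0.3, 0.1, 1.5]), 0.5))
# roughly [-1.5, 0.0, 0.0, 1.0]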
Example 9
def test_fromnumeric():
    # Functions
    # 'alen', 'all', 'alltrue', 'amax', 'amin', 'any', 'argmax',
    # 'argmin', 'argpartition', 'argsort', 'around', 'choose', 'clip',
    # 'compress', 'cumprod', 'cumproduct', 'cumsum', 'diagonal', 'mean',
    # 'ndim', 'nonzero', 'partition', 'prod', 'product', 'ptp', 'put',
    # 'rank', 'ravel', 'repeat', 'reshape', 'resize', 'round_',
    # 'searchsorted', 'shape', 'size', 'sometrue', 'sort', 'squeeze',
    # 'std', 'sum', 'swapaxes', 'take', 'trace', 'transpose', 'var',
    a = [4, 3, 5, 7, 6, 8]
    indices = [0, 1, 4]
    np.take(a, indices)
    a = np.array(a)
    # a[indices]
    np.take(a, [[0, 1], [2, 3]])
    a = np.zeros((10, 2))
    b = a.T
    a = np.arange(6).reshape((3, 2))
    np.reshape(a, (2, 3))  # C-like index ordering
    np.reshape(np.ravel(a), (2, 3))  # equivalent to C ravel then C reshape
    np.reshape(a, (2, 3), order='F')  # Fortran-like index ordering
    np.reshape(np.ravel(a, order='F'), (2, 3), order='F')
    a = np.array([[1, 2, 3], [4, 5, 6]])
    np.reshape(a, 6)
    np.reshape(a, 6, order='F')
    np.reshape(a, (3, -1))  # the unspecified value is inferred to be 2
    choices = [[0, 1, 2, 3], [10, 11, 12, 13], [20, 21, 22, 23],
               [30, 31, 32, 33]]
    np.choose([2, 3, 1, 0], choices)
    np.choose([2, 4, 1, 0], choices, mode='clip')  # 4 goes to 3 (4-1)
    np.choose([2, 4, 1, 0], choices, mode='wrap')  # 4 goes to (4 mod 4)
    a = [[1, 0, 1], [0, 1, 0], [1, 0, 1]]
    choices = [-10, 10]
    np.choose(a, choices)
    a = np.array([0, 1]).reshape((2, 1, 1))
    c1 = np.array([1, 2, 3]).reshape((1, 3, 1))
    c2 = np.array([-1, -2, -3, -4, -5]).reshape((1, 1, 5))
    np.choose(a, (c1, c2))  # result is 2x3x5, res[0,:,:]=c1, res[1,:,:]=c2
    np.repeat(3, 4)
    x = np.array([[1, 2], [3, 4]])
    np.repeat(x, 2)
    np.repeat(x, 3, axis=1)
    np.repeat(x, [1, 2], axis=0)
    a = np.arange(5)
    np.put(a, [0, 2], [-44, -55])
    a = np.arange(5)
    np.put(a, 22, -5, mode='clip')
    x = np.array([[1, 2, 3]])
    np.swapaxes(x, 0, 1)
    x = np.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]])
    np.swapaxes(x, 0, 2)
    x = np.arange(4).reshape((2, 2))
    np.transpose(x)
    x = np.ones((1, 2, 3))
    np.transpose(x, (1, 0, 2)).shape
    a = np.array([3, 4, 2, 1])
    np.partition(a, 3)
    np.partition(a, (1, 3))
    x = np.array([3, 4, 2, 1])
    x[np.argpartition(x, 3)]
    x[np.argpartition(x, (1, 3))]
    x = [3, 4, 2, 1]
    np.array(x)[np.argpartition(x, 3)]
    a = np.array([[1, 4], [3, 1]])
    np.sort(a)  # sort along the last axis
    np.sort(a, axis=None)  # sort the flattened array
    np.sort(a, axis=0)  # sort along the first axis
    dtype = [('name', 'S10'), ('height', float), ('age', int)]
    values = [('Arthur', 1.8, 41), ('Lancelot', 1.9, 38), ('Galahad', 1.7, 38)]
    a = np.array(values, dtype=dtype)  # create a structured array
    np.sort(a, order='height')  # doctest: +SKIP
    np.sort(a, order=['age', 'height'])  # doctest: +SKIP
    x = np.array([3, 1, 2])
    np.argsort(x)
    x = np.array([[0, 3], [2, 2]])
    np.argsort(x, axis=0)
    np.argsort(x, axis=1)
    x = np.array([(1, 0), (0, 1)], dtype=[('x', '<i4'), ('y', '<i4')])
    np.argsort(x, order=('x', 'y'))
    np.argsort(x, order=('y', 'x'))
    a = np.arange(6).reshape(2, 3)
    np.argmax(a)
    np.argmax(a, axis=0)
    np.argmax(a, axis=1)
    b = np.arange(6)
    b[1] = 5
    np.argmax(b)  # Only the first occurrence is returned.
    a = np.arange(6).reshape(2, 3)
    np.argmin(a)
    np.argmin(a, axis=0)
    np.argmin(a, axis=1)
    b = np.arange(6)
    b[4] = 0
    np.argmin(b)  # Only the first occurrence is returned.
    np.searchsorted([1, 2, 3, 4, 5], 3)
    np.searchsorted([1, 2, 3, 4, 5], 3, side='right')
    np.searchsorted([1, 2, 3, 4, 5], [-10, 10, 2, 3])
    a = np.array([[0, 1], [2, 3]])
    np.resize(a, (2, 3))
    np.resize(a, (1, 4))
    np.resize(a, (2, 4))
    x = np.array([[[0], [1], [2]]])
    x.shape
    np.squeeze(x).shape
    np.squeeze(x, axis=(2, )).shape
    a = np.arange(4).reshape(2, 2)
    a = np.arange(8).reshape(2, 2, 2)
    a
    a[:, :, 0]  # main diagonal is [0 6]
    a[:, :, 1]  # main diagonal is [1 7]
    np.trace(np.eye(3))
    a = np.arange(8).reshape((2, 2, 2))
    np.trace(a)
    a = np.arange(24).reshape((2, 2, 2, 3))
    np.trace(a).shape
    x = np.array([[1, 2, 3], [4, 5, 6]])
    np.ravel(x)
    x.reshape(-1)
    np.ravel(x, order='F')
    np.ravel(x.T)
    np.ravel(x.T, order='A')
    a = np.arange(3)[::-1]
    a
    # a = np.arange(12).reshape(2,3,2).swapaxes(1,2); a
    x = np.eye(3)
    np.nonzero(x)
    x[np.nonzero(x)]
    np.transpose(np.nonzero(x))
    a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    a > 3
    np.nonzero(a > 3)
    np.shape(np.eye(3))
    np.shape([[1, 2]])
    np.shape([0])
    np.shape(0)
    a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
    np.shape(a)
    a.shape
    a = np.array([[1, 2], [3, 4], [5, 6]])
    np.compress([0, 1], a, axis=0)
    np.compress([False, True, True], a, axis=0)
    np.compress([False, True], a, axis=1)
    np.compress([False, True], a)
    a = np.arange(10)
    np.clip(a, 1, 8)
    np.clip(a, 3, 6, out=a)
    a = np.arange(10)
    np.clip(a, [3, 4, 1, 1, 1, 4, 4, 4, 4, 4], 8)
    np.sum([])
    np.sum([0.5, 1.5])
    np.sum([0.5, 0.7, 0.2, 1.5], dtype=np.int32)
    np.sum([[0, 1], [0, 5]])
    np.sum([[0, 1], [0, 5]], axis=0)
    np.sum([[0, 1], [0, 5]], axis=1)
    # np.ones(128, dtype=np.int8).sum(dtype=np.int8)
    # np.any([[True, False], [True, True]])
    # np.any([[True, False], [False, False]], axis=0)
    # np.any([-1, 0, 5])
    # np.any(np.nan)
    # np.all([[True,False],[True,True]])
    # np.all([[True,False],[True,True]], axis=0)
    # np.all([-1, 4, 5])
    # np.all([1.0, np.nan])
    a = np.array([[1, 2, 3], [4, 5, 6]])
    np.cumsum(a)
    np.cumsum(a, dtype=float)  # specifies type of output value(s)
    np.cumsum(a, axis=0)  # sum over rows for each of the 3 columns
    np.cumsum(a, axis=1)  # sum over columns for each of the 2 rows
    x = np.arange(4).reshape((2, 2))
    np.ptp(x, axis=0)
    np.ptp(x, axis=1)
    a = np.arange(4).reshape((2, 2))
    np.amax(a)  # Maximum of the flattened array
    np.amax(a, axis=0)  # Maxima along the first axis
    np.amax(a, axis=1)  # Maxima along the second axis
    b = np.arange(5, dtype=float)
    # b[2] = np.NaN
    np.amax(b)
    np.nanmax(b)
    a = np.arange(4).reshape((2, 2))
    np.amin(a)  # Minimum of the flattened array
    np.amin(a, axis=0)  # Minima along the first axis
    np.amin(a, axis=1)  # Minima along the second axis
    b = np.arange(5, dtype=float)
    # b[2] = np.NaN
    np.amin(b)
    np.nanmin(b)
    a = np.zeros((7, 4, 5))
    a.shape[0]
    np.alen(a)
    x = np.array([536870910, 536870910, 536870910, 536870910])
    np.prod(x)  #random
    np.prod([])
    np.prod([1., 2.])
    np.prod([[1., 2.], [3., 4.]])
    np.prod([[1., 2.], [3., 4.]], axis=1)
    x = np.array([1, 2, 3], dtype=np.uint8)
    # np.prod(x).dtype == np.uint
    x = np.array([1, 2, 3], dtype=np.int8)
    # np.prod(x).dtype == np.int
    a = np.array([1, 2, 3])
    np.cumprod(a)  # intermediate results 1, 1*2
    a = np.array([[1, 2, 3], [4, 5, 6]])
    np.cumprod(a, dtype=float)  # specify type of output
    np.cumprod(a, axis=0)
    np.cumprod(a, axis=1)
    np.ndim([[1, 2, 3], [4, 5, 6]])
    np.ndim(np.array([[1, 2, 3], [4, 5, 6]]))
    np.ndim(1)
    a = np.array([[1, 2, 3], [4, 5, 6]])
    np.size(a)
    np.size(a, 1)
    np.size(a, 0)
    np.around([0.37, 1.64])
    np.around([0.37, 1.64], decimals=1)
    np.around([.5, 1.5, 2.5, 3.5, 4.5])  # rounds to nearest even value
    np.around([1, 2, 3, 11], decimals=1)  # ndarray of ints is returned
    np.around([1, 2, 3, 11], decimals=-1)
    a = np.array([[1, 2], [3, 4]])
    np.mean(a)
    np.mean(a, axis=0)
    np.mean(a, axis=1)
    a = np.zeros((2, 512 * 512), dtype=np.float32)
    a[0, :] = 1.0
    a[1, :] = 0.1
    np.mean(a)
    np.mean(a, dtype=np.float64)
    a = np.array([[1, 2], [3, 4]])
    np.std(a)
    np.std(a, axis=0)
    np.std(a, axis=1)
    a = np.zeros((2, 512 * 512), dtype=np.float32)
    a[0, :] = 1.0
    a[1, :] = 0.1
    np.std(a)
    np.std(a, dtype=np.float64)
    a = np.array([[1, 2], [3, 4]])
    np.var(a)
    np.var(a, axis=0)
    np.var(a, axis=1)
    a = np.zeros((2, 512 * 512), dtype=np.float32)
    a[0, :] = 1.0
    a[1, :] = 0.1
    np.var(a)
    np.var(a, dtype=np.float64)
Example 10
def test_numeric():
    # 'newaxis', 'ndarray', 'flatiter', 'nditer', 'nested_iters', 'ufunc',
    # 'arange', 'array', 'zeros', 'count_nonzero', 'empty', 'broadcast',
    # 'dtype', 'fromstring', 'fromfile', 'frombuffer', 'int_asbuffer',
    # 'where', 'argwhere', 'copyto', 'concatenate', 'fastCopyAndTranspose',
    # 'lexsort', 'set_numeric_ops', 'can_cast', 'promote_types',
    # 'min_scalar_type', 'result_type', 'asarray', 'asanyarray',
    # 'ascontiguousarray', 'asfortranarray', 'isfortran', 'empty_like',
    # 'zeros_like', 'ones_like', 'correlate', 'convolve', 'inner', 'dot',
    # 'einsum', 'outer', 'vdot', 'alterdot', 'restoredot', 'roll',
    # 'rollaxis', 'moveaxis', 'cross', 'tensordot', 'array2string',
    # 'get_printoptions', 'set_printoptions', 'array_repr', 'array_str',
    # 'set_string_function', 'little_endian', 'require', 'fromiter',
    # 'array_equal', 'array_equiv', 'indices', 'fromfunction', 'isclose', 'load',
    # 'loads', 'isscalar', 'binary_repr', 'base_repr', 'ones', 'identity',
    # 'allclose', 'compare_chararrays', 'putmask', 'seterr', 'geterr',
    # 'setbufsize', 'getbufsize', 'seterrcall', 'geterrcall', 'errstate',
    # 'flatnonzero', 'Inf', 'inf', 'infty', 'Infinity', 'nan', 'NaN', 'False_',
    # 'True_', 'bitwise_not', 'full', 'full_like', 'matmul'
    x = np.arange(6)
    x = x.reshape((2, 3))
    np.zeros_like(x)
    y = np.arange(3, dtype=float)
    np.zeros_like(y)
    np.ones(5)
    np.ones((5, ), dtype=int)
    np.ones((2, 1))
    s = (2, 2)
    np.ones(s)
    x = np.arange(6)
    x = x.reshape((2, 3))
    np.ones_like(x)
    y = np.arange(3, dtype=float)
    np.ones_like(y)
    np.full((2, 2), np.inf)
    x = np.arange(6, dtype=int)
    np.full_like(x, 1)
    np.full_like(x, 0.1)
    np.full_like(y, 0.1)
    np.count_nonzero(np.eye(4))
    np.count_nonzero([[0, 1, 7, 0, 0], [3, 0, 0, 2, 19]])
    np.count_nonzero([[0, 1, 7, 0, 0], [3, 0, 0, 2, 19]], axis=0)
    np.count_nonzero([[0, 1, 7, 0, 0], [3, 0, 0, 2, 19]], axis=1)
    a = [1, 2]
    np.asarray(a)
    a = np.array([1, 2])
    np.asarray(a) is a
    a = np.array([1, 2], dtype=np.float32)
    np.asarray(a, dtype=np.float32) is a
    np.asarray(a, dtype=np.float64) is a
    np.asarray(a) is a
    np.asanyarray(a) is a
    a = [1, 2]
    np.asanyarray(a)
    np.asanyarray(a) is a
    x = np.arange(6).reshape(2, 3)
    np.ascontiguousarray(x, dtype=np.float32)
    x = np.arange(6).reshape(2, 3)
    y = np.asfortranarray(x)
    x = np.arange(6).reshape(2, 3)
    y = np.require(x, dtype=np.float32, requirements=['A', 'O', 'W', 'F'])
    a = np.array([[1, 2, 3], [4, 5, 6]], order='C')
    np.isfortran(a)
    b = np.array([[1, 2, 3], [4, 5, 6]], order='F')
    np.isfortran(b)
    a = np.array([[1, 2, 3], [4, 5, 6]], order='C')
    np.isfortran(a)
    b = a.T
    np.isfortran(b)
    np.isfortran(np.array([1, 2], order='F'))
    x = np.arange(6).reshape(2, 3)
    np.argwhere(x > 1)
    x = np.arange(-2, 3)
    np.flatnonzero(x)
    np.correlate([1, 2, 3], [0, 1, 0.5])
    np.correlate([1, 2, 3], [0, 1, 0.5], "same")
    np.correlate([1, 2, 3], [0, 1, 0.5], "full")
    np.correlate([1 + 1j, 2, 3 - 1j], [0, 1, 0.5j], 'full')
    np.correlate([0, 1, 0.5j], [1 + 1j, 2, 3 - 1j], 'full')
    np.convolve([1, 2, 3], [0, 1, 0.5])
    np.convolve([1, 2, 3], [0, 1, 0.5], 'same')
    np.convolve([1, 2, 3], [0, 1, 0.5], 'valid')
    rl = np.outer(np.ones((5, )), np.linspace(-2, 2, 5))
    # im = np.outer(1j*np.linspace(2, -2, 5), np.ones((5,)))
    # grid = rl + im
    x = np.array(['a', 'b', 'c'], dtype=object)
    np.outer(x, [1, 2, 3])
    a = np.arange(60.).reshape(3, 4, 5)
    b = np.arange(24.).reshape(4, 3, 2)
    c = np.tensordot(a, b, axes=([1, 0], [0, 1]))
    c.shape
    # A slower but equivalent way of computing the same...
    d = np.zeros((5, 2))
    a = np.array(range(1, 9))
    A = np.array(('a', 'b', 'c', 'd'), dtype=object)
    x = np.arange(10)
    np.roll(x, 2)
    x2 = np.reshape(x, (2, 5))
    np.roll(x2, 1)
    np.roll(x2, 1, axis=0)
    np.roll(x2, 1, axis=1)
    a = np.ones((3, 4, 5, 6))
    np.rollaxis(a, 3, 1).shape
    np.rollaxis(a, 2).shape
    np.rollaxis(a, 1, 4).shape
    x = np.zeros((3, 4, 5))
    np.moveaxis(x, 0, -1).shape
    np.moveaxis(x, -1, 0).shape
    np.transpose(x).shape
    np.moveaxis(x, [0, 1], [-1, -2]).shape
    np.moveaxis(x, [0, 1, 2], [-1, -2, -3]).shape
    x = [1, 2, 3]
    y = [4, 5, 6]
    np.cross(x, y)
    x = [1, 2]
    y = [4, 5, 6]
    np.cross(x, y)
    x = [1, 2, 0]
    y = [4, 5, 6]
    np.cross(x, y)
    x = [1, 2]
    y = [4, 5]
    np.cross(x, y)
    x = np.array([[1, 2, 3], [4, 5, 6]])
    y = np.array([[4, 5, 6], [1, 2, 3]])
    np.cross(x, y)
    np.cross(x, y, axisc=0)
    x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    y = np.array([[7, 8, 9], [4, 5, 6], [1, 2, 3]])
    np.cross(x, y)
    np.cross(x, y, axisa=0, axisb=0)
    # np.array_repr(np.array([1,2]))
    # np.array_repr(np.ma.array([0.]))
    # np.array_repr(np.array([], np.int32))
    x = np.array([1e-6, 4e-7, 2, 3])
    # np.array_repr(x, precision=6, suppress_small=True)
    # np.array_str(np.arange(3))
    a = np.arange(10)
    x = np.arange(4)
    np.set_string_function(lambda x: 'random', repr=False)
    grid = np.indices((2, 3))
    grid.shape
    grid[0]  # row indices
    grid[1]  # column indices
    x = np.arange(20).reshape(5, 4)
    row, col = np.indices((2, 3))
    x[row, col]
    np.fromfunction(lambda i, j: i == j, (3, 3), dtype=int)
    np.fromfunction(lambda i, j: i + j, (3, 3), dtype=int)
    np.isscalar(3.1)
    np.isscalar([3.1])
    np.isscalar(False)
    # np.binary_repr(3)
    # np.binary_repr(-3)
    # np.binary_repr(3, width=4)
    # np.binary_repr(-3, width=3)
    # np.binary_repr(-3, width=5)
    # np.base_repr(5)
    # np.base_repr(6, 5)
    # np.base_repr(7, base=5, padding=3)
    # np.base_repr(10, base=16)
    # np.base_repr(32, base=16)
    np.identity(3)
    np.allclose([1e10, 1e-7], [1.00001e10, 1e-8])
    np.allclose([1e10, 1e-8], [1.00001e10, 1e-9])
    np.allclose([1e10, 1e-8], [1.0001e10, 1e-9])
    # np.allclose([1.0, np.nan], [1.0, np.nan])
    # np.allclose([1.0, np.nan], [1.0, np.nan], equal_nan=True)
    np.isclose([1e10, 1e-7], [1.00001e10, 1e-8])
    np.isclose([1e10, 1e-8], [1.00001e10, 1e-9])
    np.isclose([1e10, 1e-8], [1.0001e10, 1e-9])
    # np.isclose([1.0, np.nan], [1.0, np.nan])
    # np.isclose([1.0, np.nan], [1.0, np.nan], equal_nan=True)
    np.array_equal([1, 2], [1, 2])
    np.array_equal(np.array([1, 2]), np.array([1, 2]))
    np.array_equal([1, 2], [1, 2, 3])
    np.array_equal([1, 2], [1, 4])
    np.array_equiv([1, 2], [1, 2])
    np.array_equiv([1, 2], [1, 3])
    np.array_equiv([1, 2], [[1, 2], [1, 2]])
    np.array_equiv([1, 2], [[1, 2, 1, 2], [1, 2, 1, 2]])
    np.array_equiv([1, 2], [[1, 2], [1, 3]])
Example 11
    NDArrayIter(data[0], data[1]),
    NDArrayIter(data[0], data[1]),
  )
  solver.init()
else:
  model = builder.Model(network_in_network, 'softmax', (3, 32, 32,))
  for arg, setting in model.param_configs.items():
    print(arg)
    shape = setting['shape']
    if 'weight' in arg:
      if len(shape) == 2:
        n = shape[0]
      elif len(shape) == 4:
        n = np.prod(shape[1:])
      else:
        raise Exception()
      std = (2 / float(n)) ** 0.5
      model.params[arg] = np.random.normal(0, std, shape)
    elif 'bias' in arg:
      model.params[arg] = np.zeros(shape)
    elif 'lower' in arg:
      model.params[arg] = np.zeros(shape)
    elif 'upper' in arg:
      model.params[arg] = np.ones(shape)
    else:
      raise Exception()

parameters = {key : value.asnumpy() for key, value in model.params.items()}
output_file = 'NIN-%s-%s-initial-parameters' % (activation, ini_mode)
pickle.dump(parameters, open(output_file, 'wb'))
Example 12
def activations(weights, *args):
    cat_state = np.concatenate(args + (np.ones((args[0].shape[0], 1)), ),
                               axis=1)
    return np.dot(cat_state, weights)
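
A quick shape check for the bias-append trick above (toy sizes; assumes numpy
is imported as np):

state = np.zeros((4, 3))          # batch of 4 states of dimension 3
weights = np.ones((3 + 1, 2))     # one extra row multiplies the appended ones column
out = activations(weights, state)
print(out.shape, out[0])          # (4, 2) [1. 1.]: only the bias row contributes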
Example 13
DEVICE = 0
DR_INTERVAL = 10
shapes = (1024, ) * 4 + (10, )

activation = getattr(builder, ACTIVATION)
set_context(gpu(DEVICE))

storage = {}
chd_list = []
mlp = builder.Sequential()
for i, shape in enumerate(shapes[:-1]):
    mlp.append(builder.Affine(shape))
    mlp.append(builder.Export('affine%d' % i, storage))
    mlp.append(activation())
    mlp.append(builder.Export('activation%d' % i, storage))
    mlp.append(ChannelDivision(np.ones(shape)))
    chd_list.append(mlp[-1])
    mlp.append(builder.Export('chd%d' % i, storage))

mlp.append(builder.Affine(shapes[-1]))
model = builder.Model(mlp, 'softmax', (3072, ))

batch_size = 100
batches = len(data[0]) // batch_size
batch_index = 0

iterations = 25000
interval = 10

settings = {'learning_rate': 0.05}
initialize(model)
Example 14
from data_iter import SyntheticData
import logging
import numpy
import mxnet as mx
import minpy.numpy as np
from minpy.core import grad, grad_and_loss

a = mx.nd.array([1, 2, 3])
b = mx.nd.array([[1, 2, 3], [4, 5, 6]])
c = np.array([1, 2, 3])
d = np.array([[1, 2, 3], [4, 5, 6]])
c
a.context
a
a.size
a.dtype
mx.nd.array(numpy.array([1, 2, 3]))
type(numpy.array([1, 2, 3]))
type(c)
np.ones((2, 3))
np.ones([2, 3])
mx.nd.ones([2, 3])
mx.nd.ones([2, 3]).asnumpy()


def foo(x):
    return (5 * (x**2) + 3 * x + 2)


print(foo(4))
d_foo = grad(foo)
d_l_foo = grad_and_loss(foo)
d_foo(4)
d_l_foo(4)
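# foo(4) = 5*16 + 3*4 + 2 = 94; the derivative 10*x + 3 gives d_foo(4) = 43,
# and grad_and_loss returns both the gradient (43) and the loss value (94).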
Example 15
            h = self._nonlinear(h)
            self.previous_h.append(h)

        Y0 = layers.relu(layers.affine(h, WY0, bias_Y0))
        Y = layers.affine(Y0, WY, bias_Y)
        return Y

    def loss(self, prediction, Y):
        return layers.softmax_loss(prediction, Y)


model = RNN()
# initialize model
for key, value in model.param_configs.items():
    model.params[key] = getattr(initializers,
                                value['init_rule'])(value['shape'],
                                                    value.get(
                                                        'init_config', {}))

N = 64
X, Y = np.ones((N, 11, 128)), np.ones((N, ))


def loss_function(X, Y, *args):
    predictions = model.forward(X, 'train')
    return model.loss(predictions, Y)


gl = gradient_loss(loss_function, range(2, len(model.params) + 2))
g, loss = gl(X, Y, *list(model.params.values()))
Example 16
def test_ufunc():
    x = np.array([-1.2, 1.2])
    np.absolute(x)
    np.absolute(1.2 + 1j)
    x = np.linspace(start=-10, stop=10, num=101)
    np.add(1.0, 4.0)
    x1 = np.arange(9.0).reshape((3, 3))
    x2 = np.arange(3.0)
    np.add(x1, x2)
    np.arccos([1, -1])
    x = np.linspace(-1, 1, num=100)
    np.arccosh([np.e, 10.0])
    np.arccosh(1)
    np.arcsin(0)
    np.arcsinh(np.array([np.e, 10.0]))
    np.arctan([0, 1])
    np.pi / 4
    x = np.linspace(-10, 10)
    x = np.array([-1, +1, +1, -1])
    y = np.array([-1, -1, +1, +1])
    np.arctan2(y, x) * 180 / np.pi
    np.arctan2([1., -1.], [0., 0.])
    np.arctan2([0., 0., np.inf], [+0., -0., np.inf])
    np.arctanh([0, -0.5])
    np.bitwise_and(13, 17)
    np.bitwise_and(14, 13)
    # np.binary_repr(12)    return str
    np.bitwise_and([14, 3], 13)
    np.bitwise_and([11, 7], [4, 25])
    np.bitwise_and(np.array([2, 5, 255]), np.array([3, 14, 16]))
    np.bitwise_and([True, True], [False, True])
    np.bitwise_or(13, 16)
    # np.binary_repr(29)
    np.bitwise_or(32, 2)
    np.bitwise_or([33, 4], 1)
    np.bitwise_or([33, 4], [1, 2])
    np.bitwise_or(np.array([2, 5, 255]), np.array([4, 4, 4]))
    # np.array([2, 5, 255]) | np.array([4, 4, 4])
    np.bitwise_or(np.array([2, 5, 255, 2147483647], dtype=np.int32),
                  np.array([4, 4, 4, 2147483647], dtype=np.int32))
    np.bitwise_or([True, True], [False, True])
    np.bitwise_xor(13, 17)
    # np.binary_repr(28)
    np.bitwise_xor(31, 5)
    np.bitwise_xor([31, 3], 5)
    np.bitwise_xor([31, 3], [5, 6])
    np.bitwise_xor([True, True], [False, True])
    a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
    np.ceil(a)
    a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
    np.trunc(a)
    np.cos(np.array([0, np.pi / 2, np.pi]))
    np.cosh(0)
    x = np.linspace(-4, 4, 1000)
    rad = np.arange(12.) * np.pi / 6
    np.degrees(rad)
    out = np.zeros((rad.shape))
    r = np.degrees(rad, out)
    # np.all(r == out) return bool
    np.rad2deg(np.pi / 2)
    np.divide(2.0, 4.0)
    x1 = np.arange(9.0).reshape((3, 3))
    x2 = np.arange(3.0)
    np.divide(2, 4)
    np.divide(2, 4.)
    np.equal([0, 1, 3], np.arange(3))
    np.equal(1, np.ones(1))
    x = np.linspace(-2 * np.pi, 2 * np.pi, 100)
    np.exp2([2, 3])
    np.expm1(1e-10)
    np.exp(1e-10) - 1
    np.fabs(-1)
    np.fabs([-1.2, 1.2])
    a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
    np.floor(a)
    np.floor_divide(7, 3)
    np.floor_divide([1., 2., 3., 4.], 2.5)
    np.fmod([-3, -2, -1, 1, 2, 3], 2)
    np.remainder([-3, -2, -1, 1, 2, 3], 2)
    np.fmod([5, 3], [2, 2.])
    a = np.arange(-3, 3).reshape(3, 2)
    np.fmod(a, [2, 2])
    np.greater([4, 2], [2, 2])
    a = np.array([4, 2])
    b = np.array([2, 2])
    a > b
    np.greater_equal([4, 2, 1], [2, 2, 2])
    np.hypot(3 * np.ones((3, 3)), 4 * np.ones((3, 3)))
    np.hypot(3 * np.ones((3, 3)), [4])
    np.bitwise_not is np.invert
    np.invert(np.array([13], dtype=np.uint8))
    # np.binary_repr(242, width=8)
    np.invert(np.array([13], dtype=np.uint16))
    np.invert(np.array([13], dtype=np.int8))
    # np.binary_repr(-14, width=8)
    np.invert(np.array([True, False]))
    # np.isfinite(1)
    # np.isfinite(0)
    # np.isfinite(np.nan)
    # np.isfinite(np.inf)
    # np.isfinite(np.NINF)
    x = np.array([-np.inf, 0., np.inf])
    y = np.array([2, 2, 2])
    np.isfinite(x, y)
    # np.isinf(np.inf)
    # np.isinf(np.nan)
    # np.isinf(np.NINF)
    # np.isinf([np.inf, -np.inf, 1.0, np.nan])
    x = np.array([-np.inf, 0., np.inf])
    y = np.array([2, 2, 2])
    # np.isinf(x, y)
    # np.isnan(np.nan)
    # np.isnan(np.inf)
    # np.binary_repr(5)
    np.left_shift(5, 2)
    # np.binary_repr(20)
    np.left_shift(5, [1, 2, 3])
    np.less([1, 2], [2, 2])
    np.less_equal([4, 2, 1], [2, 2, 2])
    x = np.array([0, 1, 2, 2**4])
    xi = np.array([0 + 1.j, 1, 2 + 0.j, 4.j])
    np.log2(xi)
    prob1 = np.log(1e-50)
    prob2 = np.log(2.5e-50)
    prob12 = np.logaddexp(prob1, prob2)
    prob12
    np.exp(prob12)
    prob1 = np.log2(1e-50)
    prob2 = np.log2(2.5e-50)
    prob12 = np.logaddexp2(prob1, prob2)
    prob1, prob2, prob12
    2**prob12
    np.log1p(1e-99)
    np.log(1 + 1e-99)
    # np.logical_and(True, False)
    # np.logical_and([True, False], [False, False])
    x = np.arange(5)
    # np.logical_and(x>1, x<4)
    # np.logical_not(3)
    # np.logical_not([True, False, 0, 1])
    x = np.arange(5)
    # np.logical_not(x<3)
    # np.logical_or(True, False)
    # np.logical_or([True, False], [False, False])
    x = np.arange(5)
    # np.logical_or(x < 1, x > 3)
    # np.logical_xor(True, False)
    # np.logical_xor([True, True, False, False], [True, False, True, False])
    x = np.arange(5)
    # np.logical_xor(x < 1, x > 3)
    # np.logical_xor(0, np.eye(2))
    np.maximum([2, 3, 4], [1, 5, 2])
    # np.maximum([np.nan, 0, np.nan], [0, np.nan, np.nan])
    # np.maximum(np.Inf, 1)
    np.minimum([2, 3, 4], [1, 5, 2])
    # np.minimum([np.nan, 0, np.nan],[0, np.nan, np.nan])
    # np.minimum(-np.Inf, 1)
    np.fmax([2, 3, 4], [1, 5, 2])
    np.fmax(np.eye(2), [0.5, 2])
    # np.fmax([np.nan, 0, np.nan],[0, np.nan, np.nan])
    np.fmin([2, 3, 4], [1, 5, 2])
    np.fmin(np.eye(2), [0.5, 2])
    # np.fmin([np.nan, 0, np.nan],[0, np.nan, np.nan])
    np.modf([0, 3.5])
    np.modf(-0.5)
    np.multiply(2.0, 4.0)
    x1 = np.arange(9.0).reshape((3, 3))
    x2 = np.arange(3.0)
    np.multiply(x1, x2)
    np.negative([1., -1.])
    np.not_equal([1., 2.], [1., 3.])
    np.not_equal([1, 2], [[1, 3], [1, 4]])
    x1 = range(6)
    np.power(x1, 3)
    x2 = [1.0, 2.0, 3.0, 3.0, 2.0, 1.0]
    np.power(x1, x2)
    x2 = np.array([[1, 2, 3, 3, 2, 1], [1, 2, 3, 3, 2, 1]])
    np.power(x1, x2)
    deg = np.arange(12.) * 30.
    np.radians(deg)
    out = np.zeros((deg.shape))
    ret = np.radians(deg, out)
    ret is out
    np.deg2rad(180)
    np.reciprocal(2.)
    np.reciprocal([1, 2., 3.33])
    np.remainder([4, 7], [2, 3])
    np.remainder(np.arange(7), 5)
    # np.binary_repr(10)
    np.right_shift(10, 1)
    # np.binary_repr(5)
    np.right_shift(10, [1, 2, 3])
    a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
    np.rint(a)
    np.sign([-5., 4.5])
    np.sign(0)
    # np.sign(5-2j)
    # np.signbit(-1.2)
    np.signbit(np.array([1, -2.3, 2.1]))
    np.copysign(1.3, -1)
    np.copysign([-1, 0, 1], -1.1)
    np.copysign([-1, 0, 1], np.arange(3) - 1)
    np.sin(np.pi / 2.)
    np.sin(np.array((0., 30., 45., 60., 90.)) * np.pi / 180.)
    x = np.linspace(-np.pi, np.pi, 201)
    np.sinh(0)
    # np.sinh(np.pi*1j/2)
    np.sqrt([1, 4, 9])
    np.sqrt([4, -1, -3 + 4J])
    np.cbrt([1, 8, 27])
    np.square([-1j, 1])
    np.subtract(1.0, 4.0)
    x1 = np.arange(9.0).reshape((3, 3))
    x2 = np.arange(3.0)
    np.subtract(x1, x2)
    np.tan(np.array([-np.pi, np.pi / 2, np.pi]))
    np.tanh((0, np.pi * 1j, np.pi * 1j / 2))
    x = np.arange(5)
    np.true_divide(x, 4)
    x = np.arange(9)
    y1, y2 = np.frexp(x)
    y1 * 2**y2
    np.ldexp(5, np.arange(4))
    x = np.arange(6)
    np.ldexp(*np.frexp(x))
Example 17
    def __init__(self,
                 hidden_dims,
                 input_dim=3 * 32 * 32,
                 num_classes=10,
                 dropout=0,
                 use_batchnorm=False,
                 reg=0.0,
                 weight_scale=1e-2,
                 seed=None,
                 dtype=py_np.float64,
                 conv_mode='lazy'):
        """
    Initialize a new FullyConnectedNet.
    
    Inputs:
    - hidden_dims: A list of integers giving the size of each hidden layer.
    - input_dim: An integer giving the size of the input.
    - num_classes: An integer giving the number of classes to classify.
    - dropout: Scalar between 0 and 1 giving dropout strength. If dropout=0 then
      the network should not use dropout at all.
    - use_batchnorm: Whether or not the network should use batch normalization.
    - reg: Scalar giving L2 regularization strength.
    - weight_scale: Scalar giving the standard deviation for random
      initialization of the weights.
    - seed: If not None, then pass this random seed to the dropout layers. This
      will make the dropout layers deteriminstic so we can gradient check the
      model.
    """
        super(FullyConnectedNet, self).__init__(conv_mode)
        self.use_batchnorm = use_batchnorm
        self.use_dropout = dropout > 0
        self.reg = reg
        self.num_layers = 1 + len(hidden_dims)
        self.params = {}

        # Define parameter names for a given layer index
        self.w_name = lambda l: 'W' + str(l)
        self.b_name = lambda l: 'b' + str(l)
        self.bn_ga_name = lambda l: 'bn_ga' + str(l)
        self.bn_bt_name = lambda l: 'bn_bt' + str(l)

        for l in range(self.num_layers):
            if l == 0:
                input_d = input_dim
            else:
                input_d = hidden_dims[l - 1]

            if l < self.num_layers - 1:
                out_d = hidden_dims[l]
            else:
                out_d = num_classes

            self.params[self.w_name(l)] = random.randn(input_d,
                                                       out_d) * weight_scale
            self.params[self.b_name(l)] = np.zeros((out_d))
            if l < self.num_layers - 1 and self.use_batchnorm:
                self.params[self.bn_ga_name(l)] = np.ones((out_d))
                self.params[self.bn_bt_name(l)] = np.zeros((out_d))

        self.param_keys = self.params.keys()

        # When using dropout we need to pass a dropout_param dictionary to each
        # dropout layer so that the layer knows the dropout probability and the mode
        # (train / test). You can pass the same dropout_param to each dropout layer.
        self.dropout_param = {}
        if self.use_dropout:
            self.dropout_param = {'mode': 'train', 'p': dropout}
            if seed is not None:
                self.dropout_param['seed'] = seed

        # With batch normalization we need to keep track of running means and
        # variances, so we need to pass a special bn_param object to each batch
        # normalization layer.
        self.bn_params = []
        if self.use_batchnorm:
            self.bn_params = [{
                'mode': 'train'
            } for i in range(self.num_layers - 1)]

        # Build key's index in loss func's arglist
        self.key_args_index = {}
        for i, key in enumerate(self.param_keys):
            # data, targets would be the first two elments in arglist
            self.key_args_index[key] = self.data_target_cnt + i

        # Init Key to index in loss_function args
        self.w_idx = self.wrap_param_idx(self.w_name)
        self.b_idx = self.wrap_param_idx(self.b_name)
        self.bn_ga_idx = self.wrap_param_idx(self.bn_ga_name)
        self.bn_bt_idx = self.wrap_param_idx(self.bn_bt_name)
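
A hypothetical construction call, just to show the expected arguments (the
class and its helpers are as defined above):

# Three-layer net on CIFAR-10-sized inputs with dropout and batch
# normalization; this creates W0/b0, W1/b1, W2/b2 plus bn_ga0/bn_bt0 and
# bn_ga1/bn_bt1 in net.params.
net = FullyConnectedNet(hidden_dims=[100, 100],
                        num_classes=10,
                        dropout=0.5,
                        use_batchnorm=True,
                        weight_scale=5e-2,
                        seed=0)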
Example 18
            molecularTrajectory.positionZ[-1, :]
        ]

        #reaction simulation
        if k_Matrix[0][1] > 0:
            reactionTrajectory = reaction_3state(dt, totalTime, moleculeNum,
                                                 initState)
            reactionTrajectory.react(k_Matrix, QA, QB, QC)
            initState = reactionTrajectory.state[-1, :]
            fluoreDonor = fluorescence_wzq(dt, Qfluor)
            fluoreDonor.collectPhoton(molecularTrajectory.positionX,
                                      molecularTrajectory.positionY,
                                      molecularTrajectory.positionZ,
                                      reactionTrajectory.state)
        else:
            state = np.ones([int(totalTime / dt), moleculeNum])
            fluoreDonor = fluorescence_wzq(dt)
            fluoreDonor.collectPhoton(molecularTrajectory.positionX,
                                      molecularTrajectory.positionY,
                                      molecularTrajectory.positionZ, state)

        #collection fluorescence
        #Donor channel
        ##Acceptor channel
        #fluoreAcceptor = fluorescence(dt, Qfluor = Qacceptor)
        #fluoreAcceptor.collectPhoton(molecularTrajectory.positionX, molecularTrajectory.positionY, molecularTrajectory.positionZ)

        with open(path + '/donor_' + str(fileNum) + '.txt', 'a') as f:
            np.savetxt(f, fluoreDonor.trace[:-10000], fmt='%.3f')

        with open(path + '/donor_' + str(fileNum) + 'nr.txt', 'a') as f: