Code Example #1
def train(net_shapes, net_params, optimizer, utility, pool):
    # passing just a seed (instead of the whole noise matrix) to workers saves time
    noise_seed = np.random.randint(
        0, 2**32 - 1, size=N_KID,
        dtype=np.uint32)
    # mirrored sampling: interleave each seed twice, one per +/- perturbation
    noise_seed = np.concatenate([noise_seed, noise_seed
                                 ]).reshape([2, N_KID]).T.reshape([N_KID * 2])

    # serially evaluate each kid with the GPU (`pool` is unused in this serial variant)
    rewards = []
    for k_id in range(N_KID * 2):
        reward = get_reward(net_shapes, net_params, env, CONFIG['ep_max_step'],
                            CONFIG['continuous_a'], [noise_seed[k_id], k_id])
        rewards.append(reward)
    rewards = np.array(rewards)
    kids_rank = np.argsort(rewards)[::-1]  # rank kid id by reward

    cumulative_update = np.zeros_like(net_params)  # initialize update values
    for ui, k_id in enumerate(kids_rank):
        np.random.seed(noise_seed[k_id])  # reconstruct the noise from its seed
        # sign() flips the perturbation for the mirrored twin of each kid
        cumulative_update += utility[ui] * sign(k_id) * np.random.randn(
            net_params.size)

    gradients = optimizer.get_gradients(cumulative_update /
                                        (2 * N_KID * SIGMA))
    return net_params + gradients, rewards
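
The concatenate/reshape above is just an interleaved duplication of the seed vector; a quick standalone check (with a hypothetical N_KID) that it matches np.repeat:

import numpy as np

N_KID = 3  # hypothetical population size, for illustration only
seeds = np.random.randint(0, 2**32 - 1, size=N_KID, dtype=np.uint32)
mirrored = np.concatenate([seeds, seeds]).reshape([2, N_KID]).T.reshape([N_KID * 2])
assert (mirrored == seeds.repeat(2)).all()  # [s0, s0, s1, s1, s2, s2]
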
Code Example #2
def studentSize(teacherSize,
                studentScaling=STUDENT_SCALING,
                scaleLayers=False):
    # builds the arrays of layer sizes for the students by scaling the sizes
    # of the teacher network; with scaleLayers the layer count is scaled too
    if scaleLayers:
        layers = [int(x * teacherSize.shape[0]) for x in studentScaling]
    else:
        layers = [teacherSize.shape[0] for _ in studentScaling]
    sizes = []
    # for each student
    for i in range(len(layers)):
        # if the scaling is one return the same size as teacher
        if studentScaling[i] == 1:
            sizes.append(teacherSize)
        else:
            # scale max number of units added to each layer
            maxU = int(MAX_UNITS * studentScaling[i])
            # add random number of units to the layers in teacher
            newSize = teacherSize + np.random.randint(
                1, maxU, size=teacherSize.shape)
            if scaleLayers:
                # append extra layers with random widths to reach the scaled layer count
                newSize = np.concatenate(
                    (newSize,
                     np.random.randint(MIN_UNITS, maxU,
                                       size=layers[i] - teacherSize.shape[0])))
            sizes.append(newSize)
            # force the input and output widths to match the teacher's
            sizes[i][0] = teacherSize[0]
            sizes[i][-1] = teacherSize[-1]
    return sizes
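
A hypothetical call, with illustrative stand-ins for the module constants (MIN_UNITS, MAX_UNITS and STUDENT_SCALING are assumptions, not values from the source):

import numpy as np

MIN_UNITS, MAX_UNITS = 8, 64            # hypothetical constants
STUDENT_SCALING = [1, 1.5, 2]           # hypothetical scaling factors

teacher = np.array([784, 128, 64, 10])  # hypothetical teacher layer widths
students = studentSize(teacher, STUDENT_SCALING, scaleLayers=True)
# every student keeps the teacher's input (784) and output (10) widths
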
Code Example #3
    def concatenateMatrixInList(self, matrixList, dim, axis):
        # start from an empty array whose fixed dimension matches the blocks,
        # then concatenate each matrix in the list onto it
        if axis == 0:
            E = np.empty([0, dim])
        elif axis == 1:
            E = np.empty([dim, 0])
        for matrix in matrixList:
            E = np.concatenate((E, matrix), axis=axis)

        return E
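
A usage sketch, assuming obj is an instance of the class that defines this method:

import numpy as np

blocks = [np.ones((2, 4)), np.zeros((2, 4))]
stacked = obj.concatenateMatrixInList(blocks, dim=4, axis=0)  # shape (4, 4)
wide = obj.concatenateMatrixInList([np.ones((3, 2))] * 2, dim=3, axis=1)  # shape (3, 4)
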
Code Example #4
def build_net():
    def linear(n_in, n_out):  # network linear layer
        w = np.random.randn(n_in * n_out) * .1
        b = np.random.randn(n_out) * .1
        # return the layer shape plus one flat vector: weights then biases
        return (n_in, n_out), np.concatenate([w, b])

    s0, p0 = linear(CONFIG['n_feature'], 30)
    s1, p1 = linear(30, 20)
    s2, p2 = linear(20, CONFIG['n_action'])
    return [s0, s1, s2], np.concatenate([p0, p1, p2])
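
A sanity check of the flat parameter layout, assuming a hypothetical CONFIG defined in the same module:

CONFIG = {'n_feature': 4, 'n_action': 2}  # hypothetical configuration

shapes, params = build_net()
expected = sum(n_in * n_out + n_out for n_in, n_out in shapes)
assert params.size == expected  # weights plus biases of all three layers
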
Code Example #5
File: net-minpy.py  Project: nikuya3/neural-net-image
def get_training_data():
    """
    Reads input data from the 'data' directory.
    :return: A matrix of form 50000 x 3072
    """
    x_data = None
    y_data = []
    for nr in range(1, 6):
        batch = unpickle('data/data_batch_' + str(nr))
        if x_data is None:
            x_data = np.asarray(batch[b'data'])
            y_data = batch[b'labels']
        else:
            x_data = np.concatenate(
                [x_data, np.asarray(batch[b'data'])], axis=0)
            y_data += batch[b'labels']
    return x_data, y_data
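
Assuming the five standard CIFAR-10 training batches sit under 'data/', the result shapes can be checked like this:

x_data, y_data = get_training_data()
assert x_data.shape == (50000, 3072)  # 5 batches of 10000 rows, 32*32*3 values
assert len(y_data) == 50000
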
Code Example #6
File: deparser.py  Project: wddabc/minpy
    def parse(self, tokens, oracle_actions=None):
        def _valid_actions(stack, buffer):
            valid_actions = []
            if len(buffer) > 0:
                valid_actions += [SHIFT]
            if len(stack) >= 2:
                valid_actions += [REDUCE_L, REDUCE_R]
            return valid_actions

        if oracle_actions: oracle_actions = list(oracle_actions)
        buffer = StackRNN(self.buffRNN, self.params['empty_buffer_emb'])
        stack = StackRNN(self.stackRNN)

        # Put the parameters in the cg
        W_comp = self.params['pW_comp']  # syntactic composition
        b_comp = self.params['pb_comp']
        W_s2h = self.params['pW_s2h']  # state to hidden
        b_s2h = self.params['pb_s2h']
        W_act = self.params['pW_act']  # hidden to action
        b_act = self.params['pb_act']
        emb = self.params['wemb']

        # We will keep track of all the losses we accumulate during parsing.
        # If some decision is unambiguous because it's the only thing valid given
        # the parser state, we will not model it. We only model what is ambiguous.
        loss = 0.

        # push the tokens onto the buffer (tokens is in reverse order)
        for tok in tokens:
            # TODO: I remember numpy ndarray supports python built-in list indexing
            tok_embedding = emb[np.array([tok])]
            buffer.push(tok_embedding, (tok_embedding, self.vocab.i2w[tok]))
        while not (len(stack) == 1 and len(buffer) == 0):
            # compute probability of each of the actions and choose an action
            # either from the oracle or if there is no oracle, based on the model
            valid_actions = _valid_actions(stack, buffer)
            log_probs = None
            action = valid_actions[0]
            if len(valid_actions) > 1:
                p_t = np.transpose(
                    np.concatenate([buffer.top(), stack.top()], axis=1))
                h = np.tanh(np.dot(W_s2h, p_t) + b_s2h)
                logits = np.dot(W_act, h) + b_act
                log_probs = logsoftmax(logits, valid_actions)
                if oracle_actions is None:
                    # Temporary work around by manually back-off to numpy https://github.com/dmlc/minpy/issues/15
                    action = int(numpy.argmax([x[0] for x in log_probs]))
            if oracle_actions is not None:
                action = oracle_actions.pop()
            if log_probs is not None:
                # append the action-specific loss
                # print action, log_probs[action], map(lambda x: x[0], list(log_probs))
                loss += log_probs[action]

            # execute the action to update the parser state
            if action == SHIFT:
                tok_embedding, token = buffer.pop()
                stack.push(tok_embedding, (tok_embedding, token))
            else:  # one of the REDUCE actions
                right = stack.pop()  # pop a stack state
                left = stack.pop()  # pop another stack state
                # figure out which is the head and which is the modifier
                head, modifier = (left, right) if action == REDUCE_R else (right, left)

                # compute composed representation
                head_rep, head_tok = head
                mod_rep, mod_tok = modifier
                composed_rep = np.tanh(
                    np.dot(
                        W_comp,
                        np.transpose(
                            np.concatenate([head_rep, mod_rep], axis=1))) +
                    b_comp)
                composed_rep = np.transpose(composed_rep)
                stack.push(composed_rep, (composed_rep, head_tok))
                if oracle_actions is None:
                    print('{0} --> {1}'.format(head_tok, mod_tok))

        # the head of the tree that remains at the top of the stack is the root
        if oracle_actions is None:
            head = stack.pop()[1]
            print('ROOT --> {0}'.format(head))
        return -loss
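
A minimal standalone sketch of the axis=1 state concatenation used above, assuming the stack and buffer tops are (1, d) row embeddings:

import numpy as np

d = 8                             # hypothetical embedding width
buffer_top = np.random.randn(1, d)
stack_top = np.random.randn(1, d)
p_t = np.transpose(np.concatenate([buffer_top, stack_top], axis=1))
assert p_t.shape == (2 * d, 1)    # one column vector, ready for np.dot(W_s2h, p_t)
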
Code Example #7
File: lstm.py  Project: pombredanne/minpy
def activations(weights, *args):
    # append a column of ones so the bias row of `weights` is folded into the dot
    cat_state = np.concatenate(args + (np.ones((args[0].shape[0], 1)),),
                               axis=1)
    return np.dot(cat_state, weights)
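
A usage sketch with hypothetical sizes; the appended ones column is what lets a single dot product apply the bias row of weights:

import numpy as np

batch, n_in, n_hid = 4, 3, 5                        # hypothetical sizes
x = np.random.randn(batch, n_in)
h = np.random.randn(batch, n_hid)
weights = np.random.randn(n_in + n_hid + 1, n_hid)  # +1 row for the bias
out = activations(weights, x, h)
assert out.shape == (batch, n_hid)
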
Code Example #8
def test_concatenate():
    arr = [rnd.randn(3, 4) for _ in range(10)]
    res = np.concatenate(arr, axis=1)
    assert res.shape == (3, 40)
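
A companion check along axis=0 in the same style (a hypothetical addition, reusing the test module's np and rnd aliases):

def test_concatenate_axis0():
    arr = [rnd.randn(3, 4) for _ in range(10)]
    res = np.concatenate(arr, axis=0)
    assert res.shape == (30, 4)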