Code example #1
def test_candidate_scorer():
    pytest.importorskip("mxnet")
    from mxnet import np
    import sockeye.beam_search

    scorer = sockeye.beam_search.CandidateScorer(length_penalty_alpha=1.0,
                                                 length_penalty_beta=0.0,
                                                 brevity_penalty_weight=0.1)
    scorer.initialize()
    scorer.hybridize(static_alloc=True)

    # np.array input
    raw_scores = np.random.uniform(0, 1, (5, ))
    lengths = np.array([1, 2, 3, 4, 5])
    reference_lengths = np.array([2, 3, 4, 5, 6])

    scores = scorer(raw_scores, lengths, reference_lengths)
    unnormalized_scores = scorer.unnormalize(scores, lengths,
                                             reference_lengths)
    assert np.allclose(unnormalized_scores, raw_scores)

    # int/float input
    raw_scores = 5.6
    lengths = 3
    reference_lengths = 4

    scores = scorer(raw_scores, lengths, reference_lengths)
    unnormalized_scores = scorer.unnormalize(scores, lengths,
                                             reference_lengths)
    assert np.allclose(unnormalized_scores, raw_scores)
Code example #2
def predict_seq2seq(net, src_sentence, src_vocab, tgt_vocab, num_steps,
                    device, save_attention_weights=False):
    """Predict for sequence to sequence."""
    src_tokens = src_vocab[src_sentence.lower().split(' ')] + [
        src_vocab['<eos>']]
    enc_valid_len = np.array([len(src_tokens)], ctx=device)
    src_tokens = d2l.truncate_pad(src_tokens, num_steps, src_vocab['<pad>'])
    # Add the batch axis
    enc_X = np.expand_dims(np.array(src_tokens, ctx=device), axis=0)
    enc_outputs = net.encoder(enc_X, enc_valid_len)
    dec_state = net.decoder.init_state(enc_outputs, enc_valid_len)
    # Add the batch axis
    dec_X = np.expand_dims(np.array([tgt_vocab['<bos>']], ctx=device), axis=0)
    output_seq, attention_weight_seq = [], []
    for _ in range(num_steps):
        Y, dec_state = net.decoder(dec_X, dec_state)
        # We use the token with the highest prediction likelihood as the input
        # of the decoder at the next time step
        dec_X = Y.argmax(axis=2)
        pred = dec_X.squeeze(axis=0).astype('int32').item()
        # Save attention weights (to be covered later)
        if save_attention_weights:
            attention_weight_seq.append(net.decoder.attention_weights)
        # Once the end-of-sequence token is predicted, the generation of the
        # output sequence is complete
        if pred == tgt_vocab['<eos>']:
            break
        output_seq.append(pred)
    return ' '.join(tgt_vocab.to_tokens(output_seq)), attention_weight_seq
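A minimal usage sketch for the function above (hedged: `net`, `src_vocab`, and `tgt_vocab` are assumed to come from the usual d2l machine-translation training setup and are not defined here):

translation, attn_seq = predict_seq2seq(
    net, 'go .', src_vocab, tgt_vocab, num_steps=10,
    device=d2l.try_gpu(), save_attention_weights=True)
print(translation)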
Code example #3
def _add_workload_roll():
    # test_roll1d(self)
    OpArgMngr.add_workload('roll', np.array(_np.arange(10)), 2)

    # test_roll2d(self)
    x2 = np.array(_np.reshape(_np.arange(10), (2, 5)))
    OpArgMngr.add_workload('roll', x2, 1)
    OpArgMngr.add_workload('roll', x2, 1, axis=0)
    OpArgMngr.add_workload('roll', x2, 1, axis=1)
    # # Roll multiple axes at once.
    OpArgMngr.add_workload('roll', x2, 1, axis=(0, 1))
    OpArgMngr.add_workload('roll', x2, (1, 0), axis=(0, 1))
    OpArgMngr.add_workload('roll', x2, (-1, 0), axis=(0, 1))
    OpArgMngr.add_workload('roll', x2, (0, 1), axis=(0, 1))
    OpArgMngr.add_workload('roll', x2, (0, -1), axis=(0, 1))
    OpArgMngr.add_workload('roll', x2, (1, 1), axis=(0, 1))
    OpArgMngr.add_workload('roll', x2, (-1, -1), axis=(0, 1))
    # # Roll the same axis multiple times.
    # OpArgMngr.add_workload('roll', x2, 1, axis=(0, 0)) # Check failed: axes[i - 1] < axes[i] (0 vs. 0) : axes have duplicates [0,0]
    # OpArgMngr.add_workload('roll', x2, 1, axis=(1, 1)) # Check failed: axes[i - 1] < axes[i] (1 vs. 1) : axes have duplicates [1,1]
    # # Roll more than one turn in either direction.
    OpArgMngr.add_workload('roll', x2, 6, axis=1)
    OpArgMngr.add_workload('roll', x2, -4, axis=1)
    # # test_roll_empty
    OpArgMngr.add_workload('roll', np.array([]), 1)
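For reference, a quick plain-NumPy check of the `roll` semantics these workloads exercise (elements shift by the given count and wrap around; negative shifts go the other way):

import numpy as _np
assert (_np.roll(_np.arange(10), 2) == _np.array([8, 9, 0, 1, 2, 3, 4, 5, 6, 7])).all()
assert (_np.roll(_np.arange(10), -2) == _np.array([2, 3, 4, 5, 6, 7, 8, 9, 0, 1])).all()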
Code example #4
def _get_random_bucketed_data(buckets: List[Tuple[int, int]],
                              min_count: int,
                              max_count: int,
                              bucket_counts: Optional[List[Optional[int]]] = None):
    """
    Get random bucket data.

    :param buckets: The list of buckets.
    :param min_count: The minimum number of samples that will be sampled if no exact count is given.
    :param max_count: The maximum number of samples that will be sampled if no exact count is given.
    :param bucket_counts: For each bucket an optional exact example count can be given. If it is not given it will be
                          sampled.
    :return: The random source and target arrays.
    """
    pytest.importorskip('mxnet')
    from mxnet import np
    if bucket_counts is None:
        bucket_counts = [None for _ in buckets]
    bucket_counts = [random.randint(min_count, max_count) if given_count is None else given_count
                     for given_count in bucket_counts]
    source = [np.array(np.random.randint(0, 10, (count, random.randint(1, bucket[0]), 1))) for count, bucket in
              zip(bucket_counts, buckets)]
    target = [np.array(np.random.randint(0, 10, (count, random.randint(2, bucket[1]), 1))) for count, bucket in
              zip(bucket_counts, buckets)]
    return source, target
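A usage sketch (hedged: the bucket shapes and counts are illustrative, not taken from a real configuration):

buckets = [(10, 10), (20, 20)]
source, target = _get_random_bucketed_data(buckets, min_count=1, max_count=5)
assert len(source) == len(target) == len(buckets)
# source[i] has shape (count_i, sampled_src_len <= buckets[i][0], 1)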
Code example #5
def evaluate_ranking(net, test_input, seq, candidates, num_users, num_items,
                     devices):
    ranked_list, ranked_items, hit_rate, auc = {}, {}, [], []
    all_items = set(range(num_items))  # all candidate item ids
    for u in range(num_users):
        neg_items = list(all_items - set(candidates[int(u)]))
        user_ids, item_ids, x, scores = [], [], [], []
        item_ids.extend(neg_items)
        user_ids.extend([u] * len(neg_items))
        x.extend([np.array(user_ids)])
        if seq is not None:
            x.append(seq[user_ids, :])
        x.extend([np.array(item_ids)])
        test_data_iter = gluon.data.DataLoader(gluon.data.ArrayDataset(*x),
                                               shuffle=False,
                                               last_batch="keep",
                                               batch_size=1024)
        for index, values in enumerate(test_data_iter):
            x = [
                gluon.utils.split_and_load(v, devices, even_split=False)
                for v in values
            ]
            scores.extend([list(net(*t).asnumpy()) for t in zip(*x)])
        scores = [item for sublist in scores for item in sublist]
        item_scores = list(zip(item_ids, scores))
        ranked_list[u] = sorted(item_scores, key=lambda t: t[1], reverse=True)
        ranked_items[u] = [r[0] for r in ranked_list[u]]
        temp = hit_and_auc(ranked_items[u], test_input[u], 50)
        hit_rate.append(temp[0])
        auc.append(temp[1])
    return np.mean(np.array(hit_rate)), np.mean(np.array(auc))
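The helper `hit_and_auc` is not shown above; a minimal sketch consistent with how it is used here (returning a hit@k flag and a rank-based AUC for a single held-out item; the actual d2l implementation may differ in detail):

def hit_and_auc(ranked, true_item, k):
    # hit@k: did the held-out item make it into the top-k ranking?
    hit = int(true_item in ranked[:k])
    # AUC with one positive: fraction of candidates ranked below it
    if true_item in ranked:
        rank = ranked.index(true_item)
        auc = (len(ranked) - 1 - rank) / max(len(ranked) - 1, 1)
    else:
        auc = 0.0
    return hit, auc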
Code example #6
def test_boolean_indexing_onedim():
    # adapted from numpy's test_indexing.py
    # Indexing a 2-dimensional array with
    # a boolean array of length one
    a = np.array([[0., 0., 0.]])
    b = np.array([True], dtype=bool)
    assert same(a[b].asnumpy(), a.asnumpy())
Code example #7
def predict_s2s_ch9(model, src_sentence, src_vocab, tgt_vocab, num_steps,
                    device):
    """Predict sequences (defined in Chapter 9)."""
    #src_tokens = src_vocab[src_sentence.lower().split(' ')] + [src_vocab['<eos>']]
    src_tokens = src_vocab[get_word_list(
        src_sentence.lower())] + [src_vocab['<eos>']]
    enc_valid_len = np.array([len(src_tokens)], ctx=device)
    src_tokens = truncate_pad(src_tokens, num_steps, src_vocab['<pad>'])
    # Add the batch axis
    enc_X = np.expand_dims(np.array(src_tokens, ctx=device), axis=0)
    enc_outputs = model.encoder(enc_X, enc_valid_len)
    dec_state = model.decoder.init_state(enc_outputs, enc_valid_len)
    # Add the batch axis
    dec_X = np.expand_dims(np.array([tgt_vocab['<bos>']], ctx=device), axis=0)
    output_seq = []
    for _ in range(num_steps):
        Y, dec_state = model.decoder(dec_X, dec_state)
        # We use the token with the highest prediction likelihood as the input
        # of the decoder at the next time step
        dec_X = Y.argmax(axis=2)
        pred = dec_X.squeeze(axis=0).astype('int32').item()
        # Once the end-of-sequence token is predicted, the generation of
        # the output sequence is complete
        if pred == tgt_vocab['<eos>']:
            break
        output_seq.append(pred)
    return ' '.join(tgt_vocab.to_tokens(output_seq))
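`get_word_list` is not defined in this snippet; a hypothetical stand-in consistent with the commented-out original (plain whitespace tokenization) would be:

def get_word_list(sentence):
    # hypothetical tokenizer; the project's real version presumably
    # also handles punctuation and unknown characters
    return sentence.split(' ')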
Code example #8
def test_rotate():
    transformer = transforms.Rotate(10.)
    assertRaises(TypeError, transformer, mx.np.ones((3, 30, 60),
                                                    dtype='uint8'))
    single_image = mx.np.ones((3, 30, 60), dtype='float32')
    single_output = transformer(single_image)
    assert same(single_output.shape, (3, 30, 60))
    batch_image = mx.np.ones((3, 3, 30, 60), dtype='float32')
    batch_output = transformer(batch_image)
    assert same(batch_output.shape, (3, 3, 30, 60))

    input_image = np.array([[[0., 0., 0.], [0., 0., 1.], [0., 0., 0.]]])
    rotation_angles_expected_outs = [
        (90., np.array([[[0., 1., 0.], [0., 0., 0.], [0., 0., 0.]]])),
        (180., np.array([[[0., 0., 0.], [1., 0., 0.], [0., 0., 0.]]])),
        (270., np.array([[[0., 0., 0.], [0., 0., 0.], [0., 1., 0.]]])),
        (360., np.array([[[0., 0., 0.], [0., 0., 1.], [0., 0., 0.]]])),
    ]
    for rot_angle, expected_result in rotation_angles_expected_outs:
        transformer = transforms.Rotate(rot_angle)
        ans = transformer(input_image)
        print(type(ans), ans, type(expected_result), expected_result)
        assert_almost_equal(ans.asnumpy(),
                            expected_result.asnumpy(),
                            atol=1e-6)
Code example #9
def predict_s2s_ch9(model, src_sentence, src_vocab, tgt_vocab, num_steps, ctx):
    # ORG: src_tokens = src_vocab[src_sentence.lower().split(' ')]
    # TODO: if the vocab doesn't contain a token, assign an index other than 0
    src_tokens = src_vocab[src_sentence.lower().split(',')]
    num_steps = len(src_tokens) # Fix
    enc_valid_len = np.array([len(src_tokens)], ctx=ctx)
    src_tokens = d2l.truncate_pad(src_tokens, num_steps, src_vocab['<pad>'])
    enc_X = np.array(src_tokens, ctx=ctx)
    # Add the batch_size dimension
    enc_outputs = model.encoder(np.expand_dims(enc_X, axis=0), enc_valid_len)
    dec_state = model.decoder.init_state(enc_outputs, enc_valid_len)
    dec_X = np.expand_dims(np.array([tgt_vocab['<bos>']], ctx=ctx), axis=0)
    predict_tokens = []
    for _ in range(num_steps):
        Y, dec_state = model.decoder(dec_X, dec_state)
        # The token with highest score is used as the next timestep input
        dec_X = Y.argmax(axis=2)
        py = dec_X.squeeze(axis=0).astype('int32').item()
        #print("debug : ", py)
        if py in (tgt_vocab['<eos>'], tgt_vocab['<unk>']):
            print('py : ', py)  # TODO
            break
        predict_tokens.append(py)
    return ' '.join(tgt_vocab.to_tokens(predict_tokens))
Code example #10
def test_to_tensor():
    # 3D Input
    data_in = np.random.uniform(0, 255, (300, 300, 3)).astype(dtype=np.uint8)
    out_nd = transforms.ToTensor()(np.array(data_in, dtype='uint8'))
    assert_almost_equal(
        out_nd.asnumpy(),
        np.transpose(data_in.astype(dtype=np.float32) / 255.0, (2, 0, 1)))

    # 4D Input
    data_in = np.random.uniform(0, 255,
                                (5, 300, 300, 3)).astype(dtype=np.uint8)
    out_nd = transforms.ToTensor()(np.array(data_in, dtype='uint8'))
    assert_almost_equal(
        out_nd.asnumpy(),
        np.transpose(data_in.astype(dtype=np.float32) / 255.0, (0, 3, 1, 2)))

    # Invalid Input
    invalid_data_in = np.random.uniform(
        0, 255, (5, 5, 300, 300, 3)).astype(dtype=np.uint8)
    transformer = transforms.ToTensor()
    assertRaises(MXNetError, transformer, invalid_data_in)

    # Bounds (0->0, 255->1)
    data_in = np.zeros((10, 20, 3)).astype(dtype=np.uint8)
    out_nd = transforms.ToTensor()(np.array(data_in, dtype='uint8'))
    assert same(
        out_nd.asnumpy(),
        np.transpose(np.zeros(data_in.shape, dtype=np.float32), (2, 0, 1)))

    data_in = np.full((10, 20, 3), 255).astype(dtype=np.uint8)
    out_nd = transforms.ToTensor()(np.array(data_in, dtype='uint8'))
    assert same(
        out_nd.asnumpy(),
        np.transpose(np.ones(data_in.shape, dtype=np.float32), (2, 0, 1)))
Code example #11
File: data_preprocessing.py Project: TomasBahnik/ml
def read_csv():
    data = pd.read_csv(data_file)
    inputs, outputs = data.iloc[:, 0:2], data.iloc[:, 2]
    inputs = inputs.fillna(inputs.mean())
    inputs = pd.get_dummies(inputs, dummy_na=True)
    X, y = np.array(inputs.values), np.array(outputs.values)
    print("examples '{}/{}', labels '{}/{}'".format(X, type(X), y, type(y)))
Code example #12
def _get_vocab_slice_ids(restrict_lexicon: Optional[lexicon.TopKLexicon],
                         source_words: np.ndarray,
                         raw_constraint_list: List[Optional[constrained.RawConstraintList]],
                         eos_id: int,
                         beam_size: int) -> Tuple[np.ndarray, int, List[Optional[constrained.RawConstraintList]]]:
    vocab_slice_ids = np.array(restrict_lexicon.get_trg_ids(source_words.astype("int32", copy=False).asnumpy()), dtype='int32')
    ctx = source_words.ctx
    if any(raw_constraint_list):
        # Add the constraint IDs to the list of permissible IDs, and then project them into the reduced space
        constraint_ids = np.array([word_id for sent in raw_constraint_list for phr in sent for word_id in phr])
        vocab_slice_ids = onp.lib.arraysetops.union1d(vocab_slice_ids, constraint_ids)  # type: ignore
        full_to_reduced = dict((val, i) for i, val in enumerate(vocab_slice_ids))
        raw_constraint_list = [[[full_to_reduced[x] for x in phr] for phr in sent] for sent in
                               raw_constraint_list]
    # pad to a multiple of 8.
    vocab_slice_ids = np.pad(vocab_slice_ids, (0, 7 - ((len(vocab_slice_ids) - 1) % 8)),
                             mode='constant', constant_values=eos_id)

    vocab_slice_ids_shape = vocab_slice_ids.shape[0]
    if vocab_slice_ids_shape < beam_size + 1:
        # This fixes an edge case for toy models, where the number of vocab ids from the lexicon is
        # smaller than the beam size.
        logger.warning("Padding vocab_slice_ids (%d) with EOS to have at least %d+1 elements to expand",
                       vocab_slice_ids_shape, beam_size)
        n = beam_size - vocab_slice_ids_shape + 1
        vocab_slice_ids = np.concatenate((vocab_slice_ids, np.full((n,), fill_value=eos_id, ctx=ctx, dtype='int32')),
                                         axis=0)

    logger.debug(f'decoder softmax size: {vocab_slice_ids_shape}')
    return vocab_slice_ids, vocab_slice_ids_shape, raw_constraint_list
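The `np.pad` call above rounds the vocabulary slice up to the next multiple of 8 (commonly done for hardware efficiency); a quick check of the arithmetic:

for n in (1, 8, 9, 10, 16):
    pad = 7 - ((n - 1) % 8)
    assert (n + pad) % 8 == 0  # e.g. n=10 pads by 6 to reach 16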
Code example #13
def predict_snli(net, vocab, premise, hypothesis):
    premise = np.array(vocab[premise], ctx=d2l.try_gpu())
    hypothesis = np.array(vocab[hypothesis], ctx=d2l.try_gpu())
    label = np.argmax(net([premise.reshape((1, -1)),
                           hypothesis.reshape((1, -1))]), axis=1)
    return 'entailment' if label == 0 else 'contradiction' if label == 1 \
            else 'neutral'
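A usage sketch (hedged: `net` and `vocab` are assumed to come from the d2l natural-language-inference training section):

predict_snli(net, vocab, ['he', 'is', 'good', '.'], ['he', 'is', 'bad', '.'])
# expected: 'contradiction'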
Code example #14
def test_acc(net,
             test_feats,
             test_thrpts,
             train_thrpt_avg,
             train_thrpt_std,
             print_log=True):
    """Test the model accuracy."""
    valid_error = 0
    thrpt_errors = []

    if print_log:
        log.info(
            '\nExpected\tValid-Pred0\tValid-Pred1\tThrpt-Pred\tThrpt-Error')

    thrpt_pred_range = (float('inf'), 0)

    for start in range(0, len(test_feats), 512):
        valid_preds, thrpt_preds = net(
            test_feats[start:min(start + 512, len(test_feats))])

        # Recover prediction results.
        thrpt_preds = (thrpt_preds * train_thrpt_std) + train_thrpt_avg
        #thrpt_preds = np.exp(thrpt_preds)

        for idx, (valid_pred,
                  thrpt_pred) in enumerate(zip(valid_preds, thrpt_preds)):
            real = test_thrpts[start + idx].tolist()
            valid_real = 1 if real > INVALID_THD else 0
            valid_prob = valid_pred.tolist()
            valid_pred = 1 if valid_prob[0] <= valid_prob[1] else 0
            valid_error += 1 if valid_real != valid_pred else 0

            thrpt_pred = thrpt_pred.tolist()[0]
            if valid_real == 1:
                if valid_pred == 0:  # Predict invalid will output 0 GFLOPs.
                    error = 1
                else:
                    error = abs(thrpt_pred - real) / real
                    thrpt_pred_range = (min(thrpt_pred_range[0], thrpt_pred),
                                        max(thrpt_pred_range[1], thrpt_pred))

                thrpt_errors.append(error)
                if print_log:
                    log.info('\t%.6f\t%.2f\t%.2f\t%.6f\t%.2f%%', real,
                             valid_prob[0], valid_prob[1], thrpt_pred,
                             100 * error)
            elif print_log:
                log.info('\t0\t%.2f\t%.2f\t%.6f', valid_prob[0], valid_prob[1],
                         thrpt_pred)

    valid_err_rate = 100.0 * valid_error / len(test_feats)
    thrpt_err = (np.array(thrpt_errors).mean().tolist(),
                 np.array(thrpt_errors).std().tolist())
    log.info('Thrpt predict range: %.6f, %.6f', thrpt_pred_range[0],
             thrpt_pred_range[1])
    if print_log:
        log.info('Valid error %.2f%%', valid_err_rate)
        log.info('Average error: %.2f (std %.2f)', thrpt_err[0], thrpt_err[1])
    return (valid_err_rate, thrpt_err)
Code example #15
def _add_workload_power(array_pool):
    OpArgMngr.add_workload('power', array_pool['4x1'], array_pool['1x2'])
    OpArgMngr.add_workload('power', array_pool['4x1'], 2)
    OpArgMngr.add_workload('power', 2, array_pool['4x1'])
    OpArgMngr.add_workload('power', array_pool['4x1'], array_pool['1x1x0'])
    OpArgMngr.add_workload('power', np.array([1, 2, 3], np.int32), 2.00001)
    OpArgMngr.add_workload('power', np.array([15, 15], np.int64), np.array([15, 15], np.int64))
    OpArgMngr.add_workload('power', 0, np.arange(1, 10))
Code example #16
def get_bert_encoding(net, tokens_a, tokens_b=None):
    tokens, segments = d2l.get_tokens_and_segments(tokens_a, tokens_b)
    ctx = d2l.try_gpu()
    token_ids = np.expand_dims(np.array(vocab[tokens], ctx=ctx), axis=0)
    segments = np.expand_dims(np.array(segments, ctx=ctx), axis=0)
    valid_len = np.expand_dims(np.array(len(tokens), ctx=ctx), axis=0)
    encoded_X, _, _ = net(token_ids, segments, valid_len)
    return encoded_X
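A usage sketch (hedged: `net` and `vocab` are the pretrained BERT model and vocabulary from the surrounding d2l section, not defined in this snippet):

tokens_a = ['a', 'crane', 'is', 'flying']
encoded_text = get_bert_encoding(net, tokens_a)
# encoded_text has shape (1, num_tokens, num_hiddens), where num_tokens
# counts the '<cls>' and '<sep>' added by get_tokens_and_segments
encoded_text.shape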
Code example #17
def test_single_bool_index():
    # adapted from numpy's test_indexing.py
    # Single boolean index
    a = np.array([[1, 2, 3],
                  [4, 5, 6],
                  [7, 8, 9]], dtype=np.int32)
    assert same(a[np.array(True, dtype=np.bool_)].asnumpy(), a[None].asnumpy())
    assert same(a[np.array(False, dtype=np.bool_)].asnumpy(), a[None][0:0].asnumpy())
Code example #18
def _add_workload_stack(array_pool):
    OpArgMngr.add_workload('stack', [array_pool['4x1']] * 2)
    OpArgMngr.add_workload('stack', [array_pool['4x1']] * 2, 1)
    OpArgMngr.add_workload('stack', [array_pool['4x1']] * 2, -1)
    OpArgMngr.add_workload('stack', [array_pool['4x1']] * 2, -2)
    OpArgMngr.add_workload('stack', np.random.normal(size=(2, 4, 3)), 2)
    OpArgMngr.add_workload('stack', np.random.normal(size=(2, 4, 3)), -3)
    OpArgMngr.add_workload('stack', np.array([[], [], []]), 1)
    OpArgMngr.add_workload('stack', np.array([[], [], []]))
Code example #19
def test_ndarray_container():
    x = np.array([1, 2, 3])
    y = np.array([4, 5, 6])
    arr = mxnet._ffi.convert_to_node([x, y])
    assert _np.array_equal(arr[0].asnumpy(), x.asnumpy())
    assert isinstance(arr[0], NDArray)
    amap = mxnet._ffi.convert_to_node({'x': x, 'y': y})
    assert "x" in amap
    assert _np.array_equal(amap["y"].asnumpy(), y.asnumpy())
    assert isinstance(amap["y"], NDArray)
Code example #20
def test_boolean_indexing_twodim():
    # adapted from numpy's test_indexing.py
    # Indexing a 2-dimensional array with
    # a 2-dimensional boolean array
    a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.int32)
    b = np.array(
        [[True, False, True], [False, True, False], [True, False, True]],
        dtype=np.bool_)
    assert same(a[b].asnumpy(), _np.array([1, 3, 5, 7, 9], dtype=a.dtype))
    assert same(a[b[1]].asnumpy(), _np.array([[4, 5, 6]], dtype=a.dtype))
    assert same(a[b[0]].asnumpy(), a[b[2]].asnumpy())
Code example #21
def vectors():
    # scalars are also ndarrays
    x = np.array(3.0)
    y = np.array(2.0)
    print("shape of scalar = {}".format(x.shape))
    print("size of scalar = {}".format(x.size))
    # vector
    xv = np.arange(4)
    print("shape of vector {} = {}".format(xv, xv.shape))
    print("size of vector = {}".format(xv.size))
    print(x + y, x * y, x / y, x ** y)
Code example #22
File: dataloader.py Project: azton/MLAIprojects
    def __getitem__(self, idx):
        file = h5py.File(self.data[idx], 'r')
        vol = torch.zeros((len(self.fields), 128,128,128))
        ## assign requested fields to data category
        for i, field in enumerate(self.fields):
            if 'velocity' not in field:
                shp = file[field][:].shape
                vol[i] = F.pad(torch.from_numpy(file[field][:]), \
                            (0, 128-shp[2], 0, 128-shp[1], 0, 128-shp[0]))
                vol[i] = (vol[i]-self.scaling[field]['mean']) \
                                /self.scaling[field]['std']
            if "velocity_divergence" in field:
                div = torch.zeros_like(vol[0])
                for ii,ax in enumerate('xyz'):
                    fld = 'velocity_%s'%ax
                    shp = file[fld].shape
                    vel_field = F.pad(torch.from_numpy(file[fld][:]),
                                      (0, 128-shp[2], 0, 128-shp[1], 0, 128-shp[0]))
                    # normalize first, matching (x - mean) / std above; the original
                    # parenthesization divided only the mean by the std
                    vel_field = (vel_field - self.scaling[fld]['mean']) \
                                / self.scaling[fld]['std']
                    div += numpy.gradient(vel_field)[ii]
                vol[i] = div

        le = file.attrs['cover_grid_left_edge']
        dx = file.attrs['dx']
        star_inds = []
        for c in file['new_stars_position'][:]:
            rel_pos = c - le
            star_inds.append((rel_pos)//dx+1)
        star_inds = numpy.array(star_inds)
        ### return matrix of pixel labels
        label = torch.zeros((128,128,128))

        offset = range(-self.star_blur, (self.star_blur)+1)

        ## set all cells hosting stars (and those within radius star_blur) to 1
        ## for segmentations
        for s in star_inds:
            # offsets renamed so they don't shadow the grid spacing `dx` read above
            for ox in offset:
                for oy in offset:
                    for oz in offset:
                        ind = (s + [ox, oy, oz]).astype(int)
                        if not numpy.any(ind >= 128):
                            label[ind[0], ind[1], ind[2]] = 1
        ## apply transforms to data
        if self.transforms is not None:
            vol, label, le = self.transforms(vol, label, dx, le)

        ## if cropping, have to check the new volume for stars
        if self.classifier == 'classification':
            label = (label.sum() >= 1).long()
        file.close()
        return np.array(vol.numpy()), np.array(label.long().numpy())
Code example #23
def test_np_meshgrid():
    nx, ny = (4, 5)
    x = np.array(_np.linspace(0, 1, nx), dtype=np.float32)
    y = np.array(_np.linspace(0, 1, ny), dtype=np.float32)
    z = np.ones(())
    xv, yv, zv = np.meshgrid(x, y, z)
    xv_expected, yv_expected, zv_expected = _np.meshgrid(
        x.asnumpy(), y.asnumpy(), z.asnumpy())
    assert same(xv.asnumpy(), xv_expected)
    assert same(yv.asnumpy(), yv_expected)
    assert same(zv.asnumpy(), zv_expected)
Code example #24
def test_divide():
    # np.divide and np.true_divide are the same thing
    inp = np.ones((INT_OVERFLOW, 2))
    inp[-1, -1] = 10
    inp.attach_grad()
    with mx.autograd.record():
        out = np.divide(inp, np.array([2, 3]))
        out.backward()
    assert out.shape == inp.shape
    assert_almost_equal(out[-1, -1], np.array([10 / 3]), rtol=1e-5, atol=1e-5)
    assert inp.grad.shape == inp.shape
    assert_almost_equal(inp.grad[-1, -1], np.array([1.0 / 3]), rtol=1e-5, atol=1e-5)
Code example #25
def batch_check(funcs, exp):
    # `inp` is expected to be defined in the enclosing test scope
    inp.attach_grad()
    for f, e in zip(funcs, exp):
        with mx.autograd.record():
            out = f(inp)
            out.backward()
        assert out.shape == inp.shape
        assert_almost_equal(out[-1, -1], np.array([e[0]]),
                            rtol=1e-5, atol=1e-5)
        assert inp.grad.shape == inp.shape
        assert_almost_equal(inp.grad[-1, -1], np.array([e[1]]),
                            rtol=1e-5, atol=1e-5)
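A usage sketch (hedged: mirrors how such helpers are typically driven in MXNet's large-tensor tests; `INT_OVERFLOW` is assumed to be defined as in the neighboring tests):

inp = np.ones((2, INT_OVERFLOW))
inp[-1, -1] = 2
# each entry of exp is (expected out[-1, -1], expected grad[-1, -1])
batch_check([np.exp, np.log],
            [(np.e ** 2, np.e ** 2), (0.6931472, 0.5)])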
Code example #26
def test_expm1():
    inp = np.ones((2, INT_OVERFLOW))
    inp[-1, -1] = 2
    inp.attach_grad()
    with mx.autograd.record():
        out = np.expm1(inp)
        out.backward()
    assert out.shape == inp.shape
    assert_almost_equal(out[0, 0], np.array(np.e**1 - 1), rtol=1e-5, atol=1e-5)
    assert_almost_equal(out[-1, -1], np.array(np.e**2 - 1), rtol=1e-5, atol=1e-5)
    assert inp.grad.shape == inp.shape
    assert_almost_equal(inp.grad[-1, -1], np.array(np.e**2), rtol=1e-5, atol=1e-5)
Code example #27
def test_reciprocal():
    inp = np.ones((2, INT_OVERFLOW))
    inp[-1, -1] = 3
    inp.attach_grad()
    with mx.autograd.record():
        out = np.reciprocal(inp)
        out.backward()
    assert out.shape == inp.shape
    assert_almost_equal(out[-1, -1], np.array([1.0/3]), rtol=1e-5, atol=1e-5)
    assert inp.grad.shape == inp.shape
    assert_almost_equal(inp.grad[-1, -1], np.array([-1.0/3**2]), \
        rtol=1e-5, atol=1e-5)
Code example #28
def test_index_update():
    A = np.zeros((2, INT_OVERFLOW))
    ind = np.array([[0, 0], [0, 1]], dtype='int32')
    val = np.array([100, 200])
    A.attach_grad()
    with mx.autograd.record():
        B = npx.index_update(A, ind, val)
    assert B.shape == (2, INT_OVERFLOW)
    assert B[0][0] == 100 and B[0][1] == 200
    B.backward()
    assert A.grad.shape == (2, INT_OVERFLOW)
    assert A.grad[0][0] == 0
Code example #29
def test_length_penalty_default():
    pytest.importorskip("mxnet")
    from mxnet import np
    import sockeye.beam_search

    lengths = np.array([[1], [2], [3]])
    length_penalty = sockeye.beam_search.LengthPenalty(1.0, 0.0)
    expected_lp = np.array([[1.0], [2.], [3.]])

    assert np.allclose(length_penalty(lengths), expected_lp)
    length_penalty.hybridize()
    assert np.allclose(length_penalty(lengths), expected_lp)
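For reference, this matches the GNMT-style length penalty lp(n) = ((beta + n) / (beta + 1)) ** alpha, which reduces to the identity for alpha=1.0, beta=0.0, exactly as the expected values above encode. A plain-Python check (hedged: see sockeye.beam_search.LengthPenalty for the authoritative formula):

def length_penalty(n, alpha=1.0, beta=0.0):
    return ((beta + n) / (beta + 1)) ** alpha

assert [length_penalty(n) for n in (1, 2, 3)] == [1.0, 2.0, 3.0]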
Code example #30
def test_setitem_autograd(np_array, index):
    x = np.array(np_array, dtype=np_array.dtype)
    out_shape = x[index].shape
    y = np.array(_np.random.uniform(size=out_shape))
    y.attach_grad()
    try:
        with autograd.record():
            x[index] = y
            assert False  # should not reach here
    except mx.base.MXNetError as err:
        assert str(err).find(
            'Inplace operations (+=, -=, x[:]=, etc) are not supported when recording with'
        ) != -1