Code Example #1
File: problem23.py Project: tmr232/ProjectEuler
def main():
    print(take(10, iter_abundant_numbers()))
    # print(len(list(takewhile(lambda n: n < MAX, iter_abundant_numbers()))))
    table = {
        sum(x)
        for x in combinations_with_replacement(
            takewhile(lambda x: x <= MAX // 2, iter_abundant_numbers()), 2)
    }
    print("Got table")
    print(sum(x for x in range(MAX) if x not in table))
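Every example on this page calls a project-local take helper rather than a standard-library function. A common definition, following the itertools recipes in the Python documentation, is sketched below; it matches the take(n, iterable) convention used in this first example, though several later projects flip the argument order or add extra parameters, so treat this as an assumption about each project's utils.

from itertools import islice

def take(n, iterable):
    """Return the first n items of the iterable as a list (itertools recipe)."""
    return list(islice(iterable, n))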
Code Example #2
def normalize_to_single_token_edits(edit):
    """Split a multi-token edit into aligned single-token edits."""
    def positions_it(region):
        # One single-position region per covered position, then pad with
        # empty regions anchored at the edit's end.
        return itertools.chain(
            (m2format.Region(pos, pos + 1)
             for pos in range(region.beg, region.end)),
            itertools.repeat(m2format.Region(region.end, region.end)),
        )

    def tokens_it(tokens):
        # One 1-tuple per replacement token, then pad with empty tuples.
        return itertools.chain(
            ((token, ) for token in tokens),
            itertools.repeat(tuple()),
        )

    region = edit.region
    tokens = edit.tokens

    # A pure insertion (empty region) cannot be split further.
    if region.beg == region.end:
        yield edit
        return

    # Emit one edit per covered position or per token, whichever is more;
    # the repeat() padding above supplies the missing halves.
    n = max(region.end - region.beg, len(tokens))
    for st_region, st_tokens in utils.take(
            n, zip(positions_it(region), tokens_it(tokens))):
        assert region.beg <= st_region.beg and st_region.end <= region.end
        yield m2format.Edit(st_region, st_tokens, edit.type)
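A self-contained usage sketch of the function above. The namedtuple stand-ins for m2format.Region and m2format.Edit and the islice-based utils.take are assumptions, not the project's actual API:

import itertools
import types
from collections import namedtuple

# Hypothetical stand-ins for the project's m2format and utils modules.
Region = namedtuple("Region", "beg end")
Edit = namedtuple("Edit", "region tokens type")
m2format = types.SimpleNamespace(Region=Region, Edit=Edit)
utils = types.SimpleNamespace(take=lambda n, it: list(itertools.islice(it, n)))

# A replacement covering positions [3, 5) with two tokens splits into
# two aligned single-token edits.
edit = Edit(Region(3, 5), ("a", "b"), "R")
for single in normalize_to_single_token_edits(edit):
    print(single)
# Edit(region=Region(beg=3, end=4), tokens=('a',), type='R')
# Edit(region=Region(beg=4, end=5), tokens=('b',), type='R')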
Code Example #3
File: xmlutil.py Project: chaser3/Projects
def first_child(elem, child_name):
    '''
    Get the first element with the given tag name, or None if there is
    no match. Note that elem.iter() searches all descendants recursively,
    not only direct children.
    '''
    first = take(1, elem.iter(child_name))
    if not first:
        return None
    return first[0]
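A usage sketch with the standard library's ElementTree, assuming the islice-based take recipe shown earlier (which returns a list):

import xml.etree.ElementTree as ET
from itertools import islice

def take(n, iterable):  # assumed helper: first n items as a list
    return list(islice(iterable, n))

root = ET.fromstring("<root><a/><b id='1'/><b id='2'/></root>")
print(first_child(root, "b").get("id"))  # prints: 1
print(first_child(root, "c"))            # prints: None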
Code Example #4
File: exp2.py Project: milesrout/experiment1
def forall_left(premise, premises, goal, tg):
    ts = take(randnat(), guessterm(tg, premises, goal))
    if KEEP_USED_PREMISES:
        return [(ByForallLeft, [(premises | {premise.instantiate(t)}, goal)])
                for t in ts]
    else:
        return [(ByForallLeft,
                 [(premises - {premise} | {premise.instantiate(t)}, goal)])
                for t in ts]
Code Example #5
def test3():
    from chords import generateChords
    allNotes = utils.take(100, generateChords(noteRange=(3, 3)))
    for notes in allNotes:
        print("=" * 30, notes)
        chains = findBestChains(notes)
        for chain in chains:
            for c in chain:
                print(c)
            print()
Code Example #6
File: epimorics.py Project: bntre/py-harmony
def test3():
    from chords import generateChords
    allNotes = utils.take(100, generateChords(noteRange=(3, 4)))
    for notes in allNotes:
        print("=" * 20, notes)
        chains = findBestChains(notes)
        for chain in chains:
            for chord in chain:
                #print(chord)
                print(formatChord(chord))
            #print(formatChain(chain))
            print()
Code Example #7
def test_generate():
    from rationals import Rational
    chords = utils.take(1000, generateChords((3, 3)))
    chords = [(c, getHarmonicDistance(c)) for c in chords]
    chords.sort(key=lambda cd: cd[1])

    for (i, (c, d)) in enumerate(chords[:50]):
        chord = list(map(Rational, c))
        # Normalize in reverse order so chord[0] is divided by itself last.
        for j in reversed(range(len(chord))):
            chord[j] /= chord[0]
        print("%03d." % (i + 1), c, end=" ")
        for r in chord:
            print(r.getFraction(0), end=" ")
            #print(r.getSonant(), end=" ")
        print(d)
Code Example #8
def frames_generator_rnn(dataset_dir,
                         split_key,
                         batch_size,
                         additional_data=None):
    """
        Internally this method uses _sequential_infinite_iterator_rnn to iterate on the validation set.
        If the batch_size is not divisible by the number of videos, the last videos will not be
        returned, but they will be returned at the next iteration over the valid set, so we might
        end with statistics that will be slightly different if computed multiple times on the
        validation set returned by this generator.
    """

    base_dir = join(dataset_dir, split_key)

    dataset_structure = get_dataset_split_structure(base_dir)
    all_classes = dataset_structure.keys()

    d_iterator = _get_iterator(split_key)(base_dir, dataset_structure)

    tot_videos = next(count_num_videos(dataset_dir, split_key))
    num_calls = tot_videos // batch_size
    yield num_calls

    while True:

        for _ in range(num_calls):

            data = take(d_iterator, batch_size)
            class_names, video_names, videos, labels = map(
                np.array, zip(*data))
            labels = to_categorical(labels, len(all_classes))

            if additional_data is not None:

                additional_data_batch = np.array([
                    additional_data[cl][v]
                    for cl, v in zip(class_names, video_names)
                ])

                yield [additional_data_batch, videos], [labels]

            else:
                yield videos, labels
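Note the two-phase protocol: the first value yielded is the number of batches per pass, and every later yield is a batch. A minimal consumption sketch (the dataset path and split name are hypothetical):

gen = frames_generator_rnn("/data/videos", "valid", batch_size=8)
steps_per_pass = next(gen)         # first yield: number of full batches
for _ in range(steps_per_pass):
    videos, labels = next(gen)     # with additional_data=None: (videos, labels)
    # ... feed the batch to a model ...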
Code Example #9
def read_batch(idx, paths, batch_size=32, img_size=(48, 48)):
    '''
    Read the batch tuple (X, y) at index idx from paths. X holds grayscale
    images replicated to 3 channels; y holds the target RGB images.

    Args:
        idx (number): Index of the batch to take
        paths (list): List of paths to read
        batch_size (number): Batch size - number of tuples to read
        img_size (tuple): Image size after squarification, in (W, H) format
    '''
    X, y = [], []

    for img_rgb in take(batch_size,
                        paths[idx * batch_size:],
                        extract_fn=partial(read_image, size=img_size)):
        img_gray = rgb2gray(img_rgb).reshape(img_size + (1, ))
        X.append(np.repeat(img_gray, 3, axis=-1))
        y.append(img_rgb)

    return np.array(X), np.array(y)
Code Example #10
def frames_generator_rnn(dataset_dir, split_key, batch_size):
    """
        Internally this method uses _sequential_infinite_iterator_rnn to iterate on the validation set.
        If the batch_size is not divisible by the number of videos, the last videos will not be
        returned, but they will be returned at the next iteration over the valid set, so we might
        end with statistics that will be slightly different if computed multiple times on the
        validation set returned by this generator.
    """

    base_dir = join(dataset_dir, split_key)

    dataset_structure = get_dataset_split_structure(base_dir)
    all_classes = dataset_structure.keys()

    d_iterator = _get_iterator(split_key)(base_dir, dataset_structure)

    tot_videos = next(count_num_videos(dataset_dir, split_key))
    num_calls = tot_videos // batch_size
    yield num_calls

    while True:

        for _ in range(num_calls):

            data = take(d_iterator, batch_size)
            videos, labels = map(np.array, zip(*data))

            videos = preprocess_images_tf(videos)
            labels = to_categorical(labels, len(all_classes))

            yield videos, labels
Code Example #11
def pos_from_files(files, max_sents=INF, rem_id=True, shuffle=False, shuffle_seed=448):
    sents = (sent for f in files for sent in pos_from_file(f, rem_id=rem_id))
    return take(iter(shuffle_seq(sents, shuffle_seed)) if shuffle else sents, max_sents)
Code Example #12
File: day13.py Project: yonax/advent-of-code
def max_happiness(happiness, attendees):
    h = lambda a, b, c: happiness[b][a] + happiness[b][c]
    seating = lambda xs: take(len(attendees), window(cycle(xs), 3))
    total = lambda xs: sum(starmap(h, seating(xs)))
    optimal = max(permutations(attendees), key=total)
    return optimal, total(optimal)
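The window helper is not shown in the snippet; a common sliding-window recipe that fits this call (an assumption, not this project's verified helper) is:

import itertools

def window(iterable, n):
    """Yield overlapping n-tuples: window('ABCD', 2) -> ('A','B'), ('B','C'), ('C','D')."""
    it = iter(iterable)
    result = tuple(itertools.islice(it, n))
    if len(result) == n:
        yield result
    for elem in it:
        result = result[1:] + (elem,)
        yield result

Because cycle(xs) makes the window stream infinite, take(len(attendees), ...) caps it at one triple per attendee, so each guest b is scored exactly once with its circular neighbors a and c.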
Code Example #13
File: exp2.py Project: milesrout/experiment1
def exists_right(premises, goal, tg):
    ts = take(randnat(), guessterm(tg, premises, goal))
    return [(ByExistsRight, [(premises, goal.instantiate(t))]) for t in ts]
Code Example #14
def random_positions(n):
    return take(shuffle(positions()), n)
Code Example #15
File: wordvecs.py Project: sklam/word2vec
    for _ in it.repeat(None):
        neg_sampler_jit_pad_arr_(a)
        yield a[pad:].copy()
#         for i in a[pad:]:
#             yield i


# ### Check distributions
# 
# Just as a sanity check that the different implementations do the same thing, I randomly generate words according to how frequently they occur in the text with each of the samplers and scatter-plot them against the actual word frequency to check that they mostly lie on $y=x$. 

# In[ ]:

from sklearn.preprocessing import LabelEncoder
from nltk.corpus import brown
some_text = take(brown.words(), int(1e6))


# In[ ]:

le = LabelEncoder()
smtok = le.fit_transform(some_text)


# In[ ]:

class NegSampler:
    """Container for the sampler generator functions.
    This keeps track of the number of samples $K$ and the padding."""
    def __init__(self, sampler, toks, K=None, ret_type=None, pad=None,
                 nxt=True, **kw):
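A minimal sketch of the sanity check described in the comments above; sampler_draws stands for a hypothetical array of token ids produced by one of the samplers:

from collections import Counter
import matplotlib.pyplot as plt

def rel_freqs(tokens):
    counts = Counter(tokens)
    total = sum(counts.values())
    return {t: c / total for t, c in counts.items()}

actual = rel_freqs(smtok)            # corpus frequencies
sampled = rel_freqs(sampler_draws)   # sampler_draws: hypothetical sampler output
toks = sorted(set(actual) & set(sampled))
plt.scatter([actual[t] for t in toks], [sampled[t] for t in toks], s=4)
lim = max(actual.values())
plt.plot([0, lim], [0, lim])         # y = x reference line
plt.xlabel("corpus frequency")
plt.ylabel("sampler frequency")
plt.show()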
Code Example #16
File: train.py Project: shayan-kousha/jax-flows
@jit
def update(rng, i, opt_state, batch):
    params = get_params(opt_state)
    grads = grad(loss)(params, batch)
    return opt_update(i, grads, opt_state)


if __name__ == '__main__':
    key = random.PRNGKey(0)

    # Create dataset
    X_full = utils.get_datasets(dataset)

    kfold = model_selection.KFold(pieces, shuffle=True, random_state=0)
    for fold_iter, (idx_train, idx_test) in enumerate(
            utils.take(pieces_to_run, kfold.split(X_full))):
        X, X_test = X_full[idx_train], X_full[idx_test]

        scaler = preprocessing.StandardScaler()
        X = scaler.fit_transform(X)
        X_test = scaler.transform(X_test)

        delta = 1. / (X.shape[0]**1.1)

        print('X: {}'.format(X.shape))
        print('X test: {}'.format(X_test.shape))
        print('Delta: {}'.format(delta))

        # Create flow
        modules = flow_utils.get_modules(flow, num_blocks, normalization,
                                         num_hidden)
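Here utils.take(pieces_to_run, kfold.split(X_full)) caps how many of the pieces folds are actually run. A self-contained sketch of the same pattern, with an islice-based take standing in for utils.take (an assumption):

from itertools import islice

import numpy as np
from sklearn import model_selection

def take(n, iterable):  # assumed utils.take: first n items as a list
    return list(islice(iterable, n))

X = np.arange(20).reshape(10, 2)
kfold = model_selection.KFold(5, shuffle=True, random_state=0)
for fold_iter, (idx_train, idx_test) in enumerate(take(2, kfold.split(X))):
    print(fold_iter, len(idx_train), len(idx_test))  # runs 2 of the 5 folds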