Example #1
 def use_internal_state(self):
     """Use a specific RNG state."""
     old_state = random.getstate()
     random.setstate(self._random_state)
     yield
     self._random_state = random.getstate()
     random.setstate(old_state)
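The pattern above (save the caller's state, swap in a private one, restore on exit) recurs throughout these examples. A minimal self-contained sketch of the same idea, assuming contextlib.contextmanager wraps the method (the decorator is not shown in the excerpt):

import random
from contextlib import contextmanager

@contextmanager
def isolated_rng(seed=123):
    """Swap in a private RNG state; restore the caller's state on exit."""
    outer = random.getstate()      # save the caller's state
    random.seed(seed)              # enter a private, reproducible state
    try:
        yield
    finally:
        random.setstate(outer)     # restore the caller's state

with isolated_rng():
    print(random.random())         # same value on every run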
Example #2
    def batch_generator(self, batch_size: int, data_type: str = 'train',
                        shuffle: bool = None) -> Generator:
        r"""This function returns a generator, which serves for generation of raw
        (no preprocessing such as tokenization)
         batches
        Args:
            batch_size (int): number of samples in batch
            data_type (str): can be either 'train', 'test', or 'valid'
            shuffle (bool): whether to shuffle dataset before batching
        Returns:
            batch_gen (Generator): a generator that iterates over the part of the dataset defined by data_type
        """
        if shuffle is None:
            shuffle = self.shuffle

        data = self.data[data_type]
        data_len = len(data)
        order = list(range(data_len))
        if shuffle:
            rs = random.getstate()
            random.setstate(self.random_state)
            random.shuffle(order)
            self.random_state = random.getstate()
            random.setstate(rs)

        for i in range((data_len - 1) // batch_size + 1):
            yield list(zip(*[data[o] for o in order[i * batch_size:(i + 1) * batch_size]]))
Example #3
    def __init__(self, data: Dict[str, List[Tuple[Any, Any]]],
                 seed: int = None, shuffle: bool = True,
                 *args, **kwargs) -> None:
        r""" Dataiterator takes a dict with fields 'train', 'test', 'valid'. A list of samples (pairs x, y) is stored
        in each field.
        Args:
            data: list of (x, y) pairs. Each pair is a sample from the dataset. x as well as y can be a tuple
                of different input features.
            seed (int): random seed for data shuffling. Defaults to None
            shuffle: whether to shuffle data when batching (from config)
        """
        self.shuffle = shuffle

        rs = random.getstate()
        random.seed(seed)
        self.random_state = random.getstate()
        random.setstate(rs)

        self.train = data.get('train', [])
        self.valid = data.get('valid', [])
        self.test = data.get('test', [])
        self.split(*args, **kwargs)
        self.data = {
            'train': self.train,
            'valid': self.valid,
            'test': self.test,
            'all': self.train + self.test + self.valid
        }
def determine_action(state, q_table, random_state, score, max_score):
	# 'dict' in the original shadowed the builtin; renamed to q_table
	random.setstate(random_state[1])

	state_click = q_table.get((state, True), 0)
	state_nothing = q_table.get((state, False), 0)
	print(state)
	print(state_click)
	print(state_nothing)

	value = random.randint(1, 10)
	random_state[1] = random.getstate()
	if value < 3 and score >= max_score:
		value = random.randint(0, 1)
		random_state[1] = random.getstate()
		if value == 1:
			print("RNG VALUE OF 1")
			return True
		else:
			print("RNG VALUE OF 0")
			return False
	elif state_click > state_nothing:
		print("state_click greater than state_nothing")
		return True
	print("state_nothing greater than state_click")
	return False
    def __init__(self, data, seed=None, classes=None,
                 fields_to_merge=None, merged_field=None,
                 field_to_split=None, splitted_fields=None, splitting_proportions=None,
                 *args, **kwargs):

        rs = random.getstate()
        random.seed(seed)
        self.random_state = random.getstate()
        random.setstate(rs)

        self.train = data.get('train', [])
        self.test = data.get('test', [])
        self.data = {
            'train': self.train,
            'test': self.test,
            'all': self.train + self.test
        }

        self.classes = classes
        if fields_to_merge is not None:
            if merged_field is not None:
                # print("Merging fields <<{}>> to new field <<{}>>".format(fields_to_merge, merged_field))
                self._merge_data(fields_to_merge=fields_to_merge.split(' '), merged_field=merged_field)
            else:
                raise IOError("Given fields to merge BUT not given name of merged field")

        if field_to_split is not None:
            if splitted_fields is not None:
                # print("Splitting field <<{}>> to new fields <<{}>>".format(field_to_split, splitted_fields))
                self._split_data(field_to_split=field_to_split,
                                 splitted_fields=splitted_fields.split(" "),
                                 splitting_proportions=[float(s) for s in splitting_proportions.split(" ")])
            else:
                raise IOError("Given field to split BUT not given names of splitted fields")
Example #6
	def __call__(self):
		random.setstate(self.randstate)
		if self.game.day < 0:
			return True
		if self.state.winner() is not None:
			self.game.day = -1
			self.game.save()
			return True
		a = [None for i in range(len(self.players))]
		if not self.read_actions(a) and time.time() < self.game.countdown:
			return False
		if self.game.phase == qwr.PHASE_NIGHT:
			self.apply(a)
			self.state += 1
			self.game.day = self.state.day
			self.game.phase = qwr.PHASE_DAY
			self.game.countdown = int(time.time()) + (1000000 if self.game.limit_day is None else self.game.limit_day)
			self.game.save()
			self.randstate = random.getstate()
			return False
		if not self.lynch(a):
			self.game.phase += 1
			self.game.countdown = int(time.time()) + (1000000 if self.game.limit_day is None else self.game.limit_day)
			self.game.save()
			return False
		self.game.phase = qwr.PHASE_NIGHT
		self.game.countdown = int(time.time()) + (1000000 if self.game.limit_night is None else self.game.limit_night)
		self.game.save()
		self.randstate = random.getstate()
		return False
Example #7
 def custom_random(*args, **kwargs):
     test = random.getstate() == unit.random_state
     msg = ('Use no method from the random '
            'module other than random().')
     unit.assertTrue(test, msg)
     result = temp_random(*args, **kwargs)
     unit.random_state = random.getstate()
     return result
Example #8
def passGen(password):
	''' Generator that yields eight bits of psudo random data at a time seeded with the supplied password '''
	random.seed(password)
	state = random.getstate()
	while True:
		random.setstate(state)
		out = random.getrandbits(8)
		state = random.getstate()
		yield out
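Assuming import random alongside the definition above, a brief usage sketch: each value drawn from the generator is one byte of the password-derived stream ('hunter2' is a placeholder password).

gen = passGen('hunter2')
keystream = [next(gen) for _ in range(4)]   # four reproducible values in [0, 255]
print(keystream)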
Example #9
def main(args):
    #Setup that does not use the random number generator.
    randstate=random.getstate()#Just for verification purposes
    sm, original_sm, ofilename, energy, energies_to_track = setup_deterministic(args)
    assert randstate==random.getstate()#Just for verification purposes
    fud.pv("energies_to_track")
    #Eval-energy mode
    if args.eval_energy:
        sm.bg.add_all_virtual_residues()
        fud.pv('energy.eval_energy(sm, verbose=True, background=False)')
        if sm.constraint_energy:
            fud.pv('sm.constraint_energy.eval_energy(sm, verbose=True, background=False)')
        if sm.junction_constraint_energy:
            fud.pv('sm.junction_constraint_energy.eval_energy(sm, verbose=True, background=False)')
        for track_energy in energies_to_track:
            fud.pv('track_energy.eval_energy(sm, verbose=True, background=False)')
        sys.exit(0) 
  
    #Set-up the random Number generator.
    #Until here, no call to random should be made.
    if args.seed:
        seed_num=args.seed
    else:
        seed_num = random.randint(0, 4294967295)  # 4294967295 is the maximal seed value for numpy
    random.seed(seed_num)
    np.random.seed(seed_num)
    #Main function, dependent on random.seed        
    with open_for_out(ofilename) as out_file:
        if isinstance(energy, fbe.CombinedEnergy):
            energies_to_track+=energy.uncalibrated_energies
        elif isinstance(energy, fbe.CoarseGrainEnergy):
            energies_to_track+=[energy]
        stat=setup_stat(out_file, sm, args, energies_to_track, original_sm)
        try:
            print ("# Random Seed: {}".format(seed_num), file=out_file)
            print ("# Command: `{}`".format(" ".join(sys.argv)), file=out_file)
            for e in energy.iterate_energies():
                if isinstance(e, fbe.FPPEnergy):
                    print("# Used FPP energy with options: --scale {} --ref-img {} "
                          "--fpp-landmarks {}".format(e.scale, e.ref_image, 
                                                      ":".join(",".join(map(str,x)) for x in e.landmarks)),
                          file=out_file)
            if args.exhaustive:
                sampler = fbs.ExhaustiveExplorer(sm, energy, stat, args.exhaustive, args.start_from_scratch)
            elif args.new_ml:
                sampler = fbs.ImprovedMultiloopMCMC(sm, energy, stat, 
                                          start_from_scratch=args.start_from_scratch,
                                          dump_measures=args.dump_energies)
            else:
                sampler = fbs.MCMCSampler(sm, energy, stat, 
                                          start_from_scratch=args.start_from_scratch,
                                          dump_measures=args.dump_energies)
            for i in range(args.iterations):
                sampler.step()
        finally: #Clean-up 
            print("INFO: Random seed was {}".format(seed_num), file=sys.stderr)
def test_find_does_not_pollute_state():
    with deterministic_PRNG():

        find(st.random_module(), lambda r: True)
        state_a = random.getstate()

        find(st.random_module(), lambda r: True)
        state_b = random.getstate()

        assert state_a != state_b
 def __iter__(self):
     y = self._y
     num_rows = self.__num_rows
     seed(self.__random_state_seed)
     random_state = getstate()
     for rows in num_rows:
         setstate(random_state)
         all_indices = np.sort(sample(np.arange(0, y.shape[0]), rows))
         yield (all_indices, self._col_names, {'rows': rows})
         random_state = getstate()
def test_uses_provided_seed():
    import random
    initial = random.getstate()

    @given(integers())
    @seed(42)
    def test_foo(x):
        pass
    test_foo()
    assert random.getstate() == initial
def test_uses_provided_seed():
    import random
    random.seed(0)
    initial = random.getstate()

    @given(integers())
    @seed(42)
    def test_foo(x):
        pass

    test_foo()
    assert hash(repr(random.getstate())) == hash(repr(initial))
def randomize():
    global im
    listoper = [shifter,degrader,tear,blur, pixelate, disperse, graindrip, seamer]
    randint1 = random.randint(0, 30)
    randint2 = random.randint(0,2000)
    randint3 = random.randint(0,2000)
    randint4 = random.randint(1,3)
    randint5 = random.randint(1, 300)
    cmd = random.choice(listoper)
    print(cmd)
    if cmd == pixelate:
        im2 = pixelate(im, randint1)
        im = im2
        tkimage2 = ImageTk.PhotoImage(im2)
        canvas.configure(image = tkimage2)
        canvas.image = tkimage2
        imagecopy = im2.copy()
        imagelist.append(imagecopy)
        statelist.append(random.getstate())
        operationlist.append('pixelate')
    elif cmd == disperse:
        im2 = disperse(im, randint1, randint2, randint3)
        im = im2
        tkimage2 = ImageTk.PhotoImage(im2)
        canvas.configure(image = tkimage2)
        canvas.image = tkimage2
        imagecopy = im2.copy()
        imagelist.append(imagecopy)
        statelist.append(random.getstate())
        operationlist.append('disperse')
    elif cmd == graindrip:
        Gain = float(random.random())
        Height = int(randint2)
        im2 = graindrip(im, Gain, Height)
        im = im2  # keep the module-level image in sync, as the other branches do
        tkimage2 = ImageTk.PhotoImage(im2)
        canvas.configure(image = tkimage2)
        canvas.image = tkimage2
        imagecopy = im2.copy()
        imagelist.append(imagecopy)
        statelist.append(random.getstate())
        operationlist.append('ndrip')
    elif cmd == seamer:
        im2 = seamer(im,randint5, randint4)
        im = im2
        tkimage2 = ImageTk.PhotoImage(im2)
        canvas.configure(image = tkimage2)
        canvas.image = tkimage2
        imagecopy = im2.copy()
        imagelist.append(imagecopy)
        statelist.append(random.getstate())
        operationlist.append('seamer')
    else:
        cmd()
def test_prng_state_unpolluted_by_given_issue_1266():
    # Checks that @given doesn't leave the global PRNG in a particular
    # modified state; it may have no effect or a random effect, but not
    # a consistent end-state.
    first = random.getstate()
    random_func()
    second = random.getstate()
    random_func()
    third = random.getstate()
    if first == second:
        assert second == third
    else:
        assert second != third
def test_given_does_not_pollute_state():
    with deterministic_PRNG():

        @given(st.random_module())
        def test(r):
            pass

        test()
        state_a = random.getstate()

        test()
        state_b = random.getstate()

        assert state_a != state_b
 def changeseed(self):
     global seed, seedlabel, statelist
     try:
         statelist = []
         seed = int(self.entry1.get())
         random.seed(seed)
         seedlabel.config(text = "Seed #: " + str(seed))
         print "Your new seed is: " + str(seed)
         root.update()
         statelist.append(random.getstate())
         random.setstate(random.getstate())  # no-op: re-applies the state just read
         self.top.destroy()
     except ValueError:
         InvalidSeed(root)
Example #18
def test_jumpahead():
    """jumpahead will change the pseudo-random number generator's internal state.

    Note: random.jumpahead() exists only in Python 2; it was removed in Python 3.
    """
    random.seed()
    state1 = random.getstate()
    random.jumpahead(20)
    state2 = random.getstate()
    rep = 0
    for ind in range(len(state1)):
        elem1 = state1[ind]
        elem2 = state2[ind]
        if elem1 == elem2:
            rep += 1
    if rep > len(state1) / 2:
        raise AssertionError("state1 and state2 can't be the same")
Example #19
 def wrapper(*args, **kw):
     global _RANDOM_STATE, _SANDBOXED_MODE
     if not _SANDBOXED_MODE:
         old_state = random.getstate()
         random.setstate(_RANDOM_STATE)
         _SANDBOXED_MODE = True
         try:
             val = func(*args, **kw)
         finally:
             _RANDOM_STATE = random.getstate()
             random.setstate(old_state)
             _SANDBOXED_MODE = False
     else:
         val = func(*args, **kw)
     return val
Example #20
def lorem(randseed=None, count=1, method=None):
    u"""
    Creates Lorem Ipsum text.

    Usage format:

        {% lorem [randseed] [count] [method] %}

    ``randseed`` is any hashable object used to initialize the random number generator.
    If ``randseed`` is not given, the common "Lorem ipsum dolor sit..." text is used.

    ``count`` is a number of paragraphs or sentences to generate (default is 1).

    ``method`` is either ``p`` for HTML paragraphs enclosed in ``<p>`` tags, or ``b`` for
    plain-text paragraph blocks (default is ``b``).

    Notice: This filter is a rewritten version of the ``lorem`` filter from the ``webdesign`` module in the
    default Django package ``django.contrib.webdesign``. The original ``lorem`` filter does not produce stable
    random text, so its generated paragraphs change on every page refresh. We stabilize the generated
    text by setting a fixed randseed before generating the paragraphs.
    """

    state = random.getstate()
    random.seed(randseed)
    res = paragraphs(count, common=(randseed is None))
    random.setstate(state)

    if method == u'p':
        res = [u'<p>{}</p>'.format(p) for p in res]
    return u'\n'.join(res)
Example #21
 def test_reseed_if_needed(self):
     # we have to set a seed in order to be able to get state
     random.seed(codecs.encode("abcdefg", "utf-8"))
     state = random.getstate()
     _reseed_if_needed(using_sysrandom=True, secret_key=None)
     # did NOT reseed
     assert state == random.getstate()
     # monkeypatch
     saved = bokeh.util.session_id.random
     try:
         bokeh.util.session_id.random = random
         _reseed_if_needed(using_sysrandom=False, secret_key="abc")
         # DID reseed
         assert state != random.getstate()
     finally:
         bokeh.util.session_id.random = saved
    def __makeResistanceToon(self):
        if self.resistanceToon:
            return
        npc = Toon.Toon()
        npc.setName(TTLocalizer.ResistanceToonName)
        npc.setPickable(0)
        npc.setPlayerType(NametagGroup.CCNonPlayer)
        dna = ToonDNA.ToonDNA()
        dna.newToonRandom(11237, 'f', 1)
        dna.head = 'pls'
        npc.setDNAString(dna.makeNetString())
        npc.animFSM.request('neutral')
        self.resistanceToon = npc
        self.resistanceToon.setPosHpr(*ToontownGlobals.CashbotRTBattleOneStartPosHpr)
        state = random.getstate()
        random.seed(self.doId)
        self.resistanceToon.suitType = SuitDNA.getRandomSuitByDept('m')
        random.setstate(state)
        self.fakeGoons = []
        for i in range(self.numFakeGoons):
            goon = DistributedCashbotBossGoon.DistributedCashbotBossGoon(base.cr)
            goon.doId = -1 - i
            goon.setBossCogId(self.doId)
            goon.generate()
            goon.announceGenerate()
            self.fakeGoons.append(goon)

        self.__hideFakeGoons()
Example #23
    def full_restart(self):
        """Fully restart the game, clearing all history, etc."""
        # Initialize real and virtual keyboards.
        self.keyboard = Keyboard()
        self.virtual_keyboards = []
        
        # Initialize game objects
        self.players = [Ship(self.keyboard, self.view_size / 2.0)]
        self.bullets = []
        self.rand_state = random.getstate()

        self.asteroids = [Asteroid(self, clear_zone = self.view_size / 2.0,
                                   seed = random.random())
                          for i in range(self.num_asteroids)]
        
        self.DEBUG = DataToggle(source = self.keyboard[sf.Key.NUM0],
                                initVal = False)
                                
#        self.RECORDING = Toggle(source = self.keyboard[sf.Key.T],
#                                initVal = False)

        self.won = False

        # Start the system running
        self.running = DataToggle(
            source = DataOr(self.keyboard[sf.Key.ESCAPE],
                            DataAnd(self.keyboard[sf.Key.Q], 
                                    DataOr(self.keyboard[sf.Key.L_SYSTEM],
                                           self.keyboard[sf.Key.R_SYSTEM]))),
                                  initVal = True)
Example #24
def gradcheck(f, x):
    """ 
    Gradient check for a function f 
    - f should be a function that takes a single argument and outputs the cost
    - x is the point (numpy array) to check the gradient at
    """ 

    rndstate = random.getstate()
    random.setstate(rndstate)  # fix the RNG state so f sees the same randomness on every call
    fx = f(x) # Evaluate function value at original point
    h = 1e-4

    numgrad = np.zeros_like(x)
    # iterate over all indexes in x
    it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])
    while not it.finished:

        # evaluate function at x+h
        ix = it.multi_index
        oldval = x[ix]
        x[ix] = oldval + h # increment by h
        random.setstate(rndstate)  # re-apply the saved state before evaluating
        fxph = f(x) # evaluate f(x + h)
        x[ix] = oldval - h
        random.setstate(rndstate)  # same state again, so both evaluations match
        fxmh = f(x) # evaluate f(x - h)
        x[ix] = oldval # restore
        numgrad[ix] = (fxph - fxmh) / (2 * h) # the slope
        it.iternext() # step to next dimension

    return numgrad
def test_seeds_off_random():
    s = settings(max_shrinks=0, database=None)
    r = random.getstate()
    x = find(st.integers(), lambda x: True, settings=s)
    random.setstate(r)
    y = find(st.integers(), lambda x: True, settings=s)
    assert x == y
Example #26
    def shuffle(self, i, seed=None):

        if seed is not None:
            rand_state = random.getstate()
            random.seed(seed)

        move_list = []
        last_face = None

        for _ in range(i):

            face = last_face
            while face == last_face:
                face = Face.random()

            turn_type = TurnType.random()

            move_list.append((face, turn_type))

            last_face = face

        shuffle_algorithm = Algorithm(reversed(move_list))
        self.apply_algorithm(shuffle_algorithm)

        if seed is not None:
            random.setstate(rand_state)

        return shuffle_algorithm
def simplified_data(num_train, num_dev, num_test):
    rndstate = random.getstate()
    random.seed(0)
    trees = loadTrees('train') + loadTrees('dev') + loadTrees('test')
    
    #filter extreme trees
    pos_trees = [t for t in trees if t.root.label==4]
    neg_trees = [t for t in trees if t.root.label==0]

    #binarize labels
    binarize_labels(pos_trees)
    binarize_labels(neg_trees)
    
    #split into train, dev, test
    print(len(pos_trees), len(neg_trees))
    pos_trees = sorted(pos_trees, key=lambda t: len(t.get_words()))
    neg_trees = sorted(neg_trees, key=lambda t: len(t.get_words()))
    num_train //= 2
    num_dev //= 2
    num_test //= 2
    train = pos_trees[:num_train] + neg_trees[:num_train]
    dev = pos_trees[num_train : num_train+num_dev] + neg_trees[num_train : num_train+num_dev]
    test = pos_trees[num_train+num_dev : num_train+num_dev+num_test] + neg_trees[num_train+num_dev : num_train+num_dev+num_test]
    random.shuffle(train)
    random.shuffle(dev)
    random.shuffle(test)
    random.setstate(rndstate)


    return train, dev, test
Example #28
def get_random_state():
    state = random.getstate()
    state = pickle.dumps(state)
    state = binascii.b2a_hex(state)
    state = state.decode('ascii')  # keep the decoded hex string (the original discarded it)
    state = int(state, 16)
    return state
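The inverse direction is not part of the snippet; a hypothetical set_random_state that undoes the pickle/hex/int round-trip could look like this:

import binascii
import pickle
import random

def set_random_state(state_int):
    """Hypothetical inverse of get_random_state: rebuild and apply the state."""
    state_hex = format(state_int, 'x').encode('ascii')
    if len(state_hex) % 2:                 # b2a_hex output always has even length
        state_hex = b'0' + state_hex
    state = pickle.loads(binascii.a2b_hex(state_hex))
    random.setstate(state)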
def get_random_string(length=12,
                      allowed_chars='abcdefghijklmnopqrstuvwxyz'
                                    'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789',
                      secret_key=None):
    """
    Returns a securely generated random string.
    The default length of 12 with the a-z, A-Z, 0-9 character set returns
    a 71-bit value. log_2((26+26+10)^12) =~ 71 bits
    """
    assert secret_key is not None, "Need secret_key"
    if not using_sysrandom:
        # This is ugly, and a hack, but it makes things better than
        # the alternative of predictability. This re-seeds the PRNG
        # using a value that is hard for an attacker to predict, every
        # time a random string is required. This may change the
        # properties of the chosen random sequence slightly, but this
        # is better than absolute predictability.
        random.seed(
            hashlib.sha256(
                ("%s%s%s" % (
                    random.getstate(),
                    time.time(),
                    secret_key)).encode('utf-8')
            ).digest())
    return ''.join(random.choice(allowed_chars) for i in range(length))
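A quick arithmetic check of the entropy figure claimed in the docstring:

import math
print(math.log2(62 ** 12))   # ~71.45: 12 characters over a 62-symbol alphabet give ~71 bits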
Example #30
    def __init__(self, data, dataset_path, shuffle=False):
        r""" Dataset takes a dict with fields 'train', 'test', 'valid'. A list of samples (pairs x, y) is stored
             in each field.

             Args:
                data: dict with 'train', 'test' and 'valid' fields, each storing a list of (x, y) pairs. x as well
                    as y can be a tuple of different input features.
        """
        self.shuffle = shuffle
        self.random_state = random.getstate()
        # TODO: include slot vals to dstc2.tar.gz
        dataset_path = pathlib.Path(dataset_path) / 'slot_vals.json'
        self._build_slot_vals(dataset_path)
        with open(dataset_path) as f:
            self._slot_vals = json.load(f)
        for data_type in ['train', 'test', 'valid']:
            bio_markup_data = self._preprocess(data.get(data_type, []))
            setattr(self, data_type, bio_markup_data)
        self.data = {
            'train': self.train,
            'valid': self.valid,
            'test': self.test,
            'all': self.train + self.test + self.valid
        }
        self.shuffle = shuffle
        self.seed = None
if args.dataset == 'cifar10':
    acc_type = 'ori-test'
    val_acc_type = 'x-valid'
else:
    acc_type = 'x-test'
    val_acc_type = 'x-valid'

runs = trange(args.n_runs, desc='acc: ')
for N in runs:
    start = time.time()
    indices = np.random.randint(0, len(searchspace), args.n_samples)
    scores = []

    npstate = np.random.get_state()
    ranstate = random.getstate()
    torchstate = torch.random.get_rng_state()
    for arch in indices:
        try:
            uid = searchspace[arch]
            network = searchspace.get_network(uid)
            network.to(device)
            if args.dropout:
                add_dropout(network, args.sigma)
            if args.init != '':
                init_network(network, args.init)
            if 'hook_' in args.score:
                network.K = np.zeros((args.batch_size, args.batch_size))

                def counting_forward_hook(module, inp, out):
                    try:
Example #32
def load_data(
        data_cfg: dict) -> (Dataset, Dataset, Dataset, Vocabulary, Vocabulary):
    """
    Load train, dev and optionally test data as specified in configuration.
    Vocabularies are created from the training set with a limit of `voc_limit`
    tokens and a minimum token frequency of `voc_min_freq`
    (specified in the configuration dictionary).

    The training data is filtered to include sentences up to `max_sent_length`
    on source and target side.

    If you set ``random_train_subset``, a random selection of this size is used
    from the training set instead of the full training set.

    If you set ``random_dev_subset``, a random selection of this size is used
    from the development set instead of the full development set.

    :param data_cfg: configuration dictionary for data
        ("data" part of configuration file)
    :return:
        - train_data: training dataset
        - dev_data: development dataset
        - test_data: test dataset if given, otherwise None
        - gls_vocab: gloss vocabulary extracted from training data
        - txt_vocab: spoken text vocabulary extracted from training data
    """

    data_path = data_cfg.get("data_path", "./data")
    train_paths = [os.path.join(data_path, x) for x in data_cfg["train"]]
    dev_paths = os.path.join(data_path, data_cfg["dev"])
    test_paths = os.path.join(data_path, data_cfg["test"])
    pad_feature_size = data_cfg["feature_size"]
    pad_feature_size_keypoints = data_cfg["feature_size_keypoints"]

    level = data_cfg["level"]
    txt_lowercase = data_cfg["txt_lowercase"]
    max_sent_length = data_cfg["max_sent_length"]
    name_keypoint = str(data_cfg['name'])

    def tokenize_text(text):
        if level == "char":
            return list(text)
        else:
            return text.split()

    def tokenize_features(features):
        ft_list = torch.split(features, 1, dim=0)
        return [ft.squeeze() for ft in ft_list]

    # NOTE (Cihan): The something was necessary to match the function signature.
    def stack_features(features, something):
        return torch.stack([torch.stack(ft, dim=0) for ft in features], dim=0)

    sequence_field = data.RawField()
    signer_field = data.RawField()

    sgn_field = data.Field(
        use_vocab=False,
        init_token=None,
        dtype=torch.float32,
        preprocessing=tokenize_features,
        tokenize=lambda features: features,  # TODO (Cihan): is this necessary?
        batch_first=True,
        include_lengths=True,
        postprocessing=stack_features,
        pad_token=torch.zeros((pad_feature_size, )),
    )

    keypoints_field = data.Field(
        use_vocab=False,
        init_token=None,
        dtype=torch.float32,
        preprocessing=tokenize_features,
        tokenize=lambda features: features,  # TODO (Cihan): is this necessary?
        batch_first=True,
        include_lengths=True,
        postprocessing=stack_features,
        pad_token=torch.zeros((pad_feature_size_keypoints, )),
    )

    gls_field = data.Field(
        pad_token=PAD_TOKEN,
        tokenize=tokenize_text,
        batch_first=True,
        lower=False,
        include_lengths=True,
    )

    txt_field = data.Field(
        init_token=BOS_TOKEN,
        eos_token=EOS_TOKEN,
        pad_token=PAD_TOKEN,
        tokenize=tokenize_text,
        unk_token=UNK_TOKEN,
        batch_first=True,
        lower=txt_lowercase,
        include_lengths=True,
    )

    train_data = SignTranslationDataset(
        path=train_paths,
        name=name_keypoint,
        fields=(sequence_field, signer_field, sgn_field, keypoints_field,
                gls_field, txt_field),
        filter_pred=lambda x: len(vars(x)["sgn"]) <= max_sent_length and len(
            vars(x)["txt"]) <= max_sent_length,
    )

    gls_max_size = data_cfg.get("gls_voc_limit", sys.maxsize)
    gls_min_freq = data_cfg.get("gls_voc_min_freq", 1)
    txt_max_size = data_cfg.get("txt_voc_limit", sys.maxsize)
    txt_min_freq = data_cfg.get("txt_voc_min_freq", 1)

    gls_vocab_file = data_cfg.get("gls_vocab", None)
    txt_vocab_file = data_cfg.get("txt_vocab", None)

    gls_vocab = build_vocab(
        field="gls",
        min_freq=gls_min_freq,
        max_size=gls_max_size,
        dataset=train_data,
        vocab_file=gls_vocab_file,
    )
    txt_vocab = build_vocab(
        field="txt",
        min_freq=txt_min_freq,
        max_size=txt_max_size,
        dataset=train_data,
        vocab_file=txt_vocab_file,
    )
    random_train_subset = data_cfg.get("random_train_subset", -1)
    if random_train_subset > -1:
        # select this many training examples randomly and discard the rest
        keep_ratio = random_train_subset / len(train_data)
        keep, _ = train_data.split(split_ratio=[keep_ratio, 1 - keep_ratio],
                                   random_state=random.getstate())
        train_data = keep

    dev_data = SignTranslationDataset(
        path=dev_paths,
        name=name_keypoint,
        fields=(sequence_field, signer_field, sgn_field, keypoints_field,
                gls_field, txt_field),
    )
    random_dev_subset = data_cfg.get("random_dev_subset", -1)
    if random_dev_subset > -1:
        # select this many development examples randomly and discard the rest
        keep_ratio = random_dev_subset / len(dev_data)
        keep, _ = dev_data.split(split_ratio=[keep_ratio, 1 - keep_ratio],
                                 random_state=random.getstate())
        dev_data = keep

    # check if target exists
    test_data = SignTranslationDataset(
        path=test_paths,
        name=name_keypoint,
        fields=(sequence_field, signer_field, sgn_field, keypoints_field,
                gls_field, txt_field),
    )

    gls_field.vocab = gls_vocab
    txt_field.vocab = txt_vocab
    return train_data, dev_data, test_data, gls_vocab, txt_vocab
def worker_init_fn(worker_id):
    np.random.seed(np.random.get_state()[1][0] + worker_id)
    random.seed(random.getstate()[1][0] + worker_id)
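Indexing getstate()[1][0] relies on the internal layout of the Mersenne Twister state tuple. A common alternative sketch (an assumption, not from the source) derives the per-worker seed from torch's base seed, which PyTorch already offsets by worker id:

import random
import numpy as np
import torch

def worker_init_fn(worker_id):
    seed = torch.initial_seed() % 2**32   # per-worker base seed from PyTorch
    np.random.seed(seed)                  # numpy seeds must fit in 32 bits
    random.seed(seed)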
Example #34
def sample_user(number_records=1482, seed=42, pct_in_network=0.8):
    old_state = random.getstate()
    random.seed(seed)

    towers = {
        701: (42.3555, -71.099541),
        702: (42.359039, -71.094595),
        703: (42.360481, -71.087321),
        704: (42.361013, -71.097868),
        705: (42.370849, -71.114613),
        706: (42.3667427, -71.1069847),
        707: (42.367589, -71.076537)
    }
    towers_position = [
        Position(antenna=k, location=v) for k, v in towers.items()
    ]

    ego_records = [
        random_record(position=random.choice(towers_position))
        for _ in range(number_records)
    ]
    user, _ = bc.io.load("sample_user",
                         ego_records,
                         towers,
                         None,
                         describe=False)

    # create network
    correspondents = set([record.correspondent_id for record in ego_records])
    correspondent_records = {}
    connections = {}

    n_in_network = int(len(correspondents) * pct_in_network)
    if n_in_network % 2 != 0:
        n_in_network = n_in_network - 1

    in_network_correspondents = random.sample(sorted(correspondents), n_in_network)  # sample() needs a sequence, not a set, on Python 3.11+

    def reverse_records(records, current_owner):
        for r in records:
            r.direction = 'out' if r.direction == 'in' else 'in'
            r.correspondent_id = current_owner
        return records

    # set records from ego
    for c_id in sorted(in_network_correspondents):
        reciprocal_records = [r for r in ego_records
                              if r.correspondent_id == c_id]
        reciprocal_records = reverse_records(copy.deepcopy(reciprocal_records),
                                             "ego")
        correspondent_records[c_id] = reciprocal_records

    def generate_group_with_random_links(pct_users_in_group):
        n_in_group = int(len(correspondents) * pct_users_in_group)
        group = random.sample(sorted(non_grouped_correspondents), n_in_group)
        networkusers_group = list()
        for user in group:
            if user in in_network_correspondents:
                networkusers_group.append(user)

        def create_pair(source):
            user_pair = [source, random.sample(group, 1)[0]]
            if user_pair[0] in non_grouped_correspondents:
                non_grouped_correspondents.remove(user_pair[0])
            if user_pair[1] in non_grouped_correspondents:
                non_grouped_correspondents.remove(user_pair[1])

            extra_records = [
                random_record(position=random.choice(towers_position),
                              interaction=random.choice(
                                  ['text', 'call', 'call']),
                              correspondent_id=user_pair[1])
                for _ in range(random.randrange(25, 150))
            ]
            correspondent_records[user_pair[0]].extend(extra_records)
            if (user_pair[1] in in_network_correspondents):
                correspondent_records[user_pair[1]].extend(
                    reverse_records(copy.deepcopy(extra_records),
                                    user_pair[0]))

        # create pairs of users
        for i in range(len(networkusers_group)):
            create_pair(networkusers_group[i])
            if random.choice(range(2)) == 0:
                create_pair(networkusers_group[i])

    non_grouped_correspondents = copy.deepcopy(correspondents)
    for i in range(4):
        generate_group_with_random_links(pct_users_in_group=0.4 - i * 0.1)

    # create user object
    for c_id in sorted(correspondents):
        if (c_id in in_network_correspondents):
            correspondent_user, _ = bc.io.load(c_id,
                                               correspondent_records[c_id],
                                               towers,
                                               None,
                                               describe=False)
        else:
            correspondent_user = None
        connections[c_id] = correspondent_user

    # return the network dictionary sorted by key
    user.network = OrderedDict(sorted(connections.items(), key=lambda t: t[0]))
    user.recompute_missing_neighbors()

    random.setstate(old_state)
    return user
Example #35
    A_TEXT = Field(tokenize=lambda sen: list(sen),
                   init_token="<sos>",
                   eos_token="<eos>")

    # associate the text in the 'Question' column with the Q_TEXT field,
    # and 'Answer' with A_TEXT field
    data_fields = [('Question', Q_TEXT), ('Answer', A_TEXT)]

    # train, val = TabularDataset.splits(path=PATH, train='train.csv', validation='val.csv', format='csv',
    #                                    fields=data_fields, skip_header=True)
    tab_dataset = TabularDataset(path=f'{args.path}/all.csv',
                                 format='csv',
                                 fields=data_fields,
                                 skip_header=True)
    train, val, test = tab_dataset.split(split_ratio=[0.5, 0.2, 0.3],
                                         random_state=random.getstate())

    Q_TEXT.build_vocab(train)
    A_TEXT.build_vocab(train)
    print('Question Tokenize')
    print(list(Q_TEXT.vocab.stoi.items()))
    print('Answer Tokenize')
    print(list(A_TEXT.vocab.stoi.items()))
    # print(list(A_TEXT.vocab.itos))

    INPUT_DIM = len(Q_TEXT.vocab)
    OUTPUT_DIM = len(A_TEXT.vocab)

    # BATCH_SIZE = 512
    # ENC_EMB_DIM = 256  # 256
    # DEC_EMB_DIM = 256  # 256
Example #36
def main(settings, window=dummy_window()):

    start = time.perf_counter()  # time.clock() in the original; removed in Python 3.8

    logger = logging.getLogger('')

    worlds = []

    allowed_tricks = {}
    for trick in logic_tricks.values():
        settings.__dict__[
            trick['name']] = trick['name'] in settings.allowed_tricks

    # we load the rom before creating the seed so that error get caught early
    if settings.compress_rom == 'None' and not settings.create_spoiler:
        raise Exception(
            '`No Output` must have spoiler enabled to produce anything.')

    if settings.compress_rom != 'None':
        window.update_status('Loading ROM')
        rom = LocalRom(settings)

    if not settings.world_count:
        settings.world_count = 1
    if settings.world_count < 1 or settings.world_count > 255:
        raise Exception('World Count must be between 1 and 255')
    if settings.player_num > settings.world_count or settings.player_num < 1:
        if settings.compress_rom not in ['None', 'Patch']:
            raise Exception('Player Num must be between 1 and %d' %
                            settings.world_count)
        else:
            settings.player_num = 1

    settings.remove_disabled()

    logger.info('OoT Randomizer Version %s  -  Seed: %s\n\n', __version__,
                settings.seed)
    random.seed(settings.numeric_seed)
    for i in range(0, settings.world_count):
        worlds.append(World(settings))

    window.update_status('Creating the Worlds')
    for id, world in enumerate(worlds):
        world.id = id
        logger.info('Generating World %d.' % id)

        window.update_progress(0 + 1 * (id + 1) / settings.world_count)
        logger.info('Creating Overworld')

        # Determine MQ Dungeons
        td_count = len(world.dungeon_mq)
        if world.mq_dungeons_random:
            world.mq_dungeons = random.randint(0, td_count)
        mqd_count = world.mq_dungeons
        mqd_picks = random.sample(list(world.dungeon_mq), mqd_count)
        for dung in mqd_picks:
            world.dungeon_mq[dung] = True

        overworld_data = os.path.join(data_path('World'), 'Overworld.json')
        world.load_regions_from_json(overworld_data)

        create_dungeons(world)

        world.initialize_entrances()

        if settings.shopsanity != 'off':
            world.random_shop_prices()

        window.update_progress(0 + 4 * (id + 1) / settings.world_count)
        logger.info('Calculating Access Rules.')
        set_rules(world)

        window.update_progress(0 + 5 * (id + 1) / settings.world_count)
        logger.info('Generating Item Pool.')
        generate_itempool(world)

    window.update_status('Placing the Items')
    logger.info('Fill the world.')
    distribute_items_restrictive(window, worlds)
    window.update_progress(35)

    spoiler = Spoiler(worlds)
    cosmetics_log = None
    if settings.create_spoiler:
        window.update_status('Calculating Spoiler Data')
        logger.info('Calculating playthrough.')
        create_playthrough(spoiler)
        window.update_progress(50)
    if settings.create_spoiler or settings.hints != 'none':
        window.update_status('Calculating Hint Data')
        State.update_required_items(spoiler)
        for world in worlds:
            world.update_useless_areas(spoiler)
            buildGossipHints(spoiler, world)
        window.update_progress(55)
    spoiler.build_file_hash()

    logger.info('Patching ROM.')

    settings_string_hash = hashlib.sha1(
        settings.settings_string.encode('utf-8')).hexdigest().upper()[:5]
    if settings.world_count > 1:
        outfilebase = 'OoT_%s_%s_W%d' % (settings_string_hash, settings.seed,
                                         settings.world_count)
    else:
        outfilebase = 'OoT_%s_%s' % (settings_string_hash, settings.seed)

    output_dir = default_output_path(settings.output_dir)

    if settings.compress_rom == 'Patch':
        rng_state = random.getstate()
        file_list = []
        window.update_progress(65)
        for world in worlds:
            if settings.world_count > 1:
                window.update_status('Patching ROM: Player %d' %
                                     (world.id + 1))
                patchfilename = '%sP%d.zpf' % (outfilebase, world.id + 1)
            else:
                window.update_status('Patching ROM')
                patchfilename = '%s.zpf' % outfilebase

            random.setstate(rng_state)
            if not settings.cosmetics_only:
                patch_rom(spoiler, world, rom)
            patch_cosmetics(settings, rom)
            window.update_progress(65 + 20 *
                                   (world.id + 1) / settings.world_count)

            window.update_status('Creating Patch File')
            output_path = os.path.join(output_dir, patchfilename)
            file_list.append(patchfilename)
            create_patch_file(rom, output_path)
            rom.restore()
            window.update_progress(65 + 30 *
                                   (world.id + 1) / settings.world_count)

        if settings.world_count > 1:
            window.update_status('Creating Patch Archive')
            output_path = os.path.join(output_dir, '%s.zpfz' % outfilebase)
            with zipfile.ZipFile(output_path, mode="w") as patch_archive:
                for file in file_list:
                    file_path = os.path.join(output_dir, file)
                    patch_archive.write(file_path,
                                        file,
                                        compress_type=zipfile.ZIP_DEFLATED)
            for file in file_list:
                os.remove(os.path.join(output_dir, file))
        window.update_progress(95)

    elif settings.compress_rom != 'None':
        window.update_status('Patching ROM')
        patch_rom(spoiler, worlds[settings.player_num - 1], rom)
        cosmetics_log = patch_cosmetics(settings, rom)
        window.update_progress(65)

        window.update_status('Saving Uncompressed ROM')
        if settings.world_count > 1:
            filename = "%sP%d.z64" % (outfilebase, settings.player_num)
        else:
            filename = '%s.z64' % outfilebase
        output_path = os.path.join(output_dir, filename)
        rom.write_to_file(output_path)
        if settings.compress_rom == 'True':
            window.update_status('Compressing ROM')
            logger.info('Compressing ROM.')

            if is_bundled():
                compressor_path = "."
            else:
                compressor_path = "Compress"

            if platform.system() == 'Windows':
                if 8 * struct.calcsize("P") == 64:
                    compressor_path += "\\Compress.exe"
                else:
                    compressor_path += "\\Compress32.exe"
            elif platform.system() == 'Linux':
                if platform.uname()[4] == 'aarch64' or platform.uname(
                )[4] == 'arm64':
                    compressor_path += "/Compress_ARM64"
                else:
                    compressor_path += "/Compress"
            elif platform.system() == 'Darwin':
                compressor_path += "/Compress.out"
            else:
                compressor_path = ""
                logger.info('OS not supported for compression')

            if compressor_path != "":
                run_process(window, logger, [
                    compressor_path, output_path,
                    output_path[:output_path.rfind('.')] + '-comp.z64'
                ])
            os.remove(output_path)
        window.update_progress(95)

    for world in worlds:
        for setting in world.settings.__dict__:
            world.settings.__dict__[setting] = world.__dict__[setting]

    if settings.create_spoiler:
        window.update_status('Creating Spoiler Log')
        spoiler.to_file(
            os.path.join(output_dir, '%s_Spoiler.txt' % outfilebase))
    else:
        window.update_status('Creating Settings Log')
        spoiler.to_file(
            os.path.join(output_dir, '%s_Settings.txt' % outfilebase))

    if settings.create_cosmetics_log and cosmetics_log:
        window.update_status('Creating Cosmetics Log')
        if settings.world_count > 1:
            filename = "%sP%d_Cosmetics.txt" % (outfilebase,
                                                settings.player_num)
        else:
            filename = '%s_Cosmetics.txt' % outfilebase
        cosmetics_log.to_file(os.path.join(output_dir, filename))

    window.update_progress(100)
    window.update_status('Success: Rom patched successfully')
    logger.info('Done. Enjoy.')
    logger.debug('Total Time: %s', time.perf_counter() - start)

    return worlds[settings.player_num - 1]
Example #37
def save_checkpoint(iteration,
                    model,
                    optimizer,
                    lr_scheduler,
                    args,
                    tag=None,
                    barrier=True,
                    only_changed_parameters=False,
                    no_deepspeed=False,
                    no_save_optim=False):
    """Save a model checkpoint."""
    if tag is None:
        tag = str(iteration)
    if args.deepspeed and not no_deepspeed:
        save_ds_checkpoint(iteration, model, lr_scheduler, args, tag=tag)
    else:
        # Only rank zero of the data parallel group writes to disk.

        if mpu.get_data_parallel_rank() == 0:
            checkpoint_name = get_checkpoint_name(args.save, tag)
            print(
                'global rank {} is saving checkpoint at iteration {:7d} to {}'.
                format(torch.distributed.get_rank(), iteration,
                       checkpoint_name))
            sd = {'iteration': iteration}
            if args.deepspeed:
                model = model.module
            state_dict = model.state_dict()
            if only_changed_parameters:
                requires_grad_dict = {}
                for name, parameter in model.named_parameters():
                    requires_grad_dict[name] = parameter.requires_grad
                state_dict = {
                    key: value
                    for key, value in state_dict.items()
                    if requires_grad_dict[key]
                }
            sd['module'] = state_dict

            # Optimizer stuff.
            if not args.no_save_optim and not no_save_optim:
                if optimizer is not None:
                    sd['optimizer'] = optimizer.state_dict()
                if lr_scheduler is not None:
                    sd['lr_scheduler'] = lr_scheduler.state_dict()

            # rng states.
            if not args.no_save_rng:
                sd['random_rng_state'] = random.getstate()
                sd['np_rng_state'] = np.random.get_state()
                sd['torch_rng_state'] = torch.get_rng_state()
                sd['cuda_rng_state'] = torch.cuda.get_rng_state()
                sd['rng_tracker_states'] = mpu.get_cuda_rng_tracker(
                ).get_states()

            ensure_directory_exists(checkpoint_name)
            torch.save(sd, checkpoint_name)
            print('  successfully saved {}'.format(checkpoint_name))

    # Wait so everyone is done (necessary)
    if barrier:
        torch.distributed.barrier()
    # And update the latest iteration
    if torch.distributed.get_rank() == 0:
        tracker_filename = get_checkpoint_tracker_filename(args.save)
        with open(tracker_filename, 'w') as f:
            f.write(tag)
Example #38
def qBezier(p0, p1, p2, n=8):
    """Quadratic bezier curve"""
    (x0, y0), (x1, y1), (x2, y2) = p0, p1, p2  # tuple parameters are Python 2 only
    ts = [float(j) / n for j in range(n+1)]
    cs = [((1-t)**2, 2*t*(1-t), t**2) for t in ts]
    return [(a*x0+b*x1+c*x2, a*y0+b*y1+c*y2) for a,b,c in cs]    

def mutatecolor(color, f=1, d=10):
    r, g, b = color
    def mutate(x):
        return min(max(x + random.randint(-d, d), 0), 255)
    return mutate(r*f), mutate(g*f), mutate(b*f)


def drawblobs(surf, color, p0, p1, width=None, r0=None, s0=0):
    """A set of random circles within a rectangle going between the two
    specified endpoints. Simulates blobbiness."""
    (x0, y0), (x1, y1) = p0, p1
    rstate = random.getstate()
    seed = x0, y0, x1, y1, width, r0, s0
    random.seed(seed)
    dx, dy = x1 - x0, y1 - y0
    d = math.sqrt(dx ** 2 + dy ** 2)
    if width is None: width = d
    if r0 is None: r0 = width / 8
    circs = []
    ncirc = int(4 * width * d / r0 ** 2)
    for j in range(ncirc):
        r = int(random.uniform(r0, 2*r0))
        z = random.uniform(-width/2, width/2)
        q = random.uniform(-width/2, width/2)
        if math.sqrt(z**2 + q**2) + r > width/2: continue
        p = random.uniform(0, d)
        shade = mutatecolor(color, 1 - abs(q / width), 0)
Example #39
        output += u'\U0001F47D' + " Alien Victory " + u'\U0001F47D' + "\n\nAll Time Stats\n" + u'\U0001F47D' + " Deaths: "
        output += str(stats["alien_deaths"]) + " + " + str(alienDeaths) + "\n" + u'\U0001F916' + " Deaths: "
        output += str(stats["robot_deaths"]) + " + " + str(robotDeaths)
        stats["alien_deaths"] += alienDeaths
        stats["robot_deaths"] += robotDeaths
        world["reset"] = True

    elif robotVictory:
        output += u'\U0001F916' + " Robot Victory " + u'\U0001F916' + "\n\nAll Time Stats\n" + u'\U0001F47D' + " Deaths: "
        output += str(stats["alien_deaths"]) + " + " + str(alienDeaths) + "\n" + u'\U0001F916' + " Deaths: "
        output += str(stats["robot_deaths"]) + " + " + str(robotDeaths)
        stats["alien_deaths"] += alienDeaths
        stats["robot_deaths"] += robotDeaths
        world["reset"] = True

    else:
        output += stringifyWorld(world["map"])

    world["rand_state"] = random.getstate()
    worldFile.seek(0)
    worldFile.truncate()
    json.dump(world, worldFile)

    twitter.post(output)

with open("config.json", "w") as configFile:
    json.dump(config, configFile)

with open("stats.json", "w") as statsFile:
    json.dump(stats, statsFile)
 def flip(self, *args):
     random.setstate(self.state)
     bit = super(SeededCoin, self).flip()
     self.state = random.getstate()
     return bit
 def reseed(self, seed):
     random.seed(seed)
     self.state = random.getstate()
 def __init__(self, seed):
     random.seed(seed)
     self.state = random.getstate()
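The three SeededCoin methods above juggle the module-level state. An instance-based alternative sketch (an assumption, not the source's API) keeps the coin reproducible without touching the global generator:

import random

class PrivateCoin:
    def __init__(self, seed):
        self._rng = random.Random(seed)    # private generator; global state untouched

    def flip(self):
        return self._rng.getrandbits(1)

    def reseed(self, seed):
        self._rng.seed(seed)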
if os.path.exists('data/5.3.3-state.dat'):
    # Restore the previously saved state
    with open('data/5.3.3-state.dat', 'rb') as f:
        state = pickle.load(f)
    random.setstate(state)
else:
    # Use a known start state
    print "No '5.3.3-state.dat', seeding"
    random.seed(1)

# Produce random values
for i in range(3):
    print('%04.3f' % random.random(), end=' ')
print('\n')
 
# Save state for next time
with open('data/5.3.3-state.dat', 'wb') as f:
    pickle.dump(random.getstate(), f)

# Produce more random values
print "After saving state:"
for i in xrange(3):
    print '%04.3f' % random.random(),
print '\n'

# Random integers
print('[1, 100]:', end=' ')
for i in range(3):
    print(random.randint(1, 100), end=' ')

print('\n[-5, 5]:', end=' ')
for i in range(3):
    print(random.randint(-5, 5), end=' ')
Example #44
# random.setstate(state)

# The setstate() function restores the internal state of the generator to the
# state object. i.e. it applies the same state again.
# This state object can be obtained by calling the getstate function.

# If you get a previous state and restore it, you can reproduce the same random
# data again and again. Remember that you cannot call a different random function,
# nor can you change the parameter values; doing either alters the state.

number_list = [3, 6, 9, 12, 15, 18, 21, 24, 27, 30]

print("First Sample is ", random.sample(number_list, k=5))

state = random.getstate()  # store this current state in state object
print("Second Sample is ", random.sample(number_list, k=5))

random.setstate(state)  # restore state now using setstate
print("Third Sample is ", random.sample(number_list, k=5))
# Now it will print the same second sample list

random.setstate(state)  # restore state now
print("Fourth Sample is ", random.sample(number_list, k=5))
# again, it will print the same second sample list

#######=========================================================

# Numpy.random

# Generate a random n-dimensional array of float numbers
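The excerpt cuts off here; a minimal sketch of what the announced numpy example presumably showed:

import numpy as np

random_float_array = np.random.rand(2, 3)   # 2x3 array of floats in [0.0, 1.0)
print(random_float_array)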
Example #45
def not_so_random():
    old_state = random.getstate()
    random.seed(42)
    yield
    random.setstate(old_state)
Example #46
"""
The following script does BEC quantization using regular and
irregular codes of various sizes.  
"""

import random
import pycodes
from pycodes.utils import CodeMaker
from pycodes.utils.bec_quant_perf import *

seed = '(unknown)'
seed = (1, (12597, 22473, 295), None)
random.setstate(seed)
seed = random.getstate()
print('Using seed', seed)

N=[120,1200, 12000, 120000]

col_w = [3]*len(N)
row_w = [6]*len(N)

leftDegrees = {2:0.33241, 3:.24632, 4:.11014, 6:0.31112}
rightDegrees = {6:.76611, 7:.23389}

# the following degree distribution is from Luby, Mitzenmacher et al. ISIT '98
# it doesn't seem to work...
#leftDegrees = {3:.44506, 5:.26704, 9:0.14835, 17:.07854, 33:0.04046,65:0.02055}
#rightDegrees = {7:.35282,8:.29548,19:.10225, 20:0.18321,84:0.04179,85:0.02445}

K = [n * c // r for n, c, r in zip(N, col_w, row_w)]  # map() returns a lazy iterator on Python 3
Example #47
 def __init__(self, random_state=None):
     self._random_state = random_state
     if self._random_state is None:
         self._random_state = random.getstate()
def test_l33t():
    state = random.getstate()
    random.seed(1)
    assert l33t('Money') == 'moNeY{'
    assert l33t('Dollars') == 'D0ll4r5`'
    random.setstate(state)
Example #49
    def _checkpoint(self, training_state: _TrainingState, output_folder: str,
                    train_iter: data_io.ParallelBucketSentenceIter):
        """
        Saves checkpoint. Note that the parameters are saved in _save_params.
        """
        # Create temporary directory for storing the state of the optimization process
        training_state_dirname = os.path.join(output_folder,
                                              C.TRAINING_STATE_TEMP_DIRNAME)
        if not os.path.exists(training_state_dirname):
            os.mkdir(training_state_dirname)
        # Link current parameter file
        params_base_fname = C.PARAMS_NAME % training_state.checkpoint
        os.symlink(
            os.path.join("..", params_base_fname),
            os.path.join(training_state_dirname, C.TRAINING_STATE_PARAMS_NAME))

        # Optimizer state (from mxnet)
        opt_state_fname = os.path.join(training_state_dirname,
                                       C.MODULE_OPT_STATE_NAME)
        if self.bucketing:
            # This is a bit hacky, as BucketingModule does not provide a
            # save_optimizer_states call. We take the current active module and
            # save its state. This should work, as the individual modules in
            # BucketingModule share the same optimizer through
            # borrow_optimizer.
            self.module._curr_module.save_optimizer_states(opt_state_fname)
        else:
            self.module.save_optimizer_states(opt_state_fname)

        # State of the bucket iterator
        train_iter.save_state(
            os.path.join(training_state_dirname, C.BUCKET_ITER_STATE_NAME))

        # RNG states: python's random and np.random provide functions for
        # storing the state, mxnet does not, but inside our code mxnet's RNG is
        # not used AFAIK
        with open(os.path.join(training_state_dirname, C.RNG_STATE_NAME),
                  "wb") as fp:
            pickle.dump(random.getstate(), fp)
            pickle.dump(np.random.get_state(),
                        fp)  # Yes, one uses _, the other does not

        # Monitor state, in order to get the full information about the metrics
        self.training_monitor.save_state(
            os.path.join(training_state_dirname, C.MONITOR_STATE_NAME))

        # Our own state
        self.save_state(
            training_state,
            os.path.join(training_state_dirname, C.TRAINING_STATE_NAME))

        # The lr scheduler
        with open(os.path.join(training_state_dirname, C.SCHEDULER_STATE_NAME),
                  "wb") as fp:
            pickle.dump(self.lr_scheduler, fp)

        # We are now finished with writing. Rename the temporary directory to
        # the actual directory
        final_training_state_dirname = os.path.join(output_folder,
                                                    C.TRAINING_STATE_DIRNAME)

        # First we rename the existing directory to minimize the risk of state
        # loss if the process is aborted during deletion (which will be slower
        # than directory renaming)
        delete_training_state_dirname = os.path.join(
            output_folder, C.TRAINING_STATE_TEMP_DELETENAME)
        if os.path.exists(final_training_state_dirname):
            os.rename(final_training_state_dirname,
                      delete_training_state_dirname)
        os.rename(training_state_dirname, final_training_state_dirname)
        if os.path.exists(delete_training_state_dirname):
            shutil.rmtree(delete_training_state_dirname)
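The matching restore path is not part of this excerpt; presumably it unpickles the two states in the same order they were dumped. A rough sketch under that assumption (directory and constant names as in the snippet, the function name is ours):

import os
import pickle
import random

import numpy as np

def load_rng_states(training_state_dirname):
    # Read the states back in dump order: Python's random first, numpy second.
    with open(os.path.join(training_state_dirname, C.RNG_STATE_NAME), "rb") as fp:
        random.setstate(pickle.load(fp))
        np.random.set_state(pickle.load(fp))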
Exemple #50
0
def run_config(suite_name, config, basedir, output_dir, random_seed,
               report_all, generate_only, args, testConfigKit):
    for key in config.iterkeys():
        print "in run_config key = '%s', config[key] = '%s'" % (key,
                                                                config[key])
        if not os.path.isabs(config[key]):
            config[key] = os.path.abspath(os.path.join(basedir, config[key]))
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    statements_path = os.path.abspath(
        os.path.join(output_dir, "statements.data"))
    hsql_path = os.path.abspath(os.path.join(output_dir, "hsql.data"))
    jni_path = os.path.abspath(os.path.join(output_dir, "jni.data"))
    template = config["template"]

    global normalize
    if "normalizer" in config:
        normalize = imp.load_source("normalizer",
                                    config["normalizer"]).normalize
        # print "DEBUG: using normalizer ", config["normalizer"], " for ", template
    else:
        normalize = lambda x, y: x
        # print "DEBUG: using no normalizer for ", template

    command = " ".join(args[2:])
    command += " schema=" + os.path.basename(config['ddl'])

    random_state = random.getstate()
    if "template-jni" in config:
        template = config["template-jni"]
    generator = SQLGenerator(config["schema"], template, True)
    counter = 0

    statements_file = open(statements_path, "wb")
    for i in generator.generate():
        cPickle.dump({"id": counter, "SQL": i}, statements_file)
        counter += 1
    statements_file.close()

    if generate_only:
        # Claim success without running servers.
        return [0, 0]

    if run_once("jni", command, statements_path, jni_path, testConfigKit) != 0:
        print >> sys.stderr, "Test with the JNI backend had errors."
        print >> sys.stderr, "  jni_path: %s" % (jni_path)
        sys.stderr.flush()
        exit(1)

    random.seed(random_seed)
    random.setstate(random_state)  # NOTE: this overwrites the seed() call above
    # Work around the timestamp issue: Volt and HSQLDB use different units
    # for timestamps (microseconds vs. milliseconds), so we have to use a
    # different template file for the regression test, since not all of the
    # statements are generated in this case.
    if "template-hsqldb" in config:
        template = config["template-hsqldb"]
    generator = SQLGenerator(config["schema"], template, False)
    counter = 0

    statements_file = open(statements_path, "wb")
    for i in generator.generate():
        cPickle.dump({"id": counter, "SQL": i}, statements_file)
        counter += 1
    statements_file.close()

    if run_once("hsqldb", command, statements_path, hsql_path,
                testConfigKit) != 0:
        print >> sys.stderr, "Test with the HSQLDB backend had errors."
        exit(1)

    global compare_results
    compare_results = imp.load_source("normalizer",
                                      config["normalizer"]).compare_results
    success = compare_results(suite_name, random_seed, statements_path,
                              hsql_path, jni_path, output_dir, report_all)
    return success
 def shuffle(self, x, y):
     import random
     start_state = random.getstate()
     random.shuffle(x)
     random.setstate(start_state)  # same state => same permutation for y
     random.shuffle(y)
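Replaying the saved state makes the second shuffle draw the identical sequence of random numbers, so x and y receive the same permutation (this only holds when both lists have the same length). A quick standalone check of the invariant:

import random

x = [1, 2, 3, 4]
y = ['a', 'b', 'c', 'd']
pairs = set(zip(x, y))

state = random.getstate()
random.shuffle(x)
random.setstate(state)  # same state, same length => same permutation
random.shuffle(y)

assert set(zip(x, y)) == pairs  # x[i] still lines up with y[i]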
def test_ransom():
    state = random.getstate()
    random.seed(1)
    assert ransom('Money') == 'moNeY'
    assert ransom('Dollars') == 'DOLlaRs'
    random.setstate(state)
Exemple #53
0
        if firstrun:
            for ii in range(len(population[0])):
                population[0][ii] = para_first2[ii]
            population[0][-1] = 0.

    effmax = np.zeros(shape=(ngen + 1, 1)) * np.nan
    effmin = np.zeros(shape=(ngen + 1, 1)) * np.nan
    effavg = np.zeros(shape=(ngen + 1, 1)) * np.nan
    effstd = np.zeros(shape=(ngen + 1, 1)) * np.nan
    if not startlater:
        halloffame = tools.ParetoFront()

        # saving population
        cp = dict(population=population,
                  generation=gen,
                  rndstate=random.getstate())
        with open(checkpoint, "wb") as cp_file:
            pickle.dump(cp, cp_file)

        # Evaluate the individuals with an invalid fitness
        invalid_ind = [ind for ind in population if not ind.fitness.valid]
        fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
        for ind, fit in zip(invalid_ind, fitnesses):
            ind.fitness.values = fit

        halloffame.update(population)

        # Loop through the different objective functions and calculate some statistics
        # from the Pareto optimal population
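The excerpt cuts off before the startlater branch, but resuming presumably reverses the dump above. A sketch under that assumption (keys as written to the checkpoint):

import pickle
import random

with open(checkpoint, "rb") as cp_file:
    cp = pickle.load(cp_file)

population = cp["population"]
gen = cp["generation"]
random.setstate(cp["rndstate"])  # continue the exact random sequence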
Exemple #54
0
 def setUp(self):
     self.state = random.getstate()
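A matching tearDown (assumed here, not shown in the excerpt) would restore the captured state so one test's seeding cannot leak into the next:

 def tearDown(self):
     random.setstate(self.state)  # undo any seeding or draws made by the test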
Exemple #55
0
 def __getstate__(self):
     state = self.__dict__.copy()
     state['_random_state'] = random.getstate()
     state['_np_random_state'] = np.random.get_state()
     return state
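The inverse hook would reinstall those states on unpickling; a minimal sketch of the assumed counterpart:

 def __setstate__(self, state):
     # reinstall the global RNG states captured by __getstate__
     random.setstate(state.pop('_random_state'))
     np.random.set_state(state.pop('_np_random_state'))
     self.__dict__.update(state)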
Exemple #56
0
    data[i] = v

data = [0] * 624 + [untemper(v) for v in data[:624]]

for i in range(623, -1, -1):
    v3 = data[i + 397]
    v4 = data[i + 624]

    # Invert one step of the MT19937 state recurrence: v3 ^ v4 is the
    # twisted value, from which we recover the top bit of word i and the
    # low 31 bits of word i + 1.
    v = v3 ^ v4
    if v & 0x80000000:
        y = ((v ^ 0x9908b0df) << 1) | 1
    else:
        y = v << 1

    data[i] |= y & 0x80000000
    data[i + 1] |= y & 0x7fffffff


def randomdpoly(d1, d2):
    # random ternary polynomial of length 263: d1 ones, d2 minus-ones,
    # zeros elsewhere
    result = d1 * [1] + d2 * [-1] + (263 - d1 - d2) * [0]
    random.shuffle(result)
    return result


# The position within the 624-word output window is unknown, so try every
# candidate index and keep results whose position lands exactly on 624
# after one randomdpoly() call.
for i in range(623, -1, -1):
    state = (3, tuple(data[:624] + [i]), None)
    random.setstate(state)
    result = randomdpoly(18, 18)
    if random.getstate()[1][-1] == 624:
        print(result)
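The untemper() used above is not defined in this excerpt. It inverts MT19937's output tempering, mapping each observed 32-bit output back to a raw state word; a standard sketch of that inversion (our own code, using the usual MT19937 tempering constants):

def _undo_xor_rshift(y, shift):
    # invert y ^= y >> shift for a 32-bit word
    result = y
    for _ in range(32 // shift):
        result = y ^ (result >> shift)
    return result

def _undo_xor_lshift_mask(y, shift, mask):
    # invert y ^= (y << shift) & mask for a 32-bit word
    result = y
    for _ in range(32 // shift):
        result = y ^ ((result << shift) & mask)
    return result & 0xffffffff

def untemper(v):
    # undo the four tempering steps in reverse order
    v = _undo_xor_rshift(v, 18)
    v = _undo_xor_lshift_mask(v, 15, 0xefc60000)
    v = _undo_xor_lshift_mask(v, 7, 0x9d2c5680)
    v = _undo_xor_rshift(v, 11)
    return v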
Exemple #57
0
                        default="main",
                        help="Prefix for experiment related files.")

    parser.add_argument(
        "--skip-train",
        action='store_true',
        help="Skip Training to reach the evaluation code. Used for debugging")

    return parser.parse_args()


args = load_args()

# Get rand state to be used for splitting the training file
random.seed(args.train_val_split_randseed)
train_val_split_randstate = random.getstate()
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

# Directory Name
args.exp_name = f"{args.exp_id}" \
                f"--em-dim-{args.embedding_dim}" \
                f"--h-dim-{args.hidden_dim}"

writer = SummaryWriter(os.path.join(args.log_dir, args.exp_name))
# [TODO] Figure out why this is used
#hparams_to_track = ["lr", "embedding_dim", "hidden_dim", "max_decoding_len", "exp_id", "data_dir"]
#hparams = {k:v for k,v in vars(args).items() if k in hparams_to_track}
#writer.add_hparams(hparam_dict=hparams)


class WMT14Dataset(Dataset):
Exemple #58
0
 def save_checkpoint(self, directory, gen):
     random_state = random.getstate()
     numpy_random_state = np.random.get_state()
     data = [self, random_state, numpy_random_state]
     with open('{0}/pickledPops/Gen_{1}.pickle'.format(directory, gen), 'wb') as handle:
         cPickle.dump(data, handle, protocol=cPickle.HIGHEST_PROTOCOL)
Exemple #59
0
def save_params(iter, params):
    # open in binary mode; pickle output is bytes
    with open("saved_params_%d.npy" % iter, "wb") as f:
        pickle.dump(params, f)
        pickle.dump(random.getstate(), f)
Exemple #60
0
def save_params(iter, params):
    params_file = "saved_params_%d.npy" % iter
    np.save(params_file, params)
    with open("saved_state_%d.pickle" % iter, "wb") as f:
        pickle.dump(random.getstate(), f)
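A matching loader (assumed counterpart; only the save side appears above) reads the array back and reinstalls the pickled state:

import pickle
import random

import numpy as np

def load_params(iter):
    params = np.load("saved_params_%d.npy" % iter)
    with open("saved_state_%d.pickle" % iter, "rb") as f:
        random.setstate(pickle.load(f))  # resume the exact RNG sequence
    return params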