Example #1
	def agent_rng(self):
		# Swap to the agent RNG state if necessary, then return random
		if not self.rng_for_agent:
			self.internal_state = random.getstate()
			random.setstate(self.agent_state)
			self.rng_for_agent = True
		return random
Example #2
    def test_choice_distribution(self):
        from faker.utils.distribution import choice_distribution

        a = ('a', 'b', 'c', 'd')
        p = (0.5, 0.2, 0.2, 0.1)

        sample = choice_distribution(a, p)
        self.assertTrue(sample in a)

        with open(os.path.join(TEST_DIR, 'random_state.json'), 'r') as fh:
            random_state = json.load(fh)
        random_state[1] = tuple(random_state[1])

        random.setstate(random_state)
        samples = [choice_distribution(a, p) for i in range(100)]
        a_pop = len([i for i in samples if i == 'a'])
        b_pop = len([i for i in samples if i == 'b'])
        c_pop = len([i for i in samples if i == 'c'])
        d_pop = len([i for i in samples if i == 'd'])

        boundaries = []
        tolerance = 5
        for probability in p:
            boundaries.append([100 * probability + tolerance,  100 * probability - tolerance])

        self.assertTrue(boundaries[0][0] > a_pop > boundaries[0][1])
        self.assertTrue(boundaries[1][0] > b_pop > boundaries[1][1])
        self.assertTrue(boundaries[2][0] > c_pop > boundaries[2][1])
        self.assertTrue(boundaries[3][0] > d_pop > boundaries[3][1])
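The test above relies on a pre-built random_state.json fixture that is not shown; a hypothetical helper along these lines (names assumed, not part of the original suite) could produce such a file, since the tuple returned by random.getstate() serializes as JSON arrays and only element 1 needs converting back to a tuple on load.

# Hypothetical generator for a fixture like random_state.json (not from the original tests).
import json
import random

def dump_random_state(path='random_state.json'):
    random.seed(1234)          # any fixed seed gives a reproducible fixture
    state = random.getstate()  # (version, internal_state, gauss_next)
    with open(path, 'w') as fh:
        json.dump(state, fh)   # tuples are written out as JSON arrays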
Example #3
def gradcheck(f, x):
    """ 
    Gradient check for a function f 
    - f should be a function that takes a single argument and outputs the cost
    - x is the point (numpy array) to check the gradient at
    """ 

    rndstate = random.getstate()
    random.setstate(rndstate)
    fx = f(x) # Evaluate function value at original point
    h = 1e-4

    numgrad = np.zeros_like(x)
    # iterate over all indexes in x
    it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])
    while not it.finished:

        # evaluate function at x+h
        ix = it.multi_index
        oldval = x[ix]
        x[ix] = oldval + h # increment by h
        random.setstate(rndstate) # reset the RNG so a stochastic f sees the same draws
        fxph = f(x) # evaluate f(x + h)
        x[ix] = oldval - h
        random.setstate(rndstate) # reset again before the second evaluation
        fxmh = f(x) # evaluate f(x - h)
        x[ix] = oldval # restore
        numgrad[ix] = (fxph - fxmh) / (2 * h) # the slope (centered difference)
        it.iternext() # step to next dimension

    return numgrad
Example #4
	def internal_rng(self):
		# Swap to the internal RNG state if necessary, then return random
		if self.rng_for_agent:
			self.agent_state = random.getstate()
			random.setstate(self.internal_state)
			self.rng_for_agent = False
		return random
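Examples #1 and #4 read like two methods of a single class that maintains separate RNG streams for the agent and for internal bookkeeping. Below is a minimal sketch of such an owning class, with an assumed __init__ that seeds and stores both states; the two methods above would drop in unchanged.

# Hypothetical owning class for agent_rng() / internal_rng() above (name and seeds assumed).
import random

class RngSwitcher(object):
	def __init__(self, agent_seed=0, internal_seed=1):
		random.seed(agent_seed)
		self.agent_state = random.getstate()     # saved state of the agent stream
		random.seed(internal_seed)
		self.internal_state = random.getstate()  # saved state of the internal stream
		self.rng_for_agent = False               # the internal stream is live after __init__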
Example #5
def lorem(randseed=None, count=1, method=None):
    u"""
    Creates Lorem Ipsum text.

    Usage format:

        {% lorem [randseed] [count] [method] %}

    ``randseed`` is any hashable object used to initialize the random numbers generator.
    If ``randseed`` is not given the common "Lorem ipsum dolor sit..." text is used.

    ``count`` is a number of paragraphs or sentences to generate (default is 1).

    ``method`` is either ``p`` for HTML paragraphs enclosed in ``<p>`` tags, or ``b`` for
    plain-text paragraph blocks (default is ``b``).

    Notice: This filter is a rewritten version of the ``lorem`` filter from the ``webdesign`` module
    of the default Django package ``django.contrib.webdesign``. The original ``lorem`` filter does not
    give stable random text, so its generated paragraphs change on every page refresh. We stabilize
    the generated text by setting a fixed randseed before generating the paragraphs.
    """

    state = random.getstate()
    random.seed(randseed)
    res = paragraphs(count, common=(randseed is None))
    random.setstate(state)

    if method == u'p':
        res = [u'<p>{}</p>'.format(p) for p in res]
    return u'\n'.join(res)
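A brief usage sketch of the function above (values are hypothetical): with a fixed randseed the same paragraphs come back on every call, which is exactly the stability the docstring describes.

# Hypothetical direct calls to lorem() above (not from the original module).
html = lorem(randseed=42, count=2, method=u'p')  # two stable <p>...</p> paragraphs
text = lorem()                                   # the common "Lorem ipsum dolor sit..." text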
Example #6
    def start_again(self):
        """Restart the game, preserving state.  Save a new virtual keyboard,
           and add the most recent set of commands to the army of ghost ships.
           """
        # Save a new virtual keyboard.
        self.keyboard.history += [(self.keyboard.time, sf.Key.SPACE, 'u')]
        self.virtual_keyboards += [VirtualKeyboard(self.keyboard)]
        
        # Switch over the last bot to running on the virtual keyboard.
        self.players[-1].reset(self.virtual_keyboards[-1],
                               self.view_size / 2.0)
        
        self.players += [Ship(self.keyboard, self.view_size / 2.0)]
        
        # Reset all of the keyboards, real and virtual.
        for vkb in self.virtual_keyboards:
            vkb.reset()
        self.keyboard.reset()
        
        # Reset player locations
        for player in self.players:
            player.reset(None, self.view_size / 2.0)
        self.players[-1].is_clone = False

        self.bullets = []
        random.setstate(self.rand_state)
        self.asteroids = [Asteroid(self, clear_zone = self.view_size / 2.0,
                                   seed = random.random())
                          for i in range(self.num_asteroids)]
Example #7
    def __makeResistanceToon(self):
        if self.resistanceToon:
            return
        npc = Toon.Toon()
        npc.setName(TTLocalizer.ResistanceToonName)
        npc.setPickable(0)
        npc.setPlayerType(NametagGroup.CCNonPlayer)
        dna = ToonDNA.ToonDNA()
        dna.newToonRandom(11237, 'f', 1)
        dna.head = 'pls'
        npc.setDNAString(dna.makeNetString())
        npc.animFSM.request('neutral')
        self.resistanceToon = npc
        self.resistanceToon.setPosHpr(*ToontownGlobals.CashbotRTBattleOneStartPosHpr)
        state = random.getstate()
        random.seed(self.doId)
        self.resistanceToon.suitType = SuitDNA.getRandomSuitByDept('m')
        random.setstate(state)
        self.fakeGoons = []
        for i in range(self.numFakeGoons):
            goon = DistributedCashbotBossGoon.DistributedCashbotBossGoon(base.cr)
            goon.doId = -1 - i
            goon.setBossCogId(self.doId)
            goon.generate()
            goon.announceGenerate()
            self.fakeGoons.append(goon)

        self.__hideFakeGoons()
Example #8
def test_seeds_off_random():
    s = settings(max_shrinks=0, database=None)
    r = random.getstate()
    x = find(st.integers(), lambda x: True, settings=s)
    random.setstate(r)
    y = find(st.integers(), lambda x: True, settings=s)
    assert x == y
Example #9
def sgd(f, x0, step, iterations, useSaved = False, PRINT_EVERY=10):
  # possibly more arguments for postprocessing, save trained variables,
  # print status lines

  # Anneal learning rate every several iterations
  ANNEAL_EVERY = 20000

  if useSaved:
    start_iter, oldx, state = load_saved_params()
    if start_iter > 0:
      x0 = oldx
      step *= 0.5 ** (start_iter / ANNEAL_EVERY)

    if state:
      random.setstate(state)
  else:
    start_iter = 0

  x = x0

  for iter in xrange(1, iterations + 1):
    cost, grad = f(x)
    x -= step * grad
    if iter % PRINT_EVERY == 0:
      print "iter %d: %f" % (iter, cost)

    if iter % SAVE_PARAMS_EVERY == 0 and useSaved:
      save_params(iter, x)

    if iter % ANNEAL_EVERY == 0:
      step *= 0.5
  return x
Example #10
    def shuffle(self, i, seed=None):

        if seed is not None:
            rand_state = random.getstate()
            random.seed(seed)

        move_list = []
        last_face = None

        for _ in range(i):

            face = last_face
            while face == last_face:
                face = Face.random()

            turn_type = TurnType.random()

            move_list.append((face, turn_type))

            last_face = face

        shuffle_algorithm = Algorithm(reversed(move_list))
        self.apply_algorithm(shuffle_algorithm)

        if seed is not None:
            random.setstate(rand_state)

        return shuffle_algorithm
Example #11
def simplified_data(num_train, num_dev, num_test):
    rndstate = random.getstate()
    random.seed(0)
    trees = loadTrees('train') + loadTrees('dev') + loadTrees('test')
    
    #filter extreme trees
    pos_trees = [t for t in trees if t.root.label==4]
    neg_trees = [t for t in trees if t.root.label==0]

    #binarize labels
    binarize_labels(pos_trees)
    binarize_labels(neg_trees)
    
    #split into train, dev, test
    print len(pos_trees), len(neg_trees)
    pos_trees = sorted(pos_trees, key=lambda t: len(t.get_words()))
    neg_trees = sorted(neg_trees, key=lambda t: len(t.get_words()))
    num_train/=2
    num_dev/=2
    num_test/=2
    train = pos_trees[:num_train] + neg_trees[:num_train]
    dev = pos_trees[num_train : num_train+num_dev] + neg_trees[num_train : num_train+num_dev]
    test = pos_trees[num_train+num_dev : num_train+num_dev+num_test] + neg_trees[num_train+num_dev : num_train+num_dev+num_test]
    random.shuffle(train)
    random.shuffle(dev)
    random.shuffle(test)
    random.setstate(rndstate)


    return train, dev, test
Example #12
def determine_action(state, dict, random_state, score, max_score):
	#print state
	random.setstate(random_state[1])
	
	state_click = dict.get((state, True), 0)
	state_nothing = dict.get((state, False), 0)
	print state
	print state_click
	print state_nothing
	
	
	value = random.randint(1,10)
	random_state[1] = random.getstate()
	if value < 3 and score >= max_score:
		value = random.randint(0,1)
		random_state[1] = random.getstate()
		if value == 1:
			print "RNG VALUE OF 1"
			return True
		else:
			print "RNG VALUE OF 0"
			return False
	elif state_click > state_nothing:
		print "state_click greater than state_nothing"
		return True
	print "state_nothing greater than state_click"
	return False
Example #13
    def load(self, filename):
        data = Data.load(filename)

        random.setstate(data['random'])
        self.tiles = data['tiles']
        self.initindexes()
        self.popcache = {}
Example #14
def gradcheck_naive(f, x):
    """ 
    Gradient check for a function f 
    - f should be a function that takes a single argument and outputs the cost and its gradients
    - x is the point (numpy array) to check the gradient at
    """ 

    rndstate = random.getstate()
    random.setstate(rndstate)  
    fx, grad = f(x) # Evaluate function value at original point
    h = 1e-4

    # Iterate over all indexes in x
    it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])
    while not it.finished:
        ix = it.multi_index

        ### try modifying x[ix] with h defined above to compute numerical gradients
        ### make sure you call random.setstate(rndstate) before calling f(x) each time, this will make it 
        ### possible to test cost functions with built in randomness later
        ### YOUR CODE HERE:
        raise NotImplementedError
        ### END YOUR CODE

        # Compare gradients
        reldiff = abs(numgrad - grad[ix]) / max(1, abs(numgrad), abs(grad[ix]))
        if reldiff > 1e-5:
            print "Gradient check failed."
            print "First gradient error found at index %s" % str(ix)
            print "Your gradient: %f \t Numerical gradient: %f" % (grad[ix], numgrad)
            return
    
        it.iternext() # Step to next dimension

    print "Gradient check passed!"
Example #15
def movement_phase(process):
    repeat = process.current_actions
    Action.set_process(process)
    rand_state = random.getstate()
    random.shuffle(repeat)
    random.setstate(rand_state)
    repeat.sort()
    sorted_moves = []
    repeat = [Movement(a, find_shortest_path(process.map, a.pos, a.dest)) for a in repeat]
    occupied, que = set(), []
    while len(repeat) != len(que):
        que, repeat = repeat, []
        for turn in que:
            if turn.path and turn.path[0] not in occupied:
                occupied.add(turn.path[0])
                sorted_moves.append(turn.action)
            else:
                repeat.append(turn)
    for turn in repeat:
        for node in turn.path:
            if node not in occupied:
                occupied.add(node)
                sorted_moves.append(Action(turn.action.pos, node.pos, turn.action.attack))
                break
        else:
            sorted_moves.append(Action(turn.action.pos, turn.action.pos, turn.action.attack))
    return sorted_moves
Example #16
def main(id, checkpoint_name=None):
    # random.seed(64)

    if checkpoint_name:
        # A file name has been given, then load the data from the file
        cp = pickle.load(open(checkpoint_name, "rb"))
        pop = cp["population"]
        start_gen = cp["generation"] + 1
        hof = cp["halloffame"]
        logbook = cp["logbook"]
        random.setstate(cp["rndstate"])
    else:
        pop = toolbox.population(n=Config.MU)
        start_gen = 0
        hof = tools.HallOfFame(1)
        logbook = tools.Logbook()
    
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", np.mean)
    stats.register("std", np.std)
    stats.register("min", np.min)
    stats.register("max", np.max)
    
    pop, log = alg.myEAMuCommaLambda(pop, start_gen, toolbox, Config.MU, Config.LAMBDA,
                                     cxpb=0.6, mutpb=0.2, ngen=Config.ngen,
                                     stats=stats, halloffame=hof, logbook=logbook, verbose=True,
                                     id=id)

    return pop, log, hof
Example #17
def optimize(p, k, distances, mode='clusters', seed=12345, granularity=1.):
    if k == 1:
        return [p]

    random_state = random.getstate()
    try:
        # we want the same output on every call on the same data, so we use
        # a fixed random seed at this point.
        random.seed(seed)

        clusterer = _Clusterer(
            len(p), tuple(range(len(p))), None, [], p, distances.distance)

        if isinstance(k, tuple) and len(k) == 2:
            criterion = k[0](**k[1])
            assert isinstance(criterion, SplitCriterion)
            clusters = clusterer.without_k(criterion)
        elif isinstance(k, int):
            clusters = [c.members for c in clusterer.with_k(k)]
        else:
            raise ValueError('illegal parameter k "%s"' % str(k))

        # sort clusters by order of their first element in the original list.
        clusters = sorted(clusters, key=lambda c: c[0])

        if mode == 'clusters':
            return list(map(lambda c: map(lambda i: p[i], c), clusters))
        elif mode == 'components':
            return _components(clusters, len(p))
        else:
            raise ValueError('illegal mode %s' % mode)
    finally:
        random.setstate(random_state)
Example #18
    def test_unique_filename_exact_match(self):
        with execution(None) as ex:
            st = random.getstate()
            f = touch(ex)
            random.setstate(st)
            g = touch(ex)
            self.assertNotEqual(f, g)
Example #19
    def test_state_determines_filename(self):
        with execution(None) as ex:
            st = random.getstate()
            f = unique_filename_in()
            random.setstate(st)
            g = unique_filename_in()
            self.assertEqual(f, g)
Example #20
    def reify(self, langs):
        timing = self._timing.routine('reifying language {}'.format(langs.index(self)))
        state = random.getstate()
        dictionary = reify(langs, langs.index(self), len(self._changes), set(self._concepts), timing, '')
        random.setstate(state)
        timing.done()
        return dictionary
Example #21
def sgd(f, x0, step, iterations, postprocessing = None, useSaved = False, PRINT_EVERY=10):
    """ Stochastic Gradient Descent """

    # Inputs:                                                         
    # - f: the function to optimize, it should take a single        
    #     argument and yield two outputs, a cost and the gradient  
    #     with respect to the arguments                            
    # - x0: the initial point to start SGD from                     
    # - step: the step size for SGD                                 
    # - iterations: total iterations to run SGD for                 
    # - postprocessing: postprocessing function for the parameters  
    #     if necessary.    
    # - PRINT_EVERY: specifies every how many iterations to output  

    # Output:                                                         
    # - x: the parameter value after SGD finishes  
    
    # Anneal learning rate every several iterations
    ANNEAL_EVERY = 20000
    
    if useSaved:
        start_iter, oldx, state = load_saved_params()
        if start_iter > 0:
            x0 = oldx;
            step *= 0.5 ** (start_iter / ANNEAL_EVERY)
            
        if state:
            random.setstate(state)
    else:
        start_iter = 0
    
    x = x0
    
    if not postprocessing:
        postprocessing = lambda x: x
    
    expcost = None
    
    for iter in xrange(start_iter + 1, iterations + 1):

        cost = None
        cost, gradx = f(x)
        x += - step*gradx
        
        x = postprocessing(x)
        
        if iter % PRINT_EVERY == 0:
            if not expcost:
                expcost = cost
            else:
                expcost = .95 * expcost + .05 * cost
            print "iter %d: %f" % (iter, expcost)
        
        if iter % SAVE_PARAMS_EVERY == 0 and useSaved:
            save_params(iter, x)
            
        if iter % ANNEAL_EVERY == 0:
            step *= 0.5
    
    return x
Example #22
    def __call__(self, raw_bytes, avoid, pcreg=None):
        icache_flush = 1

        # If randomization is disabled, ensure that the seed
        # is always the same for the builder.
        state = random.getstate()
        if not context.randomize:
            random.seed(1)

        try:
            b = builder.builder()

            enc_data = b.enc_data_builder(raw_bytes)
            dec_loop = b.DecoderLoopBuilder(icache_flush)
            enc_dec_loop = b.encDecoderLoopBuilder(dec_loop)
            dec = b.DecoderBuilder(dec_loop, icache_flush)

            output, dec = b.buildInit(dec)

            output += dec
            output += enc_dec_loop
            output += enc_data
        finally:
            random.setstate(state)

        return output
Example #23
    def use_internal_state(self):
        """Use a specific RNG state."""
        old_state = random.getstate()
        random.setstate(self._random_state)
        yield
        self._random_state = random.getstate()
        random.setstate(old_state)
Example #24
def random_seed(seed):
    """Context manager to set random.seed() temporarily
    """
    state = random.getstate()
    random.seed(seed)
    yield
    random.setstate(state)
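As written, Example #24 is a plain generator; it only works in a with statement once it is decorated. A minimal self-contained sketch, assuming the standard contextlib.contextmanager decorator and adding a try/finally so the state is restored even if the body raises:

# Example #24 wrapped as a working context manager (decorator assumed, try/finally added).
import contextlib
import random

@contextlib.contextmanager
def random_seed(seed):
    """Context manager to set random.seed() temporarily."""
    state = random.getstate()   # remember the caller's RNG state
    random.seed(seed)           # switch to a reproducible stream
    try:
        yield
    finally:
        random.setstate(state)  # put the caller's state back, even on error

with random_seed(0):
    a = random.random()
with random_seed(0):
    b = random.random()
assert a == b                   # same seed, same draw; the outer stream is untouched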
Example #25
    async def penis(self, ctx, *users: discord.Member):
        """Detects user's penis length

        This is 100% accurate.
        Enter multiple users for an accurate comparison!"""
        if not users:
            await self.bot.send_cmd_help(ctx)
            return

        dongs = {}
        msg = ""
        state = random.getstate()

        for user in users:
            random.seed(user.id)
            dongs[user] = "8{}D".format("=" * random.randint(0, 30))

        random.setstate(state)
        dongs = sorted(dongs.items(), key=lambda x: x[1])

        for user, dong in dongs:
            msg += "**{}'s size:**\n{}\n".format(user.display_name, dong)

        for page in pagify(msg):
            await self.bot.say(page)
Example #26
def _numpy_do_teardown():
    global _old_python_random_state
    global _old_numpy_random_state
    random.setstate(_old_python_random_state)
    numpy.random.set_state(_old_numpy_random_state)
    _old_python_random_state = None
    _old_numpy_random_state = None
Example #27
def testAgainstRandom(p):

    state = random.getstate()

    score = 0

    #random.seed(0)

    for iter in range(100):
        population = []
        nIndividuals = 3
        for n in range(nIndividuals):

            newIndividual = {}
            for i in ['PE', 'LB', 'PK', 'OE', 'RD', 'YW', 'GN', 'DB', 'RR', 'UY',
                      'PE_up', 'LB_up', 'PK_up', 'OE_up', 'RD_up', 'YW_up', 'GN_up', 'DB_up', 'RR_up', 'UY_up']:
                #newIndividual[i] = random.random()
                #newIndividual[i] = 1.0

                if random.random() > .5:
                    newIndividual[i] = random.random()
                else:
                    newIndividual[i] = random.random()

            population.append(newIndividual)
        points = playGame(p, population[0], population[1], population[2])
        score += points[0]

    random.setstate(state)
    return score
Example #28
def randomOpening(size, seed):
    oldstate = random.getstate()
    random.seed(seed)
    r = random.randint(0, (size*size - 1))
    random.setstate(oldstate)
    move = str(chr(ord('a') + (r / size))) + str((r % size) + 1)
    return move
Example #29
def spatial_graph_variable_spatial_scale(cell_positions,
                                         spatial_scale=1.,
                                         connection_probability=connection_probability_vervaeke_2010,
                                         synaptic_weight=synaptic_weight_vervaeke_2010):
    state = random.getstate()
    g_2010 = spatial_graph_2010(cell_positions)
    weights_2010 = [e[2]['weight'] for e in g_2010.edges(data=True)]
    total_weight_2010 = sum(weights_2010)
    # reset RNG to make sure we will rescale strengths fairly
    random.setstate(state)

    # generate spatial network with 2010 rules but scaling all distances
    n_cells = len(cell_positions)
    edges = []
    for i, p in enumerate(cell_positions):
        for j, q in enumerate(cell_positions[i+1:]):
            d = distance(p, q) / spatial_scale
            if random.random() < connection_probability(d):
                edges.append((i, i+1+j, {'weight': synaptic_weight(d)}))

    # rescale weights to keep the same total value across the network
    weights = [e[2]['weight'] for e in edges]
    total_weight = sum(weights)
    for e in edges:
        e[2]['weight'] *= total_weight_2010 / total_weight

    # create graph object
    g = nx.Graph()
    g.add_nodes_from(range(n_cells))
    for node in g.nodes():
        g.node[node]['x'] = cell_positions[node][0]
        g.node[node]['y'] = cell_positions[node][1]
        g.node[node]['z'] = cell_positions[node][2]
    g.add_edges_from(edges)
    return g
Example #30
def get_random_condition(conn):
    # hits can be in one of three states:
        # jobs that are finished
        # jobs that are started but not finished
        # jobs that are never going to finish (user decided not to do it)
    # our count should be based on the first two, so let's say anything finished, or anything not finished that was started in the last 45 minutes, should be counted
    starttime = datetime.datetime.now() + datetime.timedelta(minutes=-30)
    s = select([participantsdb.c.cond], and_(participantsdb.c.codeversion==CODE_VERSION, or_(participantsdb.c.endhit!=null, participantsdb.c.beginhit>starttime)), from_obj=[participantsdb])
    result = conn.execute(s)
    counts = [0]*NUMCONDS
    
    # Excluding less interesting conditions:
    counts[2] = 5000
    counts[4] = 5000
    for row in result:
        counts[row[0]]+=1
    
    # choose randomly from the ones that have the least in them (so will tend to fill in evenly)
    indicies = [i for i, x in enumerate(counts) if x == min(counts)]
    rstate = getstate()
    seed()
    if TESTINGPROBLEMSIX:
        #subj_cond = choice([5,11])
        subj_cond = 5
    else:
        subj_cond = choice(indicies)
    setstate(rstate)
    return subj_cond
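The final draw above (pick uniformly among the least-filled conditions, using a throwaway reseed so the module-level RNG state is left untouched) can be illustrated in isolation with made-up counts:

# Self-contained sketch of the "fill conditions evenly" draw used above (counts are hypothetical).
from random import choice, getstate, seed, setstate

counts = [3, 1, 5, 1]                                            # per-condition assignment counts
indices = [i for i, x in enumerate(counts) if x == min(counts)]  # least-filled conditions: [1, 3]
rstate = getstate()
seed()               # reseed from system entropy for this one draw
subj_cond = choice(indices)
setstate(rstate)     # restore the previous global RNG state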
Example #31
    def solve_again(self, new_sentences, **options):
        randstate = random.getstate()
        self.engine.start_again(new_sentences, **options)
        self.result = self.engine.get_result()
        random.setstate(randstate)
        return self.result
Example #32
def sgd(f,
        x0,
        step,
        iterations,
        postprocessing=None,
        use_saved=False,
        print_every=10):
    """ Stochastic Gradient Descent

    Implement the stochastic gradient descent method in this function.

    Arguments:
    f -- the function to optimize, it should take a single
         argument and yield two outputs, a loss and the gradient
         with respect to the arguments
    x0 -- the initial point to start SGD from
    step -- the step size for SGD
    iterations -- total iterations to run SGD for
    postprocessing -- postprocessing function for the parameters
                      if necessary. In the case of word2vec we will need to
                      normalize the word vectors to have unit length.
    print_every -- specifies how many iterations to output loss

    Return:
    x -- the parameter value after SGD finishes
    """
    # Anneal learning rate every several iterations
    anneal_every = 20000

    if use_saved:
        start_iter, oldx, state = load_saved_params()
        if start_iter > 0:
            x0 = oldx
            step *= 0.5**(start_iter / anneal_every)

        if state:
            random.setstate(state)
    else:
        start_iter = 0

    x = x0

    if not postprocessing:
        postprocessing = lambda x: x

    exploss = None

    for iter_ in range(start_iter + 1, iterations + 1):
        # You might want to print the progress every few iterations.

        loss = None
        ### YOUR CODE HERE
        loss, grad = f(x)
        x = x - step * grad
        ### END YOUR CODE

        x = postprocessing(x)
        if iter_ % print_every == 0:
            if not exploss:
                exploss = loss
            else:
                exploss = .95 * exploss + .05 * loss
            print("iter %d: %f" % (iter_, exploss))

        if iter_ % SAVE_PARAMS_EVERY == 0 and use_saved:
            save_params(iter_, x)

        if iter_ % anneal_every == 0:
            step *= 0.5

    return x
Example #33
def sgd(f,
        x0,
        step,
        iterations,
        postprocessing=None,
        useSaved=False,
        PRINT_EVERY=10):
    """ Stochastic Gradient Descent """
    # Implement the stochastic gradient descent method in this
    # function.

    # Inputs:
    # - f: the function to optimize, it should take a single
    #     argument and yield two outputs, a cost and the gradient
    #     with respect to the arguments
    # - x0: the initial point to start SGD from
    # - step: the step size for SGD
    # - iterations: total iterations to run SGD for
    # - postprocessing: postprocessing function for the parameters
    #     if necessary. In the case of word2vec we will need to
    #     normalize the word vectors to have unit length.
    # - PRINT_EVERY: specifies every how many iterations to output

    # Output:
    # - x: the parameter value after SGD finishes

    # Anneal learning rate every several iterations
    ANNEAL_EVERY = 20000

    if useSaved:
        start_iter, oldx, state = load_saved_params()
        if start_iter > 0:
            x0 = oldx
            step *= 0.5**(start_iter / ANNEAL_EVERY)

        if state:
            random.setstate(state)
    else:
        start_iter = 0

    x = x0

    if not postprocessing:
        postprocessing = lambda x: x

    expcost = None

    for iter in xrange(start_iter + 1, iterations + 1):
        ### Don't forget to apply the postprocessing after every iteration!
        ### You might want to print the progress every few iterations.

        cost = None
        ### YOUR CODE HERE

        cost, grad = f(x)
        x -= step * grad
        x = postprocessing(x)
        #raise NotImplementedError
        ### END YOUR CODE

        if iter % PRINT_EVERY == 0:
            if not expcost:
                expcost = cost
            else:
                expcost = .95 * expcost + .05 * cost
            print "iter %d: %f" % (iter, expcost)

        if iter % SAVE_PARAMS_EVERY == 0 and useSaved:
            save_params(iter, x)

        if iter % ANNEAL_EVERY == 0:
            step *= 0.5

    return x
Example #34
def rand_list_of(n):
    # extend 32-bit values to 64-bit integers (to avoid overflow in summation)
    from random import randrange, setstate
    init_state = (
        3, (2147483648L, 3430835514L, 2928424416L, 3147699060L, 2823572732L,
            2905216632L, 1887281517L, 14272356L, 1356039141L, 2741361235L,
            1824725388L, 2228169284L, 2679861265L, 3150239284L, 657657570L,
            1407124159L, 517316568L, 653526369L, 139268705L, 3784719953L,
            2212355490L, 3452491289L, 1232629882L, 1791207424L, 2898278956L,
            1147783320L, 1824413680L, 1993303973L, 2568444883L, 4228847642L,
            4163974668L, 385627078L, 3663560714L, 320542554L, 1565882322L,
            3416481154L, 4219229298L, 315071254L, 778331393L, 3961037651L,
            2951403614L, 3355970261L, 102946340L, 2509883952L, 215897963L,
            3361072826L, 689991350L, 3348092598L, 1763608447L, 2140226443L,
            3813151178L, 2619956936L, 51244592L, 2130725065L, 3867113849L,
            1980820881L, 2600246771L, 3207535572L, 257556968L, 2223367443L,
            3706150033L, 1711074250L, 4252385224L, 3197142331L, 4139558716L,
            748471849L, 2281163369L, 2596250092L, 2804492653L, 484240110L,
            3726117536L, 2483815933L, 2173995598L, 3765136999L, 3178931194L,
            1237068319L, 3427263384L, 3958412830L, 2268556676L, 360704423L,
            4113430429L, 3758882140L, 3743971788L, 1685454939L, 488386L,
            3511218911L, 3020688912L, 2168345327L, 3149651862L, 1472484695L,
            2011779229L, 1112533726L, 1873931730L, 2196153055L, 3806225492L,
            1515074892L, 251489714L, 1958141723L, 2081062631L, 3703490262L,
            3211541213L, 1436109217L, 2664448365L, 2350764370L, 1285829042L,
            3496997759L, 2306637687L, 1571644344L, 1020052455L, 3114491401L,
            2994766034L, 1518527036L, 994512437L, 1732585804L, 2089330296L,
            2592371643L, 2377347339L, 2617648350L, 1478066246L, 389918052L,
            1126787130L, 2728695369L, 2921719205L, 3193658789L, 2101782606L,
            4284039483L, 2704867468L, 3843423543L, 119359906L, 1882384901L,
            832276556L, 1862974878L, 1943541262L, 1823624942L, 2146680272L,
            333006125L, 929197835L, 639017219L, 1640196300L, 1424826762L,
            2119569013L, 4259272802L, 2089277168L, 2030198981L, 2950559216L,
            621654826L, 3452546704L, 4085446289L, 3038316311L, 527272378L,
            1679817853L, 450787204L, 3525043861L, 3838351358L, 1558592021L,
            3649888848L, 3328370698L, 3247166155L, 3855970537L, 1183088418L,
            2778702834L, 2820277014L, 1530905121L, 1434023607L, 3942716950L,
            41643359L, 310637634L, 1537174663L, 4265200088L, 3126624846L,
            2837665903L, 446994733L, 85970060L, 643115053L, 1751804182L,
            1480207958L, 2977093071L, 544778713L, 738954842L, 3370733859L,
            3242319053L, 2707786138L, 4041098196L, 1671493839L, 3420415077L,
            2473516599L, 3949211965L, 3686186772L, 753757988L, 220738063L,
            772481263L, 974568026L, 3190407677L, 480257177L, 3620733162L,
            2616878358L, 665763320L, 2808607644L, 3851308236L, 3633157256L,
            4240746864L, 1261222691L, 268963935L, 1449514350L, 4229662564L,
            1342533852L, 1913674460L, 1761163533L, 1974260074L, 739184472L,
            3811507072L, 2880992381L, 3998389163L, 2673626426L, 2212222504L,
            231447607L, 2608719702L, 3509764733L, 2403318909L, 635983093L,
            4233939991L, 2894463467L, 177171270L, 2962364044L, 1191007101L,
            882222586L, 1004217833L, 717897978L, 2125381922L, 626199402L,
            3694698943L, 1373935523L, 762314613L, 2291077454L, 2111081024L,
            3758576304L, 2812129656L, 4067461097L, 3700761868L, 2281420733L,
            197217625L, 460620692L, 506837624L, 1532931238L, 3872395078L,
            3629107738L, 2273221134L, 2086345980L, 1240615886L, 958420495L,
            4059583254L, 3119201875L, 3742950862L, 891360845L, 2974235885L,
            87814219L, 4067521161L, 615939803L, 1881195074L, 2225917026L,
            2775128741L, 2996201447L, 1590546624L, 3960431955L, 1417477945L,
            913935155L, 1610033170L, 3212701447L, 2545374014L, 2887105562L,
            2991635417L, 3194532260L, 1565555757L, 2142474733L, 621483430L,
            2268177481L, 919992760L, 2022043644L, 2756890220L, 881105937L,
            2621060794L, 4262292201L, 480112895L, 2557060162L, 2367031748L,
            2172434102L, 296539623L, 3043643256L, 59166373L, 2947638193L,
            1312917612L, 1798724013L, 75864164L, 339661149L, 289536004L,
            422147716L, 1134944052L, 1095534216L, 1231984277L, 239787072L,
            923053211L, 1015393503L, 2558889580L, 4194512643L, 448088150L,
            707905706L, 2649061310L, 3081089715L, 3432955562L, 2217740069L,
            1965789353L, 3320360228L, 3625802364L, 2420747908L, 3116949010L,
            442654625L, 2157578112L, 3603825090L, 3111995525L, 1124579902L,
            101836896L, 3297125816L, 136981134L, 4253748197L, 3809600572L,
            1668193778L, 4146759785L, 3712590372L, 2998653463L, 3032597504L,
            1046471011L, 2843821193L, 802959497L, 3307715534L, 3226042258L,
            1014478160L, 3105844949L, 3209150965L, 610876993L, 2563947590L,
            2482526324L, 3913970138L, 2812702315L, 4281779167L, 1026357391L,
            2579486306L, 402208L, 3457975059L, 1714004950L, 2543595755L,
            2421499458L, 478932497L, 3117588180L, 1565800974L, 1757724858L,
            1483685124L, 2262270397L, 3794544469L, 3986696110L, 2914756339L,
            1952061826L, 2672480198L, 3793151752L, 309930721L, 1861137379L,
            94571340L, 1162935802L, 3681554226L, 4027302061L, 21079572L,
            446709644L, 1587253187L, 1845056582L, 3080553052L, 3575272255L,
            2526224735L, 3569822959L, 2685900491L, 918305237L, 1399881227L,
            1554912161L, 703181091L, 738501299L, 269937670L, 1078548118L,
            2313670525L, 3495159622L, 2659487842L, 11394628L, 1222454456L,
            3392065094L, 3426833642L, 1153231613L, 1234517654L, 3144547626L,
            2148039080L, 3790136587L, 684648337L, 3956093475L, 1384378197L,
            2042781475L, 759764431L, 222267088L, 3187778457L, 3795259108L,
            2817237549L, 3494781277L, 3762880618L, 892345749L, 2153484401L,
            721588894L, 779278769L, 3306398772L, 4221452913L, 1981375723L,
            379087895L, 1604791625L, 1426046977L, 4231163093L, 1344994557L,
            1341041093L, 1072537134L, 1829925137L, 3791772627L, 3176876700L,
            2553745117L, 664821113L, 473469583L, 1076256869L, 2406012795L,
            3141453822L, 4123012649L, 3058620143L, 1785080140L, 1181483189L,
            3587874749L, 1453504375L, 707249496L, 2022787257L, 2436320047L,
            602521701L, 483826957L, 821599664L, 3333871672L, 3024431570L,
            3814441382L, 416508285L, 1217138244L, 3975201118L, 3077724941L,
            180118569L, 3754556886L, 4121534265L, 3495283397L, 700504668L,
            3113972067L, 719371171L, 910731026L, 619936911L, 2937105529L,
            2039892965L, 3853404454L, 3783801801L, 783321997L, 1135195902L,
            326690505L, 1774036419L, 3476057413L, 1518029608L, 1248626026L,
            427510490L, 3443223611L, 4087014505L, 2858955517L, 1918675812L,
            3921514056L, 3929126528L, 4048889933L, 1583842117L, 3742539544L,
            602292017L, 3393759050L, 3929818519L, 3119818281L, 3472644693L,
            1993924627L, 4163228821L, 2943877721L, 3143487730L, 4087113198L,
            1149082355L, 1713272081L, 1243627655L, 3511633996L, 3358757220L,
            3812981394L, 650044449L, 2143650644L, 3869591312L, 3719322297L,
            386030648L, 2633538573L, 672966554L, 3498396042L, 3907556L,
            2308686209L, 2878779858L, 1475925955L, 2701537395L, 1448018484L,
            2962578755L, 1383479284L, 3731453464L, 3659512663L, 1521189121L,
            843749206L, 2243090279L, 572717972L, 3400421356L, 3440777300L,
            1393518699L, 1681924551L, 466257295L, 568413244L, 3288530316L,
            2951425105L, 2624424893L, 2410788864L, 2243174464L, 1385949609L,
            2454100663L, 1113953725L, 2127471443L, 1775715557L, 3874125135L,
            1901707926L, 3152599339L, 2277843623L, 1941785089L, 3171888228L,
            802596998L, 3397391306L, 1743834429L, 395463904L, 2099329462L,
            3761809163L, 262702111L, 1868879810L, 2887406426L, 1160032302L,
            4164116477L, 2287740849L, 3312176050L, 747117003L, 4048006270L,
            3955419375L, 2724452926L, 3141695820L, 791246424L, 524525849L,
            1794277132L, 295485241L, 4125127474L, 825108028L, 1582794137L,
            1259992755L, 2938829230L, 912029932L, 1534496985L, 3075283272L,
            4052041116L, 1125808104L, 2032938837L, 4008676545L, 1638361535L,
            1649316497L, 1302633381L, 4221627277L, 1206130263L, 3114681993L,
            3409690900L, 3373263243L, 2922903613L, 349048087L, 4049532385L,
            3458779287L, 1737687814L, 287275672L, 645786941L, 1492233180L,
            3925845678L, 3344829077L, 1669219217L, 665224162L, 2679234088L,
            1986576411L, 50610077L, 1080114376L, 1881648396L, 3818465156L,
            1486861008L, 3824208930L, 1782008170L, 4115911912L, 656413265L,
            771498619L, 2709443211L, 1919820065L, 451888753L, 1449812173L,
            2001941180L, 2997921765L, 753032713L, 3011517640L, 2386888602L,
            3181040472L, 1280522185L, 1036471598L, 1243809973L, 2985144032L,
            2238294821L, 557934351L, 347132246L, 1797956016L, 624L), None)
    setstate(init_state)
    return [
        rffi.r_longlong(randrange(-(1 << 31), (1 << 31) - 1)) for _ in range(n)
    ]
Example #35
def not_so_random():
    old_state = random.getstate()
    random.seed(42)
    yield
    random.setstate(old_state)
Example #36
    def __exit__(self, *args):
        np.random.set_state(self.np_rng_state)
        random.setstate(self.rand_rng_state)
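Example #36 gives only the __exit__ half; a hypothetical matching class (name assumed) that snapshots both the NumPy and stdlib RNG states on entry would pair with it like this:

# Hypothetical context manager completing Example #36 (class name assumed).
import random
import numpy as np

class PreserveRngState(object):
    def __enter__(self):
        self.np_rng_state = np.random.get_state()  # snapshot NumPy's global RNG
        self.rand_rng_state = random.getstate()    # snapshot the stdlib RNG
        return self

    def __exit__(self, *args):
        np.random.set_state(self.np_rng_state)
        random.setstate(self.rand_rng_state)

with PreserveRngState():
    random.seed(0)       # any seeding done inside the block...
    np.random.seed(0)
# ...is undone on exit; both generators resume from their prior states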
Example #37
def gradcheck_naive(f, x):
    """ Gradient check for a function f.

    Arguments:
    f -- a function that takes a single argument and outputs the
         cost and its gradients (f computes both the loss value and its gradient)
    x -- the point (numpy array) to check the gradient at (the data point at which
         gradcheck numerically approximates the gradient from the definition of the derivative)
    """

    rndstate = random.getstate()
    random.setstate(rndstate)
    fx, grad = f(x)  # Evaluate function value at original point
    h = 1e-4  # Do not change this!

    # Iterate over all indexes ix in x to check the gradient.
    it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])
    # np.nditer is NumPy's built-in array iterator; flags=['multi_index'] tracks the multi-dimensional index
    # op_flags=['readwrite'] allows the iterated elements to be both read and written
    while not it.finished:
        ix = it.multi_index
        # it.multi_index is the index of the current element, e.g. (1,1), (1,2), ..., (x,y), ...

        # Try modifying x[ix] with h defined above to compute numerical
        # gradients (numgrad).

        # Use the centered difference of the gradient.
        # It has smaller asymptotic error than forward / backward difference
        # methods. If you are curious, check out here:
        # https://math.stackexchange.com/questions/2326181/when-to-use-forward-or-central-difference-approximations

        # Make sure you call random.setstate(rndstate)
        # before calling f(x) each time. This will make it possible
        # to test cost functions with built in randomness later.

        ### YOUR CODE HERE:
        x[ix] += h

        random.setstate(rndstate)
        new_f1 = f(x)[0]

        x[ix] -= 2 * h

        random.setstate(rndstate)
        new_f2 = f(x)[0]

        x[ix] += h

        numgrad = (new_f1 - new_f2) / (2 * h)
        # limit as h --> 0, approximated here with h = 1e-4
        ### END YOUR CODE

        # Compare gradients
        reldiff = abs(numgrad - grad[ix]) / max(1, abs(numgrad), abs(grad[ix]))
        if reldiff > 1e-5:
            print("Gradient check failed.")
            print("First gradient error found at index %s" % str(ix))
            print("Your gradient: %f \t Numerical gradient: %f" %
                  (grad[ix], numgrad))
            return

        it.iternext()  # Step to next dimension

    print("Gradient check passed!")
Example #38
    def __exit__(self, type, value, traceback):
        setstate(self.state)
Example #39
def sgd(f, x0, step, iterations, postprocessing=None, useSaved=False,
        PRINT_EVERY=10):
    """ Stochastic Gradient Descent

    Implement the stochastic gradient descent method in this function.

    Arguments:
    f -- the function to optimize, it should take a single
         argument and yield two outputs, a cost and the gradient
         with respect to the arguments
    x0 -- the initial point to start SGD from
    step -- the step size for SGD
    iterations -- total iterations to run SGD for
    postprocessing -- postprocessing function for the parameters
                      if necessary. In the case of word2vec we will need to
                      normalize the word vectors to have unit length.
    PRINT_EVERY -- specifies how many iterations to output loss

    Return:
    x -- the parameter value after SGD finishes
    """

    # Anneal learning rate every several iterations
    ANNEAL_EVERY = 20000

    if useSaved:
        start_iter, oldx, state = load_saved_params()
        if start_iter > 0:
            x0 = oldx
            step *= 0.5 ** (start_iter / ANNEAL_EVERY)

        if state:
            random.setstate(state)
    else:
        start_iter = 0

    x = x0

    if not postprocessing:
        postprocessing = lambda x: x

    expcost = None

    for iter in xrange(start_iter + 1, iterations + 1):
        # Don't forget to apply the postprocessing after every iteration!
        # You might want to print the progress every few iterations.

        cost = None
        ### YOUR CODE HERE
        cost, nabla_J = f(x)
        del_x = -step * nabla_J
        x += del_x
        if postprocessing:
            x = postprocessing(x)
        ### END YOUR CODE

        if iter % PRINT_EVERY == 0:
            if not expcost:
                expcost = cost
            else:
                expcost = .95 * expcost + .05 * cost
            print "iter %d: %f" % (iter, expcost)

        if iter % SAVE_PARAMS_EVERY == 0 and useSaved:
            save_params(iter, x)

        if iter % ANNEAL_EVERY == 0:
            step *= 0.5

    return x
Example #40
    def restore(self):
        setstate(self.__state)
Example #41
def set_rng_state(state):
    torch.set_rng_state(state['torch'])
    random.setstate(state['random'])
    if 'numpy' in state:
        import numpy as np
        np.random.set_state(state['numpy'])
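Example #41 shows only the setter; a hypothetical get_rng_state counterpart (name and signature assumed) could build the dictionary the setter expects, with NumPy optional to mirror the conditional restore above:

# Hypothetical counterpart to set_rng_state above (name and signature assumed).
import random
import torch

def get_rng_state(include_numpy=True):
    state = {
        'torch': torch.get_rng_state(),  # CPU RNG state as a tensor
        'random': random.getstate(),
    }
    if include_numpy:
        import numpy as np
        state['numpy'] = np.random.get_state()
    return state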
Example #42
def main(checkpoint=None, FREQ=2, verbose=True):
    """
    This algorithm reproduce the simplest evolutionary algorithm as
    presented in chapter 7 of [Back2000]_.

    :param checkpoint: string, checkpoint file path
    :param FREQ: int, save result to checkpoint every n*FREQ generation
    :param verbose: boolean, whether or not to log the statistics
    :return: A list of varied individuals that are independent of their
             parents.
    """
    random.seed(318)

    # probability of crossover
    cxpb = 0.5
    # probability for mutation
    mutpb = 0.2
    # number of generations
    ngen = 2

    if checkpoint:
        with open(checkpoint, "r") as cp_file:
            cp = pickle.load(cp_file)

        population = cp["population"]
        start_gen = cp["generation"]
        halloffame = cp["halloffame"]
        logbook = cp["logbook"]
        random.setstate(cp["rndstate"])
    else:
        population = toolbox.population(n=70)
        start_gen = 0
        halloffame = tools.HallOfFame(maxsize=1)
        logbook = tools.Logbook()

    # set initial configuration
    stats_fit = tools.Statistics(lambda ind: ind.fitness.values)
    stats_size = tools.Statistics(len)
    mstats = tools.MultiStatistics(fitness=stats_fit, size=stats_size)
    mstats.register("avg", np.mean)
    mstats.register("std", np.std)
    mstats.register("min", np.min)
    mstats.register("max", np.max)

    logbook.header = ['gen', 'nevals'] + (mstats.fields if mstats else [])

    # Evaluate the individuals with an invalid fitness
    invalid_ind = [ind for ind in population if not ind.fitness.valid]
    fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)

    for ind, fit in zip(invalid_ind, fitnesses):
        ind.fitness.values = fit

    if halloffame is not None:
        halloffame.update(population)

    record = mstats.compile(population) if mstats else {}
    logbook.record(gen=0, nevals=len(invalid_ind), **record)

    if verbose:
        print logbook.stream

    # Begin the generational process
    for gen in range(1, ngen+ 1):
        # Select the next generation individuals
        offspring = toolbox.select(population, len(population))
        # Vary the pool of individuals
        offspring = algorithms.varAnd(offspring, toolbox, cxpb, mutpb)
        # Evaluate the individuals with an invalid fitness
        invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
        fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)

        for ind, fit in zip(invalid_ind, fitnesses):
            ind.fitness.values = fit

        # Update the hall of fame with the generated individuals
        if halloffame is not None:
            halloffame.update(offspring)

        # Replace the current population by the offspring
        population[:] = offspring
        # Append the current generation statistics to the logbook
        record = mstats.compile(population) if mstats else {}
        logbook.record(gen=gen, nevals=len(invalid_ind), **record)

        if verbose:
            print logbook.stream

        if gen % FREQ == 0:
            cp = dict(population=population, generation=gen, halloffame=halloffame,
                      logbook=logbook, rndstate=random.getstate())

            with open("checkpoint_name.pkl", "wb") as cp_file:
                pickle.dump(cp, cp_file)

    return population, logbook
Example #43
    successful_restart = False  # initialization implied by the loop below
    pickle_idx = 0
    while not successful_restart:
        try:
            pickled_pops = glob("pickledPops{}/*".format(SEED))
            last_gen = natural_sort(pickled_pops, reverse=True)[pickle_idx]
            with open(last_gen, 'rb') as handle:
                [optimizer, random_state,
                 numpy_random_state] = cPickle.load(handle)
            successful_restart = True

            my_pop = optimizer.pop
            my_optimization = optimizer
            my_optimization.continued_from_checkpoint = True
            my_optimization.start_time = time()

            random.setstate(random_state)
            np.random.set_state(numpy_random_state)

            print "Starting from pickled checkpoint: generation {}".format(
                my_pop.gen)

        except EOFError:
            # something went wrong writing the checkpoint : use previous checkpoint and redo last generation
            sub.call("touch IO_ERROR_$(date +%F_%R)", shell=True)
            pickle_idx += 1
            pass

my_optimization.run(max_hours_runtime=MAX_TIME,
                    max_gens=GENS,
                    checkpoint_every=CHECKPOINT_EVERY,
                    directory=DIRECTORY)
Example #44
    def prepare(self, src: str, dst: str, force_download: bool = False) -> None:
        """See ``rainbow.preparation.preparer.Preparer``."""
        # Create the directory for saving the source files.
        tf.io.gfile.makedirs(os.path.join(src, self.JOCI["name"]))

        # Create the directory for saving the prepared files.
        tf.io.gfile.makedirs(os.path.join(dst, self.JOCI["name"]))

        src_path = os.path.join(src, self.JOCI["name"], self.JOCI["file_name"])

        # Copy the dataset to src_path from the URL.
        if not tf.io.gfile.exists(src_path) or force_download:
            logger.info(
                f"Downloading {self.JOCI['name']} from {self.JOCI['url']}"
                f" to {src_path}."
            )
            preparer_utils.copy_url_to_gfile(self.JOCI["url"], src_path)

        with tf.io.gfile.GFile(src_path, "rb") as src_file:
            # Verify the dataset file against its checksum.
            sha256 = hashlib.sha256()
            chunk = None
            while chunk != b"":
                # Read in 64KB at a time.
                chunk = src_file.read(64 * 1024)
                sha256.update(chunk)
            checksum = sha256.hexdigest()
            if checksum != self.JOCI["checksum"]:
                raise IOError(
                    f"The file for {self.JOCI['name']} did not have the"
                    f" expected checksum. Try running with force_download=True"
                    f" to redownload all files, or consider updating the"
                    f" datasets' checksums."
                )
            # Return to the beginning of the file.
            src_file.seek(0)

            # Read the data from the JOCI file.
            with zipfile.ZipFile(src_file, "r") as src_zip:
                with src_zip.open(self.JOCI["csv_path"], "r") as joci_csv:
                    joci_csv = codecs.getreader("utf-8")(joci_csv)
                    reader = csv.DictReader(joci_csv)

                    data = [x for x in reader]

        # Prepare and write the splits to dst.

        # Shuffle and split the JOCI data.
        random_state = random.getstate()
        random.seed(rainbow_utils.string_to_seed(self.JOCI["name"]))
        random.shuffle(data)
        random.setstate(random_state)

        for split in self.JOCI["splits"].values():
            dst_path = os.path.join(
                dst,
                self.JOCI["name"],
                settings.PREPROCESSED_SPLIT_FILE_NAME_TEMPLATE.format(
                    split=split["name"], dataset=self.JOCI["name"]
                ),
            )

            with tf.io.gfile.GFile(dst_path, "w") as dst_file:
                rows_written = 0

                writer = csv.DictWriter(
                    dst_file,
                    fieldnames=["index", "inputs", "targets"],
                    dialect="unix",
                )
                writer.writeheader()

                split_data, data = data[: split["size"]], data[split["size"] :]
                for i, row_in in enumerate(split_data):
                    row_out = {
                        "index": rows_written,
                        "inputs": (
                            f"[{self.JOCI['name']}]:\n"
                            f"<context>{row_in['CONTEXT']}</context>\n"
                            f"<hypothesis>{row_in['HYPOTHESIS']}</hypothesis>"
                        ),
                        "targets": row_in["LABEL"],
                    }
                    if i == 0:
                        logger.info(
                            f"\n\n"
                            f"Example {row_out['index']} from"
                            f" {self.JOCI['name']}'s {split['name']} split:\n"
                            f"inputs:\n"
                            f"{row_out['inputs']}\n"
                            f"targets:\n"
                            f"{row_out['targets']}\n"
                            f"\n"
                        )

                    # Write to the CSV.
                    writer.writerow(row_out)
                    rows_written += 1

            if rows_written != split["size"]:
                logger.error(
                    f"Expected to write {split['size']} rows for the"
                    f" {split['name']} split of {self.JOCI['name']}, instead"
                    f" {rows_written} were written."
                )

        logger.info(f"Finished processing JOCI.")
Example #45
def patch_and_output(settings, window, spoiler, rom, start):
    logger = logging.getLogger('')
    logger.info('Patching ROM.')
    worlds = spoiler.worlds
    cosmetics_log = None

    settings_string_hash = hashlib.sha1(
        settings.settings_string.encode('utf-8')).hexdigest().upper()[:5]
    if settings.output_file:
        outfilebase = settings.output_file
    elif settings.world_count > 1:
        outfilebase = 'OoT_%s_%s_W%d' % (settings_string_hash, settings.seed,
                                         settings.world_count)
    else:
        outfilebase = 'OoT_%s_%s' % (settings_string_hash, settings.seed)

    output_dir = default_output_path(settings.output_dir)

    if settings.compress_rom == 'Patch':
        rng_state = random.getstate()
        file_list = []
        window.update_progress(65)
        for world in worlds:
            if settings.world_count > 1:
                window.update_status('Patching ROM: Player %d' %
                                     (world.id + 1))
                patchfilename = '%sP%d.zpf' % (outfilebase, world.id + 1)
            else:
                window.update_status('Patching ROM')
                patchfilename = '%s.zpf' % outfilebase

            random.setstate(rng_state)
            patch_rom(spoiler, world, rom)
            cosmetics_log = patch_cosmetics(settings, rom)
            rom.update_header()

            window.update_progress(65 + 20 *
                                   (world.id + 1) / settings.world_count)

            window.update_status('Creating Patch File')
            output_path = os.path.join(output_dir, patchfilename)
            file_list.append(patchfilename)
            create_patch_file(rom, output_path)
            rom.restore()
            window.update_progress(65 + 30 *
                                   (world.id + 1) / settings.world_count)

            if settings.create_cosmetics_log and cosmetics_log:
                window.update_status('Creating Cosmetics Log')
                if settings.world_count > 1:
                    cosmetics_log_filename = "%sP%d_Cosmetics.txt" % (
                        outfilebase, world.id + 1)
                else:
                    cosmetics_log_filename = '%s_Cosmetics.txt' % outfilebase
                cosmetics_log.to_file(
                    os.path.join(output_dir, cosmetics_log_filename))
                file_list.append(cosmetics_log_filename)
            cosmetics_log = None

        if settings.world_count > 1:
            window.update_status('Creating Patch Archive')
            output_path = os.path.join(output_dir, '%s.zpfz' % outfilebase)
            with zipfile.ZipFile(output_path, mode="w") as patch_archive:
                for file in file_list:
                    file_path = os.path.join(output_dir, file)
                    patch_archive.write(file_path,
                                        file.replace(outfilebase, ''),
                                        compress_type=zipfile.ZIP_DEFLATED)
            for file in file_list:
                os.remove(os.path.join(output_dir, file))
        logger.info("Created patchfile at: %s" % output_path)
        window.update_progress(95)

    elif settings.compress_rom != 'None':
        window.update_status('Patching ROM')
        patch_rom(spoiler, worlds[settings.player_num - 1], rom)
        cosmetics_log = patch_cosmetics(settings, rom)
        window.update_progress(65)

        window.update_status('Saving Uncompressed ROM')
        if settings.world_count > 1:
            filename = "%sP%d.z64" % (outfilebase, settings.player_num)
        else:
            filename = '%s.z64' % outfilebase
        output_path = os.path.join(output_dir, filename)
        rom.write_to_file(output_path)
        if settings.compress_rom == 'True':
            window.update_status('Compressing ROM')
            logger.info('Compressing ROM.')

            if is_bundled():
                compressor_path = "."
            else:
                compressor_path = "Compress"

            if platform.system() == 'Windows':
                if 8 * struct.calcsize("P") == 64:
                    compressor_path += "\\Compress.exe"
                else:
                    compressor_path += "\\Compress32.exe"
            elif platform.system() == 'Linux':
                if platform.uname()[4] == 'aarch64' or platform.uname(
                )[4] == 'arm64':
                    compressor_path += "/Compress_ARM64"
                else:
                    compressor_path += "/Compress"
            elif platform.system() == 'Darwin':
                compressor_path += "/Compress.out"
            else:
                compressor_path = ""
                logger.info('OS not supported for compression')

            output_compress_path = output_path[:output_path.
                                               rfind('.')] + '-comp.z64'
            if compressor_path != "":
                run_process(
                    window, logger,
                    [compressor_path, output_path, output_compress_path])
            os.remove(output_path)
            logger.info("Created compressed rom at: %s" % output_compress_path)
        else:
            logger.info("Created uncompressed rom at: %s" % output_path)
        window.update_progress(95)

    if not settings.create_spoiler or settings.output_settings:
        settings.distribution.update_spoiler(spoiler, False)
        window.update_status('Creating Settings Log')
        settings_path = os.path.join(output_dir,
                                     '%s_Settings.json' % outfilebase)
        settings.distribution.to_file(settings_path, False)
        logger.info("Created settings log at: %s" %
                    ('%s_Settings.json' % outfilebase))
    if settings.create_spoiler:
        settings.distribution.update_spoiler(spoiler, True)
        window.update_status('Creating Spoiler Log')
        spoiler_path = os.path.join(output_dir,
                                    '%s_Spoiler.json' % outfilebase)
        settings.distribution.to_file(spoiler_path, True)
        logger.info("Created spoiler log at: %s" %
                    ('%s_Spoiler.json' % outfilebase))

    if settings.create_cosmetics_log and cosmetics_log:
        window.update_status('Creating Cosmetics Log')
        if settings.world_count > 1 and not settings.output_file:
            filename = "%sP%d_Cosmetics.txt" % (outfilebase,
                                                settings.player_num)
        else:
            filename = '%s_Cosmetics.txt' % outfilebase
        cosmetic_path = os.path.join(output_dir, filename)
        cosmetics_log.to_file(cosmetic_path)
        logger.info("Created cosmetic log at: %s" % cosmetic_path)

    window.update_progress(100)
    if cosmetics_log and cosmetics_log.error:
        window.update_status(
            'Success: Rom patched successfully. Some cosmetics could not be applied.'
        )
    else:
        window.update_status('Success: Rom patched successfully')
    logger.info('Done. Enjoy.')
    logger.debug('Total Time: %s', time.process_time() - start)

    return worlds[settings.player_num - 1]
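The multiworld branch above bundles each per-player patch into one .zpfz archive and then deletes the loose files. A minimal, hedged sketch of that zipfile pattern with hypothetical file names:

import os
import zipfile

# Hypothetical names standing in for the per-world patch files created above.
output_dir = 'Output'
outfilebase = 'OoT_12345'
file_list = ['OoT_12345P1.zpf', 'OoT_12345P2.zpf']

os.makedirs(output_dir, exist_ok=True)
for name in file_list:  # stand-in patch payloads so the sketch runs on its own
    with open(os.path.join(output_dir, name), 'wb') as fh:
        fh.write(b'ZPF')

archive_path = os.path.join(output_dir, '%s.zpfz' % outfilebase)
with zipfile.ZipFile(archive_path, mode="w") as patch_archive:
    for name in file_list:
        # Store each entry under its short name ('P1.zpf', 'P2.zpf').
        patch_archive.write(os.path.join(output_dir, name),
                            name.replace(outfilebase, ''),
                            compress_type=zipfile.ZIP_DEFLATED)

for name in file_list:  # the loose patch files go away once archived
    os.remove(os.path.join(output_dir, name))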
Example #46
0
    def sample(self, num_rows=1, seed=None):
        """Generating samples from vine model."""
        s1 = np.random.get_state()
        s2 = random.getstate()

        np.random.seed(seed)
        random.seed(seed)

        unis = np.random.uniform(0, 1, self.n_var)

        # randomly select a node to start with
        first_ind = random.randint(0, self.n_var - 1)

        np.random.set_state(s1)
        random.setstate(s2)

        adj = self.trees[0].get_adjacent_matrix()
        visited = []
        explore = [first_ind]

        sampled = np.zeros(self.n_var)
        itr = 0
        while explore:
            current = explore.pop(0)
            neighbors = np.where(adj[current, :] == 1)[0].tolist()
            if itr == 0:
                new_x = self.ppfs[current](unis[current])

            else:
                for i in range(itr - 1, -1, -1):
                    current_ind = -1

                    if i >= self.truncated:
                        continue

                    current_tree = self.trees[i].edges
                    # get index of edge to retrieve
                    for edge in current_tree:
                        if i == 0:
                            if (edge.L == current and edge.R == visited[0]) or\
                               (edge.R == current and edge.L == visited[0]):
                                current_ind = edge.index
                                break
                        else:
                            if edge.L == current or edge.R == current:
                                condition = set(edge.D)
                                condition.add(edge.L)
                                condition.add(edge.R)
                                visit_set = set(visited)
                                visit_set.add(current)

                                if condition.issubset(visit_set):
                                    current_ind = edge.index
                                break

                    if current_ind != -1:
                        # the node is not independent conditional on the visited nodes
                        copula_type = current_tree[current_ind].name
                        copula_para = current_tree[current_ind].param
                        cop = Bivariate(CopulaTypes(copula_type))
                        derivative = cop.get_h_function()
                        # start with last level
                        if i == itr - 1:
                            tmp = optimize.fminbound(derivative,
                                                     EPSILON,
                                                     1.0,
                                                     args=(unis[visited[0]],
                                                           copula_para,
                                                           unis[current]))
                        else:
                            tmp = optimize.fminbound(derivative,
                                                     EPSILON,
                                                     1.0,
                                                     args=(unis[visited[0]],
                                                           copula_para, tmp))

                        tmp = min(max(tmp, EPSILON), 0.99)

                new_x = self.ppfs[current](tmp)

            sampled[current] = new_x

            for s in neighbors:
                if s not in visited:
                    explore.insert(0, s)

            itr += 1
            visited.insert(0, current)

        return sampled
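The head of sample() swaps in a seeded state for both random and numpy.random and then hands the caller's streams back. A minimal sketch of that save/seed/restore pattern on its own (the helper name is illustrative, not from the library):

import random

import numpy as np


def draw_with_temporary_seed(seed, n_var=4):
    # Save both generators, reseed, draw, then restore the caller's streams.
    saved_np = np.random.get_state()
    saved_py = random.getstate()
    np.random.seed(seed)
    random.seed(seed)  # seed(), not setstate(), is what accepts an int
    unis = np.random.uniform(0, 1, n_var)
    first_ind = random.randint(0, n_var - 1)
    np.random.set_state(saved_np)
    random.setstate(saved_py)
    return unis, first_ind


print(draw_with_temporary_seed(42))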
Example #47
0
    def __init__(self,
                 offline_dataset,
                 batch_size,
                 shuffle_batches=True,
                 num_buckets=None,
                 shuffle_buckets=True):
        if not isinstance(offline_dataset, OfflineDataset):
            raise TypeError('Expecting an OfflineDataset instance.')
        if not dist.is_available():
            raise RuntimeError('Expecting distributed training.')
        self._world_size = dist.get_world_size()
        self._rank = dist.get_rank()
        # Randomly drop a number of traces so that the number of all minibatches in the whole dataset is an integer multiple of world size
        num_batches_to_drop = math.floor(
            len(offline_dataset._sorted_indices) /
            batch_size) % self._world_size
        num_traces_to_drop = num_batches_to_drop * batch_size
        # Ensure all ranks choose the same traces to drop
        st = random.getstate()
        random.seed(0)
        self._batches = list(
            util.chunks(
                util.drop_items(list(offline_dataset._sorted_indices),
                                num_traces_to_drop), batch_size)
        )  # List of all minibatches, where each minibatch is a list of trace indices
        random.setstate(st)
        # Discard last minibatch if it's smaller than batch_size
        if len(self._batches[-1]) < batch_size:
            del (self._batches[-1])
        if num_buckets is None:
            num_buckets = len(self._batches) / self._world_size
        self._num_buckets = num_buckets
        self._bucket_size = math.ceil(len(self._batches) / num_buckets)
        if self._bucket_size < self._world_size:
            raise RuntimeError(
                'offline_dataset:{}, batch_size:{} and num_buckets:{} imply a bucket_size:{} smaller than world_size:{}'
                .format(len(offline_dataset), batch_size, num_buckets,
                        self._bucket_size, self._world_size))
        # List of buckets, where each bucket is a list of minibatches
        self._buckets = list(util.chunks(self._batches, self._bucket_size))
        # Unify last two buckets if the last bucket is smaller than other buckets
        if len(self._buckets[-1]) < self._bucket_size:
            if len(self._buckets) < 2:
                raise RuntimeError(
                    'offline_dataset:{} too small for given batch_size:{} and num_buckets:{}'
                    .format(len(offline_dataset), batch_size, num_buckets))
            self._buckets[-2].extend(self._buckets[-1])
            del (self._buckets[-1])
        self._shuffle_batches = shuffle_batches
        self._shuffle_buckets = shuffle_buckets
        self._epoch = 0
        self._current_bucket_id = 0

        print('DistributedTraceBatchSampler')
        print('OfflineDataset size : {:,}'.format(len(offline_dataset)))
        print('World size          : {:,}'.format(self._world_size))
        print('Batch size          : {:,}'.format(batch_size))
        print('Num. batches dropped: {:,}'.format(num_batches_to_drop))
        print('Num. batches        : {:,}'.format(len(self._batches)))
        print('Bucket size         : {:,}'.format(self._bucket_size))
        print('Num. buckets        : {:,}'.format(self._num_buckets))
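util.chunks and util.drop_items are helpers from the surrounding codebase; the sketch below only illustrates the behaviour the sampler assumes (deterministically drop a few traces, chunk the rest into minibatches, group minibatches into buckets) and is not the project's implementation:

import math
import random


def chunks(items, size):
    # Consecutive chunks of at most `size` items.
    return [items[i:i + size] for i in range(0, len(items), size)]


def drop_items(items, num_to_drop, seed=0):
    # Drop a random but seed-determined selection, leaving the caller's RNG untouched.
    state = random.getstate()
    random.seed(seed)
    keep = sorted(random.sample(range(len(items)), len(items) - num_to_drop))
    random.setstate(state)
    return [items[i] for i in keep]


sorted_indices = list(range(23))
batches = chunks(drop_items(sorted_indices, 3), 4)      # 5 minibatches of 4 traces
buckets = chunks(batches, math.ceil(len(batches) / 2))  # 2 buckets of minibatches
print(len(batches), len(buckets))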
Example #48
0
def gradcheck_naive(f, x):
    """
    Gradient check for a function f
    - f should be a function that takes a single argument
    and outputs the cost and its gradients
    - x is the point (numpy array) to check the gradient at
    """

    rndstate = random.getstate()
    random.setstate(rndstate)
    # Evaluate function value at original point
    fx, grad = f(x)
    h = 1e-4
    test = True
    """
     --- about nd.nditer ---
    flags = multi_index causes a multi-index, or a tuple of indices
    with one per iteration dimension, to be tracked.
    op_flags : list of list of str, optional
    this is a list of flags for each operand. At minimum, one of readonly,
    readwrite, or writeonly must be specified.
    readwrite indicates the operand will be read from and written to.
    """

    # Iterate over all indexes in x
    all_dif = np.array(x, copy=True)
    it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])
    while not it.finished:
        ix = it.multi_index
        """ try modifying x[ix] with h defined above to compute numerical gradients
        make sure you call random.setstate(rndstate) before calling f(x)
        each time, this will make it possible to test cost functions with
        built in randomness later"""
        # YOUR CODE HERE:
        x_plus_h = np.array(x, copy=True)
        x_plus_h[ix] = x_plus_h[ix] + h
        random.setstate(rndstate)
        fxh_plus, _ = f(x_plus_h)
        x_minus_h = np.array(x, copy=True)
        x_minus_h[ix] = x_minus_h[ix] - h
        random.setstate(rndstate)
        fxh_minus, _ = f(x_minus_h)
        numgrad = (fxh_plus - fxh_minus) / (2 * h)
        # END YOUR CODE

        # Compare gradients
        reldiff = abs(numgrad - grad[ix]) / max(1, abs(numgrad), abs(grad[ix]))
        all_dif[ix] = reldiff
        if reldiff > 1e-5:
            test = False
            string = """
            Your gradient = {0}
            Numerical gradient = {1}""".format(grad[ix], numgrad)
            print("            " + str(ix) + ": " + string)
            print("            fx ={}".format(fx))
            print("            fxh_plus ={}".format(fxh_plus))
            print("            fxh_minus ={}\n".format(fxh_minus))
        # For debugging with a bunch of params is
        # useful to add the following:

        # else:
        #     print(str(ix) + ": OK")

        # Step to next dimension
        it.iternext()
    if test:
        print("Gradient check passed!")

    # add this return to test
    return all_dif
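A hedged usage sketch for the checker above: f must return both the cost and its analytic gradient, so a simple quadratic makes a convenient test (numpy/random imports are assumed, as in the snippet itself):

import numpy as np


def quad(x):
    # Cost and gradient of f(x) = sum(x**2); the analytic gradient is 2x.
    return np.sum(x ** 2), 2 * x


x0 = np.array([[1.0, -2.0], [3.0, 0.5]])
rel_diffs = gradcheck_naive(quad, x0)  # central differences (f(x+h) - f(x-h)) / (2h)
print(rel_diffs)                       # every entry should be far below the 1e-5 threshold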
Example #49
0
    def _load(self) -> None:
        if not self.load_path:
            return

        # Backwards compat with older checkpoint formats. List is newest to
        # oldest known state_dict locations.
        potential_paths = [
            ["state_dict.pth"],
            ["determined", "state_dict.pth"],
            ["pedl", "state_dict.pth"],
            ["checkpoint.pt"],
        ]

        for ckpt_path in potential_paths:
            maybe_ckpt = self.load_path.joinpath(*ckpt_path)
            if maybe_ckpt.exists():
                checkpoint = torch.load(str(maybe_ckpt),
                                        map_location="cpu")  # type: ignore
                break

        if "model_state_dict" in checkpoint:
            # Backward compatible with older checkpoint format.
            check.not_in("models_state_dict", checkpoint)
            check.eq(len(self.context.models), 1)
            self.context.models[0].load_state_dict(
                checkpoint["model_state_dict"])
        else:
            for idx, model in enumerate(self.context.models):
                model.load_state_dict(checkpoint["models_state_dict"][idx])

        if "optimizer_state_dict" in checkpoint:
            # Backward compatible with older checkpoint format.
            check.not_in("optimizers_state_dict", checkpoint)
            check.eq(len(self.context.optimizers), 1)
            self.context.optimizers[0].load_state_dict(
                checkpoint["optimizer_state_dict"])
        else:
            for idx, optimizer in enumerate(self.context.optimizers):
                optimizer.load_state_dict(
                    checkpoint["optimizers_state_dict"][idx])

        if "lr_scheduler" in checkpoint:
            # Backward compatible with older checkpoint format.
            check.not_in("lr_schedulers_state_dict", checkpoint)
            check.eq(len(self.context.lr_schedulers), 1)
            self.context.lr_schedulers[0].load_state_dict(
                checkpoint["lr_scheduler"])
        else:
            for idx, lr_scheduler in enumerate(self.context.lr_schedulers):
                lr_scheduler.load_state_dict(
                    checkpoint["lr_schedulers_state_dict"][idx])

        if "amp_state" in checkpoint:
            if self.context._use_amp:
                apex.amp.load_state_dict(checkpoint["amp_state"])
            else:
                logging.warning(
                    "There exists amp_state in checkpoint but the experiment is not using AMP."
                )
        else:
            if self.context._use_amp:
                logging.warning(
                    "The experiment is using AMP but amp_state does not exist in the checkpoint."
                )

        if "rng_state" in checkpoint:
            rng_state = checkpoint["rng_state"]
            np.random.set_state(rng_state["np_rng_state"])
            random.setstate(rng_state["random_rng_state"])
            torch.random.set_rng_state(rng_state["cpu_rng_state"])

            if torch.cuda.device_count():
                if "gpu_rng_state" in rng_state:
                    torch.cuda.set_rng_state(
                        rng_state["gpu_rng_state"],
                        device=self.context.distributed.get_local_rank())
                else:
                    logging.warning(
                        "The system has a gpu but no gpu_rng_state exists in the checkpoint."
                    )
            else:
                if "gpu_rng_state" in rng_state:
                    logging.warning(
                        "There exists gpu_rng_state in checkpoint but the system has no gpu."
                    )
        else:
            logging.warning("The checkpoint has no random state to restore.")

        callback_state = checkpoint.get("callbacks", {})
        for name in self.callbacks:
            if name in callback_state:
                self.callbacks[name].load_state_dict(callback_state[name])
            elif util.is_overridden(self.callbacks[name].load_state_dict,
                                    pytorch.PyTorchCallback):
                logging.warning(
                    "Callback '{}' implements load_state_dict(), but no callback state "
                    "was found for that name when restoring from checkpoint. This "
                    "callback will be initialized from scratch.".format(name))
Example #50
0
    test='sars_cov_2_Compound_Viral_interactions_for_Supervised_Learning.csv',
    format='csv',
    skip_header=True,  # if your csv file has a header, pass this so it isn't processed as data!
    fields=datafields)

#See how the data looks like
print("Uniprot: ", data.examples[0].uniprot_accession)
print("Protein Sequence: ", data.examples[0].Sequence)
print("Canonical Smiles: ", data.examples[0].canonical_smiles)
print("Inchi Key:", data.examples[0].standard_inchi_key)
print("Target value: ", data.examples[0].pchembl_value)

#Split the data randomly into train, valid and test
train_data, valid_data = data.split(split_ratio=0.9,
                                    random_state=random.getstate())
print('Number of training examples: ', len(train_data.examples))
print('Number of validation examples: ', len(valid_data.examples))
print('Number of test examples: ', len(test_data.examples))
#del data
torch.cuda.empty_cache()

# +
#Build the sequence and smiles
TEXT1.build_vocab(train_data, min_freq=2)
TEXT2.build_vocab(train_data, min_freq=1)
LABEL.build_vocab(train_data, min_freq=1)
INDEX1.build_vocab(full_data, min_freq=1)
INDEX2.build_vocab(full_data, min_freq=1)

print('Unique tokens in Sequence vocabulary: ', len(TEXT1.vocab))
Example #51
0
    def close(self) -> Dict[str, Any]:
        '''Return a dict which contains

			* **fw-bleu**: fw bleu value.
			* **bw-bleu**: bw bleu value.
			* **fw-bw-bleu**: harmony mean of fw/bw bleu value.
			* **fw-bw-bleu hashvalue**: hash value for fwbwbleu metric, same hash value stands
			  for same evaluation settings.
		'''
        res = super().close()
        if not self.hyps:
            raise RuntimeError(
                "The metric has not been forwarded data correctly.")
        if not self.reference_test_list:
            raise RuntimeError("Reference cannot be empty")

        rng_state = random.getstate()
        random.seed(self.seed)
        sample_hyps = self.hyps.copy()
        sample_refs = self.reference_test_list.copy()
        random.shuffle(sample_hyps)
        random.shuffle(sample_refs)
        random.setstate(rng_state)

        n_sample_max = max(self.n_sample_hyp, self.n_sample_ref)
        sample_hyps = sample_hyps[:n_sample_max]
        sample_refs = sample_refs[:n_sample_max]

        refs: List[Any]
        hyps: List[Any]
        if self.tokenizer:
            tokenizer: Tokenizer
            if isinstance(self.tokenizer, str):
                tokenizer = SimpleTokenizer(self.tokenizer)
            else:
                tokenizer = self.tokenizer
            if isinstance(sample_refs[0], List):
                ref_sents = [
                    self.dataloader.convert_ids_to_sentence(
                        ids, remove_special=True, trim=True)
                    for ids in sample_refs
                ]
            else:
                ref_sents = sample_refs
            refs = tokenizer.tokenize_sentences(ref_sents)

            hyp_sents = [
                self.dataloader.convert_ids_to_sentence(ids,
                                                        remove_special=True,
                                                        trim=True)
                for ids in sample_hyps
            ]
            hyps = tokenizer.tokenize_sentences(hyp_sents)
        else:
            refs = [
                self.dataloader.convert_ids_to_tokens(ids,
                                                      remove_special=True,
                                                      trim=True)
                for ids in sample_refs
            ]
            hyps = [
                self.dataloader.convert_ids_to_tokens(ids,
                                                      remove_special=True,
                                                      trim=True)
                for ids in sample_hyps
            ]

        if "unk" in self.dataloader.get_special_tokens_mapping():
            refs = replace_unk(
                refs,
                self.dataloader.get_special_tokens_mapping()["unk"])

        bleu_irl_fw, bleu_irl_bw = [], []
        weights = np.ones(self.ngram) / self.ngram

        n_fw_sample = min(len(hyps), self.n_sample_hyp)
        n_fw_reference = min(len(refs), self.n_sample_ref)
        tasks = ((refs[:n_fw_reference], hyps[i], weights)
                 for i in range(n_fw_sample))
        pool: Optional[Any] = None
        values: Iterable[Any]
        if n_fw_sample >= FwBwBleuCorpusMetric.MINIMAL_PARALLEL_N and self.cpu_count > 1:
            if not pool:
                pool = Pool(self.cpu_count)
            values = pool.imap_unordered(_sentence_bleu, tasks, chunksize=20)
        else:
            values = map(_sentence_bleu, tasks)

        if n_fw_sample >= FwBwBleuCorpusMetric.MINIMAL_PARALLEL_N:
            values = tqdm.tqdm(values, total=n_fw_sample)
        for ans in values:
            bleu_irl_fw.append(ans)

        n_bw_sample = min(len(refs), self.n_sample_hyp)
        n_bw_reference = min(len(hyps), self.n_sample_ref)
        tasks = ((hyps[:n_bw_reference], refs[i], weights)
                 for i in range(n_bw_sample))
        if n_bw_sample >= FwBwBleuCorpusMetric.SHOW_PROGRESS and self.cpu_count > 1:
            if pool is None:
                pool = Pool(self.cpu_count)
            values = pool.imap_unordered(_sentence_bleu, tasks, chunksize=20)
        else:
            values = map(_sentence_bleu, tasks)
        if n_bw_sample >= FwBwBleuCorpusMetric.SHOW_PROGRESS:
            values = tqdm.tqdm(values, total=n_bw_sample)
        for ans in values:
            bleu_irl_bw.append(ans)

        if pool is not None:
            pool.close()
            pool.join()

        fw_bleu = (1.0 * sum(bleu_irl_fw) / len(bleu_irl_fw))
        bw_bleu = (1.0 * sum(bleu_irl_bw) / len(bleu_irl_bw))
        if fw_bleu + bw_bleu > 0:
            fw_bw_bleu = 2.0 * bw_bleu * fw_bleu / (fw_bleu + bw_bleu)
        else:
            fw_bw_bleu = 0

        res.update({"fw-bleu" : fw_bleu, \
         "bw-bleu" : bw_bleu, \
         "fw-bw-bleu" : fw_bw_bleu \
        })

        self._hash_unordered_list(refs)
        self._hash_ordered_data((self.ngram, self.seed, n_fw_sample,
                                 n_fw_reference, n_bw_sample, n_bw_reference))
        res.update({"fw-bw-bleu hashvalue": self._hashvalue()})
        return res
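_sentence_bleu is a module-level worker so it can be pickled for Pool.imap_unordered; its definition is not shown here, but a plausible sketch built on NLTK, consuming the (references, hypothesis, weights) task tuples assembled above, could look like this:

from nltk.translate.bleu_score import SmoothingFunction, sentence_bleu

_smoothing = SmoothingFunction().method1


def _sentence_bleu(task):
    # Unpack one task tuple and score a single hypothesis against its references.
    references, hypothesis, weights = task
    return sentence_bleu(references, hypothesis,
                         weights=weights,
                         smoothing_function=_smoothing)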
Example #52
0
from contextlib import contextmanager

@contextmanager
def save_random_state(seed=None):
    # Restore the caller's RNG state on exit (decorator/import assumed;
    # the original snippet shows only the generator body).
    state = random.getstate()
    if seed is not None:
        random.seed(seed)
    yield
    random.setstate(state)
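With the contextmanager decorator assumed above, the helper gives a scoped seed: draws inside the with block are reproducible and the caller's RNG stream is untouched afterwards.

import random

random.seed(123)                   # caller's stream
with save_random_state(seed=42):
    print(random.random())         # deterministic, driven by seed 42
print(random.random())             # continues the seed-123 stream as if nothing happened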
Example #53
0
def eaAlphaMuPlusLambdaCheckpoint(
        population,
        toolbox,
        mu,
        cxpb,
        mutpb,
        ngen,
        stats=None,
        halloffame=None,
        cp_frequency=1,
        cp_filename=None,
        continue_cp=False):
    r"""This is the :math:`(~\alpha,\mu~,~\lambda)` evolutionary algorithm

    Args:
        population(list of deap Individuals)
        toolbox(deap Toolbox)
        mu(int): Total parent population size of EA
        cxpb(float): Crossover probability
        mutpb(float): Mutation probability
        ngen(int): Total number of generation to run
        stats(deap.tools.Statistics): generation of statistics
        halloffame(deap.tools.HallOfFame): hall of fame
        cp_frequency(int): generations between checkpoints
        cp_filename(string): path to checkpoint filename
        continue_cp(bool): whether to continue
    """

    if continue_cp:
        # A file name has been given, then load the data from the file
        cp = pickle.load(open(cp_filename, "rb"))
        population = cp["population"]
        parents = cp["parents"]
        start_gen = cp["generation"]
        halloffame = cp["halloffame"]
        logbook = cp["logbook"]
        history = cp["history"]
        random.setstate(cp["rndstate"])
    else:
        # Start a new evolution
        start_gen = 1
        parents = population[:]
        logbook = deap.tools.Logbook()
        logbook.header = ['gen', 'nevals'] + (stats.fields if stats else [])
        history = deap.tools.History()

        # TODO: this first loop should not be repeated!
        invalid_count = _evaluate_invalid_fitness(toolbox, population)
        _update_history_and_hof(halloffame, history, population)
        _record_stats(stats, logbook, start_gen, population, invalid_count)

    # Begin the generational process
    for gen in range(start_gen + 1, ngen + 1):
        offspring = _get_offspring(parents, toolbox, cxpb, mutpb)

        population = parents + offspring

        invalid_count = _evaluate_invalid_fitness(toolbox, offspring)
        _update_history_and_hof(halloffame, history, population)
        _record_stats(stats, logbook, gen, population, invalid_count)

        # Select the next generation parents
        parents = toolbox.select(population, mu)

        logger.info(logbook.stream)

        if(cp_filename and cp_frequency and
           gen % cp_frequency == 0):
            cp = dict(population=population,
                      generation=gen,
                      parents=parents,
                      halloffame=halloffame,
                      history=history,
                      logbook=logbook,
                      rndstate=random.getstate())
            pickle.dump(cp, open(cp_filename, "wb"))
            logger.debug('Wrote checkpoint to %s', cp_filename)

    return population, halloffame, logbook, history
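A toy round-trip of the checkpoint dict written by the loop above, with the deap objects replaced by stand-ins; it shows how the continue_cp branch restores both the payload and the random stream:

import pickle
import random

cp = dict(population=['ind-%d' % i for i in range(4)],
          generation=7,
          parents=['ind-0', 'ind-2'],
          halloffame=None,
          history=None,
          logbook=None,
          rndstate=random.getstate())
with open('checkpoint.pkl', 'wb') as fh:
    pickle.dump(cp, fh)

# Resuming: restore the payload and the RNG exactly as the continue_cp branch does.
with open('checkpoint.pkl', 'rb') as fh:
    cp = pickle.load(fh)
random.setstate(cp['rndstate'])
start_gen = cp['generation']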
Example #54
0
def shuffle_in_unision(a, b):
    # Apply the same random permutation to two equal-length sequences by
    # replaying the identical RNG state for both shuffles.
    rng_state = random.getstate()
    random.shuffle(a)
    random.setstate(rng_state)
    random.shuffle(b)
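Restoring the saved state between the two shuffles replays the same sequence of random draws, so equal-length sequences get the same permutation and paired items stay aligned:

import random

features = ['x0', 'x1', 'x2', 'x3']
labels = [0, 1, 2, 3]
shuffle_in_unision(features, labels)
print(features, labels)  # the same permutation is applied to both lists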
Example #55
0
for k, v in chance.items():
    s = sum(v.values())
    if s > biggest:
        biggest = s
        word = k
        prefix = k
    for i in v.keys():
        chance[k][i] = chance[k][i] / s
        if chance[k][i] < epsilon:
            epsilon = chance[k][i]

if (cont == 'y'):
    with open(sys.argv[1] + '.ls') as data_file:
        pq = ast.literal_eval(data_file.readline())
        random.setstate(pq)
# start generating
delta = epsilon * 10  # probability of transitioning from an unknown to a known state
try:
    generateWords(prefix, word, delta, epsilon, sys.argv[3])

    with open(sys.argv[1] + '.ls', 'w') as outfile:
        outfile.write(str(random.getstate()))
    print('Session saved', file=sys.stderr)
    sys.exit(0)
except KeyboardInterrupt:
    with open(sys.argv[1] + '.ls', 'w') as outfile:
        outfile.write(str(random.getstate()))
    print('Session saved', file=sys.stderr)
    sys.exit(0)
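random.getstate() returns a nested tuple of plain literals (ints, floats, None), so its repr survives a round trip through ast.literal_eval, which is exactly what the .ls session file above relies on. A minimal self-contained sketch (the file name is illustrative):

import ast
import random

random.seed(7)
saved = str(random.getstate())            # repr of the state tuple is literal_eval-safe
expected = [random.random() for _ in range(3)]

with open('session.ls', 'w') as outfile:
    outfile.write(saved)

with open('session.ls') as data_file:
    random.setstate(ast.literal_eval(data_file.readline()))
print([random.random() for _ in range(3)] == expected)   # True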
Example #56
0
    def close(self) -> Dict[str, Any]:
        '''Return a dict which contains

			* **self-bleu**: self-bleu value.
			* **self-bleu hashvalue**: hash value for self-bleu metric, same hash value stands
			  for same evaluation settings.
		'''
        res = super().close()
        if not self.hyps:
            raise RuntimeError(
                "The metric has not been forwarded data correctly.")
        if len(self.hyps) == 1:
            raise RuntimeError(
                "Selfbleu can't be computed because there is only 1 generated sentence."
            )

        rng_state = random.getstate()
        random.seed(self.seed)
        random.shuffle(self.hyps)
        random.setstate(rng_state)

        n_sample_hyp = min(self.n_sample_hyp, len(self.hyps))
        n_sample_ref = min(self.n_sample_ref, len(self.hyps))

        ref = self.hyps[:n_sample_ref]

        if self.tokenizer:
            tokenizer: Tokenizer
            if isinstance(self.tokenizer, str):
                tokenizer = SimpleTokenizer(self.tokenizer)
            else:
                tokenizer = self.tokenizer
            ref = [
                self.dataloader.convert_ids_to_sentence(ids,
                                                        remove_special=True,
                                                        trim=True)
                for ids in ref
            ]
            ref = tokenizer.tokenize_sentences(ref)
        else:
            ref = [
                self.dataloader.convert_ids_to_tokens(ids,
                                                      remove_special=True,
                                                      trim=True) for ids in ref
            ]

        if "unk" in self.dataloader.get_special_tokens_mapping():
            _ref = replace_unk(
                ref,
                self.dataloader.get_special_tokens_mapping()["unk"])
        else:
            _ref = ref
        bleu_irl = []

        weights = np.ones(self.ngram) / self.ngram
        tasks = ((ref[:i] + ref[i + 1:], _ref[i], weights)
                 for i in range(n_sample_hyp))

        pool: Optional[Any] = None
        values: Iterable[Any]
        if n_sample_hyp >= SelfBleuCorpusMetric.MINIMAL_PARALLEL_N and self.cpu_count > 1:
            # use multiprocessing
            if pool is None:
                pool = Pool(self.cpu_count)
            values = pool.imap_unordered(_sentence_bleu, tasks, chunksize=20)
        else:
            values = map(_sentence_bleu, tasks)
        if n_sample_hyp >= SelfBleuCorpusMetric.SHOW_PROGRESS:
            values = tqdm.tqdm(values, total=n_sample_hyp)  # use tqdm
        for ans in values:
            bleu_irl.append(ans)
        if pool is not None:
            pool.close()
            pool.join()

        self._hash_ordered_data((self.seed, n_sample_hyp, n_sample_ref))
        res.update({"self-bleu" : 1.0 * sum(bleu_irl) / len(bleu_irl),\
           "self-bleu hashvalue": self._hashvalue()})
        return res
Example #57
0
def set_rng_state(checkpoint: dict[str, Any]) -> None:
    if checkpoint:
        random.setstate(checkpoint["rng_state"])
        np.random.set_state(checkpoint["np_rng_state"])
        torch.set_rng_state(checkpoint["torch_rng_state"])
Example #58
0
def seed_random(seed):
    state = random.getstate()
    random.seed(seed)
    cleanup(lambda: random.setstate(state))
    return RandomSeeder(seed)
Example #59
0
if args.resume:
    # Load checkpoint.
    print('==> Resuming from checkpoint..')
    assert os.path.isdir(
        args.checkpoint_dir), 'Error: no checkpoint directory found!'

    checkpoint = torch.load('./' + args.checkpoint_dir + '/ckpt.best.' +
                            args.sess + '_' + str(args.seed))

    net1 = checkpoint['net1']
    net2 = checkpoint['net2']
    start_epoch = checkpoint['epoch'] + 1
    torch.set_rng_state(checkpoint['rng_state'])
    torch.cuda.set_rng_state(checkpoint['cuda_rng_state'])
    np.random.set_state(checkpoint['np_state'])
    random.setstate(checkpoint['random_state'])

    if args.dataset == 'cifar10':
        with open("cifar10_labelled_index.pkl", "rb") as fp:
            S_idx = pickle.load(fp)

        with open("cifar10_unlabelled_index.pkl", "rb") as fp:
            U_idx = pickle.load(fp)
    else:
        with open("svhn_labelled_index.pkl", "rb") as fp:
            S_idx = pickle.load(fp)

        with open("svhn_unlabelled_index.pkl", "rb") as fp:
            U_idx = pickle.load(fp)
else:
Example #60
0
    patience = 0
    start_epoch = 0
    start_time = datetime.datetime.now()

    if args.resume_last or args.resume_best:
        if args.resume_last:
            fname = 'saved_models/%s_last.pth' % args.exp_name
        else:
            fname = 'saved_models/%s_best.pth' % args.exp_name

        if os.path.exists(fname):
            data = torch.load(fname)
            torch.set_rng_state(data['torch_rng_state'])
            torch.cuda.set_rng_state(data['cuda_rng_state'])
            np.random.set_state(data['numpy_rng_state'])
            random.setstate(data['random_rng_state'])
            model.load_state_dict(data['state_dict'], strict=False)
            optim.load_state_dict(data['optimizer'])
            scheduler.load_state_dict(data['scheduler'])
            start_epoch = data['epoch'] + 1
            best_cider = data['best_cider']
            patience = data['patience']
            use_rl = data['use_rl']
            start_time = data['start_time']
            try:
                middle_time = data['middle_time']
            except KeyError:
                middle_time = None
            print(
                'Resuming from epoch %d, validation loss %f, best validation cider %f, patience %d'
                % (data['epoch'], data['val_loss'], data['best_cider'],