def train_test_val_split(records_list, train=0.6, test=0.2, val=0.2, seed=None):
    assert train + test + val == 1
    split = deepcopy(records_list)
    train_size, test_size, val_size = (
        round(len(split) * train),
        round(len(split) * test),
        round(len(split) * val),
    )
    # Give any rounding slack to the training set.
    train_size = len(split) - test_size - val_size
    if seed is not None:
        set_seed(seed)
    shuffle(split)
    train_set, val_set, test_set = (
        split[:train_size],
        split[train_size:(train_size + val_size)],
        split[(train_size + val_size):],
    )
    assert len(set(train_set).intersection(set(test_set))) == 0
    assert len(set(val_set).intersection(set(test_set))) == 0
    assert len(set(train_set).intersection(set(val_set))) == 0
    return train_set, val_set, test_set
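# Usage sketch (not from the original source): exercises train_test_val_split
# above, assuming `deepcopy`, `shuffle` and `set_seed` resolve to
# `copy.deepcopy`, `random.shuffle` and `random.seed` in this module.
records = list(range(100))
train_set, val_set, test_set = train_test_val_split(records, train=0.6, test=0.2, val=0.2, seed=42)
assert len(train_set) == 60 and len(val_set) == 20 and len(test_set) == 20
assert sorted(train_set + val_set + test_set) == records  # a true partition of the input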
def train_dl(num_neurons, batch_size, epoch, X_train, y_train, X_test, y_test):
    """Train a simple dense deep learning model."""
    import numpy as np
    import random
    import tensorflow as tf

    # Seed NumPy, Python and TensorFlow for reproducibility
    np.random.seed(1)
    random.seed(2)
    tf.random.set_seed(3)

    model = tf.keras.Sequential([
        tf.keras.layers.Dense(num_neurons, activation='relu', input_shape=(1618,)),
        tf.keras.layers.Dense(14, activation='softmax')
    ])
    model.compile(optimizer=tf.keras.optimizers.Adam(5e-04),
                  loss=tf.keras.losses.CategoricalCrossentropy(),
                  metrics=[tf.keras.metrics.Precision(name='precision')])

    dataset = tf.data.Dataset.from_tensor_slices((X_train.values, y_train.values))
    train_dataset = dataset.shuffle(len(X_train)).batch(batch_size)
    dataset = tf.data.Dataset.from_tensor_slices((X_test.values, y_test.values))
    validation_dataset = dataset.shuffle(len(X_test)).batch(batch_size)

    print("Start training...\n")
    history = model.fit(train_dataset,
                        epochs=epoch,
                        validation_data=validation_dataset)
    print("Done.")
    return model
def set_seed(self, seedString):
    # Check arguments
    #   name
    seedString = exception.arg_check(seedString, str)
    # Set seed
    random.set_seed(random.seed_alphabet_decode(seedString))
    self.seed = seedString
def quiz_from_seed(seed, params):
    quiz = {}
    set_seed(seed)
    askbiggest = randint(0, 1) == 0
    print("biggest?", askbiggest)
    if askbiggest:
        quiz["comparison"] = "biggest"
    else:
        quiz["comparison"] = "smallest"
    print(quiz["comparison"])
    try:
        measure = params.get("measure")
    except (AttributeError, TypeError):
        measure = None
    if measure is None or measure == "random":
        measure = choice(["extent", "count", "amount", "duration", "mass", "area"])
    quiz["measure"] = measure
    quiz["seed"] = seed
    rf = randomFact(NumberFact, measure, rseed=seed)
    bestComparisons, tolerance, score = numberFactsLikeThis(NumberFact, rf, rseed=seed)
    while len(bestComparisons) < 4:
        seed = randint(0, 10000000)
        rf = randomFact(NumberFact, measure, rseed=seed)
        bestComparisons, tolerance, score = numberFactsLikeThis(NumberFact, rf, rseed=seed)
    quiz["hint"] = rf.render
    quiz["options"] = bestComparisons
    return quiz
def get_control(seq_array, chosen_base, step, flank_size, sample_indices=None,
                circle_range=None, seed=None):
    """returns control profile"""
    assert seed is not None, "Must provide a random number seed"
    set_seed(seed)
    if sample_indices is None:
        sample_indices = chosen_base_indices(seq_array, chosen_base, step)
    seq_array, sample_indices = filter_seqs_by_chosen_base(seq_array, sample_indices, 1)
    if circle_range is None:
        circle_range = MakeCircleRange(seq_array.shape[1], flank_size)
    sampled_indices = get_random_indices(sample_indices, circle_range)
    rows = []
    for i in range(len(seq_array)):
        row = seq_array[i].take(sampled_indices[i])
        rows.append(row)
    sampled_data = array(rows)
    return sampled_data
def __init__(self, subset, path_length=6, grayscale=False, seed=0,
             image_size=(150, 150), augmentation=None):
    super().__init__()
    self.grayscale = grayscale
    self.image_size = image_size
    self.augmentation = augmentation
    folders = {
        6: ['curv_baseline'],
        9: ['curv_contour_length_9'],
        14: ['curv_contour_length_14'],
        'all': ['curv_baseline', 'curv_contour_length_9', 'curv_contour_length_14'],
    }[path_length]
    self.base_paths = [join(self.data_path(), folder) for folder in folders]
    splits = list(range(1, 25))
    if seed != 0:
        set_seed(seed)
        shuffle(splits)
    if subset == 'train':
        a_range = splits[0:18]   # range(1, 19)
    elif subset == 'val':
        a_range = splits[18:20]  # range(19, 21)
    elif subset == 'test':
        a_range = splits[20:25]  # range(21, 25)
    else:
        raise ValueError(f'Invalid subset: {subset}')
    self.samples = [(a, b, pos, bp)
                    for a in a_range
                    for b in range(10000)
                    for pos in [True, False]
                    for bp in range(len(self.base_paths))]
    self.sample_ids = list(range(len(self.samples)))
def generate_inputs(ninputs, seed=None):
    yield Int64.ZERO
    yield Int64.ONE
    yield Int64.MAX
    if seed is not None:
        set_seed(seed)
    for a in range(ninputs - 3):
        yield Int64.random()
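# Usage sketch (hypothetical): Int64 is assumed to be the project's own
# fixed-width integer type exposing ZERO, ONE, MAX and random(); the generator
# always yields those three boundary values first, then ninputs - 3 random ones.
inputs = list(generate_inputs(10, seed=123))
assert len(inputs) == 10
assert inputs[:3] == [Int64.ZERO, Int64.ONE, Int64.MAX]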
def get_rand_id_cons(layers, connections, seed=1):
    # Generate random connectivity
    n = len(layers)
    id_cons = [None] * (n - 1)
    set_seed(seed)
    for index, con_count in enumerate(connections):
        i = layers[index]      # number of gates in layer index
        j = layers[index + 1]  # number of gates in layer index + 1
        all_cons = list(product(range(i), range(j)))
        id_cons[index] = sample(all_cons, con_count)
    return id_cons
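# Usage sketch (not from the original source): three layers of 4, 3 and 2 gates
# with 5 and 3 random connections between consecutive layers; assumes
# `from itertools import product` and `from random import sample, seed as set_seed`.
id_cons = get_rand_id_cons(layers=[4, 3, 2], connections=[5, 3], seed=1)
assert len(id_cons) == 2
assert len(id_cons[0]) == 5 and len(id_cons[1]) == 3  # (source, target) index pairs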
def random_digraph(vertices=10000, max_arcs_per_node=100, acyclic=False, seed=None):
    from random import seed as set_seed, sample, randrange
    if seed is not None:
        set_seed(seed)
    n = vertices
    a = min(max_arcs_per_node, vertices)
    G = {}
    for v in range(n):
        population = range(v + 1, n) if acyclic else range(n)
        sample_size = randrange(min(n - v, a)) if acyclic else randrange(a)
        G[v] = sample(population, sample_size)
    return G
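# Usage sketch: a small reproducible random DAG; with acyclic=True every arc
# points from a lower-numbered vertex to a higher-numbered one.
G = random_digraph(vertices=50, max_arcs_per_node=5, acyclic=True, seed=7)
assert len(G) == 50
assert all(w > v for v, targets in G.items() for w in targets)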
def get(limit, padding, orderby, seed=None):
    if seed is None:
        song_list = orm \
            .select(s for s in Song) \
            .order_by(format_order_by(orderby)) \
            .limit(limit=limit, offset=padding)
    else:
        # shuffle all results
        id_list = list(orm.select(s.id for s in Song))
        set_seed(a=seed, version=2)
        shuffle(id_list)
        song_list = orm.select(s for s in Song).where(
            lambda s: s.id in id_list[padding:padding + limit])
    return {'data': [s.serialize() for s in song_list]}, 200
def sample_row_wise(indptr, indices, n_cols, n_samples, seed_seq):
    """
    For every row of a CSR matrix, samples indices not present in this row.
    """
    n_rows = len(indptr) - 1
    result = np.empty((n_rows, n_samples), dtype=indices.dtype)
    for i in prange(n_rows):
        head = indptr[i]
        tail = indptr[i + 1]
        seen_inds = indices[head:tail]
        state = prime_sampler_state(n_cols, seen_inds)
        remaining = n_cols - len(seen_inds)
        set_seed(seed_seq[i])
        sample_fill(n_samples, state, remaining, result[i, :])
    return result
def get_rand_circuit_id_cons(layers, connections, seed=1):
    # Generate random connectivity constrained to two incoming connections per
    # gate (i.e. assuming two-input gates)
    n = len(layers)
    id_cons = [None] * (n - 1)
    set_seed(seed)
    for ind in range(1, n):
        gates = layers[ind]
        prev_gates = range(layers[ind - 1])
        layer_cons = []
        for gate in range(gates):
            for src in sample(prev_gates, 2):
                layer_cons.append((src, gate))
        id_cons[ind - 1] = layer_cons
    return id_cons
def audit(decision_fn, num_scenarios=100000, seed=None):
    log_file = get_log_file(decision_fn.__name__)
    if seed is not None:
        set_seed(seed)
    for _ in range(num_scenarios):
        scenario = Scenario(youInCar=False, legalCrossing=False, pedsInLane=True)
        decision = decision_fn(scenario)
        if decision not in ['passengers', 'pedestrians']:
            print(scenario)
            message = 'Expected "passengers" or "pedestrians", '
            message += 'but got "{}" instead'.format(decision)
            raise ValueError(message)
        log_scenario(log_file, scenario, decision)
    calculate_stats(log_file)
def __init__(self, subset, difficulty=0, grayscale=False, seed=0, augmentation=None):
    super().__init__()
    self.grayscale = grayscale
    self.difficulty = difficulty
    self.augmentation = augmentation
    folders = {
        0: ['baseline-/media/data_cifs/cluttered_nist3/baseline-/'],
        1: ['ix1-/media/data_cifs/cluttered_nist3/ix1-/'],
        3: ['ix2/media/data_cifs/cluttered_nist3/ix2/'],
        'all': [
            'baseline-/media/data_cifs/cluttered_nist3/baseline-/',
            'ix1-/media/data_cifs/cluttered_nist3/ix1-/',
            'ix2/media/data_cifs/cluttered_nist3/ix2/'
        ],
    }[difficulty]
    self.base_paths = [join(self.data_path(), folder) for folder in folders]
    splits = list(range(1, 51))
    if seed != 0:
        set_seed(seed)
        shuffle(splits)
    if subset == 'train':
        a_range = splits[0:36]   # range(1, 37)
    elif subset == 'val':
        a_range = splits[36:40]  # range(37, 41)
    elif subset == 'test':
        a_range = splits[40:51]  # range(41, 51)
    else:
        raise ValueError(f'Invalid subset: {subset}')
    self.samples = [(a, b, bp)
                    for a in a_range
                    for b in range(4000)
                    for bp in range(len(self.base_paths))]
    self.labels = [
        np.load(join(self.base_paths[0], f'metadata/{i}.npy'))[:, [0, 2, 4]].astype('U13')
        for i in range(1, 51)
    ]
    self.sample_ids = list(range(len(self.samples)))
def basic_hills(x, height, seed):
    set_seed(seed)
    cs = x * 10
    xr = range(cs, cs + 10)
    chunk = {}
    for x in xr:
        for y in range(height):
            top = noise1D(x, seed)
            if y == top:
                chunk[x, y] = "G"
            elif y > top:
                chunk[x, y] = "K"
            elif top - y < rand(3, 7):
                chunk[x, y] = "D"
            else:
                chunk[x, y] = "S"
    return chunk
def sample_element_wise(indptr, indices, n_cols, n_samples, seed_seq):
    """
    For every nnz entry of a CSR matrix, samples indices not present
    in its corresponding row.
    """
    result = np.empty((indptr[-1], n_samples), dtype=indices.dtype)
    for i in prange(len(indptr) - 1):
        head = indptr[i]
        tail = indptr[i + 1]
        seen_inds = indices[head:tail]
        state = prime_sampler_state(n_cols, seen_inds)
        remaining = n_cols - len(seen_inds)
        set_seed(seed_seq[i])
        for j in range(head, tail):
            sampler_state = state.copy()
            sample_fill(n_samples, sampler_state, remaining, result[j, :])
    return result
def mf_random_item_scoring(user_factors, item_factors, indptr, indices, size,
                           seedseq, res):
    """
    Calculate matrix factorization scores over a sample of random items
    excluding the already observed ones.
    """
    num_items, rank = item_factors.shape
    for i in prange(len(indptr) - 1):
        head = indptr[i]
        tail = indptr[i + 1]
        observed = indices[head:tail]
        user_coef = user_factors[i, :]
        set_seed(seedseq[i])  # randomization control for sampling in a thread
        for j, rnd_item in enumerate(sample_unseen(num_items, size, observed)):
            item_coef = item_factors[rnd_item, :]
            tmp = 0
            for k in range(rank):
                tmp += user_coef[k] * item_coef[k]
            res[i, j] = tmp
def __init__(self, x, y, width, size, osc_url, osc_port, seed=None,
             border_color=(255, 255, 255), background_color=(0, 0, 0), padding=10):
    """
    Class constructor

    :param x: x position in pixels of the upper left corner on theater window
    :param y: y position in pixels of the upper left corner on theater window
    :param width: width in pixels of stage
    :param size: size in cells of stage
    :param osc_url: URL used to reach OSC listener
    :param osc_port: port used to reach OSC listener
    :param seed: a number used to seed random values - may be used to repeat the same sequence over and over
    :param border_color: border color of stage as a RGB triplet
    :param background_color: background color of stage as a RGB triplet
    :param padding: padding in pixels between cells
    """
    # Store class properties
    self._x = x
    self._y = y
    self._width = width
    self._size = size
    self._osc_url = osc_url
    self._osc_port = osc_port
    self._seed = seed
    self._border_color = border_color
    self._background_color = background_color
    self._padding = padding
    # Store different instruments added to this stage
    self._instruments = []
    # Compute cell width in pixels (we don't round here to avoid accumulating
    # calculation errors - rounding is done further on)
    self._cell_width = width / size - padding / 2
    # Initiate OSC client
    self._osc_client = Osc_client()
    # Initialize random seed for eventual reproducibility
    set_seed(seed)
def __init__(self, seed, n, m, sample_increment_size):
    """
    Initializes a :class:`SimulatedSenateElection` object.

    The number of seats in a simulated senate election is equal to the floor
    of the number of candidates in the election divided by two.

    :param int seed: The starting value for the random number generator.
    :param int n: The total number of ballots cast in the election.
    :param int m: The total number of candidates in the election.
    :param int sample_increment_size: The number of ballots to add to the
        growing sample during each audit stage.
    """
    super(SimulatedSenateElection, self).__init__()
    self._n = n
    self._m = m
    self._seats = int(self._m / 2)
    self._candidates = list(range(1, self._m + 1))
    self._candidate_ids = list(range(1, self._m + 1))
    self._election_id = SimulatedSenateElection.DEFAULT_ID.format(
        asctime(localtime()))
    self._sample_increment_size = sample_increment_size
    set_seed(seed)  # Set the initial value of the RNG.
def _setseed(self, options):
    """Get a seed if one is not given, then seed the random number generator."""
    # check if the seed has already been set (by a child, before calling the super init)
    if hasattr(self, '_seed'):
        return self._seed
    if 'seed' not in options or not options['seed']:
        set_seed(None)  # (from doc): If seed is omitted or None, current system time is used
        seed = randint(0, 16777215)  # between 0 and 2^24-1
    else:
        try:
            seed = int(options['seed'], 0)
            if not 0 <= seed <= 16777215:
                raise ValueError(
                    "The 'seed' value must be between 0 and 16777215 ('seed=%s')." % options['seed'])
        except ValueError:
            raise ValueError("The 'seed' value is invalid ('seed=%s')" % options['seed'])
    set_seed(seed)
    return seed
def find_samples_to_remove(ped_fh, kin0_fh, kin_fh, relatedness_threshold,
                           distant_relatedness_threshold, output_fh,
                           coverage_summary_fh=None, seed=None, verbose=False):
    if output_fh is sys.stdout:
        log_fh = sys.stderr
    else:
        log_fh = open(os.path.splitext(output_fh.name)[0] + ".log", "w")
    try:
        if seed is not None:
            set_seed(seed)
        phenotypes, ped_lines = process_ped_file(ped_fh, log_fh, verbose)
        global all_graphs
        kinship_files = [kin0_fh, kin_fh]
        all_graphs = process_kinship_file(kinship_files, relatedness_threshold,
                                          distant_relatedness_threshold,
                                          phenotypes, log_fh, verbose)
        (affecteds_related_graph, affecteds_unrelated_graph,
         mixed_related_graph, mixed_unrelated_graph,
         mixed_affecteds_related_graph, mixed_affecteds_unrelated_graph,
         unaffecteds_related_graph, unaffecteds_unrelated_graph) = all_graphs
        break_ties_with_coverage = bool(coverage_summary_fh)
        if break_ties_with_coverage:
            coverage_by_sample = process_coverage_summary_file(
                coverage_summary_fh, log_fh, verbose)
        else:
            coverage_by_sample = None
        samples_to_remove = []
        samples_to_remove.extend(
            remove_samples(affecteds_related_graph,
                           [affecteds_unrelated_graph,
                            mixed_affecteds_related_graph,
                            mixed_affecteds_unrelated_graph],
                           log_fh,
                           coverage_by_sample=coverage_by_sample,
                           verbose=verbose))
        samples_to_remove.extend(
            remove_samples(unaffecteds_related_graph,
                           [mixed_related_graph,
                            mixed_unrelated_graph,
                            unaffecteds_unrelated_graph],
                           log_fh,
                           coverage_by_sample=coverage_by_sample,
                           verbose=verbose))
        samples_to_remove.extend(
            remove_samples(mixed_related_graph,
                           [mixed_unrelated_graph, unaffecteds_unrelated_graph],
                           log_fh,
                           coverage_by_sample=coverage_by_sample,
                           verbose=verbose))
        if verbose:
            log_fh.write("Pruning finished; will be removing {} samples:\n".format(
                len(samples_to_remove)))
            for sample in samples_to_remove:
                log_fh.write("{}\n".format(sample))
            log_fh.write("\n")
        samples_to_remove = set(samples_to_remove)
        for line_fields in ped_lines:
            if line_fields[1] not in samples_to_remove:
                output_fh.write("\t".join(line_fields) + "\n")
    finally:
        if output_fh is not sys.stdout:
            output_fh.close()
        if log_fh is not sys.stderr:
            log_fh.close()
# Reducing layer width
def reduce_width(neurons, reduce_rate, power):
    return max(int(neurons * reduce_rate**power), 5)


# best parameters from above
dropout = 0.3
neurons = 500
red_rate = 0.3
constraint = 4
mom = 0.9
learning_rate = 0.01
seed = 1

set_seed(seed)
model = Sequential()
model.add(Dense(reduce_width(neurons, red_rate, 0),
                input_shape=(input_dim, ),
                kernel_constraint=MaxNorm(constraint)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(dropout))
model.add(Dense(reduce_width(neurons, red_rate, 1),
                kernel_constraint=MaxNorm(constraint)))
model.add(BatchNormalization())
def __init__(self, player1, player2, tournament=None, **options):
    """
    Create a Game

    Parameters:
    - player1, player2: two Player (the order will be changed according to who begins)
    - options: dictionary of options
        - 'seed': seed of the labyrinth (same seed => same labyrinth); used as seed for the random generator
        - 'timeout': timeout of the game (if not given, the default timeout is used)
        - 'start': who starts the game (0, 1 or -1); random when not specified or '-1'
    # TODO: add a delay/pause option (in seconds)
    """
    # check if we can create the game (are the players available)
    if player1 is None or player2 is None:
        raise ValueError("Players don't exist")
    if player1 is player2:
        raise ValueError("A player cannot play against themselves")
    if player1.game is not None or player2.game is not None:
        raise ValueError("Players already play in a game")

    # players
    # we randomly decide the order of the players
    if 'start' not in options:
        pl = choice((0, 1))
    else:
        try:
            pl = int(options['start'])
            if pl == -1:
                pl = choice((0, 1))
        except ValueError:
            raise ValueError("The 'start' option must be '0', '1' or '-1'")
    self._players = (player1, player2) if pl == 0 else (player2, player1)

    # get a seed if the seed is not given; seed the random numbers generator
    if 'seed' not in options:
        set_seed(None)  # (from doc): If seed is omitted or None, current system time is used
        seed = randint(0, 16777215)  # between 0 and 2^24-1
    else:
        try:
            seed = int(options['seed'])
            if not 0 <= seed <= 16777215:
                raise ValueError("The 'seed' value must be between 0 and 16777215 ('seed=%s')." % options['seed'])
        except ValueError:
            raise ValueError("The 'seed' value is invalid ('seed=%s')" % options['seed'])
    set_seed(seed)

    # (unique) name composed by
    # - the first 6 characters are the seed (in hexadecimal),
    # - the 6 next characters are a hash (CRC24) of the time and names (hexadecimal)
    ok = False
    name = ""
    while not ok:
        # we need a loop just in case we are unlucky and two existing games have the same hash
        fullName = str(int(time())) + player1.name + player2.name
        name = hex6(seed)[2:] + hex6(crc24(bytes(fullName, 'utf8')))[2:]
        ok = name not in self.allInstances
        if not ok:
            timemod.sleep(1)

    # store the tournament
    self._tournament = tournament

    # determine who starts (player #0 ALWAYS starts)
    self._whoPlays = 0

    # last move
    self._lastMove = ""
    self._lastReturn_code = 0

    # set a delay after each move (to let the time to see the party)
    if 'delay' not in options:
        self._delay = 0
    else:
        try:
            self._delay = int(options['delay'])
        except ValueError:
            self._delay = 0
            # raise ValueError("The 'delay' value is invalid ('delay=%s')" % options['delay'])

    # time out for the move
    if 'timeout' not in options:
        self._timeout = TIMEOUT_TURN
    else:
        try:
            self._timeout = int(options['timeout'])
        except ValueError:
            raise ValueError("The 'timeout' value is invalid ('timeout=%s')" % options['timeout'])

    # timestamp of the last move
    self._lastMoveTime = datetime.now()  # used for the timeout when one player is a non-regular player

    # Barrier used for the synchronization of the two players (during playMove and getMove)
    self._sync = Barrier(2, timeout=self._timeout)

    # list of comments
    self._comments = CommentQueue(MAX_COMMENTS)

    # and (almost) last, call the super init for base initialization
    super().__init__(name)

    # advertise the players that they enter in a game
    player1.game = self
    player2.game = self

    # log the game
    self.logger.info("=================================")
    if self._tournament:
        self.logger.message("[Tournament %s] Game %s just starts with '%s' and '%s' (seed=%d).",
                            self._tournament.name, name, player1.name, player2.name, seed)
    else:
        self.logger.message("Game %s just starts with '%s' and '%s' (seed=%d).",
                            name, player1.name, player2.name, seed)
    self.logger.debug("The delay is set to %ds" % self._delay)
    self.logger.debug("The timeout is set to %ds" % self._timeout)
def set_seed_value(self):
    set_seed(self.seed_buffer)
def audit(election, seed, unpopular_freq_threshold, stage_counter=0,
          alpha=0.05, trials=100, quick=False):
    """
    Runs a Bayesian audit on the given senate election.

    :param :class:`BaseSenateElection` election: The senate election to audit.
    :param int seed: The seed for the random number generator.
    :param float unpopular_freq_threshold: The upper bound on the frequency of
        trials a candidate is elected in order for the candidate to be deemed
        unpopular.
    :param int stage_counter: The current audit stage (default: 0).
    :param float alpha: The error tolerance for the given audit (default: 0.05).
    :param int trials: The number of trials performed per sample (default: 100).
    :param bool quick: A boolean indicating whether the audit should run to
        completion (True) or only run one stage (False) (default: False).
    """
    print(
        'Audit of {} election.\n'.format(election.get_type()),
        ' Election ID: {}\n'.format(election.get_election_id()),
        ' Candidates: {}\n'.format(election.get_candidates()),
        ' Number of ballots cast: {}\n'.format(election.get_num_cast_ballots()),
        ' Number of seats being contested: {}\n'.format(election.get_num_seats()),
        ' Number of trials per sample: {}\n'.format(trials),
        ' Random number seed: {}'.format(seed),
    )
    start_time = time()
    set_seed(seed)

    # Cast one "prior" ballot for each candidate to establish a Bayesian prior.
    # The prior ballot is a length-one partial ballot with just a first choice
    # vote for that candidate.
    for cid in election.get_candidate_ids():
        election.add_ballot((cid, ), 1)

    # Mapping from candidates to the set of ballots that elected them.
    candidate_to_ballots_map = {}
    candidate_outcomes = None
    done = False
    while True:
        stage_counter += 1
        election.draw_ballots()  # Increase sample of cast ballots.
        print(
            '\nAudit stage number: {}\n'.format(stage_counter),
            ' Sample size (including prior ballots): {}\n'.format(
                election.get_num_ballots_drawn()),
        )

        # -- Run trials in a Bayesian manner --
        # Each outcome is a tuple of candidates who have been elected in
        # lexicographical order (NOT the order in which they were elected).
        print(
            ' Performing {} Bayesian trials (posterior-based election simulations) in this stage.'
            .format(trials))
        outcomes = []
        for _ in range(trials):
            new_ballot_weights = get_new_ballot_weights(
                election, election.get_num_cast_ballots())
            outcome = election.get_outcome(new_ballot_weights)
            for cid in outcome:
                if cid not in candidate_to_ballots_map:
                    candidate_to_ballots_map[cid] = new_ballot_weights
            outcomes.append(outcome)

        best, freq = Counter(outcomes).most_common(1)[0]
        print(
            ' Most common outcome ({} seats):\n'.format(election.get_num_seats()),
            ' {}\n'.format(best),
            ' Frequency of most common outcome: {} / {}'.format(freq, trials),
        )

        candidate_outcomes = Counter(chain(*outcomes))
        print(
            ' Fraction present in outcome by candidate:\n {}'.format(
                ', '.join([
                    '{}: {}'.format(str(cid), cid_freq / trials)
                    for cid, cid_freq in sorted(candidate_outcomes.items(),
                                                key=lambda x: (x[1], x[0]))
                ]),
            ),
        )

        if freq >= trials * (1 - alpha):
            print(
                'Stopping because audit confirmed outcome:\n',
                ' {}\n'.format(best),
                'Total number of ballots examined: {}'.format(
                    election.get_num_ballots_drawn()),
            )
            done = True
            break

        if election.get_num_ballots_drawn() >= election.get_num_cast_ballots():
            print('Audit has looked at all ballots. Done.')
            done = True
            break

        if not quick:
            break

    if candidate_outcomes is not None and done:
        for cid, cid_freq in sorted(candidate_outcomes.items(),
                                    key=lambda x: (x[1], x[0])):
            if cid_freq / trials < unpopular_freq_threshold:
                print(
                    ' One set of ballots that elected low frequency '
                    'candidate {} which occurred in {}% of outcomes\n'.format(
                        str(cid), str(cid_freq)),
                    ' {}'.format(candidate_to_ballots_map[cid]),
                )

    print('Elapsed time: {} seconds.'.format(time() - start_time))
    return done
def __init__(self, n, sealevel=0.5, seed=None):
    self.sealevel = sealevel
    # generate an evenly spaced triagonal grid of points,
    # jittered for randomness
    if seed is not None:
        set_seed(seed)
    m = int(2 * n / (3**0.5)) + 2
    if m % 2 == 0:  # force odd m
        m = m + 1
    s = 1. / (n - 1)
    h = (3**0.5) * s / 2
    nodes = []
    for y in range(m):
        row = []
        k = y % 2
        for x in range(n + k):
            px = s * x - 0.5 * s * k + s * (random() - 0.5)
            py = h * y - h + h * (random() - 0.5)
            row.append(node(px, py, 0))
        nodes.append(row)
    # build graph
    self.graph = {}
    # ...starting with the corners
    self.graph[nodes[0][0]] = (nodes[1][0], nodes[0][1], nodes[1][1])
    self.graph[nodes[-1][0]] = (nodes[-2][0], nodes[-2][1], nodes[-1][1])
    self.graph[nodes[0][-1]] = (nodes[0][-2], nodes[1][-2], nodes[1][-1])
    self.graph[nodes[-1][-1]] = (nodes[-1][-2], nodes[-2][-2], nodes[-2][-1])
    # next, the edges
    # sides
    for y in range(1, m - 1):
        if y % 2 == 0:
            # even left
            self.graph[nodes[y][0]] = (nodes[y + 1][0], nodes[y + 1][1],
                                       nodes[y][1], nodes[y - 1][1],
                                       nodes[y - 1][0])
            # even right
            self.graph[nodes[y][-1]] = (nodes[y + 1][-2], nodes[y + 1][-1],
                                        nodes[y - 1][-1], nodes[y - 1][-2],
                                        nodes[y][-2])
        else:
            # odd left
            self.graph[nodes[y][0]] = (nodes[y + 1][0], nodes[y][1],
                                       nodes[y - 1][0])
            # odd right
            self.graph[nodes[y][-1]] = (nodes[y + 1][-1], nodes[y][-2],
                                        nodes[y - 1][-1])
    # top & bottom
    for x in range(1, n - 1):
        # bottom
        self.graph[nodes[0][x]] = (nodes[0][x - 1], nodes[1][x],
                                   nodes[1][x + 1], nodes[0][x + 1])
        # top
        self.graph[nodes[-1][x]] = (nodes[-1][x - 1], nodes[-2][x],
                                    nodes[-2][x + 1], nodes[-1][x + 1])
    # the bulk of the graph
    for y in range(1, m - 1):
        k = y % 2
        for x in range(1, n + k - 1):
            self.graph[nodes[y][x]] = (nodes[y - 1][x - k], nodes[y - 1][x + 1 - k],
                                       nodes[y][x - 1], nodes[y][x + 1],
                                       nodes[y + 1][x - k], nodes[y + 1][x + 1 - k])
def set_seed(SEED=2031):
    # Make TensorFlow runs reproducible (assumes `os`, `numpy as np` and
    # `tensorflow as tf` are imported at module level).
    os.environ['TF_DETERMINISTIC_OPS'] = '1'
    os.environ['PYTHONHASHSEED'] = str(SEED)
    np.random.seed(SEED)
    tf.random.set_seed(SEED)
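# Usage sketch (assumes the same module-level imports as the helper above):
# call it once before building a model so NumPy and TensorFlow draws repeat
# across runs seeded with the same value.
set_seed(42)
first = np.random.rand(3)
set_seed(42)
assert (first == np.random.rand(3)).all()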
# Transform the training data
X_train = sc.fit_transform(X_train)
X_train = pd.DataFrame(X_train, columns=X_test.columns)

# Transform the testing data
X_test = sc.transform(X_test)
X_test = pd.DataFrame(X_test, columns=X_train.columns)

# Import the relevant Keras libraries
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from tensorflow import random

# Initiate the model with the Sequential class
np.random.seed(seed)
random.set_seed(seed)
model = Sequential()

# Add the hidden dense layers with dropout layers
model.add(Dense(units=64, activation='relu', kernel_initializer='uniform',
                input_dim=X_train.shape[1]))
model.add(Dropout(rate=0.5))
model.add(Dense(units=32, activation='relu', kernel_initializer='uniform',
                input_dim=X_train.shape[1]))
model.add(Dropout(rate=0.4))
model.add(Dense(units=16, activation='relu', kernel_initializer='uniform',
                input_dim=X_train.shape[1]))
model.add(Dropout(rate=0.3))
model.add(Dense(units=8, activation='relu', kernel_initializer='uniform',
                input_dim=X_train.shape[1]))
model.add(Dropout(rate=0.2))
model.add(Dense(units=4, activation='relu', kernel_initializer='uniform'))
model.add(Dropout(rate=0.1))
def random_seed(length=16):
    # Generate a random seed, apply it, and return the same value so callers
    # can reproduce the run.
    seed = binascii.b2a_hex(os.urandom(length))
    set_seed(seed)
    return seed
def main(dataset='omniglot', n=5, k=5, trainsize=None, valsize=None, epochs=200,
         batch_size=32, lr=10e-4, random_rotation=True, seed=13,
         force_download=False, device='cuda', use_tensorboard=True,
         eval_test=True, track_loss_freq=1, track_weights=True,
         track_weights_freq=100, load_weights=True, evalength=None,
         trainpbar=True):
    """
    Download the dataset if not present and train SNAIL (Simple Neural Attentive
    Meta-Learner). When training finishes successfully, the embedding network
    weights and SNAIL weights are saved, as well as the classes used for
    training/test in train_classes.txt/test_classes.txt.

    :param dataset: dataset used for training, can only be {'omniglot', 'miniimagenet'} (default 'omniglot')
    :param n: the N in N-way in meta-learning, i.e. number of classes sampled in each row of the dataset (default 5)
    :param k: the K in K-shot in meta-learning, i.e. number of observations for each class (default 5)
    :param trainsize: [omniglot-only] number of classes used in training (default 1200), while the remaining classes are for test
    :param epochs: times that the model sees the dataset (default 200)
    :param batch_size: size of a training batch (default 32)
    :param random_rotation: :bool rotate the class images by multiples of 90 degrees (default True)
    :param seed: seed for reproducibility (default 13)
    :param force_download: :bool redownload data even if the folder is present (default False)
    :param device: device used in pytorch for training, can be "cuda*" or "cpu" (default 'cuda')
    :param use_tensorboard: :bool save metrics in tensorboard (default True)
    :param eval_test: :bool after test_loss_freq batches calculate loss and accuracy on the test set (default True)
    :param track_loss_freq: :int epoch frequency of loss/accuracy saving inside tensorboard (default 1)
    :param track_weights: :bool when True log parameters histogram inside tensorboard (default True)
    :param track_weights_freq: :int step frequency of saving parameters and gradients histograms inside tensorboard (default 100)
    :param load_weights: :bool if available load snail and embedding network weights from model_weights (default True)
    """
    assert dataset in ['omniglot', 'miniimagenet']
    assert device.startswith('cuda') or device == 'cpu'
    if not torch.cuda.is_available():
        print('Warning: cuda is not available, fall back to cpu')
        device = 'cpu'
    np.random.seed(seed)
    set_seed(seed)
    if dataset == 'omniglot':
        dataloader = OmniglotDataLoader(batch_size, n, k, device)
    else:
        dataloader = MiniImagenetDataLoader(batch_size, n, k, device)
    model = SnailTrain(n, k, dataset, device=device,
                       track_loss=use_tensorboard,
                       track_layers=track_weights and use_tensorboard,
                       track_loss_freq=track_loss_freq,
                       track_params_freq=track_weights_freq,
                       random_rotation=random_rotation,
                       lr=lr,
                       trainpbar=trainpbar)
    if load_weights:
        model.load_if_exists()
    model.train(epochs, dataloader.train_dataloader(), dataloader.val_dataloader(),
                trainsize, valsize)
def quiz(request):
    params = request.POST
    try:
        seed = num(params.get("seed"))
    except (AttributeError, TypeError):
        set_seed()
        seed = randint(0, 10000000)
    if seed is None:
        set_seed()
        seed = randint(0, 10000000)
    set_seed(seed)
    try:
        cycle = params.get("cycle")
    except (AttributeError, TypeError):
        cycle = "initial"
    askbiggest = randint(0, 1) == 0
    try:
        measure = params.get("measure")
    except (AttributeError, TypeError):
        measure = None
    if measure is None or measure == "random":
        measure = choice(["extent", "count", "amount", "duration", "mass"])
    quiz = {}
    if askbiggest:
        if measure == "extent":
            quiz["question"] = "Which of these is the biggest?"
        elif measure == "count":
            quiz["question"] = "Which of these is the most numerous?"
        elif measure == "amount":
            quiz["question"] = "Which of these is the greatest amount?"
        elif measure == "duration":
            quiz["question"] = "Which of these is the longest period of time?"
        else:
            quiz["question"] = "Which of these has the greatest mass?"
    else:
        if measure == "extent":
            quiz["question"] = "Which of these is the smallest?"
        elif measure == "count":
            quiz["question"] = "Which of these is the least numerous?"
        elif measure == "amount":
            quiz["question"] = "Which of these is the smallest amount?"
        elif measure == "duration":
            quiz["question"] = "Which of these is the shortest period of time?"
        else:
            quiz["question"] = "Which of these has the least mass?"
    quiz["measure"] = measure
    quiz["seed"] = seed
    rf = randomFact(NumberFact, measure, rseed=seed)
    bestComparisons, tolerance, score = numberFactsLikeThis(NumberFact, rf, rseed=seed)
    while len(bestComparisons) < 4:
        seed = randint(0, 10000000)
        rf = randomFact(NumberFact, measure, rseed=seed)
        bestComparisons, tolerance, score = numberFactsLikeThis(NumberFact, rf, rseed=seed)
    quiz["hint"] = rf.render
    quiz["options"] = bestComparisons
    if askbiggest:
        answer = biggestNumberFact(bestComparisons)
    else:
        answer = smallestNumberFact(bestComparisons)
    quiz["answer"] = answer.title
    if request.method == "POST":
        response = request.POST
        if response.get("option") == quiz["answer"] and cycle == "answered":
            quiz["assessment"] = str(response.get("option")) + " is the correct answer: Well done!"
            # quiz["question"] = ""
            reveal = []
            for option in bestComparisons:
                reveal.append({"title": option.render_folk, "link": option.link})
            quiz["options"] = reveal
            quiz["cycle"] = "correct"
        elif cycle == "answered":
            quiz["assessment"] = str(response.get("option")) + " is not correct. Try again."
    else:
        pass
        # form = FactForm()
        # return render(request, 'blog/fact_edit.html', {'form': form})
    dyk = spuriousFact(NumberFact, 3, measure=measure)
    return render(request, 'blog/quiz.html', {
        'quiz': quiz,
        'quote': choice(quotes),
        "dyk": dyk
    })
def quiz(request):
    params = request.POST
    if len(params) == 0:
        params = request.GET
    abs_uri = request.build_absolute_uri()
    protocol, uri = abs_uri.split("://")
    site = protocol + "://" + uri.split("/")[0] + "/"
    try:
        cycle = params.get("cycle")
    except (AttributeError, TypeError):
        cycle = "initial"
    try:
        force_reveal = params.get("reveal") == "true"
    except (AttributeError, TypeError):
        force_reveal = False
    try:
        saveas = params.get("saveas")
    except (AttributeError, TypeError):
        saveas = None
    try:
        spec = request.GET.get("spec")
        if spec is None:
            spec = params.get("spec")
        quiz = quiz_from_spec(spec)
    except AttributeError:
        spec = None
    if spec is None:
        try:
            seed = num(params.get("seed"))
        except (AttributeError, TypeError):
            set_seed()
            seed = randint(0, 10000000)
        if seed is None:
            set_seed()
            seed = randint(0, 10000000)
        quiz = quiz_from_seed(seed, params)
        spec = make_spec(quiz["options"], quiz["comparison"], quiz["measure"])
    measure = quiz["measure"]
    if quiz["comparison"] == "biggest":
        if measure == "extent":
            quiz["question"] = "Which of these is the biggest?"
        elif measure == "count":
            quiz["question"] = "Which of these is the most numerous?"
        elif measure == "amount":
            quiz["question"] = "Which of these is the greatest amount?"
        elif measure == "duration":
            quiz["question"] = "Which of these is the longest period of time?"
        elif measure == "volume":
            quiz["question"] = "Which of these has the greatest volume?"
        elif measure == "area":
            quiz["question"] = "Which of these has the greatest area?"
        else:
            quiz["question"] = "Which of these has the greatest mass?"
    else:
        if measure == "extent":
            quiz["question"] = "Which of these is the smallest?"
        elif measure == "count":
            quiz["question"] = "Which of these is the least numerous?"
        elif measure == "amount":
            quiz["question"] = "Which of these is the smallest amount?"
        elif measure == "volume":
            quiz["question"] = "Which of these has the least volume?"
        elif measure == "area":
            quiz["question"] = "Which of these has the least area?"
        elif measure == "duration":
            quiz["question"] = "Which of these is the shortest period of time?"
        else:
            quiz["question"] = "Which of these has the least mass?"
    permalink = site + "quiz/?spec=" + spec
    if force_reveal:
        permalink += "&reveal=true"
    if saveas:
        poke_link(permalink, saveas)
    quiz["spec"] = spec
    if quiz["comparison"] == "biggest":
        answer = biggestNumberFact(quiz["options"])
    else:
        answer = smallestNumberFact(quiz["options"])
    quiz["answer"] = answer.title
    if request.method == "POST":
        response = request.POST
        if response.get("option") == quiz["answer"] and cycle == "answered":
            quiz["assessment"] = str(response.get("option")) + " is the correct answer: Well done!"
            # quiz["question"] = ""
            reveal = []
            for option in quiz["options"]:
                reveal.append({"title": option.render_folk, "link": option.link})
            quiz["options"] = reveal
            quiz["cycle"] = "correct"
        elif cycle == "answered":
            quiz["assessment"] = str(response.get("option")) + " is not correct. Try again."
    if request.method == "GET":
        if force_reveal:
            reveal = []
            for option in quiz["options"]:
                reveal.append({"title": option.render_folk, "link": option.link})
            quiz["assessment"] = quiz["answer"] + " is the correct answer."
            quiz["options"] = reveal
        else:
            pass
            # form = FactForm()
            # return render(request, 'blog/fact_edit.html', {'form': form})
    dyk = spuriousFact(NumberFact, 3, measure=quiz["measure"])
    promote = choice(["sponsor", "donate"])
    return render(request, 'blog/quiz.html', {
        'quiz': quiz,
        'permalink': permalink,
        'quote': choice(quotes),
        "dyk": dyk,
        "promote": promote
    })