def train(self, trainfile_name):
    print >>sys.stderr, "Reading data.."
    train_data = [tuple(x.strip().split("\t")) for x in codecs.open(trainfile_name, "r", "utf-8")]
    shuffle(train_data)
    filter_feature = get_filter()
    train_labels, train_clauses = zip(*train_data)
    train_labels = [tl.lower() for tl in train_labels]
    print >>sys.stderr, "Indexing features.."
    self.fp.index_data(train_clauses, filter_feature)
    X = numpy.asarray([self.fp.featurize(clause, filter_feature) for clause in train_clauses])
    tagset = list(set(train_labels))
    tag_index = {l: i for (i, l) in enumerate(tagset)}
    Y = numpy.asarray([[tag_index[label]] for label in train_labels])
    classifier = OneVsRestClassifier(SVC(kernel='linear'))
    if self.cv:
        print >>sys.stderr, "Starting Cross-validation for %d folds.." % (self.folds)
        y = [l[0] for l in Y]
        scores = cross_validation.cross_val_score(classifier, X, y, cv=self.folds, scoring='f1_weighted')
        print >>sys.stderr, "Scores:", scores
        print >>sys.stderr, "Average: %0.4f (+/- %0.4f)" % (scores.mean(), scores.std() * 2)
    print >>sys.stderr, "Starting training.."
    classifier.fit(X, Y)
    pickle.dump(classifier, open(self.trained_model_name, "wb"))
    pickle.dump(self.fp.feat_index, open(self.feat_index_name, "wb"))
    pickle.dump(tagset, open(self.stored_tagset, "wb"))
    print >>sys.stderr, "Done"
def load_segmented_blizzard_metadata():
    with open(DATA_PATH + '/prompts.gui') as prompts_file:
        lines = [l[:-1] for l in prompts_file]
    filepaths = [DATA_PATH + '/wavn/' + fname + '.wav' for fname in lines[::3]]
    transcripts = lines[1::3]

    # Clean up the transcripts
    for i in xrange(len(transcripts)):
        t = transcripts[i]
        t = t.replace('@ ', '')
        t = t.replace('# ', '')
        t = t.replace('| ', '')
        t = t.lower()
        transcripts[i] = t

    # We use '*' as a null padding character
    charmap = {'*': 0}
    inv_charmap = ['*']
    for t in transcripts:
        for char in t:
            if char not in charmap:
                charmap[char] = len(charmap)
                inv_charmap.append(char)

    all_data = zip(filepaths, transcripts)
    random.seed(123)
    random.shuffle(all_data)
    train_data = all_data[2 * BATCH_SIZE:]
    test_data = all_data[:2 * BATCH_SIZE]

    return charmap, inv_charmap, train_data, test_data
def deal_card(self):
    self.position += 1
    # if the deck runs out, reshuffle and start from the top
    if self.position >= len(self.deck):
        shuffle(self.deck)
        self.position = 0
    return self.deck[self.position]
def loadData():
    # filenames = os.listdir(os.getcwd())
    filenames = [dataFile]
    for filename in filenames:
        if 'txt' in filename and 'sum' not in filename:
            f = open(filename)
            lines = f.readlines()
            f.close()
            random.shuffle(lines)
            data = []
            label = []
            for i in range(len(lines)):
                line = lines[i][:]
                lines[i] = ''
                pos = line.find(' ')
                if pos < 0:
                    continue
                line = line[pos + 1:].strip()
                spLine = line.split(' ')
                if int(spLine[-1]) < 100:
                    spLine[-1] = 0
                elif int(spLine[-1]) > 100:
                    spLine[-1] = 1
                else:
                    continue
                data.append(spLine[:-1])
                label.append(spLine[-1])
            print 'array...'
            data = np.array(data, dtype=float)
            label = np.array(label, dtype=int)
            print 'score...'
            weight = getWeight(label)
            return (data, label, weight)
def _shuffle_slides(self):
    # randomize the groups and create our play list
    shuffle(self.tmp_slides)
    # now create our final playlist
    print "-----------------------------------------"
    # loop thru slide groups and skip already watched groups
    for slides in self.tmp_slides:
        # has this group been watched
        if (not self.settings["trivia_unwatched_only"] or
                (slides[0] and xbmc.getCacheThumbName(slides[0]) not in self.watched) or
                (slides[1] and xbmc.getCacheThumbName(slides[1]) not in self.watched) or
                (slides[2] and xbmc.getCacheThumbName(slides[2]) not in self.watched)):
            # loop thru slide group, only include non blank slides
            for slide in slides:
                # only add if non blank
                if slide:
                    # add slide
                    self.slide_playlist += [slide]
            print "included - %s, %s, %s" % (
                os.path.basename(slides[0]),
                os.path.basename(slides[1]),
                os.path.basename(slides[2]),
            )
        else:
            print "----------------------------------------------------"
            print "skipped - %s, %s, %s" % (
                os.path.basename(slides[0]),
                os.path.basename(slides[1]),
                os.path.basename(slides[2]),
            )
            print "----------------------------------------------------"
    print
    print "total slides selected: %d" % len(self.slide_playlist)
    print
def main():
    p = Primes(190)
    primes = []
    f = 0
    for x in range(len(p)):
        if p[x]:
            primes.append(x)
    r = 1
    for x in primes:
        # print x
        r *= x
    print long(r)
    num = long(math.sqrt(r))
    m = 0
    factors = []
    random.shuffle(primes)
    count = 0
    for x in itertools.product(reversed(primes), repeat=len(primes) - 20):
        temp = 1
        y = 0
        # print x
        while temp < num and y < len(x):
            if temp > m:
                m = temp
                print m % 10**16
                print count
            temp *= x[y]
            # print temp
            y += 1
        count += 1
    print m % (10**16)
def SGD(self, training_data, epochs, mini_batch_size, eta,
        test_data=None, weight_decay=0.0):
    """Train the neural network using mini-batch stochastic gradient
    descent.  The ``training_data`` is a list of tuples ``(x, y)``
    representing the training inputs and the desired outputs.  The
    other non-optional parameters are self-explanatory.  If
    ``test_data`` is provided then the network will be evaluated
    against the test data after each epoch, and partial progress
    printed out.  This is useful for tracking progress, but slows
    things down substantially."""
    if test_data:
        n_test = len(test_data)
    n = len(training_data)
    for j in xrange(epochs):
        random.shuffle(training_data)
        mini_batches = [
            training_data[k:k + mini_batch_size]
            for k in xrange(0, n, mini_batch_size)]
        for mini_batch in mini_batches:
            self.update_mini_batch(mini_batch, eta, weight_decay)
        if test_data:
            n_correct = float(self.evaluate(test_data))
            print "Epoch {0}: {1} / {2}".format(j, n_correct, n_test)
            self.test_accuracy.append(float('%.4f' % (n_correct / n_test)))
        else:
            print "Epoch {0} complete".format(j)
        self.train_costs.append(self.cost_val(training_data))
        print "Epoch {0}: cost = ".format(j), self.train_costs[-1]
def allPickups(self, me, passengers):
    pickup = [p for p in passengers if (p not in me.passengersDelivered and
                                        p != me.limo.passenger and
                                        p.car is None and
                                        p.lobby is not None and
                                        p.destination is not None)]
    rand.shuffle(pickup)
    return pickup
def loadArray(dirpath):
    # pattern = regex = str variable = '.+\.label' (recommended)
    pattern = '.+\.label'
    # another = 'array' (recommended)
    another = 'array'
    names = os.listdir(dirpath)
    random.shuffle(names)
    for name in names:
        if re.match(pattern, name) != None:
            # print name
            folder, prename, num, suffix = name.split('.')
            target = folder + '.' + prename + '.' + num + '.' + another
            targetpath = dirpath + '/' + target
            # find another suffix data file
            # meanwhile examine the num, length of spectrogram = length of label
            if os.path.exists(targetpath):
                # extract object from a file
                with file(target, 'rb') as f:
                    spectroArray = cPickle.load(f)
                # GPU default type is float32
                spectroArray = np.float32(spectroArray)
                with file(name, 'rb') as f:
                    labelArray = cPickle.load(f)
                # label should be int type
                labelArray = np.int32(labelArray)
                yield spectroArray, labelArray, int(num)
def random_password(bit=12):
    """generate a password randomly which includes numbers, letters
    and special characters"""
    numbers = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
    small_letters = [chr(i) for i in range(97, 123)]
    cap_letters = [chr(i) for i in range(65, 91)]
    special = ['@', '#', '$', '%', '^', '&', '*', '-']

    passwd = []
    for i in range(bit / 4):
        passwd.append(random.choice(numbers))
        passwd.append(random.choice(small_letters))
        passwd.append(random.choice(cap_letters))
        passwd.append(random.choice(special))
    for i in range(bit % 4):
        passwd.append(random.choice(numbers))
        passwd.append(random.choice(small_letters))
        passwd.append(random.choice(cap_letters))
        passwd.append(random.choice(special))
    passwd = passwd[:bit]
    random.shuffle(passwd)
    return ''.join(passwd)
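# A minimal usage sketch of random_password above (not part of the original code).
# It assumes Python 2 semantics for the `bit / 4` integer division in the function
# (use `bit // 4` under Python 3) and that `random` is already imported in the module.
for _ in range(3):
    pw = random_password(12)
    # With bit=12 the first loop contributes three characters from each class,
    # so every class should appear in the shuffled result.
    assert any(c.isdigit() for c in pw)
    assert any(c.islower() for c in pw)
    assert any(c.isupper() for c in pw)
    assert any(c in '@#$%^&*-' for c in pw)
    print(pw)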
def test_sort_index_multicolumn(self):
    import random
    A = np.arange(5).repeat(20)
    B = np.tile(np.arange(5), 20)
    random.shuffle(A)
    random.shuffle(B)
    frame = DataFrame({'A': A, 'B': B, 'C': np.random.randn(100)})

    # use .sort_values #9816
    with tm.assert_produces_warning(FutureWarning):
        frame.sort_index(by=['A', 'B'])
    result = frame.sort_values(by=['A', 'B'])
    indexer = np.lexsort((frame['B'], frame['A']))
    expected = frame.take(indexer)
    assert_frame_equal(result, expected)

    # use .sort_values #9816
    with tm.assert_produces_warning(FutureWarning):
        frame.sort_index(by=['A', 'B'], ascending=False)
    result = frame.sort_values(by=['A', 'B'], ascending=False)
    indexer = np.lexsort((frame['B'].rank(ascending=False),
                          frame['A'].rank(ascending=False)))
    expected = frame.take(indexer)
    assert_frame_equal(result, expected)

    # use .sort_values #9816
    with tm.assert_produces_warning(FutureWarning):
        frame.sort_index(by=['B', 'A'])
    result = frame.sort_values(by=['B', 'A'])
    indexer = np.lexsort((frame['A'], frame['B']))
    expected = frame.take(indexer)
    assert_frame_equal(result, expected)
def buildMap(gridSize):
    cells = {}
    # generate a list of candidate coords for cells
    roomCoords = [(x, y) for x in range(gridSize) for y in range(gridSize)]
    random.shuffle(roomCoords)
    roomCount = min(10, int(gridSize * gridSize / 2))
    for i in range(roomCount):
        # search for candidate cell
        coord = roomCoords.pop()
        while not safeToPlace(cells, coord) and len(roomCoords) > 0:
            coord = roomCoords.pop()
        if not safeToPlace(cells, coord):
            break
        width = random.randint(3, CELL_SIZE)
        height = random.randint(3, CELL_SIZE)
        cells[coord] = Room(coord[0], coord[1], width, height)
    grid = Grid()
    grid.rooms = list(cells.values())
    # connect every room to one neighbor
    for coord in cells:
        room = cells[coord]
        room1 = findNearestNeighbor(cells, coord)
        if not grid.connected(room, room1):
            grid.corridors.append(Corridor(room, room1))
    return grid
def shuffle(self):
    '''
    Shuffles the cards

    Args:
        None
    Returns:
        None
    '''
    random.shuffle(self.cards)
def ProcessLines(lines):
    random.shuffle(DEV_NAME_COLORS)
    nlines = ""
    onames = {}
    for line in lines:
        match = re.search(DEV_NAME_PATTERN, line)
        if match == None:
            continue
        oname = match.group(0)
        oname = oname[1:-1]
        if not onames.has_key(oname):
            nname = oname
            for n in DEV_NAME_LIST:
                if oname.find(n) >= 0:
                    nname = n
                    break
            onames[oname] = (len(onames), nname)
        npair = onames[oname]
        color = (npair[0] < len(DEV_NAME_COLORS) and DEV_NAME_COLORS[npair[0]]) or "F00F00"
        nname = npair[1]
        nline = (LOG_LINE_PREFIX % (color, nname)) + line[match.span()[1]:] + '\n'
        nlines += nline
    return nlines
def shuffle_val(X, y, ratio):
    data = []
    data_size = X.shape[0]
    feature_size = X.shape[1]
    train_data_size = int(data_size * ratio)
    for i in range(data_size):
        tmp_X = X[i]
        one_line = np.concatenate((tmp_X, y[i]))
        data.append(one_line)
    random.shuffle(data)
    split_index = [0, int(data_size * ratio), data_size]
    X = np.zeros((data_size, feature_size))
    y = np.zeros((data_size, 1))
    for i in range(data_size):
        X[i] = data[i][:feature_size]
        y[i] = data[i][feature_size]
    X_train = np.array(X[split_index[0]:split_index[1]])
    X_val = np.array(X[split_index[1]:split_index[2]])
    y_train = np.array(y[split_index[0]:split_index[1]])
    y_val = np.array(y[split_index[1]:split_index[2]])
    return X_train, y_train, X_val, y_val
def get_media(request, *args, **kwargs):
    """
    :param request:
    :return: list of media (movies and/or series)
    """
    category_id = request.GET.get('category_id') if request.GET.get('category_id') else None
    start_movies = request.GET.get('start_movies') if request.GET.get('start_movies') else ''
    start_series = request.GET.get('start_series') if request.GET.get('start_series') else ''
    length = int(request.GET.get('length')) if request.GET.get('length') else None
    category = Category.objects.get(pk=category_id)
    if not length:
        length = category.previews_length
    response = []
    if category_id and length and (start_movies != '' or start_series != ''):
        start_movies = int(start_movies)
        start_series = int(start_series)
        cache_key = '%s-%d-%d-%d' % (category_id, start_movies, start_series, length)
        media = cache.get(cache_key)
        if not media:
            movies_length, series_length = get_movies_series_share(length)
            limit_movies = start_movies + movies_length
            limit_series = start_series + series_length
            media = list(Movie.objects.raw_query(
                {'categories': {'$elemMatch': {'id': ObjectId(category.id)}}, 'visible': True}
            ).order_by('-id')[start_movies:limit_movies])
            series = list(Series.objects.raw_query(
                {'categories': {'$elemMatch': {'id': ObjectId(category.id)}}, 'visible': True}
            ).order_by('-id')[start_series:limit_series])
            media.extend(series)
            cache.set(cache_key, media, 8 * 3600)
        if request.GET.get('shuffle'):
            shuffle(media)
        response = [item.to_dict() for item in media]
    return HttpResponse(
        json.dumps(response),
        'content-type: text/json'
    )
def get_context_data(self, **kwargs):
    context = super(Home, self).get_context_data(**kwargs)
    member = self.request.user
    recommended_items = []
    if member.is_authenticated():
        for item in get_all_recommended(member, 12):
            if isinstance(item, Movie):
                item.type = 'movie'
            else:
                item.type = 'series'
                size = 0
                episodes = SeriesEpisode.objects.filter(series=item)
                for episode in episodes:
                    size += episode.size
                item.size = size
            recommended_items.append(item)
        if len(recommended_items) < Movie.MIN_RECOMMENDED:
            additional = Movie.MIN_RECOMMENDED - len(recommended_items)
            additional_items = Movie.objects.all().order_by('-release')[:additional]
            recommended_items.append(additional_items)
    context['items'] = recommended_items
    context['recommended_items'] = as_matrix(recommended_items, 4)
    recent_releases = list(Movie.objects.all().order_by('-release', '-id')[:Movie.MAX_RECENT])
    shuffle(recent_releases)
    sample_media = recent_releases[0]
    context['fb_share_item'] = sample_media
    return context
def buildMatrixW(NEIGHBORHOODS, NWEIGHTS, Weight):
    # Build the sparse matrix M (buildMatrixW)
    import numpy, time
    m = [[] for k in range(len(NWEIGHTS))]
    m1 = []
    for i in range(len(NWEIGHTS)):
        # total number of weights for each interaction
        m[i] = [i, len(NWEIGHTS[i][1][0])]
        m1.append([i] * m[i][1])
    import random
    m1 = [item for sublist in m1 for item in sublist]
    m1 = [m1[0:int(len(m1) / 2)], m1[int(len(m1) / 2):int(len(m1))]]
    random.shuffle(m1[1])
    M = numpy.eye(len(NEIGHBORHOODS))
    for i in range(len(m1[0])):
        M[m1[0][i]][m1[1][i]] = Weight[i]
        M[m1[1][i]][m1[0][i]] = Weight[i]
    M = M / (numpy.ones(len(M)) * sum(M))
    M1 = M
    return M1

# ------------- PRINT A NUMPY MATRIX ------------------------
def random(n=1, category=None):
    got = names(category)
    if got:
        shuffle(got)
        if n == 1:
            return choice(got)
        return got[:n]
def get_precision(request):
    t = TrainQueries.objects.all()
    t = list(enumerate(t))
    good = 0
    bad = 0
    random.shuffle(t)
    total = len(t)
    trains = [[el.query, "Added" if el.relevant else "Excluded", 0] for i, el in t[:4 * total / 5]]
    tests = [[el.query, el.relevant] for i, el in t[4 * total / 5:]]
    words = old_extract_data(trains)
    for test in tests:
        if old_rsv.calculateRsv(test[0], words) > 1.6:
            if test[1]:
                good += 1
            else:
                bad += 1
        else:
            if test[1]:
                bad += 1
            else:
                good += 1
    return HttpResponse(str(good / float(len(tests))))
def _try_creation():
    # Attempt to create an edge set
    edges = set()
    stubs = range(n) * d
    while stubs:
        potential_edges = defaultdict(itertools.repeat(0).next)
        random.shuffle(stubs)
        stubiter = iter(stubs)
        for s1, s2 in itertools.izip(stubiter, stubiter):
            if s1 > s2:
                s1, s2 = s2, s1
            if s1 != s2 and ((s1, s2) not in edges):
                edges.add((s1, s2))
            else:
                potential_edges[s1] += 1
                potential_edges[s2] += 1
        if not _suitable(edges, potential_edges):
            return None  # failed to find suitable edge set
        stubs = [node for node, potential in potential_edges.iteritems()
                 for _ in xrange(potential)]
    return edges
def getDataCsv(self, filename, header=True, yValue=False, shuffle=True):
    raw_data = open(filename, "r").read()
    data = []
    yData = []
    ids = []
    offset = (1 if header else 0)
    raw_data = raw_data.split('\n')[offset:]
    if shuffle:
        random.shuffle(raw_data)
    for row in raw_data:
        split_row = row.split(',')
        if len(split_row) < 2:
            continue
        yData.append(int(split_row[1]))
        ids.append(int(split_row[0]))
        curRow = []
        for item in split_row[(2 - yValue):]:
            if 'A' <= item <= 'Z':
                curRow.append(float(ord(item) - 65))
            else:
                curRow.append(float(item))
        data.append(curRow)
    return data, yData, ids
def __init__(self, cache_path, *, max_size=None):
    self._cache_path = cache_path
    self.max_size = max_size
    # convert to bytes
    if self.max_size is not None:
        self.max_size *= 1048576
    # TODO 2k compat
    os.makedirs(cache_path, exist_ok=True)
    self._fn_cache = dict()
    self._sz_cache = dict()
    # TODO replace this with a double linked list like boltons LRU
    self._heap_map = dict()
    self._heap = []
    # put files in to heap in random order
    files = glob(os.path.join(self._cache_path, '*feather'))
    shuffle(files)
    for fn in files:
        key = self._key_from_filename(fn)
        self._fn_cache[key] = fn
        stat = os.stat(fn)
        self._sz_cache[key] = stat.st_size
        heap_entry = [time.time(), key]
        heapq.heappush(self._heap, heap_entry)
        self._heap_map[key] = heap_entry
    # prune up front just in case
    self.__prune_files()
def fping(ips):
    # load the list of IP addresses
    rv = loads(ips)
    # shuffle the IP addresses
    shuffle(rv)
    # parameters of the fping command used for bulk pinging
    array = ['fping', '-e']
    # append the IP addresses to be bulk-pinged to the command
    for x in rv:
        array.append(x)
    # run the bulk ping and read the result back through a pipe
    p1 = subprocess.Popen(array, stdout=subprocess.PIPE)
    (pings, err) = p1.communicate()
    # output = {}
    output = []
    pings_arr = pings.split('\n')
    for i in range(len(rv)):
        tdict = {}
        pings_line = pings_arr[i].split(' ')
        tdict["ip"] = pings_line[0]
        tdict["avg"] = (pings_line[3])[1:]
        # output[pings_line[0]] = (pings_line[3])[1:]
        output.append(tdict)
def join(self):
    logger.log("We will try to join our seeds members", self.seeds, part='gossip')
    tmp = self.seeds
    others = []
    if not len(self.seeds):
        logger.log("No seeds nodes, I'm a bootstrap node?")
        return

    for e in tmp:
        elts = e.split(':')
        addr = elts[0]
        port = self.port
        if len(elts) > 1:
            port = int(elts[1])
        others.append((addr, port))
    random.shuffle(others)

    while True:
        logger.log('JOINING myself %s is joining %s nodes' % (self.name, others), part='gossip')
        nb = 0
        for other in others:
            nb += 1
            r = self.do_push_pull(other)
            # Do not merge with more than KGOSSIP distant nodes
            if nb > KGOSSIP:
                continue
        # If we got enough nodes, we exit
        if len(self.nodes) != 1 or self.interrupted or self.bootstrap:
            return
        # Do not hammer the cpu....
        time.sleep(0.1)
def add_bias_to_fitness(rawfitness, bias):
    ''' Derive new fitness values which incorporate codon bias. '''
    new_fitness = np.zeros(61)
    for i in range(len(genetic_code)):
        # Determine the new preferred, non-preferred frequencies
        family = genetic_code[i]
        aa_fit = rawfitness[codons.index(genetic_code[i][0])]
        k = len(family) - 1.
        nonpref = abs(aa_fit) * bias * -1   # Reduce fitness by 50-100%
        pref = deepcopy(aa_fit)

        # Assign randomly
        indices = [codons.index(x) for x in family]
        shuffle(indices)
        first = True
        for ind in indices:
            if first:
                new_fitness[ind] = pref
                first = False
            else:
                new_fitness[ind] = nonpref
    return new_fitness
def get_batches_fn(batch_size):
    """
    Create batches of training data
    :param batch_size: Batch Size
    :return: Batches of training data
    """
    image_paths = glob(os.path.join(data_folder, 'image_2', '*.png'))
    label_paths = {
        re.sub(r'_(lane|road)_', '_', os.path.basename(path)): path
        for path in glob(os.path.join(data_folder, 'gt_image_2', '*_road_*.png'))}
    background_color = np.array([255, 0, 0])

    random.shuffle(image_paths)
    for batch_i in range(0, len(image_paths), batch_size):
        images = []
        gt_images = []
        for image_file in image_paths[batch_i:batch_i + batch_size]:
            gt_image_file = label_paths[os.path.basename(image_file)]

            image = scipy.misc.imresize(scipy.misc.imread(image_file), image_shape)
            gt_image = scipy.misc.imresize(scipy.misc.imread(gt_image_file), image_shape)

            gt_bg = np.all(gt_image == background_color, axis=2)
            gt_bg = gt_bg.reshape(*gt_bg.shape, 1)
            gt_image = np.concatenate((gt_bg, np.invert(gt_bg)), axis=2)

            images.append(image)
            gt_images.append(gt_image)

        yield np.array(images), np.array(gt_images)
def choose_next_neighbour(routes_choices, chosen, city):
    neighbours = list(routes_choices[city] - chosen)
    if len(neighbours):
        random.shuffle(neighbours)
        count, neighbour = min((len(routes_choices[n]), n) for n in neighbours)
        return neighbour
    return None
def random_population(self, k):
    population = []
    for i in xrange(0, k):
        x = range(0, self.instance.solution_size())
        random.shuffle(x)
        population.append(x)
    return population
def gen(gear):
    charm_list = []
    for (charm, num) in gear:
        charm_list.extend([charm] * num)
    random.shuffle(charm_list)
    for charm in charm_list:
        yield charm
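# A small usage sketch for gen() above (not part of the original code); the gear
# pairs and charm names are hypothetical and only illustrate the expected
# (charm, count) input shape.
gear = [("ruby", 2), ("opal", 1), ("jade", 3)]
# gen() flattens the counts into a list, shuffles it once, then yields the
# six charms one at a time in that random order.
for charm in gen(gear):
    print(charm)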
    window = list(
        nltk.ngrams(dummy + list(sample[0]) + dummy, WINDOW_SIZE * 2 + 1))
    windows.extend([[list(window[i]), sample[1][i]]
                    for i in range(len(sample[0]))])


# In[13]:

windows[0]


# In[14]:

len(windows)


# In[15]:

random.shuffle(windows)

train_data = windows[:int(len(windows) * 0.9)]
test_data = windows[int(len(windows) * 0.9):]


# ## Modeling

# <img src="../images/04.window-classifier-architecture.png">
# <center>borrowed image from http://web.stanford.edu/class/cs224n/lectures/cs224n-2017-lecture4.pdf</center>

# In[16]:

class WindowClassifier(nn.Module):

    def __init__(self, vocab_size, embedding_size, window_size, hidden_size, output_size):
async def pug(self, ctx: commands.Context, *args): self.logger.debug( f'{ctx.author}: {ctx.prefix}{ctx.invoked_with} {ctx.args[2:]}') random_teams: bool = False map_arg: str = None team1_captain_arg: discord.Member = None team2_captain_arg: discord.Member = None for arg in args: if arg == 'random': random_teams = True self.logger.debug('Random Teams Enabled') elif arg in current_map_pool: map_arg = arg self.logger.debug(f'Force Selected Map = {map_arg}') else: member: discord.Member = await commands.MemberConverter( ).convert(ctx, arg) if member in ctx.author.voice.channel.members: if team1_captain_arg is None: team1_captain_arg = member self.logger.debug( f'Forced Team 1 Captain = {team1_captain_arg}') elif team2_captain_arg is None and member is not team1_captain_arg: team2_captain_arg = member self.logger.debug( f'Forced Team 2 Captain = {team2_captain_arg}') else: if member is team1_captain_arg: raise commands.CommandError( message= f'One user cannot be captain of 2 teams.') else: raise commands.CommandError( message=f'You can only set 2 captains.') else: raise commands.CommandError( message=f'Invalid Argument: `{arg}`') if not self.pug.enabled: self.logger.info('Pug called from queue as pug is disabled') if len(self.bot.queue_captains) > 0: team1_captain_arg = self.bot.queue_captains.pop(0) self.logger.debug( f'Forced Team 1 Captain = {team1_captain_arg}') if len(self.bot.queue_captains) > 0: team2_captain_arg = self.bot.queue_captains.pop(0) self.logger.debug( f'Forced Team 2 Captain = {team2_captain_arg}') # TODO: Refactor this mess db = Database('sqlite:///main.sqlite') await db.connect() csgo_server = self.bot.servers[0] for server in self.bot.servers: if server.available: server.available = False csgo_server = server break channel_original = ctx.author.voice.channel players: List[discord.Member] = ctx.author.voice.channel.members.copy() players = players[:self.bot.match_size] if self.bot.dev: players = [ctx.author] * 10 self.logger.info( 'Filling list of players with the message author because bot is in dev mode' ) if random_teams: shuffle(players) team1 = players[:len(players) // 2] team2 = players[len(players) // 2:] team1_captain = team1[0] team2_captain = team2[0] message_text = 'Random Teams' message = await ctx.send(message_text) embed = self.player_veto_embed(message_text=message_text, players_text='Random Teams', team1=team1, team1_captain=team1_captain, team2=team2, team2_captain=team2_captain) await message.edit(content=message_text, embed=embed) self.logger.debug(f'Random Team1: {team1}') self.logger.debug(f'Random Team2: {team2}') else: emojis = emoji_bank.copy() del emojis[len(players) - 2:len(emojis)] emojis_selected = [] team1 = [] team2 = [] if team1_captain_arg is not None: team1_captain = team1_captain_arg else: team1_captain = players[randint(0, len(players) - 1)] self.logger.debug(f'team1_captain = {team1_captain}') team1.append(team1_captain) players.remove(team1_captain) if team2_captain_arg is not None: team2_captain = team2_captain_arg else: team2_captain = players[randint(0, len(players) - 1)] self.logger.debug(f'team2_captain = {team1_captain}') team2.append(team2_captain) players.remove(team2_captain) current_team_player_select = 1 current_captain = team1_captain player_veto_count = 0 message = await ctx.send( f'{self.bot.match_size} man time\nLoading player selection...') for emoji in emojis: await message.add_reaction(emoji) emoji_remove = [] player_veto = [] if self.bot.match_size == 2: player_veto = [1, 1] for i in range(self.bot.match_size - 2): if 
i == 0 or i == self.bot.match_size - 3: player_veto.append(1) elif i % 2 == 0: player_veto.append(2) self.logger.debug(f'player_veto = {player_veto}') while len(players) > 0: message_text = '' players_text = '' if current_team_player_select == 1: message_text += f'<@{team1_captain.id}>' current_captain = team1_captain elif current_team_player_select == 2: message_text += f'<@{team2_captain.id}>' current_captain = team2_captain self.logger.debug( f'current_captain (captain currently selected) = {current_captain}' ) message_text += f' select {player_veto[player_veto_count]}\n' message_text += 'You have 60 seconds to choose your player(s)\n' i = 0 for player in players: players_text += f'{emojis[i]} - <@{player.id}>\n' i += 1 embed = self.player_veto_embed(message_text=message_text, players_text=players_text, team1=team1, team1_captain=team1_captain, team2=team2, team2_captain=team2_captain) await message.edit(content=message_text, embed=embed) if len(emoji_remove) > 0: for emoji in emoji_remove: await message.clear_reaction(emoji) emoji_remove = [] selected_players = 0 seconds = 0 while True: await asyncio.sleep(1) message = await ctx.fetch_message(message.id) for reaction in message.reactions: users = await reaction.users().flatten() if current_captain in users and selected_players < player_veto[ player_veto_count] and not ( reaction.emoji in emojis_selected): index = emojis.index(reaction.emoji) if current_team_player_select == 1: team1.append(players[index]) if current_team_player_select == 2: team2.append(players[index]) self.logger.debug( f'{current_captain} selected {players[index]}') emojis_selected.append(reaction.emoji) emoji_remove.append(reaction.emoji) del emojis[index] del players[index] selected_players += 1 seconds += 1 if seconds % 60 == 0: for _ in range(0, player_veto[player_veto_count]): index = randint(0, len(players) - 1) self.logger.debug( f'{current_captain} selected {players[index]}') if current_team_player_select == 1: team1.append(players[index]) if current_team_player_select == 2: team2.append(players[index]) emojis_selected.append(emojis[index]) del emojis[index] del players[index] selected_players += 1 if selected_players == player_veto[player_veto_count]: if current_team_player_select == 1: current_team_player_select = 2 elif current_team_player_select == 2: current_team_player_select = 1 break player_veto_count += 1 if map_arg is None: message_text = 'Map Veto Loading' else: message_text = f'Map is `{map_arg}`' players_text = 'None' embed = self.player_veto_embed(message_text=message_text, players_text=players_text, team1=team1, team1_captain=team1_captain, team2=team2, team2_captain=team2_captain) await message.edit(content=message_text, embed=embed) await message.clear_reactions() if map_arg is not None: chosen_map_embed = await self.get_chosen_map_embed(map_arg) await ctx.send(embed=chosen_map_embed) team1_steamIDs = {} team2_steamIDs = {} spectator_steamIDs = {} if ctx.author.voice.channel.category is None: team1_channel = await ctx.guild.create_voice_channel( name=f'team_{team1_captain.display_name}', user_limit=int(self.bot.match_size / 2) + 1) team2_channel = await ctx.guild.create_voice_channel( name=f'team_{team2_captain.display_name}', user_limit=int(self.bot.match_size / 2) + 1) else: team1_channel = await ctx.author.voice.channel.category.create_voice_channel( name=f'team_{team1_captain.display_name}', user_limit=int(self.bot.match_size / 2) + 1) team2_channel = await ctx.author.voice.channel.category.create_voice_channel( 
name=f'team_{team2_captain.display_name}', user_limit=int(self.bot.match_size / 2) + 1) for player in team1: await player.move_to(channel=team1_channel, reason=f'You are on {team1_captain}\'s Team') data = await db.fetch_one( 'SELECT steam_id FROM users WHERE discord_id = :player', {"player": str(player.id)}) team1_steamIDs[data[0]] = unidecode(player.display_name) self.logger.debug(f'Moved all team1 players to {team1_channel}') for player in team2: await player.move_to(channel=team2_channel, reason=f'You are on {team2_captain}\'s Team') data = await db.fetch_one( 'SELECT steam_id FROM users WHERE discord_id = :player', {"player": str(player.id)}) team2_steamIDs[data[0]] = unidecode(player.display_name) self.logger.debug(f'Moved all team2 players to {team2_channel}') if len(self.bot.spectators) > 0: for spec in self.bot.spectators: data = await db.fetch_one( 'SELECT steam_id FROM users WHERE discord_id = :spectator', {"spectator": str(spec.id)}) spectator_steamIDs[data[0]] = unidecode(spec.display_name) self.logger.info('Added Spectators') if map_arg is None: map_list = await self.map_veto(ctx, team1_captain, team2_captain) else: map_list = [map_arg] bot_ip = self.bot.web_server.IP if self.bot.bot_IP != '': bot_ip = self.bot.bot_IP team1_country = 'IE' team2_country = 'IE' team1_flags = [] team2_flags = [] team1_flag_request = '' team2_flag_request = '' for player in team1_steamIDs: team1_flag_request += SteamID(player).__str__() + ',' team1_flag_request = team1_flag_request[:-1] for player in team2_steamIDs: team2_flag_request += SteamID(player).__str__() + ',' team2_flag_request = team2_flag_request[:-1] self.logger.info('Making request to the Steam API to get player flags') session = aiohttp.ClientSession() async with session.get( f'https://api.steampowered.com/ISteamUser/GetPlayerSummaries/v2/' f'?key={self.bot.steam_web_api_key}' f'&steamids={team1_flag_request}') as resp: player_info = await resp.json() for player in player_info['response']['players']: if 'loccountrycode' in player: team1_flags.append(player['loccountrycode']) await session.close() session = aiohttp.ClientSession() async with session.get( f'https://api.steampowered.com/ISteamUser/GetPlayerSummaries/v2/' f'?key={self.bot.steam_web_api_key}' f'&steamids={team2_flag_request}') as resp: player_info = await resp.json() for player in player_info['response']['players']: if 'loccountrycode' in player: team2_flags.append(player['loccountrycode']) await session.close() # TODO: Add check for EU/CIS flag if len(team1_flags) > 0: team1_country = Counter(team1_flags).most_common(1)[0][0] if len(team2_flags) > 0: team2_country = Counter(team2_flags).most_common(1)[0][0] team1_name = f'team_{unidecode(team1_captain.display_name)}' team2_name = f'team_{unidecode(team2_captain.display_name)}' match_id = f'PUG_{datetime.now().strftime("%Y-%m-%d-%H-%M-%S")}' match_config = { 'matchid': match_id, 'num_maps': 1, 'maplist': map_list, 'skip_veto': True, 'veto_first': 'team1', 'side_type': 'always_knife', 'players_per_team': int(self.bot.match_size / 2), 'min_players_to_ready': 1, 'spectators': { 'players': spectator_steamIDs, }, 'team1': { 'name': team1_name, 'tag': 'team1', 'flag': team1_country, 'players': team1_steamIDs }, 'team2': { 'name': team2_name, 'tag': 'team2', 'flag': team2_country, 'players': team2_steamIDs }, 'cvars': { 'get5_event_api_url': f'http://{bot_ip}:{self.bot.web_server.port}/', 'get5_print_damage': '1', } } self.logger.debug(f'Match Config =\n {pprint.pformat(match_config)}') with open(f'./{match_id}.json', 'w') as 
outfile: json.dump(match_config, outfile, ensure_ascii=False, indent=4) await ctx.send( 'If you are coaching, once you join the server, type .coach') loading_map_message = await ctx.send('Server is being configured') await asyncio.sleep(0.3) get5_trigger = valve.rcon.execute( (csgo_server.server_address, csgo_server.server_port), csgo_server.RCON_password, 'exec triggers/get5') self.logger.debug( f'Executing get5_trigger (something for Yannicks Server) \n {get5_trigger}' ) await asyncio.sleep(10) await loading_map_message.delete() load_match = valve.rcon.execute( (csgo_server.server_address, csgo_server.server_port), csgo_server.RCON_password, f'get5_loadmatch_url "{bot_ip}:{self.bot.web_server.port}/{match_id}"' ) self.logger.debug(f'Load Match via URL\n {load_match}') await asyncio.sleep(5) connect_embed = await self.connect_embed(csgo_server) if self.bot.connect_dm: for player in team1 + team2 + self.bot.spectators: try: await player.send(embed=connect_embed) except (discord.HTTPException, discord.Forbidden): await ctx.send( f'Unable to PM <@{player.id}> the server details.') self.logger.warning(f'{player} was not sent the IP via DM') else: await ctx.send(embed=connect_embed) score_embed = discord.Embed() score_embed.add_field(name='0', value=team1_name, inline=True) score_embed.add_field(name='0', value=team2_name, inline=True) score_message = await ctx.send('Match in Progress', embed=score_embed) csgo_server.get_context( ctx=ctx, channels=[channel_original, team1_channel, team2_channel], players=team1 + team2, score_message=score_message) csgo_server.set_team_names([team1_name, team2_name]) self.bot.web_server.add_server(csgo_server) if not self.pug.enabled: self.queue_check.start() self.logger.info('Queue Starting Back')
def shuffle_all(self):
    self._deck.extend(self._discard)
    shuffle(self._deck)
    self._discard = []
def run(args): #load configuration config = Config(args) #load data dataset = ld.LoadData(args.input) data = dataset.data label = dataset.label anomaly_num = dataset.anomaly_num feature_index = dataset.feature_index # feature_item_num = np.sum(dataset.feature_item_num) feature_item_num = dataset.feature_item_num # number of unique item ids in dataset instance instance_num = len(data) #for training training_data = data[:instance_num-2*anomaly_num] training_data = ld.get_shaped_data(training_data,config.batch_size,config.block_size,len(data[0])) print("----------finish shaping training data!-----------") instance_dim = len(training_data[0][0][0]) #for testing testing_data = data[instance_num-2*anomaly_num:] testing_label = label[instance_num-2*anomaly_num:] #shuffle testing data,to ensure testing data and label are shuffled in the same way randnum = config.seed random.seed(randnum) random.shuffle(testing_data) random.seed(randnum) random.shuffle(testing_label) testing_data = ld.get_shaped_data(testing_data,config.batch_size,config.block_size,len(data[0])) print("----------finish shaping testing data!-----------") testing_data_num = len(testing_label) - len(testing_label)%(config.block_size*config.batch_size) testing_label = testing_label[:testing_data_num] # testing data instance level ground truth print("training data",training_data.shape,instance_dim) print("testing data",testing_data.shape,testing_data_num,testing_data[0].shape) print("anomaly_num",anomaly_num) print("number of normal data in testing data:",np.sum(testing_label),len(testing_label)) print("feature_item_num",feature_item_num) with tf.Graph().as_default(),tf.Session() as sess: #graph settings FM_weight_dim = config.FM_weight_dim batch_size = config.batch_size block_size = config.block_size attention_dim = config.attention_dim autoencoder_hidden_dim = config.autoencoder_hidden_dim lstm_dropout_keep_prob = config.lstm_dropout_keep_prob lstm_layer_num = config.lstm_layer_num lstm_hidden_size = config.lstm_hidden_size is_training = config.is_training gan_hidden_dim = config.gan_hidden_dim alpha = config.alpha beta = config.beta noise = config.noise learning_rate = config.learning_rate model = AnomalyNet(feature_index, FM_weight_dim, feature_item_num, batch_size, block_size, instance_dim, attention_dim, autoencoder_hidden_dim, lstm_dropout_keep_prob, lstm_layer_num, lstm_hidden_size, is_training, gan_hidden_dim, alpha, beta, noise, learning_rate) saver = tf.train.Saver(max_to_keep=10)#saver for checkpoints, add var_list because of batching training init = tf.global_variables_initializer() sess.run(init) flag = 0 for epoch in range(config.epoch): # training for i in range(len(training_data)): flag = flag + 1 pointer = flag % 100 curr_batch = training_data[i] feed_dict = {model.data: curr_batch} if pointer < 50: result = sess.run((model.G_train),feed_dict=feed_dict) else: result = sess.run((model.D_train),feed_dict=feed_dict) # result = sess.run((model.G_train,model.D_train),feed_dict=feed_dict) if i % 50 == 0: result = sess.run((model.generator_loss,model.discriminator_loss),feed_dict=feed_dict) print("current epoch %d, in batch %d, current flag is %d, generator average loss %.4f, discriminator average loss %.4f"%(epoch,i,pointer,result[0],result[1])) # result = sess.run((model.test1,model.test2,model.test3,model.test4),feed_dict=feed_dict) # print(result[0],result[0].shape,result[1],result[1].shape)#,result[2][0:10],result[2].shape,result[3],result[3].shape) # model_path = "saved_model/epoch_%s.ckpt" % (epoch) # saver.save(sess, 
model_path) # ''' # ##### # testing # ##### # ''' #instance output instance_loss_list = [] block_loss_list = [] for i in range(len(testing_data)): curr_batch = testing_data[i] feed_dict = {model.data: curr_batch} instance_loss,block_loss = sess.run((model.instance_total_loss,model.block_total_loss),feed_dict=feed_dict) for i in range(len(instance_loss)): instance_loss_list.append(instance_loss[i]) for i in range(len(block_loss)): block_loss_list.append(block_loss[i]) bw = open(args.instance_output+'_%d'%(epoch), 'w')#by dingfu bw.write("true pred\n") for i in range(len(instance_loss_list)): bw.write(str(testing_label[i])+ " "+str(instance_loss_list[i])+"\n") bw.close() #block output testing_block_num = testing_data_num // config.block_size block_true = [] for i in range(testing_block_num): true_sum = np.sum(testing_label[i*config.block_size:(i+1)*config.block_size]) # generate ground truth if true_sum < config.block_size*config.block_ratio: block_true.append(0) else: block_true.append(1) bw = open(args.block_output+'_%d'%(epoch), 'w')#by dingfu bw.write("true pred\n") for i in range(testing_block_num): bw.write(str(block_true[i])+ " "+str(block_loss_list[i])+"\n") bw.close() # print(true_block,pred_block) instance_auc,_,_,_ = newmetrics.roc(testing_label,instance_loss_list,pos_label=0,output_path=args.instance_output+'_%d'%(epoch))#by dingfu block_auc,_,_,_ = newmetrics.roc(block_true,block_loss_list,pos_label=0,output_path=args.block_output+'_%d'%(epoch))#by dingfu #print("instance level evaluation: ",instance_eval) print('epoch:',epoch," instance level auc: ",instance_auc) #print("block level evaluation: ",block_eval) print('epoch:',epoch," block level auc: ",block_auc)
def get_batches( self, dataloaders: List[EmmentalDataLoader], model ) -> Iterator[Tuple[List[str], Dict[str, Union[Tensor, List[str]]], Dict[ str, Tensor], Dict[str, str], str, str, ]]: """Generate batch generator from all dataloaders in round robin order. Args: dataloaders(list): List of dataloaders. Returns: genertor: A generator of all batches. """ task_to_label_dicts = [ dataloader.task_to_label_dict for dataloader in dataloaders ] uid_names = [dataloader.uid for dataloader in dataloaders] data_names = [dataloader.data_name for dataloader in dataloaders] splits = [dataloader.split for dataloader in dataloaders] data_loaders = [iter(dataloader) for dataloader in dataloaders] # Calc the batch size for each dataloader batch_counts = [len(dataloader) for dataloader in dataloaders] if self.fillup: batch_counts = [max(batch_counts)] * len(dataloaders) for idx in range(len(dataloaders)): if dataloaders[idx].n_batches: batch_counts[idx] = dataloaders[idx].n_batches dataloader_indexer = [] for idx, count in enumerate(batch_counts): dataloader_indexer.extend([idx] * count) random.shuffle(dataloader_indexer) for data_loader_idx in dataloader_indexer: uid_name = uid_names[data_loader_idx] try: X_dict, Y_dict = next(data_loaders[data_loader_idx]) except StopIteration: data_loaders[data_loader_idx] = iter( dataloaders[data_loader_idx]) X_dict, Y_dict = next(data_loaders[data_loader_idx]) if self.augment_k and self.augment_k > 1 and self.augment_k > self.enlarge: model.eval() with torch.no_grad(): uid_dict, loss_dict, prob_dict, gold_dict = model( X_dict[uid_name], X_dict, Y_dict, task_to_label_dicts[data_loader_idx], ) model.train() # Collect losses loss_dist = list(loss_dict.values())[0].detach().cpu().numpy() # row-based weighted sampling dist = normalize(np.array(loss_dist).reshape( -1, self.augment_k), axis=1, norm="l1") select_idx = np.vstack([ i * self.augment_k + np.array( choices(range(self.augment_k), dist[i], k=self.enlarge)) if max(dist[i]) > 0 else i * self.augment_k + np.array(choices(range(self.augment_k), k=self.enlarge)) for i in range(dist.shape[0]) ]).reshape(-1) X_new_dict = {"image": [], uid_name: []} Y_new_dict = {"labels": []} for idx in select_idx: X_new_dict[uid_name].append(X_dict[uid_name][idx]) X_new_dict["image"] = X_dict["image"][select_idx] Y_new_dict["labels"] = Y_dict["labels"][select_idx] X_dict = X_new_dict Y_dict = Y_new_dict yield X_dict[uid_name], X_dict, Y_dict, task_to_label_dicts[ data_loader_idx], data_names[data_loader_idx], splits[ data_loader_idx]
# Card dealing, step 1
print('\n **7 POKER START**\n')
sleep(1)
print('기본금 : 500원')  # "Ante: 500 won"
my_money -= 500
your_money -= 500
sums += 1000
# sleep(1)
print('카드 분배')  # "Dealing cards"

# Decide who goes first -- rock-paper-scissors??

# Shuffle the card pile: merge the groups in `cards` into a single deck,
# shuffle it, then deal one card at a time from the front.
deck = functools.reduce(lambda x, y: x + y, cards)
random.shuffle(deck)
for i, c in enumerate(deck):
    if i == 10:
        break
    elif i % 2 == 0:
        your_deck.append(c)
    else:
        my_deck.append(c)
# Five cards dealt to each player!
# print(my_deck, your_deck)
import time

before = time.time()
for i in range(100):
    x = 1 + 1
after = time.time()
execution_time = after - before
print(execution_time)

Output:
0.0

#Qu.78 Please write a program to shuffle and print the list [3,6,7,8].

import random

lst = [3, 6, 7, 8]
random.shuffle(lst)
print(lst)

Output:
[6, 7, 8, 3]

#Qu.79 Please write a program to generate all sentences where subject is in ["I", "You"] and verb is in ["Play", "Love"] and the object is in ["Hockey","Football"].

subjects = ["I", "You"]
verbs = ["Play", "Love"]
objects = ["Hockey", "Football"]
for sub in subjects:
    for verb in verbs:
        for obj in objects:
            print("{} {} {}".format(sub, verb, obj))
p = "task{}.tex" k = 0 while True: name = p.format("" if k == 0 else k) if not os.path.isfile(name): break else: k += 1 return name pp = 24 tasks = [task(57, 9999, 1) for _ in range(pp // 2)] tasks.extend([task(57, 9999, -1) for _ in range(int(pp * 1.5 // 1))]) random.shuffle(tasks) lines = 5 parts = [tasks[i:i + 4] for i in range(0, lines * 4, 4)] def make_one(data): # data [(6774, 3588, '-'), (3714, 2183, '-'), (6018, 1168, '-'), (1430, 83, '-')] s = [] for d in data: a, b, c = map(str, d) s.append( latex_inner_inner_template.replace("%%%0%%%", a).replace( "%%%1%%%", b).replace("%%%2%%%", c)) return latex_inner_template.replace("%%%0%%%", s[0]).replace( "%%%1%%%", s[1]).replace("%%%2%%%", s[2]).replace("%%%3%%%", s[3])
        values.append(value)
        for col in range(number_of_columns - 1):
            value = sheet.cell(row, col).value
            try:
                value = str(value)
            except ValueError:
                pass
            finally:
                values.append(value)
        item = Arm(*values)
        items.append(item)

# previous matches from last week, so people do not get matched with the same person again
prev_matchlist = [(9, 4), (15, 17), (1, 31), (24, 34), (29, 26), (32, 30), (3, 16), (18, 10),
                  (19, 12), (21, 5), (8, 2), (28, 22), (11, 7), (35, 6), (20, 25), (27, 23),
                  (33, 13), (14, 17)]

# randomizing the list of items
random.shuffle(items)

# printing out the information
# for item in items:
#     print item
#     print("Accessing one single value (eg. DSPName): {0}".format(item.First_Name))
#     print

# function that checks to see if a person has already been matched up with someone else
def alreadymatched(matchlst, (pair1, pair2)):
    for i in range(len(matchlst)):
        # seeing if either the first or second person has already been paired
        if pair1 in matchlst[i] or pair2 in matchlst[i]:
            return True
        else:
            if i == len(matchlst) - 1:
                # reached the end of the list without finding either person matched
                return False
def get_pool_core(progressive, shuffle, difficulty, timer, goal, mode, swords, retro, logic): pool = [] placed_items = {} precollected_items = [] clock_mode = None treasure_hunt_count = None treasure_hunt_icon = None pool.extend(alwaysitems) def place_item(loc, item): assert loc not in placed_items placed_items[loc] = item def want_progressives(): return random.choice([True, False]) if progressive == 'random' else progressive == 'on' # provide boots to major glitch dependent seeds if logic in ['owglitches', 'nologic']: precollected_items.append('Pegasus Boots') pool.remove('Pegasus Boots') pool.extend(['Rupees (20)']) if want_progressives(): pool.extend(progressivegloves) else: pool.extend(basicgloves) lamps_needed_for_dark_rooms = 1 # insanity shuffle doesn't have fake LW/DW logic so for now guaranteed Mirror and Moon Pearl at the start if shuffle == 'insanity_legacy': place_item('Link\'s House', 'Magic Mirror') place_item('Sanctuary', 'Moon Pearl') else: pool.extend(['Magic Mirror', 'Moon Pearl']) if timer == 'display': clock_mode = 'stopwatch' elif timer == 'ohko': clock_mode = 'ohko' diff = difficulties[difficulty] pool.extend(diff.baseitems) # expert+ difficulties produce the same contents for # all bottles, since only one bottle is available if diff.same_bottle: thisbottle = random.choice(diff.bottles) for _ in range(diff.bottle_count): if not diff.same_bottle: thisbottle = random.choice(diff.bottles) pool.append(thisbottle) if want_progressives(): pool.extend(diff.progressiveshield) else: pool.extend(diff.basicshield) if want_progressives(): pool.extend(diff.progressivearmor) else: pool.extend(diff.basicarmor) if want_progressives(): pool.extend(['Progressive Bow'] * 2) elif swords != 'swordless': pool.extend(diff.basicbow) else: pool.extend(['Bow', 'Silver Arrows']) if swords == 'swordless': pool.extend(diff.swordless) elif swords == 'vanilla': swords_to_use = diff.progressivesword.copy() if want_progressives() else diff.basicsword.copy() random.shuffle(swords_to_use) place_item('Link\'s Uncle', swords_to_use.pop()) place_item('Blacksmith', swords_to_use.pop()) place_item('Pyramid Fairy - Left', swords_to_use.pop()) if goal != 'pedestal': place_item('Master Sword Pedestal', swords_to_use.pop()) else: place_item('Master Sword Pedestal', 'Triforce') else: pool.extend(diff.progressivesword if want_progressives() else diff.basicsword) if swords == 'assured': if want_progressives(): precollected_items.append('Progressive Sword') pool.remove('Progressive Sword') else: precollected_items.append('Fighter Sword') pool.remove('Fighter Sword') pool.extend(['Rupees (50)']) extraitems = total_items_to_place - len(pool) - len(placed_items) if timer in ['timed', 'timed-countdown']: pool.extend(diff.timedother) extraitems -= len(diff.timedother) clock_mode = 'stopwatch' if timer == 'timed' else 'countdown' elif timer == 'timed-ohko': pool.extend(diff.timedohko) extraitems -= len(diff.timedohko) clock_mode = 'countdown-ohko' if goal == 'triforcehunt': pool.extend(diff.triforcehunt) extraitems -= len(diff.triforcehunt) treasure_hunt_count = diff.triforce_pieces_required treasure_hunt_icon = 'Triforce Piece' for extra in diff.extras: if extraitems > 0: pool.extend(extra) extraitems -= len(extra) if goal == 'pedestal' and swords != 'vanilla': place_item('Master Sword Pedestal', 'Triforce') if retro: pool = [item.replace('Single Arrow','Rupees (5)') for item in pool] pool = [item.replace('Arrows (10)','Rupees (5)') for item in pool] pool = [item.replace('Arrow Upgrade (+5)','Rupees (5)') for item in 
pool] pool = [item.replace('Arrow Upgrade (+10)','Rupees (5)') for item in pool] pool.extend(diff.retro) if mode == 'standard': key_location = random.choice(['Secret Passage', 'Hyrule Castle - Boomerang Chest', 'Hyrule Castle - Map Chest', 'Hyrule Castle - Zelda\'s Chest', 'Sewers - Dark Cross']) place_item(key_location, 'Small Key (Universal)') else: pool.extend(['Small Key (Universal)']) return (pool, placed_items, precollected_items, clock_mode, treasure_hunt_count, treasure_hunt_icon, lamps_needed_for_dark_rooms)
def genTest(na, preset): # na = input("Test Name: ") # preset = input("Would you like to use a preset? 1 = All types, 2 = National level test, 3 = Regional level test, 4 = Aristo Spam, 5 = Patristo Spam, 6 = No: ") l = [] if preset == "1": l = [ "1 2", "1 1", "1 0", "2 2", "2 1", "2 0", "3 D", "3 E", "3 C", "4 D", "4 E", "5 D", "5 E", "5 C", "6 D", "6 E", "6 C", "7 D", "7 E", "8 1", "9 L", "9 S", "9 W", "10 D", "10 E", "11 D", "11 C", "12 D", "12 C", ] n = 29 elif preset == "2": aff = ["3 D", "3 E", "3 C"] vig = ["5 D", "5 E", "5 C"] hill2 = ["6 D", "6 E", "6 C"] hill3 = ["7 D", "7 E"] bac = ["9 L", "9 S", "9 W"] mor = ["11 D", "11 C", "12 D", "12 C"] random.shuffle(aff) random.shuffle(vig) random.shuffle(hill2) random.shuffle(hill3) random.shuffle(bac) random.shuffle(mor) l = [ "1 2", "1 2", "1 2", "1 2", "1 2", "1 2", "1 2", "1 2", "1 2", "1 2", "2 2", "2 1", "2 0", aff[0], aff[1], "4 D", "4 E", vig[0], vig[1], hill2[0], hill2[1], hill3[0], "8 1", bac[0], bac[1], "10 D", "10 E", mor[0], mor[1], mor[2], ] n = 30 elif preset == "3": enc = ["3 E", "4 E", "5 E", "6 E"] bac = ["9 L", "9 S", "9 W"] random.shuffle(enc) random.shuffle(bac) l = [ "1 0", "1 1", "1 2", "1 2", "1 2", "1 2", "2 2", "2 0", "3 D", "4 D", "5 D", "6 D", enc[0], enc[1], "8 1", bac[0], bac[1], "11 D", "12 D", ] n = 19 elif preset == "4": l = ["1 2"] * 20 n = 20 elif preset == "5": l = ["2 2"] * 10 n = 10 else: l = preset n = len(l) q = genQuotes(n + 1) test = {"TEST.0": header(n, na)} test["CIPHER.0"] = genRandMono(0, q[len(q) - 1], False, 0) for i in range(n): question = l[i].split(" ") if int(question[0]) <= 2: test["CIPHER." + str(i + 1)] = genRandMono( i, q[i], "1" if question[0] == "2" else 0, question[1] ) if int(question[0]) == 3: test["CIPHER." + str(i + 1)] = genRandAffine(i, q[i], question[1]) if int(question[0]) == 4: test["CIPHER." + str(i + 1)] = genRandCaesar(i, q[i], question[1]) if int(question[0]) == 5: test["CIPHER." + str(i + 1)] = genRandVig(i, q[i], question[1]) if int(question[0]) == 6: test["CIPHER." + str(i + 1)] = genRand2x2Hill(i, q[i], question[1]) if int(question[0]) == 7: test["CIPHER." + str(i + 1)] = genRand3x3Hill(i, q[i], question[1]) if int(question[0]) == 8: test["CIPHER." + str(i + 1)] = genRandXeno(i, q[i], question[1]) if int(question[0]) == 9: test["CIPHER." + str(i + 1)] = genRandBacon(i, q[i], question[1]) if int(question[0]) == 10: test["CIPHER." + str(i + 1)] = RSA(i, question[1]) if int(question[0]) == 11: test["CIPHER." + str(i + 1)] = genRandMorbit(i, q[i], question[1]) if int(question[0]) == 12: test["CIPHER." + str(i + 1)] = genRandPollux(i, q[i], question[1]) file = open("CodeTests/" + na + ".json", "w") file.write(json.dumps(test)) file.close() return json.dumps(test)
def main(): """Dizzy Atmosphere by Dizzy Gillespie""" args = get_args() seed = args.seed """ADD RANDOM SEED STUFF HERE""" if seed is not None: random.seed(seed) cards = ['2', '3', '4', '5', '6', '7', '8', '9', '10', 'J', 'Q', 'K', 'A'] suite = ['♥', '♠', '♣', '♦'] # value = ['2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14'] deck = sorted(product(suite, cards)) random.shuffle(deck) player1_wins = 0 player2_wins = 0 while len(deck) != 0: """Ready Player 1?""" player1 = deck.pop() player1_card = player1[1] player1_suite = player1[0] player1_value = player1[1] if player1_value == 'J': player1_value = 11 if player1_value == 'Q': player1_value = 12 if player1_value == 'K': player1_value = 13 if player1_value == 'A': player1_value = 14 player1_hand = player1_suite + player1_card """Ready Player 2?""" player2 = deck.pop() player2_card = player2[1] player2_suite = player2[0] player2_value = player2[1] if player2_value == 'J': player2_value = 11 if player2_value == 'Q': player2_value = 12 if player2_value == 'K': player2_value = 13 if player2_value == 'A': player2_value = 14 player2_hand = player2_suite + player2_card """This means war""" if int(player1_value) > int(player2_value): winner = ('P1') print('{:>3} {:>3} {}'.format(player1_hand, player2_hand, winner)) player1_wins += 1 elif int(player1_value) < int(player2_value): winner = ('P2') print('{:>3} {:>3} {}'.format(player1_hand, player2_hand, winner)) player2_wins += 1 else: winner = ('WAR!') print('{:>3} {:>3} {}'.format(player1_hand, player2_hand, winner)) """and the winner is...""" if player1_wins > player2_wins: print('P1 {} P2 {}: Player 1 wins'.format(player1_wins, player2_wins)) elif player1_wins < player2_wins: print('P1 {} P2 {}: Player 2 wins'.format(player1_wins, player2_wins)) else: print('P1 {} P2 {}: DRAW'.format(player1_wins, player2_wins)) """Nobody. They both suck at random chance games."""
def duel(bot, channel, instigator, target, is_admin=False, warn_nonexistent=True): target = tools.Identifier(target or '') if not target: bot.reply("Who did you want to duel?") return module.NOLIMIT if get_unduelable(bot, instigator): bot.say("Try again when you're duelable, %s." % instigator) return module.NOLIMIT if target == bot.nick: bot.say("I refuse to duel with the yeller-bellied likes of you!") return module.NOLIMIT if is_self(bot, instigator, target): if not get_self_duels(bot, channel): bot.say("You can't duel yourself, you coward!") return module.NOLIMIT if target.lower() not in bot.privileges[channel.lower()]: if warn_nonexistent: bot.say("You can't duel people who don't exist!") return module.NOLIMIT target_unduelable = get_unduelable(bot, target) if target_unduelable and not is_admin: bot.say("You SHALL NOT duel %s!" % target) return module.NOLIMIT time_since = time_since_duel(bot, channel, instigator) if time_since < TIMEOUT: bot.notice( "Next duel will be available in %d seconds." % (TIMEOUT - time_since), instigator) return module.NOLIMIT if is_admin and target_unduelable: bot.notice("Just so you know, %s is marked as unduelable." % target, instigator) kicking = kicking_available(bot, channel) msg = "%s vs. %s, " % (instigator, target) msg += "loser gets kicked!" if kicking else "loser's a yeller belly!" bot.say(msg) combatants = sorted([instigator, target]) random.shuffle(combatants) winner = combatants.pop() loser = combatants.pop() now = time.time() bot.db.set_nick_value(instigator, 'duel_last', now) bot.db.set_channel_value(channel, 'duel_last', now) winner_loss_streak = get_loss_streak(bot, winner) loser_win_streak = get_win_streak(bot, loser) duel_finished(bot, winner, loser) win_streak = get_win_streak(bot, winner) streak = ' (Streak: %d)' % win_streak if win_streak > 1 else '' broken_streak = ', recovering from a streak of %d losses' % winner_loss_streak if winner_loss_streak > 1 else '' broken_streak += ', ending %s\'s streak of %d wins' % ( loser, loser_win_streak) if loser_win_streak > 1 else '' bot.say("%s wins%s!%s" % (winner, broken_streak, streak)) if loser == target: kmsg = "%s done killed ya!" % instigator else: kmsg = "You done got yerself killed!" if kicking and not target_unduelable: bot.write(['KICK', channel, loser], kmsg) else: bot.say(kmsg[:-1] + ", " + loser + kmsg[-1:])
def lombScargle(frequencyRange, objectmag=20, loopNo=looooops, df=0.001, fmin=0.001,
                numsteps=100000, modulationAmplitude=0.1, Nquist=200):
    # frequency range and object mag in list
    # global totperiod, totmperiod, totpower, date, amplitude, frequency, periods, LSperiod, power, mag, error, SigLevel
    results = {}
    totperiod = []
    totmperiod = []
    totpower = []  # reset
    SigLevel = []
    filterletter = ['o', 'u', 'g', 'r', 'i', 'z', 'y']
    period = 1 / frequencyRange
    if period > 0.5:
        numsteps = 10000
    elif period > 0.01:
        numsteps = 100000
    else:
        numsteps = 200000
    freqs = fmin + df * np.arange(numsteps)  # for manual use
    allobsy, uobsy, gobsy, robsy, iobsy, zobsy, yobsy = [], [], [], [], [], [], []  # reset
    measuredpower = []  # reset
    y = [allobsy, uobsy, gobsy, robsy, iobsy, zobsy, yobsy]  # for looping only
    for z in range(1, len(y)):
        # y[z] = averageFlux(obs[z], frequencyRange[frange], 30)
        # amplitude calculation for observations and frequency range
        y[z] = ellipsoidalFlux(obs[z], frequencyRange, 30)
        y[z] = [modulationAmplitude * t for t in y[z]]  # scaling
        for G in range(0, len(y[z])):
            flareMinute = int(round((obs[z][G] * 24 * 60 * 2) % ((dayinsec / (30 * 2)) * flarecycles)))
            y[z][G] = y[z][G] + longflare[flareMinute]  # add flares; swapped to seconds but keeping the name introduces fewer bugs

    date = []
    amplitude = []
    mag = []
    error = []
    filts = []
    for z in range(1, len(y)):
        if objectmag[z] > sat[z] and objectmag[z] < lim[z]:
            # date.extend([x for x in obs[z]])
            date.extend(obs[z])
            # scale amplitude and add Poisson noise
            amplitude = [t + random.gauss(0, magUncertainy(zeroPoints[z], objectmag[z], 30, background, FWHMeff[z])) for t in y[z]]
            mag.extend([objectmag[z] - t for t in amplitude])  # add actual mag
            error.extend([sigSys + magUncertainy(zeroPoints[z], objectmag[z], 30, background, FWHMeff[z]) + 0.2] * len(amplitude))
            filts.extend([filterletter[z]] * len(amplitude))
            phase = [(day % (period * 2)) / (period * 2) for day in obs[z]]
            pmag = [objectmag[z] - t for t in amplitude]
            # plt.plot(phase, pmag, 'o', markersize=4)
            # plt.xlabel('Phase')
            # plt.ylabel('Magnitude')
            # plt.gca().invert_yaxis()
            # plt.title('filter'+str(z)+', Period = '+str(period))
            # plt.show()

    # plt.plot(date, mag, 'o')
    # plt.xlim(lower, higher)
    # plt.xlabel('time (days)')
    # plt.ylabel('mag')
    # plt.gca().invert_yaxis()
    # plt.show()

    model = periodic.LombScargleMultibandFast(fit_period=False)
    model.fit(date, mag, error, filts)
    power = model.score_frequency_grid(fmin, df, numsteps)
    if period > 10.:
        model.optimizer.period_range = (10, 110)
    elif period > 0.51:
        model.optimizer.period_range = (0.5, 10)
    elif period > 0.011:
        model.optimizer.period_range = (0.01, 0.52)
    else:
        model.optimizer.period_range = (0.0029, 0.012)
    LSperiod = model.best_period
    if period < 10:
        higher = 10
    else:
        higher = 100
    # fig, ax = plt.subplots()
    # ax.plot(1./freqs, power)
    # ax.set(xlim=(0, higher), ylim=(0, 1.2),
    #        xlabel='period (days)', ylabel='Lomb-Scargle Power',
    #        title='Period = '+str(period)+', MeasuredPeriod = '+str(LSperiod)+', Periodx20 = '+str(period*20))
    # plt.show()

    phase = [(day % (period * 2)) / (period * 2) for day in date]
    # idealphase = [(day % (period*2))/(period*2) for day in dayZ]
    # plt.plot(idealphase, Zmag, 'ko')
    # plt.plot(phase, mag, 'o', markersize=4)
    # plt.xlabel('Phase')
    # plt.ylabel('Magnitude')
    # plt.gca().invert_yaxis()
    # plt.title('Period = '+str(period)+', MeasuredPeriod = '+str(LSperiod)+', Periodx20 = '+str(period*20))
    # plt.show()

    # print('actual period', period, 'measured period', np.mean(LSperiod), power.max())
    # print(frequencyRange[frange], 'z', z)
    # totperiod.append(period)
    # totmperiod.append(np.mean(LSperiod))
    # totpower.append(power.max())
    mpower = power.max()
    measuredpower.append(power.max())  # should this correspond to period power and not max power?

    # Bootstrap significance: shuffle the dates and re-measure the peak power
    # (a standalone single-band sketch of this test follows after this function).
    maxpower = []
    counter = 0.
    for loop in range(0, loopNo):
        random.shuffle(date)
        model = periodic.LombScargleMultibandFast(fit_period=False)
        model.fit(date, mag, error, filts)
        power = model.score_frequency_grid(fmin, df, numsteps)
        maxpower.append(power.max())
    for X in range(0, len(maxpower)):
        if maxpower[X] > measuredpower[-1]:
            counter = counter + 1.
    Significance = 1. - (counter / len(maxpower))
    # print('sig', Significance, 'counter', counter)
    SigLevel.append(Significance)
    # freqnumber = FrangeLoop.index(frequencyRange)
    # magnumber = MagRange.index(objectmag)
    # listnumber = (magnumber*maglength)+freqnumber
    # measuredperiodlist[listnumber] = LSperiod
    # periodlist[listnumber] = period
    # powerlist[listnumber] = mpower
    # siglist[listnumber] = Significance
    # fullmaglist[listnumber] = objectmag
    # results order: 0=mag, 1=period, 2=measuredperiod, 3=siglevel, 4=power, 5=listnumber
    results[0] = objectmag[3]
    results[1] = period
    results[2] = LSperiod
    results[3] = Significance
    results[4] = mpower
    results[5] = 0  # listnumber
    return results
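# Hedged standalone sketch of the shuffle-based significance test above, using
# astropy's single-band LombScargle instead of gatspy's multiband model (an
# assumption for illustration; the idea is the same: shuffling the observation
# times destroys any real periodicity, so the surviving peak powers form a null
# distribution for the measured peak).
import numpy as np
from astropy.timeseries import LombScargle

rng = np.random.default_rng(0)
t = np.sort(rng.uniform(0, 100, 300))
y = 0.1 * np.sin(2 * np.pi * t / 2.5) + rng.normal(0, 0.05, t.size)
freqs = np.linspace(0.01, 1.0, 5000)

observed_peak = LombScargle(t, y).power(freqs).max()

null_peaks = []
for _ in range(200):
    t_shuffled = rng.permutation(t)          # break the time/flux pairing
    null_peaks.append(LombScargle(t_shuffled, y).power(freqs).max())

exceedances = sum(p > observed_peak for p in null_peaks)
significance = 1.0 - exceedances / len(null_peaks)
print("peak power %.3f, significance %.3f" % (observed_peak, significance))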
def write_xml_fddb(data, path, xml_path, filename,
                   write_sizes=True, shuffle=True, verbose=True):
    """Create .xml annotations and a file list for the data.

    data: dataset from parse_fddb(...)
    path: path to dataset
    xml_path: subpath to folder for .xml
    filename: name for file list (no extension)
    write_sizes: if True, create file with (filename, height, width) records
    shuffle: if True, shuffle lines in data
    verbose: if True, report every 1000 files
    """
    img_files = [e[0] for e in data]
    xml_files = ['.'.join(e.split('.')[:-1]) + '.xml' for e in img_files]
    xml_files = [xml_path + e.replace('/', '_') for e in xml_files]
    all_files = [u + ' ' + v for u, v in zip(img_files, xml_files)]
    if shuffle:
        random.shuffle(all_files)
    with open(path + filename + '.txt', 'w') as f:
        f.write('\n'.join(all_files))
    if write_sizes:
        sizes = [(e[0].split('/')[-1].split('.')[0], str(e[2]), str(e[1])) for e in data]
        sizes = [' '.join(e) for e in sizes]
        with open(path + filename + '_name_size.txt', 'w') as f:
            f.write('\n'.join(sizes))
    cnt = 0
    for item, sp in zip(data, xml_files):
        e_anno = ET.Element('annotation')
        e_size = ET.SubElement(e_anno, 'size')
        e_width = ET.SubElement(e_size, 'width')
        e_height = ET.SubElement(e_size, 'height')
        e_depth = ET.SubElement(e_size, 'depth')
        e_width.text = str(item[1])
        e_height.text = str(item[2])
        e_depth.text = str(item[3])
        for xmin, ymin, xmax, ymax in item[4]:
            e_obj = ET.SubElement(e_anno, 'object')
            e_name = ET.SubElement(e_obj, 'name')
            e_name.text = 'face'
            e_bndbox = ET.SubElement(e_obj, 'bndbox')
            e_xmin = ET.SubElement(e_bndbox, 'xmin')
            e_ymin = ET.SubElement(e_bndbox, 'ymin')
            e_xmax = ET.SubElement(e_bndbox, 'xmax')
            e_ymax = ET.SubElement(e_bndbox, 'ymax')
            e_xmin.text = str(xmin)
            e_ymin.text = str(ymin)
            e_xmax.text = str(xmax)
            e_ymax.text = str(ymax)
        txt = minidom.parseString(ET.tostring(e_anno, 'utf-8')).toprettyxml(indent='\t')
        with open(path + sp, 'w') as f:
            f.write(txt)
        cnt += 1
        if (cnt % 1000 == 0) and verbose:
            print(filename + ': ' + str(cnt) + ' of ' + str(len(data)))
def genRandPollux(num, quote, enc):
    quote = genQuoteLength(35, 55)
    morse = {
        "A": ".-", "B": "-...", "C": "-.-.", "D": "-..", "E": ".", "F": "..-.",
        "G": "--.", "H": "....", "I": "..", "J": ".---", "K": "-.-", "L": ".-..",
        "M": "--", "N": "-.", "O": "---", "P": ".--.", "Q": "--.-", "R": ".-.",
        "S": "...", "T": "-", "U": "..-", "V": "...-", "W": ".--", "X": "-..-",
        "Y": "-.--", "Z": "--..",
        "0": "-----", "1": ".----", "2": "..---", "3": "...--", "4": "....-",
        "5": ".....", "6": "-....", "7": "--...", "8": "---..", "9": "----.",
        " ": "xx",
    }
    l = list(range(0, 10))
    random.shuffle(l)
    enc1 = ""
    for i in quote:
        if i in morse:
            enc1 += morse[i] + "x"
    enc1 = enc1[:-1]
    enc2 = ""
    for i in enc1:
        if i == ".":
            enc2 += str(l[random.randint(0, 3)])
        if i == "-":
            enc2 += str(l[random.randint(0, 2) + 4])
        if i == "x":
            enc2 += str(l[random.randint(0, 2) + 7])
    x = {
        "cipherString": quote,
        "cipherType": "pollux",
        "replacement": {},
        "dotchars": str(l[0]) + str(l[1]) + str(l[2]) + str(l[3]),
        "dashchars": str(l[4]) + str(l[5]) + str(l[6]),
        "xchars": str(l[7]) + str(l[8]) + str(l[9]),
        "curlang": "en",
        "editEntry": str(num),
        "offset": None,
        "alphabetSource": "",
        "alphabetDest": "",
        "shift": None,
        "offset2": None,
        "encoded": enc2,
    }
    if enc == "D":
        x["operation"] = "decode"
        x["points"] = 275
        x["question"] = (
            "<p>Decode this quote which has been encoded with a Pollux cipher. "
            + str(l[0]) + "," + str(l[1]) + "," + str(l[4]) + "," + str(l[5]) + ","
            + str(l[7]) + "," + str(l[8]) + "= . . - - x x.</p>"
        )
    if enc == "C":
        x["operation"] = "crypt"
        x["points"] = 350
        x["crib"] = quote[:4]
        x["question"] = (
            "<p>Decode this quote which has been encoded with a Pollux cipher. "
            "The first four letters are " + quote[:4] + ".</p>"
        )
    return x
while SQL.check_table() == WAIT:
    pass
# if SQL.check_table() == RESTORE:
#     load_model = True

Genomes = SQL.GatherGenomes()
gene_images = setup_genomes()
timeStamp = datetime.datetime.now().time()
POPULATION = len(Genomes)
print(POPULATION)
UsedGenomes = np.ones(Genomes.shape[0])
FakeGenomes = list(range(0, Genomes.shape[0]))
random.shuffle(FakeGenomes)
# print("Load Model is " + str(load_model))
print()
tf.reset_default_graph()
batch_size = POPULATION // 4  # How many experiences to use for each training step.
# mainQN = Qnetwork(h_size, img_size, POPULATION, batch_size, "Main")
# targetQN = Qnetwork(h_size, img_size, POPULATION, batch_size, "Target")
# mainQN = FrozenValueNetwork()
# mainQN_model = mainQN.make_model()
model = load_model('dqn_frozen_modelv4.h5')
# init = tf.global_variables_initializer()
# saver = tf.train.Saver()
# trainables = tf.trainable_variables()
def examine_ssd512(gt_file, model_checkpoint):
    gt = pickle.load(open(gt_file, 'rb'))
    gt = gt_classification_convert(gt)
    keys = sorted(gt.keys())
    random.shuffle(keys)
    ### load model ###
    model = SSD512v2(input_shape, num_classes=NUM_CLASSES)
    model.load_weights(model_checkpoint, by_name=True)
    inputs = []
    images = []
    add_num = 0
    gt_result = []
    # for i in range(num_val):
    for i in range(20):
        img_path = keys[i + add_num]
        if os.path.isfile(img_path):
            gt_result.append(gt[keys[i + add_num]])
            img = image.load_img(img_path, target_size=(512, 512))
            img = image.img_to_array(img)
            images.append(img)
            inputs.append(img.copy())
    inputs = preprocess_input(np.array(inputs))
    preds = model.predict(inputs, batch_size=1, verbose=1)
    results = bbox_util.detection_out(preds)
    for i, img in enumerate(images):
        currentAxis = plt.gca()
        currentAxis.cla()
        plt.imshow(img / 255.)
        # Parse the outputs.
        if len(results[i]):
            # det_label = results[i][:, 0]
            det_conf = results[i][:, 1]
            det_xmin = results[i][:, 2]
            det_ymin = results[i][:, 3]
            det_xmax = results[i][:, 4]
            det_ymax = results[i][:, 5]
            # Get detections with confidence higher than 0.5.
            top_indices = [i for i, conf in enumerate(det_conf) if conf >= 0.5]
            top_conf = det_conf[top_indices]
            top_xmin = det_xmin[top_indices]
            top_ymin = det_ymin[top_indices]
            top_xmax = det_xmax[top_indices]
            top_ymax = det_ymax[top_indices]
            for j in range(top_conf.shape[0]):
                xmin = int(round(top_xmin[j] * img.shape[1]))
                ymin = int(round(top_ymin[j] * img.shape[0]))
                xmax = int(round(top_xmax[j] * img.shape[1]))
                ymax = int(round(top_ymax[j] * img.shape[0]))
                score = top_conf[j]
                display_txt = '{:0.2f}'.format(score)
                coords = (xmin, ymin), xmax - xmin + 1, ymax - ymin + 1
                color = 'g'
                currentAxis.add_patch(plt.Rectangle(*coords, fill=False, edgecolor=color, linewidth=2))
                currentAxis.text(xmin, ymin, display_txt, bbox={'facecolor': color, 'alpha': 0.5})
        # plot ground truth
        gt_img = gt_result[i]
        for g_num in range(len(gt_img)):
            gt_top_xmin = gt_img[g_num][0]
            gt_top_ymin = gt_img[g_num][1]
            gt_top_xmax = gt_img[g_num][2]
            gt_top_ymax = gt_img[g_num][3]
            xmin = int(round(gt_top_xmin * img.shape[1]))
            ymin = int(round(gt_top_ymin * img.shape[0]))
            xmax = int(round(gt_top_xmax * img.shape[1]))
            ymax = int(round(gt_top_ymax * img.shape[0]))
            coords = (xmin, ymin), xmax - xmin + 1, ymax - ymin + 1
            color = 'r'  # gt label
            currentAxis.add_patch(plt.Rectangle(*coords, fill=False, edgecolor=color, linewidth=1))
        plt.draw()
        plt.waitforbuttonpress(3)
import random

users = [
    "Власюк Владислав Володимирович",
    "Степаненко Юрій",
    "Редько Аліна Олександрівна",
    "Станкевич Дарій-Сергій",
    "Гурін Ілля Валентинович",
    "Яромир Юрченко",
    "Маценко Володимир Володимирович",
    "Осачій Роман Олексанрович",
    "Оліщук Олександр Олександрович",
    "Нечко Дмитро Васильович",
    "Яковенко Даніель",
    "Тетьора Ілля Сергійович ",
    "Стрижов Кіріл",
    "Іван",
]

random.shuffle(users)
for user in users:
    print(user)  # print the names in shuffled order
def shuffle(self):
    # shuffle the deck using random.shuffle()
    random.shuffle(self.deck_list)
'''
The shuffle() function "mixes", i.e. randomly rearranges, the order of the
elements of a list before one of them is selected. In the case of card games,
this mixing is the equivalent of shuffling the deck a number of times before
dealing or drawing cards.
'''
import random

mylist = ["1 de picas", "2 de picas", "3 de picas",
          "4 de picas", "5 de picas", "6 de picas"]
random.shuffle(mylist)
print(mylist)
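# Hedged companion sketch (not part of the snippet above): random.shuffle()
# mutates the list in place and returns None; random.sample() returns a new
# shuffled copy instead, and random.seed() makes the ordering reproducible.
import random

cards = ["1 de picas", "2 de picas", "3 de picas", "4 de picas"]
random.seed(42)                                 # reproducible ordering
mezclada = random.sample(cards, k=len(cards))   # shuffled copy; 'cards' untouched
print(cards)
print(mezclada)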
        height=IMAGE_HEIGHT,
        label=label,
        data=np.rollaxis(img, 2).tostring())

train_lmdb = '../mo/kaggle-catdog/input/train_lmdb'
validation_lmdb = '../mo/kaggle-catdog/input/validation_lmdb'
os.system('rm -rf ' + train_lmdb)
os.system('rm -rf ' + validation_lmdb)

train_data = [img for img in glob.glob("../input/train/*jpg")]
test_data = [img for img in glob.glob("../input/test1/*jpg")]

# Shuffle train_data
random.shuffle(train_data)

print('Creating train_lmdb')
in_db = lmdb.open(train_lmdb, map_size=int(1e12))
with in_db.begin(write=True) as in_txn:
    for in_idx, img_path in enumerate(train_data):
        if in_idx % 6 == 0:
            continue
        img = cv2.imread(img_path, cv2.IMREAD_COLOR)
        img = transform_img(img, img_width=IMAGE_WIDTH, img_height=IMAGE_HEIGHT)
        if 'cat' in img_path:
            label = 0
        else:
def shuffleCards(self):
    '''Randomly shuffle the cards in the deck.'''
    random.shuffle(self.cards)
# initialize a list with range()
b = list(range(1, 11))
# print(b)
c = a + b  # adding two lists concatenates them
# print(c)
a.append(6)   # similar to push_back
# print(a)
a.insert(3, 7)
a.pop()
a.pop(3)
a.remove(4)         # find the value 4 in the list and remove it
print(a.index(5))   # find the value 5 and return its index
a = list(range(1, 11))
print(a)
print(sum(a))       # sum of all values in the list
print(max(a))       # largest value in the list
print(min(a))       # smallest value in the list
print(min(7, 5, 3)) # minimum of the given values
print(a)
r.shuffle(a)        # r is assumed to be the random module imported earlier (e.g. import random as r)
print(a)
a.sort()                # sort in ascending order
print(a)
a.sort(reverse=True)    # sort in descending order
print(a)
a.clear()               # remove all values from the list
import random, os, sys

if len(sys.argv) != 2:
    print("""
USAGE: python shuffile.py directory_path

where 'directory_path' is the full path of the directory containing files to be shuffled.
""")
else:
    dirname = sys.argv[1]
    files = os.listdir(dirname)
    random.shuffle(files)
    counter = 0
    for filename in files:
        if not filename.startswith("."):
            print(counter, filename)
            os.rename(os.path.join(dirname, filename),
                      os.path.join(dirname, str(counter).zfill(2) + " " + filename))
            counter += 1
# Q8 - Write a program that asks the user to enter some text and then counts how many articles
# are in the text. Articles are the words 'a', 'an', and 'the'.
tk = input("enter string: ")
words = tk.lower().split()   # count whole words, not substrings
t = words.count('the')
a = words.count('a')
n = words.count('an')
print("number of *the* is: {}, number of *a* is: {}, number of *an* is: {}".format(t, a, n))

# Q9 - (a) Write a program that asks the user to enter a sentence and then randomly rearranges
# the words of the sentence. Don't worry about getting punctuation or capitalization correct.
from random import shuffle

u = input("Enter a sentence: ")
split = u.split()    # split the string into a list of words
shuffle(split)       # shuffles the list in place
j = ' '.join(split)  # join the list back into a string
print("Shuffled sentence: ", j)

# (b) Do the above problem, but now make sure that the sentence starts with a capital, that the
# original first word is not capitalized if it comes in the middle of the sentence, and that the
# period is in the right place.
# (a fuller hedged sketch for part (b) follows after the Q10 header below)
sentences = u.split(". ")
sentences2 = [sentence[0].capitalize() + sentence[1:] for sentence in sentences]
string2 = '. '.join(sentences2)
print("Final sentence with capitalization: ", string2)

# Q10 - Write a censoring program. Allow the user to enter some text and your program should
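# Hedged sketch for Q9(b) (my own illustration, not the original author's solution):
# shuffle a sentence while keeping the capital at the start and the period at the end.
from random import shuffle

sentence = input("Enter a sentence ending with a period: ").strip()
body = sentence[:-1] if sentence.endswith('.') else sentence  # drop the period
words = body.split()
words[0] = words[0].lower()       # the original first word should not stay capitalized
shuffle(words)
words[0] = words[0].capitalize()  # the new first word gets the capital
print(' '.join(words) + '.')      # the period goes back at the end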
# -*- coding: utf-8 -*-
"""
card-game1.py
This and That
(c) Madhu Vasudevan 2019
"""
from pprint import pprint
from random import shuffle

values = ['Ace'] + list(range(2, 11)) + 'Jack Queen King'.split()
suits = 'diamonds clubs hearts spades'.split()
deck = ['{} of {}'.format(v, s) for v in values for s in suits]
# pprint(deck[:12])
print(','.join(deck))
shuffle(deck)
pprint(deck[:12])
while deck:
    input(deck.pop())
def reset(self):
    self.idx = 0
    if self.shuffle:
        self.shuffled_indices = np.arange(self.n_data)
        random.shuffle(self.shuffled_indices)
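# Hedged sketch of how reset() and shuffled_indices are commonly consumed by a
# batch iterator. The surrounding class, next_batch, self.data, and
# self.batch_size are illustrative assumptions, not from the original code.
import random
import numpy as np

class BatchIterator:
    def __init__(self, data, batch_size, shuffle=True):
        self.data = np.asarray(data)
        self.n_data = len(self.data)
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.reset()

    def reset(self):
        self.idx = 0
        self.shuffled_indices = np.arange(self.n_data)
        if self.shuffle:
            random.shuffle(self.shuffled_indices)

    def next_batch(self):
        if self.idx >= self.n_data:
            self.reset()  # start a new, reshuffled epoch
        sel = self.shuffled_indices[self.idx:self.idx + self.batch_size]
        self.idx += self.batch_size
        return self.data[sel]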
def historical_command(signal: str = "", start=""):
    """Displays historical price comparison between similar companies [Yahoo Finance]"""

    # Debug user input
    if imps.DEBUG:
        logger.debug("scr-historical %s %s", signal, start)

    # Check for argument
    if signal not in so.d_signals_desc:
        raise Exception("Invalid preset selected!")

    register_matplotlib_converters()
    screen = ticker.Ticker()

    if signal in finviz_model.d_signals:
        screen.set_filter(signal=finviz_model.d_signals[signal])
    else:
        preset_filter = configparser.RawConfigParser()
        preset_filter.optionxform = str  # type: ignore
        preset_filter.read(so.presets_path + signal + ".ini")
        d_general = preset_filter["General"]
        d_filters = {
            **preset_filter["Descriptive"],
            **preset_filter["Fundamental"],
            **preset_filter["Technical"],
        }
        d_filters = {k: v for k, v in d_filters.items() if v}
        if d_general["Signal"]:
            screen.set_filter(filters_dict=d_filters, signal=d_general["Signal"])
        else:
            screen.set_filter(filters_dict=d_filters)

    if start == "":
        start = datetime.now() - timedelta(days=365)
    else:
        start = datetime.strptime(start, imps.DATE_FORMAT)

    # Output Data
    l_min = []
    l_leg = []
    l_stocks = screen.screener_view(verbose=0)

    description = ""  # default when 10 or fewer stocks are returned
    if len(l_stocks) > 10:
        description = (
            "\nThe limit of stocks to compare with is 10, so 10 random similar stocks will be displayed."
            "\nThe selected list is: "
        )
        random.shuffle(l_stocks)
        l_stocks = sorted(l_stocks[:10])
        description = description + (", ".join(l_stocks))
        logger.debug(description)

    plt.figure(figsize=plot_autoscale(), dpi=PLOT_DPI)

    while l_stocks:
        l_parsed_stocks = []
        for symbol in l_stocks:
            try:
                df_similar_stock = yf.download(
                    symbol,
                    start=datetime.strftime(start, "%Y-%m-%d"),
                    progress=False,
                    threads=False,
                )
                if not df_similar_stock.empty:
                    plt.plot(
                        df_similar_stock.index,
                        df_similar_stock["Adj Close"].values,
                    )
                    l_min.append(df_similar_stock.index[0])
                    l_leg.append(symbol)
                l_parsed_stocks.append(symbol)
            except Exception as e:
                error = (
                    f"{e}\nDisregard previous error, which is due to API rate limits from Yahoo Finance. "
                    f"Because we like '{symbol}', and we won't leave without getting data from it."
                )
                return {
                    "title": "ERROR Stocks: [Yahoo Finance] Historical Screener",
                    "description": error,
                }
        for parsed_stock in l_parsed_stocks:
            l_stocks.remove(parsed_stock)

    if signal:
        plt.title(
            f"Screener Historical Price using {finviz_model.d_signals[signal]} signal"
        )
    else:
        plt.title(f"Screener Historical Price using {signal} preset")

    plt.xlabel("Time")
    plt.ylabel("Share Price ($)")
    plt.legend(l_leg)
    plt.grid(b=True, which="major", color="#666666", linestyle="-")
    plt.minorticks_on()
    plt.grid(b=True, which="minor", color="#999999", linestyle="-", alpha=0.2)
    # ensure that the historical data starts from the same datapoint
    plt.xlim([max(l_min), df_similar_stock.index[-1]])

    imagefile = "scr_historical.png"
    dataBytesIO = io.BytesIO()
    plt.savefig(dataBytesIO)
    dataBytesIO.seek(0)
    plt.close("all")
    imagefile = imps.image_border(imagefile, base64=dataBytesIO)

    return {
        "title": "Stocks: [Yahoo Finance] Historical Screener",
        "description": description,
        "imagefile": imagefile,
    }
import random

for i in range(1, 13):
    for j in range(i + 1, 13):
        input_name = "class" + str(i) + "_" + str(j) + ".txt"
        ff = open(input_name, "r")
        data = ff.read().split("\n")
        random.shuffle(data)
        train_data = data[:9000]
        test_data = data[9000:]
        train_name = "class" + str(i) + "_" + str(j) + "_train.txt"
        test_name = "class" + str(i) + "_" + str(j) + "_test.txt"
        train = open(train_name, "w")
        test = open(test_name, "w")
        for it in train_data:
            print(it, file=train)
        for it in test_data:
            print(it, file=test)
        ff.close()
        train.close()
        test.close()
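# Hedged variant (illustration only, with stand-in data): seeding before the
# shuffle makes the train/test split reproducible, and a proportional cut
# avoids assuming every class file has more than 9000 lines.
import random

random.seed(42)
lines = ["row%d" % k for k in range(10000)]  # stand-in for one class file
random.shuffle(lines)
cut = int(0.9 * len(lines))
train_data, test_data = lines[:cut], lines[cut:]
print(len(train_data), len(test_data))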