def generate_transaction(trans_type, country_iso, trans_value_mean, trans_value_std, start, end):
    Trans_Date = faker.date_time_between(start_date=start, end_date=end)
    # Membership test: `trans_type == 'domestic_sale' or 'domestic_purchase'`
    # would always be truthy, so the foreign-VAT branch would never run.
    if trans_type in ('domestic_sale', 'domestic_purchase'):
        country_iso_vat = country_iso
    else:
        # Pick a different country for intra-community transactions
        # (note: this mutates the shared country_iso_codes list).
        country_iso_codes.remove(country_iso)
        country_iso_vat = random.choices(country_iso_codes)[0]
    VAT_ID = generate_vat_id(country_iso_vat)
    Company = faker.company()
    Tax_Base_Amount = round(abs(gauss(trans_value_mean, trans_value_std)), 2)
    if trans_type == 'domestic_sale':
        tax_codes = domestic_output_tax_codes
        Tax_Code = random.choices(list(tax_codes.keys()), weights=(60, 20, 10, 10))[0]
    if trans_type == 'domestic_purchase':
        tax_codes = domestic_input_tax_codes
        Tax_Code = random.choices(list(tax_codes.keys()), weights=(60, 20, 10, 10))[0]
    if trans_type == 'ic_sale':
        Tax_Code = 'F1'
    if trans_type == 'ic_purchase':
        Tax_Code = 'F9'
    if trans_type == 'domestic_sale' or trans_type == 'domestic_purchase':
        Tax_Rate = tax_codes[Tax_Code][0]
    else:
        Tax_Rate = 0.0
    Tax = Tax_Base_Amount * Tax_Rate
    return [Trans_Date, VAT_ID, Company, Tax_Base_Amount, Tax_Code, Tax_Rate, Tax]
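# A standalone illustration of the comparison pitfall fixed above: a
# non-empty string literal after `or` is always truthy, so the membership
# test is the correct form. No external dependencies.
trans_type = 'ic_sale'
print(trans_type == 'domestic_sale' or 'domestic_purchase')  # 'domestic_purchase' (truthy)
print(trans_type in ('domestic_sale', 'domestic_purchase'))  # False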
def next_row(self, is_handstroke: bool):
    if self._index == -1 and self.auto_start:
        self.tower.make_call(Calls.Go)
    # random.choices returns a list; take its single element. The bare list is
    # always truthy, which would call 'Go' on every backstroke.
    if not self.auto_start and not self._has_go and not is_handstroke \
            and random.choices([True, False], [1, 3])[0]:
        self.tower.make_call(Calls.Go)
    return super(GoAndStopCallingGenerator, self).next_row(is_handstroke)
def get_data_locations(self):
    self.choic_loc = []
    self.mobs = []
    log.debug(self.locations)
    for location, data in self.locations.items():
        self.player.location = location
        # Descend into the current location's subtree.
        self.locations = self.locations[location]
        self.format_time = datetime.datetime(year=1, month=1, day=1) + \
            datetime.timedelta(seconds=float(self.player.user_time))
        print(DICT_MESSAGES['info_message'][0].format(
            self.player.location, self.player.user_xp,
            self.format_time.day, self.format_time.hour,
            self.format_time.minute, self.format_time.second))
        for n in data:
            if isinstance(n, dict):
                for k, v in n.items():
                    if 'Hatch' in k:
                        # choice() returns a single message;
                        # random.choices() here would print a one-element list.
                        print(choice(DICT_MESSAGES['hatch_in_loc']))
                        self.hatch = True
                    else:
                        print(DICT_MESSAGES['entrance'].format(k))
                        self.choic_loc.append(k)
            else:
                print(choice(DICT_MESSAGES['mobs']).format(n))
                self.mobs.append(n)
def selection_pair(population: Population, distances_avg, distances, cities,
                   dates, fixture: Fixture) -> Population:
    # Negate so that lower cost means higher weight; this assumes fitness()
    # returns non-positive values here, since random.choices requires
    # non-negative weights.
    fitness_result = [-fitness(gene, distances_avg, distances, cities, dates, fixture)
                      for gene in population]
    pair = random.choices(population=population, weights=fitness_result, k=2)
    return pair
async def log_requests(request: Request, call_next):
    idem = ''.join(random.choices(string.ascii_uppercase + string.digits, k=6))
    logger.info(f"rid={idem} start request path={request.url.path}")
    start_time = time.time()
    response = await call_next(request)
    process_time = (time.time() - start_time) * 1000
    formatted_process_time = '{0:.2f}'.format(process_time)
    logger.info(f"rid={idem} completed_in={formatted_process_time}ms status_code={response.status_code}")
    return response
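# A minimal sketch of how the middleware above is typically wired up,
# assuming a FastAPI application object named `app` (not shown in the
# original snippet).
from fastapi import FastAPI

app = FastAPI()
app.middleware("http")(log_requests)  # same effect as decorating with @app.middleware("http")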
def get_initial_centers_from_data_set(data, k):
    if CHOOSE_INITIAL_CENTERS_RANDOMLY:
        random.seed(8)  # fixed seed for reproducible initialization
        # Note: random.choices samples with replacement, so duplicate centers
        # are possible.
        return np.array(random.choices(data, k=k), dtype=np.float64)
    min_point = data.min(0)
    max_point = data.max(0)
    centers = []
    for i in range(k):
        # Space the deterministic centers evenly along the min-max diagonal;
        # the step must depend on i, otherwise all k centers coincide.
        centers.append(min_point + (max_point - min_point) * (i + 1) / (k + 1))
    return centers
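# A hedged alternative sketch: random.sample draws without replacement, so no
# two initial centers are the same data point (assumes len(data) >= k; the
# function name is illustrative, not part of the original code).
import random
import numpy as np

def sample_distinct_centers(data, k, seed=8):
    rng = random.Random(seed)  # local RNG so the global seed is untouched
    return np.array(rng.sample(list(data), k=k), dtype=np.float64)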
def gomi(self):
    # Build the next generation by fitness-weighted resampling
    # (roulette-wheel selection); uses self, so defined as a method.
    pool_next = []
    next_list = [i for i in range(self.N)]
    while len(pool_next) < self.N:
        # random.choices returns a one-element list; element 0 is the index.
        a = random.choices(next_list, weights=self.Glist)
        offspring1 = copy.deepcopy(self.pool[a[0]])
        pool_next.append(offspring1)
    self.pool = pool_next[:]
def rouletteSelection(population, fitnesses):
    import random
    # Roulette-wheel selection: pick one index with probability proportional
    # to its fitness, then return a deep copy of that individual.
    next_list = [i for i in range(POP_SIZE)]
    a = random.choices(next_list, weights=fitnesses)
    b = a[0]
    return deepcopy(population[b])
def generate_weather_type(temperature):
    # One weighted draw; random.choices returns a list, so take element 0
    # (a bare list would never compare equal to a WeatherType below).
    choice = random.choices(
        [WeatherType.SUNNY, WeatherType.SUNNY_WITH_LITTLE_CLOUDS,
         WeatherType.CLOUDY, WeatherType.LITTLE_RAIN, WeatherType.RAIN,
         WeatherType.HUGE_RAIN],
        weights=[21, 60, 137, 63, 60, 23])[0]
    if temperature < 2:
        if choice == WeatherType.LITTLE_RAIN:
            return WeatherType.LITTLE_SNOW
        if choice == WeatherType.RAIN:
            return WeatherType.SNOW
        if choice == WeatherType.HUGE_RAIN:
            return WeatherType.HUGE_SNOW
    return choice
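# A stand-in WeatherType enum (assumed; the real definition lives elsewhere
# in the project) just to exercise the generator at a sub-zero temperature.
from enum import Enum

class WeatherType(Enum):
    SUNNY = 1
    SUNNY_WITH_LITTLE_CLOUDS = 2
    CLOUDY = 3
    LITTLE_RAIN = 4
    RAIN = 5
    HUGE_RAIN = 6
    LITTLE_SNOW = 7
    SNOW = 8
    HUGE_SNOW = 9

print(generate_weather_type(-3))  # rain outcomes map to snow variants below 2 degrees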
def processData(self):
    self.train_data = []
    self.test_data = []
    for i in self.trainSets["users"]:
        # If user i is a cold-start user
        if i in self.coldStartUser:
            # Hold out a random 20% of the cold-start user's ratings as test
            # data. random.sample draws without replacement, so the same item
            # cannot be picked twice (random.choices could).
            seen = list(self.trainSets["items_seen_by_user"][i])
            testData = random.sample(seen, k=int(len(seen) * 0.2))
            for u in self.trainSets["feedback"][i]:
                if u in testData:
                    self.test_data.append([i, u, self.trainSets["feedback"][i][u]])
                else:
                    self.train_data.append([i, u, self.trainSets["feedback"][i][u]])
        else:
            for u in self.trainSets["feedback"][i]:
                self.train_data.append([i, u, self.trainSets["feedback"][i][u]])
def fuzzer(self, remoteip, port, field, start, stop, step):
    if field is None:
        field = "user"
    for size in range(int(start), int(stop), int(step)):
        streaming = [True]
        while len(streaming) > 0:
            if self.config.fuzzer_type.lower() == "custom":
                # Random alphanumeric payload of the current size
                self.config.fuzzer_buffer = ''.join(
                    random.choices(string.ascii_uppercase + string.digits, k=size))
            else:
                self.config.fuzzer_buffer = "A" * size
            streaming = self.inject(remoteip, port, field,
                                    self.config.fuzzer_buffer, None)
            if len(streaming) > 0:
                _stream = streaming[-1].decode('latin-1').strip()
                _response = _stream.split(' ')
                # print("[!] ERROR COMMUNICATING TO THE SERVICE " + "|".join(streaming))
                # responsecode = int(_response[0].strip())
                # # 6xx Protected reply
                # if responsecode > 599:
                #     print(base64_decode(_response.join(' ')))
                # # 5xx Permanent Negative Completion reply
                # if responsecode > 499:
                #     return size
            time.sleep(1)
            size += step  # grow the buffer while the service keeps responding
    return size
def get_snap(self, timeout: float = 3, proxies: Any = None) -> Optional[Image]:
    """
    Gets a "snap" of the current camera video data and returns a Pillow
    Image or None.

    :param timeout: Request timeout to camera in seconds
    :param proxies: http/https proxies to pass to the request object.
    :return: Image or None
    """
    data = {
        'cmd': 'Snap',
        'channel': 0,
        'rs': ''.join(random.choices(string.ascii_uppercase + string.digits, k=10)),
        'user': self.username,
        'password': self.password,
    }
    parms = parse.urlencode(data).encode("utf-8")
    try:
        response = requests.get(self.url, proxies=proxies, params=parms,
                                timeout=timeout)
        if response.status_code == 200:
            return open_image(BytesIO(response.content))
        print("Could not retrieve data from camera successfully. Status:",
              response.status_code)
        return None
    except Exception as e:
        print("Could not get Image data\n", e)
        raise
def ruleta(lista, n):
    import random
    # Roulette selection over the feasible (fact == True) and infeasible rows
    # of the `lista` DataFrame.
    F, NF = [], []
    f = len(lista.loc[lista.fact == True, ])
    nf = len(lista.loc[lista.fact == False, ])
    for i in range(0, f):
        F.append(i)
    for j in range(f, (f + nf)):
        NF.append(j)
    if len(NF) == 0:
        for k in range(0, 3):
            NF.append(k)
    listas = [random.sample(F, k=2), random.sample(NF, k=2)]
    results = random.choices(listas, weights=[len(F), len(NF)], k=2)
    if results[0] == results[1]:
        salida = results[0]
    else:
        dato1 = results[0]
        dato2 = results[1]
        salida = [dato1[0], dato2[0]]
    return salida
def create_ref_code():
    return ''.join(random.choices(string.ascii_lowercase + string.digits, k=20))
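# For codes that must be unguessable (payment references, tokens), the random
# module is not cryptographically secure. A sketch using the standard-library
# secrets module instead; the function name is illustrative.
import secrets
import string

def create_ref_code_secure():
    alphabet = string.ascii_lowercase + string.digits
    return ''.join(secrets.choice(alphabet) for _ in range(20))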
def random_string(strings=string.ascii_letters, length=15):
    # Return a random string of the given length.
    values = ''.join(random.choices(strings, k=length))
    return values
def train(hyp, opt, device, tb_writer=None):
    print(f'Hyperparameters {hyp}')
    log_dir = tb_writer.log_dir if tb_writer else 'runs/evolve'  # run directory
    wdir = str(Path(log_dir) / 'weights') + os.sep  # weights directory
    os.makedirs(wdir, exist_ok=True)
    last = wdir + 'last.pt'
    best = wdir + 'best.pt'
    results_file = log_dir + os.sep + 'results.txt'
    epochs, batch_size, total_batch_size, weights, rank = \
        opt.epochs, opt.batch_size, opt.total_batch_size, opt.weights, opt.local_rank

    # Save run settings
    with open(Path(log_dir) / 'hyp.yaml', 'w') as f:
        yaml.dump(hyp, f, sort_keys=False)
    with open(Path(log_dir) / 'opt.yaml', 'w') as f:
        yaml.dump(vars(opt), f, sort_keys=False)

    # Configure
    cuda = device.type != 'cpu'
    init_seeds(2 + rank)
    with open(opt.data) as f:
        data_dict = yaml.load(f, Loader=yaml.FullLoader)  # model dict
    # train_path = data_dict['train']
    train_path = f'{opt.trainset_path}/images/train'
    # test_path = data_dict['val']
    test_path = f'{opt.trainset_path}/images/test'
    nc, names = (1, ['item']) if opt.single_cls else (int(data_dict['nc']), data_dict['names'])  # number classes, names
    assert len(names) == nc, '%g names found for nc=%g dataset in %s' % (len(names), nc, opt.data)  # check

    # Remove previous results
    if rank in [-1, 0]:
        for f in glob.glob('*_batch*.jpg') + glob.glob(results_file):
            os.remove(f)

    # Create model
    if opt.not_use_SE:
        model = Model(opt.cfg, nc=nc).to(device)
    else:
        model = ModelSE(opt.cfg, nc=nc).to(device)
    # print(model); exit()  # debug leftover: would halt the run before training starts

    # Image sizes
    gs = int(max(model.stride))  # grid size (max stride)
    imgsz, imgsz_test = [check_img_size(x, gs) for x in opt.img_size]  # verify imgsz are gs-multiples

    # Optimizer
    nbs = 64  # nominal batch size
    # The default DDP implementation is slow for accumulation according to:
    # https://pytorch.org/docs/stable/notes/ddp.html
    # The all-reduce operation is carried out during loss.backward(), so there
    # would be redundant all-reduce communications in an accumulation
    # procedure: the result is still right but training gets slower.
    # See https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/LanguageModeling/BERT/run_pretraining.py
    accumulate = max(round(nbs / total_batch_size), 1)  # accumulate loss before optimizing
    hyp['weight_decay'] *= total_batch_size * accumulate / nbs  # scale weight_decay

    pg0, pg1, pg2 = [], [], []  # optimizer parameter groups
    for k, v in model.named_parameters():
        if v.requires_grad:
            if '.bias' in k:
                pg2.append(v)  # biases
            elif '.weight' in k and '.bn' not in k:
                pg1.append(v)  # apply weight decay
            else:
                pg0.append(v)  # all else

    if opt.adam:
        optimizer = optim.Adam(pg0, lr=hyp['lr0'], betas=(hyp['momentum'], 0.999))  # adjust beta1 to momentum
    else:
        optimizer = optim.SGD(pg0, lr=hyp['lr0'], momentum=hyp['momentum'], nesterov=True)
    optimizer.add_param_group({'params': pg1, 'weight_decay': hyp['weight_decay']})  # add pg1 with weight_decay
    optimizer.add_param_group({'params': pg2})  # add pg2 (biases)
    print('Optimizer groups: %g .bias, %g conv.weight, %g other' % (len(pg2), len(pg1), len(pg0)))
    del pg0, pg1, pg2

    # Scheduler https://arxiv.org/pdf/1812.01187.pdf
    # https://pytorch.org/docs/stable/_modules/torch/optim/lr_scheduler.html#OneCycleLR
    lf = lambda x: (((1 + math.cos(x * math.pi / epochs)) / 2) ** 1.0) * 0.8 + 0.2  # cosine
    scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
    # plot_lr_scheduler(optimizer, scheduler, epochs)

    # Load Model
    with torch_distributed_zero_first(rank):
        attempt_download(weights)
    start_epoch, best_fitness = 0, 0.0
    if weights.endswith('.pt'):  # pytorch format
        ckpt = torch.load(weights, map_location=device)  # load checkpoint

        # load model
        try:
            exclude = ['anchor']  # exclude keys
            ckpt['model'] = {k: v for k, v in ckpt['model'].float().state_dict().items()
                             if k in model.state_dict() and not any(x in k for x in exclude)
                             and model.state_dict()[k].shape == v.shape}
            model.load_state_dict(ckpt['model'], strict=False)
            print('Transferred %g/%g items from %s' % (len(ckpt['model']), len(model.state_dict()), weights))
        except KeyError as e:
            s = "%s is not compatible with %s. This may be due to model differences or %s may be out of date. " \
                "Please delete or update %s and try again, or use --weights '' to train from scratch." \
                % (weights, opt.cfg, weights, weights)
            raise KeyError(s) from e

        # load optimizer
        if ckpt['optimizer'] is not None:
            optimizer.load_state_dict(ckpt['optimizer'])
            best_fitness = ckpt['best_fitness']

        # load results
        if ckpt.get('training_results') is not None:
            with open(results_file, 'w') as file:
                file.write(ckpt['training_results'])  # write results.txt

        # epochs
        start_epoch = ckpt['epoch'] + 1
        if epochs < start_epoch:
            print('%s has been trained for %g epochs. Fine-tuning for %g additional epochs.'
                  % (weights, ckpt['epoch'], epochs))
            epochs += ckpt['epoch']  # finetune additional epochs

        del ckpt

    # DP mode
    if cuda and rank == -1 and torch.cuda.device_count() > 1:
        # model = torch.nn.DataParallel(model)
        model = torch.nn.DataParallel(model, device_ids=[0, 1])

    # SyncBatchNorm
    if opt.sync_bn and cuda and rank != -1:
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device)
        print('Using SyncBatchNorm()')

    # Exponential moving average
    ema = ModelEMA(model) if rank in [-1, 0] else None

    # DDP mode
    if cuda and rank != -1:
        model = DDP(model, device_ids=[rank], output_device=rank)

    # Trainloader
    dataloader, dataset = create_dataloader(train_path, imgsz, batch_size, gs, opt, hyp=hyp, augment=True,
                                            cache=opt.cache_images, rect=opt.rect, local_rank=rank,
                                            world_size=opt.world_size)
    mlc = np.concatenate(dataset.labels, 0)[:, 0].max()  # max label class
    nb = len(dataloader)  # number of batches
    assert mlc < nc, 'Label class %g exceeds nc=%g in %s. Possible class labels are 0-%g' % (mlc, nc, opt.data, nc - 1)

    # Testloader
    if rank in [-1, 0]:
        # local_rank is set to -1 because only the first process is expected to do evaluation.
        testloader = create_dataloader(test_path, imgsz_test, total_batch_size, gs, opt, hyp=hyp, augment=False,
                                       cache=opt.cache_images, rect=True, local_rank=-1,
                                       world_size=opt.world_size)[0]

    # Model parameters
    hyp['cls'] *= nc / 80.  # scale coco-tuned hyp['cls'] to current dataset
    model.nc = nc  # attach number of classes to model
    model.hyp = hyp  # attach hyperparameters to model
    model.gr = 1.0  # giou loss ratio (obj_loss = 1.0 or giou)
    model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device)  # attach class weights
    model.names = names

    # Class frequency
    if rank in [-1, 0]:
        labels = np.concatenate(dataset.labels, 0)
        c = torch.tensor(labels[:, 0])  # classes
        # cf = torch.bincount(c.long(), minlength=nc) + 1.
        # model._initialize_biases(cf.to(device))
        plot_labels(labels, save_dir=log_dir)
        if tb_writer:
            # tb_writer.add_hparams(hyp, {})  # causes duplicate https://github.com/ultralytics/yolov5/pull/384
            tb_writer.add_histogram('classes', c, 0)

        # Check anchors
        if not opt.noautoanchor:
            check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz)

    # Start training
    t0 = time.time()
    nw = max(3 * nb, 1e3)  # number of warmup iterations, max(3 epochs, 1k iterations)
    # nw = min(nw, (epochs - start_epoch) / 2 * nb)  # limit warmup to < 1/2 of training
    maps = np.zeros(nc)  # mAP per class
    results = (0, 0, 0, 0, 0, 0, 0)  # 'P', 'R', 'mAP', 'F1', 'val GIoU', 'val Objectness', 'val Classification'
    scheduler.last_epoch = start_epoch - 1  # do not move
    scaler = amp.GradScaler(enabled=cuda)
    if rank in [0, -1]:
        print('Image sizes %g train, %g test' % (imgsz, imgsz_test))
        print('Using %g dataloader workers' % dataloader.num_workers)
        print('Starting training for %g epochs...' % epochs)
    # torch.autograd.set_detect_anomaly(True)
    for epoch in range(start_epoch, epochs):  # epoch ----------------------------------------------------------------
        model.train()

        # Update image weights (optional)
        if dataset.image_weights:
            # Generate indices
            if rank in [-1, 0]:
                w = model.class_weights.cpu().numpy() * (1 - maps) ** 2  # class weights
                image_weights = labels_to_image_weights(dataset.labels, nc=nc, class_weights=w)
                dataset.indices = random.choices(range(dataset.n), weights=image_weights,
                                                 k=dataset.n)  # rand weighted idx
            # Broadcast if DDP
            if rank != -1:
                indices = torch.zeros([dataset.n], dtype=torch.int)
                if rank == 0:
                    indices[:] = torch.tensor(dataset.indices, dtype=torch.int)
                dist.broadcast(indices, 0)
                if rank != 0:
                    dataset.indices = indices.cpu().numpy()

        # Update mosaic border
        # b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs)
        # dataset.mosaic_border = [b - imgsz, -b]  # height, width borders

        mloss = torch.zeros(4, device=device)  # mean losses
        if rank != -1:
            dataloader.sampler.set_epoch(epoch)
        pbar = enumerate(dataloader)
        if rank in [-1, 0]:
            print(('\n' + '%10s' * 8) % ('Epoch', 'gpu_mem', 'GIoU', 'obj', 'cls', 'total', 'targets', 'img_size'))
            pbar = tqdm(pbar, total=nb)  # progress bar
        optimizer.zero_grad()
        for i, (imgs, targets, paths, _) in pbar:  # batch -----------------------------------------------------------
            ni = i + nb * epoch  # number integrated batches (since train start)
            imgs = imgs.to(device, non_blocking=True).float() / 255.0  # uint8 to float32, 0-255 to 0.0-1.0

            # Warmup
            if ni <= nw:
                xi = [0, nw]  # x interp
                # model.gr = np.interp(ni, xi, [0.0, 1.0])  # giou loss ratio (obj_loss = 1.0 or giou)
                accumulate = max(1, np.interp(ni, xi, [1, nbs / total_batch_size]).round())
                for j, x in enumerate(optimizer.param_groups):
                    # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0
                    x['lr'] = np.interp(ni, xi, [0.1 if j == 2 else 0.0, x['initial_lr'] * lf(epoch)])
                    if 'momentum' in x:
                        x['momentum'] = np.interp(ni, xi, [0.9, hyp['momentum']])

            # Multi-scale
            if opt.multi_scale:
                sz = random.randrange(imgsz * 0.5, imgsz * 1.5 + gs) // gs * gs  # size
                sf = sz / max(imgs.shape[2:])  # scale factor
                if sf != 1:
                    ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]]  # new shape (stretched to gs-multiple)
                    imgs = F.interpolate(imgs, size=ns, mode='bilinear', align_corners=False)

            # Autocast
            with amp.autocast(enabled=cuda):
                # Forward
                pred = model(imgs)
                # print([x.shape for x in pred])  # [1, 3, 76, 76, 25] [1, 3, 38, 38, 25] [1, 3, 19, 19, 25]

                # Loss
                loss, loss_items = compute_loss(pred, targets.to(device), model)  # scaled by batch_size
                if rank != -1:
                    loss *= opt.world_size  # gradient averaged between devices in DDP mode
                # if not torch.isfinite(loss):
                #     print('WARNING: non-finite loss, ending training ', loss_items)
                #     return results

            # Backward
            scaler.scale(loss).backward()

            # Optimize
            if ni % accumulate == 0:
                scaler.step(optimizer)  # optimizer.step
                scaler.update()
                optimizer.zero_grad()
                if ema is not None:
                    ema.update(model)

            # Print
            if rank in [-1, 0]:
                mloss = (mloss * i + loss_items) / (i + 1)  # update mean losses
                mem = '%.3gG' % (torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0)  # (GB)
                s = ('%10s' * 2 + '%10.4g' * 6) % ('%g/%g' % (epoch, epochs - 1), mem, *mloss, targets.shape[0],
                                                   imgs.shape[-1])
                pbar.set_description(s)

                # Plot
                if ni < 3:
                    f = str(Path(log_dir) / ('train_batch%g.jpg' % ni))  # filename
                    result = plot_images(images=imgs, targets=targets, paths=paths, fname=f)
                    if tb_writer and result is not None:
                        tb_writer.add_image(f, result, dataformats='HWC', global_step=epoch)
                        # tb_writer.add_graph(model, imgs)  # add model to tensorboard
            # end batch ----------------------------------------------------------------------------------------------

        # Scheduler
        scheduler.step()

        # DDP process 0 or single-GPU
        if rank in [-1, 0]:
            # mAP
            if ema is not None:
                ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'gr', 'names', 'stride'])
            final_epoch = epoch + 1 == epochs
            if not opt.notest or final_epoch:  # Calculate mAP
                results, maps, times = test.test(opt.data,
                                                 batch_size=total_batch_size,
                                                 imgsz=imgsz_test,
                                                 save_json=final_epoch and opt.data.endswith(os.sep + 'coco.yaml'),
                                                 model=ema.ema.module if hasattr(ema.ema, 'module') else ema.ema,
                                                 single_cls=opt.single_cls,
                                                 dataloader=testloader,
                                                 save_dir=log_dir)

            # Write
            with open(results_file, 'a') as f:
                f.write(s + '%10.4g' * 7 % results + '\n')  # P, R, mAP, F1, test_losses=(GIoU, obj, cls)
            if len(opt.name) and opt.bucket:
                os.system('gsutil cp %s gs://%s/results/results%s.txt' % (results_file, opt.bucket, opt.name))

            # Tensorboard
            if tb_writer:
                tags = ['train/giou_loss', 'train/obj_loss', 'train/cls_loss',
                        'metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95',
                        'val/giou_loss', 'val/obj_loss', 'val/cls_loss']
                for x, tag in zip(list(mloss[:-1]) + list(results), tags):
                    tb_writer.add_scalar(tag, x, epoch)

            # Update best mAP
            fi = fitness(np.array(results).reshape(1, -1))  # fitness_i = weighted combination of [P, R, mAP, F1]
            if fi > best_fitness:
                best_fitness = fi

            # Save model
            save = (not opt.nosave) or (final_epoch and not opt.evolve)
            if save:
                with open(results_file, 'r') as f:  # create checkpoint
                    ckpt = {'epoch': epoch,
                            'best_fitness': best_fitness,
                            'training_results': f.read(),
                            'model': ema.ema.module if hasattr(ema, 'module') else ema.ema,
                            'optimizer': None if final_epoch else optimizer.state_dict()}

                # Save last, best and delete
                torch.save(ckpt, last)
                if best_fitness == fi:
                    torch.save(ckpt, best)
                del ckpt
        # end epoch ----------------------------------------------------------------------------------------------

    # end training
    if rank in [-1, 0]:
        # Strip optimizers
        n = ('_' if len(opt.name) and not opt.name.isnumeric() else '') + opt.name
        fresults, flast, fbest = 'results%s.txt' % n, wdir + 'last%s.pt' % n, wdir + 'best%s.pt' % n
        for f1, f2 in zip([wdir + 'last.pt', wdir + 'best.pt', 'results.txt'], [flast, fbest, fresults]):
            if os.path.exists(f1):
                os.rename(f1, f2)  # rename
                ispt = f2.endswith('.pt')  # is *.pt
                strip_optimizer(f2) if ispt else None  # strip optimizer
                os.system('gsutil cp %s gs://%s/weights' % (f2, opt.bucket)) if opt.bucket and ispt else None  # upload

        # Finish
        if not opt.evolve:
            plot_results(save_dir=log_dir)  # save as results.png
        print('%g epochs completed in %.3f hours.\n' % (epoch - start_epoch + 1, (time.time() - t0) / 3600))

    dist.destroy_process_group() if rank not in [-1, 0] else None
    torch.cuda.empty_cache()
    return results
# Hyperparameter evolution
os.system('gsutil cp gs://%s/evolve.txt .' % opt.bucket)  # download evolve.txt if exists
for _ in range(100):  # generations to evolve
    if os.path.exists('evolve.txt'):  # if evolve.txt exists: select best hyps and mutate
        # Select parent(s)
        parent = 'single'  # parent selection method: 'single' or 'weighted'
        x = np.loadtxt('evolve.txt', ndmin=2)
        n = min(5, len(x))  # number of previous results to consider
        x = x[np.argsort(-fitness(x))][:n]  # top n mutations
        w = fitness(x) - fitness(x).min()  # weights
        if parent == 'single' or len(x) == 1:
            # x = x[random.randint(0, n - 1)]  # random selection
            x = x[random.choices(range(n), weights=w)[0]]  # weighted selection
        elif parent == 'weighted':
            x = (x * w.reshape(n, 1)).sum(0) / w.sum()  # weighted combination

        # Mutate
        mp, s = 0.9, 0.2  # mutation probability, sigma
        npr = np.random
        npr.seed(int(time.time()))
        g = np.array([x[0] for x in meta.values()])  # gains 0-1
        ng = len(meta)
        v = np.ones(ng)
        while all(v == 1):  # mutate until a change occurs (prevent duplicates)
            v = (g * (npr.random(ng) < mp) * npr.randn(ng) *
                 npr.random() * s + 1).clip(0.3, 3.0)  # truncated line completed as in upstream YOLOv5
def select_by_tournament(solutions: List[Solution]) -> Solution:
    # Tournament selection: sample TOURNAMENT_SIZE candidates uniformly
    # (with replacement) and keep the best one.
    selected = random.choices(solutions, k=TOURNAMENT_SIZE)
    best_res = best_from_list(selected)
    return best_res
def auto_gesture(self):
    self.gestures = [Gestures("rock"), Gestures("paper"), Gestures("scissors"),
                     Gestures("lizard"), Gestures("spock")]
    # random.choices() with no population raises TypeError; pick one gesture.
    self.random = random.choice(self.gestures)
    print(self.random)
async def PageVisitSpout():
    # random.choice returns a single element; random.choices would return a
    # one-element list and send a list as the message key.
    url = random.choice(UrlsList)
    id = random.choice(UsersList)
    await page_visits.send(key=url, value=PageVisit(url, id))
def select_by_roulette(solutions: List[Solution]) -> Solution:
    # Roulette-wheel selection: sampling probability proportional to score.
    selected = random.choices(solutions, weights=[s.score for s in solutions],
                              k=TOURNAMENT_SIZE)
    best_res = best_from_list(selected)
    return best_res
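# A minimal harness for the two selectors above. Solution, best_from_list and
# TOURNAMENT_SIZE are stand-ins assumed from the surrounding project; only the
# random.choices calls are load-bearing here.
import random
from dataclasses import dataclass
from typing import List

TOURNAMENT_SIZE = 3

@dataclass
class Solution:
    score: float

def best_from_list(solutions: List[Solution]) -> Solution:
    return max(solutions, key=lambda s: s.score)

population = [Solution(score=s) for s in (1.0, 4.0, 2.5, 9.0)]
print(select_by_tournament(population).score)  # uniform sampling, then best of the sample
print(select_by_roulette(population).score)    # score-weighted sampling, then best of the sample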
def generate_random_string(string_length: int):
    return ''.join(random.choices(string.ascii_uppercase + string.digits,
                                  k=string_length))
import random  # `from random import random` would shadow the module with a function

for i in range(1, 11):
    sample = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
    print("Attempt", i, ":", random.choices(sample)[0])
# fixed normal noise used to generate fake images as training progresses
fixed_noise = torch.randn((32, z_feautures, 1, 1), device=device)

writer = SummaryWriter(f"./data/run/{exp_name}")

# images for visualising embeddings made by the discriminator
special_batch = {
    'imgs': [],
    'labels': [],
    'features': [],
}
per_class_sample = 10
for label in triplet_mnist_dataset.classes:
    # sample per_class_sample example images per class (with replacement)
    special_batch['imgs'].extend(
        random.choices(triplet_mnist_dataset.labels2images[label], k=per_class_sample))
    special_batch['labels'].extend([label for _ in range(per_class_sample)])
special_batch['imgs'] = torch.cat(special_batch['imgs']).reshape(-1, 1, image_size, image_size)

"""
TRAINING GOING TO START
"""
visualisation_imgs = []  # stores visualisations of the model at the end of every x steps
generator_losses = []
discriminator_losses = []
steps = 1
for epoch in range(num_epochs):
    for i, (anchor_img, positive_img, negative_img) in enumerate(triplet_mnist_dataloader):
def pickIndex(self) -> int:
    # One weighted draw over the indices of self.w; random.choices returns
    # a list, so take element 0.
    return random.choices(range(len(self.w)), weights=self.w)[0]
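# A hypothetical wrapper showing how pickIndex is typically used (in the
# style of "Random Pick with Weight"): index i is drawn with probability
# w[i] / sum(w).
import random

class WeightedPicker:
    def __init__(self, w):
        self.w = w

    def pickIndex(self) -> int:
        return random.choices(range(len(self.w)), weights=self.w)[0]

picker = WeightedPicker([1, 3])
print([picker.pickIndex() for _ in range(10)])  # index 1 roughly 3x as often as index 0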
def get_random_string():
    res = ''.join(random.choices(string.ascii_lowercase, k=8))
    print(res)
    return res  # join already yields a str
def random_filename():
    # There are better ways to do this
    alphabet = string.ascii_lowercase + string.digits
    return ''.join(random.choices(alphabet, k=8))