def init_env(self, server_config, args):
    self.hostname = args.hostname
    # update server_config
    helm_path = os.path.join(os.getcwd(), "../milvus-helm/milvus")
    server_config_file = helm_path + "/ci/config/sqlite/%s/server_config.yaml" % (args.image_type)
    if not os.path.exists(server_config_file):
        raise Exception("File %s does not exist" % server_config_file)
    if server_config:
        logger.debug("Update server config")
        utils.update_server_config(server_config_file, server_config)
    # update log_config
    log_config_file = helm_path + "/config/log_config.conf"
    if not os.path.exists(log_config_file):
        raise Exception("File %s does not exist" % log_config_file)
    src_log_config_file = helm_path + "/config/log_config.conf.src"
    if not os.path.exists(src_log_config_file):
        # back up the original log config on first run
        os.system("cp %s %s" % (log_config_file, src_log_config_file))
    else:
        # restore the pristine log config from the backup
        os.system("cp %s %s" % (src_log_config_file, log_config_file))
    # guard against server_config being None before looking up keys
    if server_config and "db_config.primary_path" in server_config:
        os.system("sed -i 's#%s#%s#g' %s" % (default_path,
                                             server_config["db_config.primary_path"],
                                             log_config_file))
        # with open(log_config_file, "r+") as fd:
        #     for line in fd.readlines():
        #         fd.write(re.sub(r'^%s' % default_path, server_config["db_config.primary_path"], line))
    # update values
    values_file_path = helm_path + "/values.yaml"
    if not os.path.exists(values_file_path):
        raise Exception("File %s does not exist" % values_file_path)
    utils.update_values(values_file_path, args.hostname)
    try:
        logger.debug("Start install server")
        self.host, self.ip = utils.helm_install_server(helm_path, args.image_tag,
                                                       args.image_type, self.name, namespace)
    except Exception as e:
        logger.error("Helm install server failed: %s" % str(e))
        logger.error(traceback.format_exc())
        self.clean_up()
        return False
    # for debugging
    # self.host = "192.168.1.101"
    if not self.host:
        logger.error("Helm install server failed")
        self.clean_up()
        return False
    return True
def test_create_learning_path(self):
    solution_list = create_list(sample_solution)
    student_tests = create_list(student_test)
    domain_orders = create_list(domain_order)
    domain_order_dict = create_domain_dict(domain_orders)
    student_list_dict = student_setup(update_values(student_tests))
    # Check that every student's generated learning path appears in the solution set
    self.assertTrue(
        all(create_learning_path(domain_order_dict, student) in solution_list
            for student in student_list_dict))
def test_update_values(self):
    student_grades = [
        ['Student Name', 'RF', 'RL', 'RI', 'L'],
        ['Alex Trebek', '2', '3', 'K', '3'],
        ['Sinbad', '3', 'K', '3', '3'],
        ['Mark Martinez', 'K', 'K', 'K', '2'],
    ]
    update_student_grades = update_values(student_grades)
    # Check that the function output is a list
    self.assertTrue(isinstance(update_student_grades, list))
    # Check that all eligible grade entries were converted to integers
    self.assertTrue(all(isinstance(item, int) for item in update_student_grades[3][1:]))
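# The update_values helper exercised by the test above is defined elsewhere in this
# project. A minimal sketch of behavior consistent with the test follows; the 'K' -> 0
# mapping is an assumption made only so every non-name entry becomes an integer, and
# the name _update_values_sketch is hypothetical.
def _update_values_sketch(student_grades):
    updated = [student_grades[0]]  # keep the header row as-is
    for row in student_grades[1:]:
        converted = [row[0]]  # keep the student name as a string
        for grade in row[1:]:
            # Treat kindergarten ('K') as grade 0; parse everything else as an int.
            converted.append(0 if grade == 'K' else int(grade))
        updated.append(converted)
    return updated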
def init_env(self, server_config, server_host, image_type, image_tag):
    self.hostname = server_host
    # update values
    helm_path = os.path.join(os.getcwd(), "../milvus-helm")
    values_file_path = helm_path + "/values.yaml"
    if not os.path.exists(values_file_path):
        raise Exception("File %s does not exist" % values_file_path)
    utils.update_values(values_file_path, server_host, server_config)
    try:
        logger.debug("Start install server")
        self.host, self.ip = utils.helm_install_server(helm_path, image_tag, image_type,
                                                       self.name, namespace)
    except Exception as e:
        logger.error("Helm install server failed: %s" % str(e))
        logger.error(traceback.format_exc())
        self.clean_up()
        return False
    # for debugging
    # self.host = "192.168.1.101"
    if not self.host:
        logger.error("Helm install server failed")
        self.clean_up()
        return False
    return True
def index(request):
    if request.method == 'POST':
        domain_order = request.FILES['domainorder']
        student_test = request.FILES['studenttests']
        student_test_list = create_list(student_test)
        domain_order_list = create_list(domain_order)
        domain_order = create_domain_dict(domain_order_list)
        student_list = student_setup(update_values(student_test_list))
        student_learning_paths = []
        for student in student_list:
            student_order = create_learning_path(domain_order, student)
            student_learning_paths.append(student_order)
        html_table = create_html_table(student_learning_paths)
        context = {'myhtml': html_table}
        return JsonResponse(context)
    return render(request, 'index.html')
def main():
    # Hyper Parameters setting
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_path', default='/mnt/data/linkaiyi/scan/data/f30k_precomp',
                        help='path to datasets')
    parser.add_argument('--path_opt', default='option/FusionNoattn_baseline.yaml', type=str,
                        help='path to a yaml options file')
    parser.add_argument('--data_name', default='flickr30k_splits',
                        help='{coco,f30k}_splits')
    parser.add_argument('--logger_name', default='./log_2',
                        help='Path to save Tensorboard log.')
    parser.add_argument('--vocab_path',
                        default='/home/linkaiyi/fusion_wangtan/Fusion_flickr/Fusion_10.28/vocab',
                        help='Path to saved vocabulary json files.')
    parser.add_argument('--model_name',
                        default='/mnt/data/linkaiyi/mscoco/fusion/Fusion_flic/runs/checkpoint',
                        help='Path to save the model.')
    parser.add_argument('--num_epochs', default=120, type=int,
                        help='Number of training epochs.')
    parser.add_argument('--batch_size', default=128, type=int,
                        help='Size of a training mini-batch.')
    parser.add_argument('--workers', default=2, type=int,
                        help='Number of data loader workers.')
    parser.add_argument('--resume', default='', type=str, metavar='PATH',
                        help='path to latest checkpoint (default: none)')
    parser.add_argument('--lr_update', default=20, type=int,
                        help='Number of epochs to update the learning rate.')
    opt = parser.parse_args()

    if os.path.isdir(opt.logger_name):
        # `default=False` belongs to click.confirm, not to str.format
        if click.confirm('Logs directory already exists in {}. Erase?'.format(opt.logger_name),
                         default=False):
            os.system('rm -r ' + opt.logger_name)
    tb_logger.configure(opt.logger_name, flush_secs=5)
    logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO)

    #########################################################################
    # Create options
    #########################################################################
    options = {'logs': {}, 'coco': {}, 'model': {'seq2vec': {}}, 'optim': {}}
    if opt.path_opt is not None:
        with open(opt.path_opt, 'r') as handle:
            options_yaml = yaml.load(handle, Loader=yaml.FullLoader)
        options = utils.update_values(options, options_yaml)

    vocab = deserialize_vocab(os.path.join(opt.vocab_path, '%s_vocab.json' % opt.data_name))
    vocab_word = sorted(vocab.word2idx.items(), key=lambda x: x[1], reverse=False)
    vocab_word = [tup[0] for tup in vocab_word]
    opt.vocab_size = len(vocab)

    # Create dataset, model, criterion and optimizer
    train_loader, val_loader = data.get_loaders(opt.data_path, vocab, opt.batch_size,
                                                opt.workers, opt)
    model = models.factory(options['model'], vocab_word, cuda=True, data_parallel=False)
    criterion = nn.CrossEntropyLoss(weight=torch.Tensor([1, 128])).cuda()
    optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()),
                                 lr=options['optim']['lr'])
    print('Model has {} parameters'.format(utils.params_count(model)))

    # optionally resume from a checkpoint; initialize defaults first so a missing
    # checkpoint file cannot leave start_epoch/best_rsum undefined
    start_epoch = 0
    best_rsum = 0
    if opt.resume:
        if os.path.isfile(opt.resume):
            print("=> loading checkpoint '{}'".format(opt.resume))
            checkpoint = torch.load(opt.resume)
            start_epoch = checkpoint['epoch']
            best_rsum = checkpoint['best_rsum']
            model.load_state_dict(checkpoint['model'])
            # Eiters is used to show logs as the continuation of another training
            model.Eiters = checkpoint['Eiters']
            print("=> loaded checkpoint '{}' (epoch {}, best_rsum {})".format(
                opt.resume, start_epoch, best_rsum))
            engine.validate(val_loader, model, criterion, optimizer, opt.batch_size)
        else:
            print("=> no checkpoint found at '{}'".format(opt.resume))

    # Train the Model
    for epoch in range(start_epoch, opt.num_epochs):
        adjust_learning_rate(opt, options, optimizer, epoch)

        # train for one epoch
        engine.train(train_loader, model, criterion, optimizer, epoch, print_freq=10)

        # evaluate on validation set
        rsum = engine.validate(val_loader, model, criterion, optimizer, opt.batch_size)

        is_best = rsum > best_rsum
        best_rsum = max(rsum, best_rsum)
        if not os.path.exists(opt.model_name):
            os.mkdir(opt.model_name)
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'arch': 'baseline',
                'model': model.state_dict(),
                'best_rsum': best_rsum,
                'opt': opt,
                'options': options,
                'Eiters': model.Eiters,
            },
            is_best,
            filename='checkpoint_{}{}.pth.tar'.format(epoch, best_rsum),
            prefix=opt.model_name + '/')
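# adjust_learning_rate is called in the training loop above but not shown here. A common
# pattern, and one plausible reading of the --lr_update flag, is a step decay every
# opt.lr_update epochs; this is a hedged sketch under that assumption, not necessarily
# the project's exact schedule, and the name _adjust_learning_rate_sketch is hypothetical.
def _adjust_learning_rate_sketch(opt, options, optimizer, epoch):
    # Decay the base learning rate by 10x every opt.lr_update epochs.
    lr = options['optim']['lr'] * (0.1 ** (epoch // opt.lr_update))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr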
def build_img_discriminator(opts, vocab):
    d_kwargs = deepcopy(opts["generic"])
    d_kwargs = update_values(opts["image"], d_kwargs)
    discriminator = models.PatchDiscriminator(**d_kwargs)
    return discriminator, d_kwargs
def build_obj_discriminator(opts, vocab):
    d_kwargs = deepcopy(opts["generic"])
    d_kwargs = update_values(opts["object"], d_kwargs)
    d_kwargs["vocab"] = vocab
    discriminator = models.AcCropDiscriminator(**d_kwargs)
    return discriminator, d_kwargs
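# Both discriminator builders above overlay the type-specific options onto a copy of the
# generic ones via an update_values helper defined elsewhere. A minimal sketch, assuming
# a plain shallow merge in which keys from `src` overwrite keys in `dst` (the real helper
# may instead only update keys that already exist in `dst`); _update_values_sketch is a
# hypothetical name.
def _update_values_sketch(src, dst):
    for key, value in src.items():
        dst[key] = value
    return dst

# Hypothetical usage mirroring build_obj_discriminator:
#   d_kwargs = _update_values_sketch(opts["object"], deepcopy(opts["generic"]))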