def import_project_from_xml(project, root, filename):
    # Use NewIdLoader so imported items receive fresh ids that do not
    # collide with ids already used by the existing project.
    loader = NewIdLoader(project)
    if root.find("configuration") is not None:
        load_configuration(root.find("configuration"), project, loader)
    for e in root.findall("net"):
        net = load_net(e, project, loader)
        project.add_net(net)
    project.id_counter += 1
    return project
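# Usage sketch (hypothetical, not part of this module): merging the nets of a
# second project file into an existing `project`. The file name "other.xml"
# is an assumption; the file must be a project XML for the same target
# environment.
#
#   import xml.etree.ElementTree as xml
#
#   tree = xml.parse("other.xml")
#   project = import_project_from_xml(project, tree.getroot(), "other.xml")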
def load_project_from_xml(root, filename):
    target_env_name = root.get("target_env")
    if target_env_name is None:
        # For backward compatibility
        target_env_name = root.get("extenv", "C++")
    project = create_project(filename, target_env_name)
    project.library_rpc = utils.xml_bool(root, "library-rpc", False)
    project.library_octave = utils.xml_bool(root, "library-octave", False)
    loader = BasicLoader(project)
    if root.find("configuration") is not None:
        load_configuration(root.find("configuration"), project, loader)
    for e in root.findall("net"):
        project.add_net(load_net(e, project, loader))
    assert project.nets
    for e in root.findall("sequence"):
        project.sequences.append(controlseq.ControlSequence(element=e))
    project.build_net = project.nets[0]
    project.id_counter += 1
    return project
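# Usage sketch (hypothetical): loading a whole project from disk. The file
# name "myproject.proj" and the ElementTree parsing step are assumptions; the
# root element must carry the attributes read above ("target_env", or the
# legacy "extenv").
#
#   import xml.etree.ElementTree as xml
#
#   root = xml.parse("myproject.proj").getroot()
#   project = load_project_from_xml(root, "myproject.proj")
#   print(project.build_net)  # the first net in the file becomes the build net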
load_dir = data_path
print("Data from: {}".format(load_dir))
load_path = load_dir + '/{}/train/0'.format(group_size)
save_path = main_path + '/{}'.format(args.name)
if not os.path.exists(save_path):
    os.makedirs(save_path)

######################
# initialization
if gpu_num > 1:
    model = nn.DataParallel(
        net.load_net(model_name, 1, out_channels, start_filts, depth,
                     img_side, num_cn, split, kernel_size,
                     num_global_control)).to(args.device)
    criterion = nn.DataParallel(net.criterion(degree=degree)).to(args.device)
    regularizer = nn.DataParallel(net.regularizer()).to(args.device)
    print("Assigned {} GPUs".format(gpu_num))
else:
    model = net.load_net(model_name, 1, out_channels, start_filts, depth,
                         img_side, num_cn, split, kernel_size,
                         num_global_control).to(args.device)
    criterion = net.criterion(degree=degree).to(args.device)
    regularizer = net.regularizer().to(args.device)
    print("Assigned on {}".format(args.device))
print('network contains {} parameters'.format(
    net.count_parameters(model)))  # parameter number
time.sleep(2)
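# Minimal training-step sketch (assumption, not taken from this file) showing
# how the objects built above are typically consumed; `dataloader`, the
# optimizer choice, and the exact call signatures of `criterion` and
# `regularizer` are hypothetical placeholders.
#
#   optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
#   for batch in dataloader:
#       optimizer.zero_grad()
#       output = model(batch.to(args.device))
#       loss = criterion(output) + regularizer(output)
#       loss.backward()
#       optimizer.step()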
    args.num_classes_iter) + '_' + str(args.num_elements_class) + '_' + \
    str(args.num_labeled_points_class) + '_' + str(args.scaling_loss)
batch_size = args.num_classes_iter * args.num_elements_class
device = 'cuda:0'

# create folders where we save the trained nets and the results
save_folder_nets = 'save_trained_nets'
save_folder_results = 'save_results'
if not os.path.exists(save_folder_nets):
    os.makedirs(save_folder_nets)
if not os.path.exists(save_folder_results):
    os.makedirs(save_folder_results)

# load the pre-trained model
model = net.load_net(dataset=args.dataset_name,
                     net_type=args.net_type,
                     nb_classes=args.nb_classes)

# define the losses and optimizer and move them to cuda
model = model.to(device)
gtg = gtg.GTG(args.nb_classes, max_iter=args.num_iter_gtg,
              device=device).to(device)
opt = RAdam([{'params': list(set(model.parameters())), 'lr': args.lr_net}],
            weight_decay=args.weight_decay)
criterion = nn.NLLLoss().to(device)
criterion2 = nn.CrossEntropyLoss().to(device)

# do training in mixed precision
if args.is_apex:
    model, opt = amp.initialize(model, opt, opt_level="O1")

# create loaders
dl_tr, dl_ev, _, _ = data_utility.create_loaders(args.cub_root,
                                                 args.nb_classes,
                                                 args.cub_is_extracted,
                                                 args.nb_workers,