def get_file_options(opt, files):
    """Return the options dictionary parsed from the first existing file in
    *files*; if *opt* is not None, return only the sub-dictionary stored
    under that option key (or {} when the key is absent).

    Parameters
    ----------
    opt : str | None
        Option key to extract from the parsed config, or None to return
        the whole parsed dictionary.
    files : sequence of str
        Candidate configuration-file paths, tried in order.
    """
    res = {}
    # Only the first file that actually exists on disk is read.
    options_file = rutils.get_first_existing_file(files)
    if options_file is not None:
        reader.read_config(options_file, {}, res)
    if opt is not None:
        # A missing key yields an empty dict rather than raising KeyError.
        res = res.get(opt, {})
    return res
def infer(args):
    """Run inference with a model restored via ``load_inference_model``.

    Rebuilds the network graph (to obtain the feed variable names and the
    ``logits`` variable), loads the saved inference program from
    ``args.model_path`` and executes it on the test data.

    NOTE: the ``break`` after the first batch and the raw ``print`` calls
    look like debugging leftovers — remove them for a full evaluation pass.
    """
    batch_size = args.batch_size
    items_num = reader.read_config(args.config_path)
    test_data = reader.Data(args.test_path, False)
    place = fluid.CUDAPlace(0) if args.use_cuda else fluid.CPUPlace()
    exe = fluid.Executor(place)
    # Rebuild the graph so feed_datas / logits variables exist in this scope.
    loss, acc, py_reader, feed_datas, logits = network.network(
        items_num, args.hidden_size, args.step, args.batch_size)
    exe.run(fluid.default_startup_program())
    [infer_program, feeded_var_names, target_var] = fluid.io.load_inference_model(
        dirname=args.model_path, executor=exe)
    feed_list = [e.name for e in feed_datas]
    print(feed_list, type(target_var[0]), type(logits))
    infer_reader = test_data.reader(batch_size, batch_size * 20, False)
    feeder = fluid.DataFeeder(place=place, feed_list=feed_list)
    # Loop variable renamed from ``iter`` — it shadowed the builtin.
    for batch_id, data in enumerate(infer_reader()):
        res = exe.run(infer_program,
                      feed=feeder.feed(data),
                      fetch_list=[logits])
        print("@@@, ", res)
        print("!!!,", logits)
        if batch_id == 0:
            break
def __init__(self):
    """Initialise application state and register this instance as the
    global app on the ``api`` module."""
    # Run-loop / state-machine bookkeeping.
    self._is_running = False
    self._current_state = 0
    self._state_start_time = 0
    self._state_aux_var = 0
    # External resources, filled in later during setup.
    self._station = None
    self._mqtt_client = None
    self._mqtt_recently_ok = False
    # NOTE(review): ``True`` looks odd next to the numeric defaults above —
    # confirm the intended initial value for _last_ip_report.
    self._last_ip_report = True
    self.autos_mqtt_topics = {}  # {Topic: [handlers, ]} for objects framework automatically handles e.g. MqttSwitch
    self.autos_with_loop = []
    self.autos_full_list = []
    # Load configuration once at startup (project helper; source not visible here).
    self.conf = read_config()
    # Module-level side effect: expose this instance as the global app.
    api.app = self
def infer_wrong(args):
    """Evaluate saved checkpoints epoch-by-epoch on the test set.

    For each epoch index in ``[args.start_index, args.last_index]``, loads
    the persisted parameters from ``args.model_path/epoch_<n>`` into a
    test-mode clone of the default main program, drains the test reader and
    logs mean loss and Recall@20.
    """
    batch_size = args.batch_size
    items_num = reader.read_config(args.config_path)
    test_data = reader.Data(args.test_path, False)
    place = fluid.CUDAPlace(0) if args.use_cuda else fluid.CPUPlace()
    exe = fluid.Executor(place)
    # Build the graph; only loss/acc and the py_reader are used below.
    loss, acc, py_reader, feed_datas = network.network(items_num, args.hidden_size,
                                                       args.step, batch_size)
    exe.run(fluid.default_startup_program())
    infer_program = fluid.default_main_program().clone(for_test=True)
    for epoch_num in range(args.start_index, args.last_index + 1):
        model_path = os.path.join(args.model_path, "epoch_" + str(epoch_num))
        try:
            # A missing checkpoint dir is routed to the ValueError handler
            # below so the sweep continues with the next epoch.
            if not os.path.exists(model_path):
                raise ValueError()
            fluid.io.load_persistables(executor=exe, dirname=model_path,
                                       main_program=infer_program)
            loss_sum = 0.0
            acc_sum = 0.0
            count = 0
            py_reader.set_sample_list_generator(
                test_data.reader(batch_size, batch_size * 20, False))
            py_reader.start()
            try:
                # Drain the reader; EOFException signals end of test data.
                while True:
                    res = exe.run(infer_program,
                                  fetch_list=[loss.name, acc.name],
                                  use_program_cache=True)
                    loss_sum += res[0]
                    acc_sum += res[1]
                    count += 1
            except fluid.core.EOFException:
                py_reader.reset()
                logger.info("TEST --> loss: %.4lf, Recall@20: %.4lf" %
                            (loss_sum / count, acc_sum / count))
        except ValueError as e:
            logger.info("TEST --> error: there is no model in " + model_path)
def main(argv=''):
    """Entry point: build the slab and adsorbate structures, then either
    write POSCAR files only or run the full workflow, printing the final
    state."""
    args = get_args(argv)
    # Config is only read for a full run; POSCAR-only mode uses an empty one.
    config = dict() if args.poscar_only else read_config(args.config)
    # TODO: add options for gen_midpoints
    opts = dict()
    slab = read(args.slab, format=args.format)
    logger.info(f'{args.slab} Atoms object created')
    adsorbate = get_adsorbate(args.ads)
    logger.info(f'{args.ads} Atoms object created')
    state = (write_all(slab, adsorbate, **opts) if args.poscar_only
             else run_all(slab, adsorbate, config, **opts))
    print(state)
def train():
    """Train the model end to end.

    Reads config and training data, builds the network, trains for
    ``args.epoch_num`` epochs using Adam with exponential LR decay and L2
    regularisation, logs loss/accuracy every ``PRINT_STEP`` steps, and
    saves an inference model per epoch.  When ``--enable_ce`` is set,
    random seeds are fixed and CE kpi lines are printed at the end.
    """
    args = parse_args()
    if args.enable_ce:
        # Fixed seeds make continuous-evaluation (CE) runs reproducible.
        SEED = 102
        fluid.default_main_program().random_seed = SEED
        fluid.default_startup_program().random_seed = SEED

    batch_size = args.batch_size
    items_num = reader.read_config(args.config_path)
    loss, acc, py_reader, feed_datas, _ = network.network(items_num, args.hidden_size,
                                                          args.step, batch_size)
    data_reader = reader.Data(args.train_path, True)
    logger.info("load data complete")
    use_cuda = True if args.use_cuda else False
    use_parallel = True if args.use_parallel else False
    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    exe = fluid.Executor(place)
    step_per_epoch = data_reader.length // batch_size
    # LR decays every (steps-per-epoch * lr_dc_step) steps by factor lr_dc.
    optimizer = fluid.optimizer.Adam(
        learning_rate=fluid.layers.exponential_decay(
            learning_rate=args.lr,
            decay_steps=step_per_epoch * args.lr_dc_step,
            decay_rate=args.lr_dc),
        regularization=fluid.regularizer.L2DecayRegularizer(
            regularization_coeff=args.l2))
    optimizer.minimize(loss)
    exe.run(fluid.default_startup_program())

    # Fill the global "all_vocab" tensor with every item id in [1, items_num).
    all_vocab = fluid.global_scope().var("all_vocab").get_tensor()
    all_vocab.set(
        np.arange(1, items_num).astype("int64").reshape((-1)), place)

    feed_list = [e.name for e in feed_datas]

    if use_parallel:
        train_exe = fluid.ParallelExecutor(
            use_cuda=use_cuda, loss_name=loss.name)
    else:
        train_exe = exe

    logger.info("begin train")

    total_time = []
    ce_info = []
    start_time = time.time()
    loss_sum = 0.0
    acc_sum = 0.0
    global_step = 0
    PRINT_STEP = 500
    #py_reader.decorate_paddle_reader(data_reader.reader(batch_size, batch_size * 20, True))
    py_reader.set_sample_list_generator(data_reader.reader(batch_size, batch_size * 20, True))
    for i in range(args.epoch_num):
        epoch_sum = []
        py_reader.start()
        try:
            # One epoch: run until the reader raises EOFException.
            while True:
                res = train_exe.run(fetch_list=[loss.name, acc.name])
                loss_sum += res[0].mean()
                acc_sum += res[1].mean()
                epoch_sum.append(res[0].mean())
                global_step += 1
                if global_step % PRINT_STEP == 0:
                    ce_info.append([loss_sum / PRINT_STEP, acc_sum / PRINT_STEP])
                    total_time.append(time.time() - start_time)
                    logger.info("global_step: %d, loss: %.4lf, train_acc: %.4lf" % (
                        global_step, loss_sum / PRINT_STEP, acc_sum / PRINT_STEP))
                    # Reset the windowed accumulators after each report.
                    loss_sum = 0.0
                    acc_sum = 0.0
                    start_time = time.time()
        except fluid.core.EOFException:
            # End of this epoch's data: reset the reader and checkpoint.
            py_reader.reset()
            logger.info("epoch loss: %.4lf" % (np.mean(epoch_sum)))
            save_dir = os.path.join(args.model_path, "epoch_" + str(i))
            fetch_vars = [loss, acc]
            fluid.io.save_inference_model(save_dir, feed_list, fetch_vars, exe)
            logger.info("model saved in " + save_dir)

    # only for ce
    if args.enable_ce:
        gpu_num = get_cards(args)
        ce_loss = 0
        ce_acc = 0
        ce_time = 0
        try:
            ce_loss = ce_info[-1][0]
            ce_acc = ce_info[-1][1]
            ce_time = total_time[-1]
        except:
            # NOTE(review): bare except — presumably guards empty
            # ce_info/total_time (IndexError); consider narrowing.
            print("ce info error")
        print("kpis\teach_pass_duration_card%s\t%s" % (gpu_num, ce_time))
        print("kpis\ttrain_loss_card%s\t%f" % (gpu_num, ce_loss))
        print("kpis\ttrain_acc_card%s\t%f" % (gpu_num, ce_acc))