init_epoch = 0

# In[6]:
# Pin CUDA device enumeration to PCI bus order and expose only GPU 0.
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = str(0)

# TensorFlow wizardry (TF1-style session configuration).
config = tf.ConfigProto()
# Don't pre-allocate memory; allocate GPU memory as needed.
config.gpu_options.allow_growth = True
# Allow up to the FULL GPU memory to be allocated (fraction 1.0).
# BUGFIX(comment): the old comment claimed "half the GPU memory",
# which contradicted the 1.0 value below.
config.gpu_options.per_process_gpu_memory_fraction = 1.0
# Create a session with the above options specified and register it
# with the Keras backend.
k.tensorflow_backend.set_session(tf.Session(config=config))

# Stacked-hourglass pose network: 16 keypoint classes, 256x256 input,
# 64x64 heatmap output. `num_stacks`, `resume`, `batch_size` and
# `model_path` are presumably defined earlier in the script — TODO confirm.
xnet = HourglassNet(num_classes=16, num_stacks=num_stacks, num_channels=256,
                    inres=(256, 256), outres=(64, 64))

if resume:
    xnet.resume_train(batch_size=batch_size, model_path=model_path,
                      init_epoch=init_epoch, epochs=args.epochs)
else:
    xnet.build_model(show=True)
    # BUGFIX: the bare name `epochs` is not defined (the resume branch
    # already reads `args.epochs`, and a sibling chunk of this file records
    # "NameError: name 'epochs' is not defined" for this exact pattern).
    xnet.train(epochs=args.epochs, model_path=model_path, batch_size=batch_size)
# Register the configured TF1 session with the Keras backend via the
# tf.compat.v1 API (replaces the legacy `k.set_session(tf.Session(config=config))`).
# NOTE(review): a stray closing '"""' fused to this call in the original was
# removed as extraction residue from a commented-out block.
tf.compat.v1.keras.backend.set_session(tf.compat.v1.Session(config=config))

# Build the stacked-hourglass network; the "tiny" variant halves the channel
# width and uses 192x192 inputs with 48x48 heatmap outputs.
if args.tiny:
    xnet = HourglassNet(num_classes=16, num_stacks=args.num_stack, num_channels=128,
                        inres=(192, 192), outres=(48, 48))
else:
    xnet = HourglassNet(num_classes=16, num_stacks=args.num_stack, num_channels=256,
                        inres=(256, 256), outres=(64, 64))

if args.resume:
    # BUGFIX: `epochs` alone is not defined in this script — the train()
    # call below was already fixed from the identical NameError (see the
    # original's own comment), so resume now also reads `args.epochs`.
    xnet.resume_train(batch_size=args.batch_size, model_json=args.resume_model_json,
                      model_weights=args.resume_model, init_epoch=args.init_epoch,
                      epochs=args.epochs)
else:
    xnet.build_model(mobile=args.mobile, show=True)
    # Previously: xnet.train(epochs=epochs, model_path=model_path, ...)
    # which raised NameError: name 'epochs' is not defined.
    xnet.train(epochs=args.epochs, model_path=args.model_path, batch_size=args.batch_size)
# NOTE(review): this chunk opens mid-way through a constructor/setup call
# that begins above this excerpt; the keyword arguments below close that call.
                 image_aug_str=args.augment, pickle_name=args.pickle,
                 optimizer_str=args.optimizer, learning_rate=args.learning_rate,
                 activation_str=args.activation)

# Wall-clock timestamp taken just before training begins.
training_start = time.time()

# TODO Save all model parameters in JSON for easy resuming and parsing later on
if args.resume:
    print("\n\nResume training start: {}\n".format(time.ctime()))
    # Resume a previous run from saved JSON architecture + weights.
    hgnet.resume_train(args.batch, args.model_save, args.resume_json, args.resume_weights,
                       args.resume_epoch, args.epochs, args.resume_subdir, args.subset,
                       new_run=args.resume_with_new_run)
else:
    # Fresh run: build the model, log the hyperparameters, then train.
    hgnet.build_model(show=True)
    print("\n\nTraining start: {}\n".format(time.ctime()))
    print("Hourglass blocks: {:2d}, epochs: {:3d}, batch size: {:2d}, subset: {:.2f}".format(
        args.hourglass, args.epochs, args.batch, args.subset))
    hgnet.train(args.batch, args.model_save, args.epochs, args.subset, args.notes)
    print("\n\nTraining end: {}\n".format(time.ctime()))

training_end = time.time()
# Durations in seconds; `setup_start` is presumably captured earlier in the
# script, before this excerpt — TODO confirm.
setup_time = training_start - setup_start
training_time = training_end - training_start
# Only allow a total of half the GPU memory to be allocated config.gpu_options.per_process_gpu_memory_fraction = 1.0 # Create a session with the above options specified. tf.compat.v1.keras.backend.set_session(tf.compat.v1.Session(config=config)) if args.tiny: xnet = HourglassNet(num_classes=16, num_stacks=args.num_stack, num_channels=128, inres=(192, 192), outres=(48, 48)) else: xnet = HourglassNet(num_classes=11, num_stacks=args.num_stack, num_channels=16, inres=(256, 256), outres=(64, 64)) if args.resume: xnet.resume_train(batch_size=args.batch_size, model_json=args.resume_model_json, model_weights=args.resume_model, init_epoch=args.init_epoch, epochs=args.epochs) else: xnet.build_model(mobile=args.mobile, show=False) xnet.train(epochs=args.epochs, model_path=args.model_path, batch_size=args.batch_size)