def build_training_model(config):
    """Construct, initialize, and register the training network.

    Builds the net described by *config* on the configured device,
    runs the parameter-init net, optionally overwrites params with
    pretrained weights (finetune mode), and registers the net in the
    workspace.

    Returns the ModelHelper wrapping the training net.
    """
    # Target device: CUDA when a gpu_id is configured, CPU otherwise.
    dev = caffe2_pb2.DeviceOption()
    if config['gpu_id'] is not None:
        dev.device_type = caffe2_pb2.CUDA
        dev.cuda_gpu_id = config['gpu_id']

    # All operators created below inherit this device option.
    with core.DeviceScope(dev):
        model = model_helper.ModelHelper(
            name='{}_training_model'.format(config['name']),
        )
        data, label = add_input(model, config, is_test=False)
        softmax = add_model(model, data, config)
        loss = add_softmax_loss(model, softmax, label)
        add_training_operators(model, config, loss)
        acc, acc5 = add_accuracy(model, softmax, label)

    # Materialize parameters first, so that finetune mode can then
    # overwrite them with the pretrained init net.
    workspace.RunNetOnce(model.param_init_net)
    if config['finetune']:
        load_init_net(config['network']['init_net'], dev)
    workspace.CreateNet(model.net)
    return model
def build_validation_model(config):
    """Construct, initialize, and register the validation network.

    Mirrors build_training_model, but runs in test mode
    (``is_test=True``) and with ``init_params=False`` so parameters are
    supplied externally (shared with / loaded from the training net).

    Returns the ModelHelper wrapping the validation net.
    """
    # Target device: CUDA when a gpu_id is configured, CPU otherwise.
    device_opt = caffe2_pb2.DeviceOption()
    if config['gpu_id'] is not None:
        device_opt.device_type = caffe2_pb2.CUDA
        device_opt.cuda_gpu_id = config['gpu_id']

    # All operators created below inherit this device option.
    with core.DeviceScope(device_opt):
        validation_model = model_helper.ModelHelper(
            name='{}_validation_model'.format(config['name']),
            init_params=False,
        )
        data, label = add_input(validation_model, config, is_test=True)
        pred = add_model_all(validation_model, config, data, is_test=True)
        add_softmax_loss(validation_model, pred, label)
        # FIX: add_accuracy was previously called without the prediction
        # and label blobs, unlike the training model builder, which calls
        # add_accuracy(model, softmax, label). Pass them explicitly.
        add_accuracy(validation_model, pred, label)

    # Initialize the (non-learned) inputs of the validation net and
    # register it in the workspace.
    workspace.RunNetOnce(validation_model.param_init_net)
    workspace.CreateNet(validation_model.net)
    return validation_model
def create_target_model_ops(model, loss_scale):
    """Add forward pass, softmax loss, and accuracy ops to *model*.

    NOTE(review): *loss_scale* is accepted (data-parallel builder
    callback signature) but never applied to the loss here — confirm
    whether scaling is handled elsewhere.

    Returns the loss blob wrapped in a single-element list.
    """
    # fp16 training initializes conv/fc weights and biases in half
    # precision; otherwise use the default fp32 initializer.
    weight_init = (PseudoFP16Initializer
                   if args.dtype == 'float16'
                   else Initializer)
    with brew.arg_scope([brew.conv, brew.fc],
                        WeightInitializer=weight_init,
                        BiasInitializer=weight_init,
                        enable_tensor_core=args.enable_tensor_core,
                        float16_compute=args.float16_compute):
        pred = add_se_model(model, model_config, "data", is_test=False)
    if args.dtype == 'float16':
        # Loss is computed in fp32 even when the forward pass ran in fp16.
        pred = model.net.HalfToFloat(pred, pred + '_fp32')
    loss = add_softmax_loss(model, pred, 'label')
    brew.accuracy(model, ['softmax', 'label'], 'accuracy')
    return [loss]