Example #1
    def create_resnet50_model_ops(model, loss_scale):
        initializer = (pFP16Initializer if args.dtype == 'float16'
                       else Initializer)

        with brew.arg_scope([brew.conv, brew.fc],
                            WeightInitializer=initializer,
                            BiasInitializer=initializer,
                            enable_tensor_core=args.enable_tensor_core):
            pred = resnet.create_resnet50(
                model,
                "data",
                num_input_channels=args.num_channels,
                num_labels=args.num_labels,
                no_bias=True,
                no_loss=True,
            )

        if args.dtype == 'float16':
            pred = model.net.HalfToFloat(pred, pred + '_fp32')

        softmax, loss = model.SoftmaxWithLoss([pred, 'label'],
                                              ['softmax', 'loss'])
        loss = model.Scale(loss, scale=loss_scale)
        brew.accuracy(model, [softmax, "label"], "accuracy")
        return [loss]
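Most of the float16 examples on this page follow the same pattern: build the backbone under an fp16 initializer arg_scope, then cast the prediction back to fp32 before SoftmaxWithLoss, which runs in full precision. A self-contained sketch of just that cast (blob and net names here are illustrative, not from the example above):

    from caffe2.python import core, workspace
    import numpy as np

    # Feed an fp16 blob, then cast it with the HalfToFloat op.
    workspace.FeedBlob("pred", np.random.rand(4, 10).astype(np.float16))
    net = core.Net("cast_demo")
    net.HalfToFloat("pred", "pred_fp32")
    workspace.RunNetOnce(net)
    print(workspace.FetchBlob("pred_fp32").dtype)  # -> float32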
Example #2
    def create_resnext_model_ops(model, loss_scale):
        initializer = (PseudoFP16Initializer
                       if args.dtype == 'float16' else Initializer)

        with brew.arg_scope([brew.conv, brew.fc],
                            WeightInitializer=initializer,
                            BiasInitializer=initializer,
                            enable_tensor_core=args.enable_tensor_core,
                            float16_compute=args.float16_compute):
            pred = resnet.create_resnext(
                model,
                "data",
                num_input_channels=args.num_channels,
                num_labels=args.num_labels,
                num_layers=args.num_layers,
                num_groups=args.resnext_num_groups,
                num_width_per_group=args.resnext_width_per_group,
                no_bias=True,
                no_loss=True,
            )

        if args.dtype == 'float16':
            pred = model.net.HalfToFloat(pred, pred + '_fp32')

        softmax, loss = model.SoftmaxWithLoss([pred, 'label'],
                                              ['softmax', 'loss'])
        loss = model.Scale(loss, scale=loss_scale)
        brew.accuracy(model, [softmax, "label"], "accuracy", top_k=1)
        brew.accuracy(model, [softmax, "label"], "accuracy_top5", top_k=5)
        return [loss]
Example #3
    def create_model(model, loss_scale):
        initializer = (PseudoFP16Initializer
                       if args.data_type == 'float16' else Initializer)

        with brew.arg_scope([brew.conv, brew.fc],
                            WeightInitializer=initializer,
                            BiasInitializer=initializer,
                            enable_tensor_core=False,
                            float16_compute=False):
            pred = resnet.create_resnet50(
                model,
                "data",
                num_input_channels=args.channels,
                num_labels=args.num_labels,
                # num_groups=args.resnext_num_groups,
                # num_width_per_group=args.resnext_width_per_group,
                no_bias=True,
                no_loss=True)

        # If we're using 2-byte float16, inflate predictions to the 4-byte fp32 representation
        if args.data_type == 'float16':
            pred = model.net.HalfToFloat(pred, pred + '_fp32')

        # Compute the softmax probabilities and the loss
        softmax, loss = model.SoftmaxWithLoss([pred, 'label'],
                                              ['softmax', 'loss'])

        # Normalize the loss, and compute the top-k accuracies for k in {1, 5}
        loss = model.Scale(loss, scale=loss_scale)
        brew.accuracy(model, [softmax, "label"], "accuracy", top_k=1)
        brew.accuracy(model, [softmax, "label"], "accuracy_top5", top_k=5)
        return [loss]
Example #5
def Resnext101(model, loss_scale, dtype='float'):
    initializer = (PseudoFP16Initializer
                   if dtype == 'float16' else Initializer)
    with brew.arg_scope(
        [brew.conv, brew.fc],
            WeightInitializer=initializer,
            BiasInitializer=initializer,
    ):
        # residual network
        pred = resnet.create_resnext(
            model,
            "data",
            num_input_channels=3,
            num_labels=1000,
            label="label",
            num_layers=101,
            num_groups=32,
            num_width_per_group=4,
            no_bias=True,
            no_loss=True,
        )
        if dtype == 'float16':
            pred = model.net.HalfToFloat(pred, pred + '_fp32')

        softmax, loss = model.SoftmaxWithLoss([pred, 'label'],
                                              ['softmax', 'loss'])
        prefix = model.net.Proto().name
        loss = model.net.Scale(loss, prefix + "_loss", scale=loss_scale)
        brew.accuracy(model, [softmax, "label"], prefix + "_accuracy")
        return [loss]
Example #6
    def create_inception_model_ops(model, loss_scale=1.0):
        [softmax, loss] = inceptionv4.create_Inceptionv4(
            model, "data", num_labels, "label")

        prefix = model.net.Proto().name
        loss = model.net.Scale(loss, prefix + "_loss", scale=loss_scale)
        brew.accuracy(model, [softmax, "label"], prefix + "_accuracy")
        return [loss]
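Prefixing blob names with model.net.Proto().name, as this example does, keeps loss/accuracy blobs unique when several nets share one workspace. A quick check of where that prefix comes from (the model name is illustrative):

    from caffe2.python import model_helper

    m = model_helper.ModelHelper(name="inception_test")
    # core.Net uniquifies names, so this prints "inception_test"
    # (possibly with a numeric suffix if the name is already taken).
    print(m.net.Proto().name)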
Example #7
def add_training_operators(softmax, model, device_opts):

    with core.DeviceScope(device_opts):
        xent = model.LabelCrossEntropy([softmax, "label"], 'xent')
        loss = model.AveragedLoss(xent, "loss")
        brew.accuracy(model, [softmax, "label"], "accuracy")

        model.AddGradientOperators([loss])
        opt = optimizer.build_sgd(model, base_learning_rate=0.01,
                                  policy="step", stepsize=1,
                                  gamma=0.999)  # optionally add momentum=0.9
Example #8
 def add_accuracy(self, model, output, label, device_opts, eval_metric):
     with core.DeviceScope(device_opts):
         if eval_metric == 'accuracy':
             accuracy = brew.accuracy(model, [output, label], "accuracy")
         elif eval_metric == 'top_k_accuracy':
             accuracy = brew.accuracy(model, [output, label],
                                      "accuracy",
                                      top_k=3)
         return accuracy
Example #9
 def create_mobilenet_model_ops(model, loss_scale):
     [softmax, loss] = mobilenet.create_mobilenet(
         model,
         "data",
         num_input_channels=args.num_channels,
         num_labels=args.num_labels,
         label="label")
     # loss = model.Scale(loss, scale=loss_scale)
     brew.accuracy(model, [softmax, "label"], "accuracy")
     return [loss]  # forward-pass builders are expected to return their losses
Example #10
 def create_resnet50_model_ops(model, loss_scale=1.0):
     # residual network
     [softmax, loss] = resnet.create_resnet50(model,
                                              "data",
                                              num_input_channels=3,
                                              num_labels=num_labels,
                                              label="label",
                                              no_bias=True, )
     prefix = model.net.Proto().name
     loss = model.net.Scale(loss, prefix + "_loss", scale=loss_scale)
     brew.accuracy(model, [softmax, "label"], prefix + "_accuracy")
     return [loss]
Example #11
def AddForwardPassOps(model):
    """Add forward pass ops and return a list of losses."""
    conv1 = brew.conv(model, 'data', 'conv1', 1, 20, 5)
    pool1 = brew.max_pool(model, conv1, 'pool1', kernel=2, stride=2)
    conv2 = brew.conv(model, pool1, 'conv2', 20, 50, 5)
    pool2 = brew.max_pool(model, conv2, 'pool2', kernel=2, stride=2)
    fc3 = brew.fc(model, pool2, 'fc3', 50 * 4 * 4, 500)
    fc3 = brew.relu(model, fc3, fc3)
    pred = brew.fc(model, fc3, 'pred', 500, 10)
    softmax, loss = model.SoftmaxWithLoss([pred, 'label'], ['softmax', 'loss'])
    brew.accuracy(model, [softmax, 'label'], 'accuracy')
    return [loss]
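The fc3 input size 50 * 4 * 4 follows from the MNIST shape arithmetic: 28 -> 24 (5x5 conv) -> 12 (2x2 pool) -> 8 (5x5 conv) -> 4 (2x2 pool), with 50 output channels. A pure-Python check of that arithmetic:

    side = 28
    for k in (5, 2, 5, 2):  # conv5, pool2, conv5, pool2
        side = side - 4 if k == 5 else side // 2
    print(50 * side * side)  # -> 800 == 50 * 4 * 4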
Example #12
 def create_resnet50_model_ops(model, loss_scale):
     [softmax, loss] = resnet.create_resnet50(
         model,
         "data",
         num_input_channels=args.num_channels,
         num_labels=args.num_labels,
         label="label",
         no_bias=True,
     )
     loss = model.Scale(loss, scale=loss_scale)
     brew.accuracy(model, [softmax, "label"], "accuracy")
     return [loss]
Example #13
def create_resnet50_model_ops(model, loss_scale=1.0):
    # Creates a residual network
    [softmax, loss] = resnet.create_resnet50(
        model,
        "data",
        num_input_channels=3,
        num_labels=num_labels,
        label="label",
    )
    prefix = model.net.Proto().name
    loss = model.net.Scale(loss, prefix + "_loss", scale=loss_scale)
    brew.accuracy(model, [softmax, "label"], prefix + "_accuracy")
    return [loss]
Example #14
    def create_target_model_ops(model, loss_scale):
        initializer = (PseudoFP16Initializer if args.dtype == 'float16'
                       else Initializer)
        with brew.arg_scope([brew.conv, brew.fc],
                            WeightInitializer=initializer,
                            BiasInitializer=initializer,
                            enable_tensor_core=args.enable_tensor_core,
                            float16_compute=args.float16_compute):
            pred = add_se_model(model, model_config, "data", is_test=False)

        if args.dtype == 'float16':
            pred = model.net.HalfToFloat(pred, pred + '_fp32')

        loss = add_softmax_loss(model, pred, 'label')
        brew.accuracy(model, ['softmax', 'label'], 'accuracy')
        return [loss]
Example #15
def add_accuracy(model, softmax, label):
    """ compute model classification accuracy """
    accuracy = brew.accuracy(model, [softmax, label], "accuracy")
    accuracy_5 = model.net.Accuracy(
        [softmax, label],
        "accuracy_5",
        top_k=5,
    )
    return (accuracy, accuracy_5)
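brew.accuracy simply emits an Accuracy operator and forwards keyword arguments such as top_k, so the brew call and the raw model.net.Accuracy call above should produce equivalent ops. A quick check (blob names are illustrative):

    from caffe2.python import brew, model_helper

    model = model_helper.ModelHelper(name="topk_demo", init_params=False)
    brew.accuracy(model, ["softmax", "label"], "accuracy_5", top_k=5)
    op = model.net.Proto().op[-1]
    print(op.type)                      # -> Accuracy
    print(op.arg[0].name, op.arg[0].i)  # -> top_k 5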
Example #16
def AddTrainingOperators(model, softmax, label, device_opts):
    with core.DeviceScope(device_opts):
        xent = model.LabelCrossEntropy([softmax, label], 'xent')
        # Compute the expected loss
        loss = model.AveragedLoss(xent, "loss")
        brew.accuracy(model, [softmax, label], "accuracy")
        # Use the average loss we just computed to add gradient operators to the model
        model.AddGradientOperators([loss])
        # Use SGD optimizer
        optimizer.build_sgd(
            model,
            base_learning_rate=0.1,
            weight_decay=1e-5,
            gamma=0.999, 
            policy='step', 
            stepsize=50,
            nesterov=1,
        )
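With policy='step', the LearningRate op decays the rate as base_lr * gamma ** (iter // stepsize), so the schedule above multiplies the learning rate by 0.999 every 50 iterations. A pure-Python sanity check:

    base_lr, gamma, stepsize = 0.1, 0.999, 50
    for it in (0, 500, 5000):
        print(it, base_lr * gamma ** (it // stepsize))
    # 0 -> 0.1, 500 -> ~0.0990, 5000 -> ~0.0905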
Example #17
def AddForwardPassOps(model):
    """Add forward pass ops and return a list of losses."""
    with brew.arg_scope([brew.conv, brew.fc],
                        WeightInitializer=pFP16Initializer,
                        BiasInitializer=pFP16Initializer):
        conv1 = brew.conv(model, 'data', 'conv1', 1, 20, 5)
        pool1 = brew.max_pool(model, conv1, 'pool1', kernel=2, stride=2)
        conv2 = brew.conv(model, pool1, 'conv2', 20, 50, 5)
        pool2 = brew.max_pool(model, conv2, 'pool2', kernel=2, stride=2)
        fc3 = brew.fc(model, pool2, 'fc3', 50 * 4 * 4, 500)
        fc3 = brew.relu(model, fc3, fc3)
        pred = brew.fc(model, fc3, 'pred', 500, 10)

    # Cast back to fp32 for remaining ops
    pred = model.net.HalfToFloat(pred, pred + '_fp32')
    softmax, loss = model.SoftmaxWithLoss([pred, 'label'], ['softmax', 'loss'])
    brew.accuracy(model, [softmax, 'label'], 'accuracy')
    return [loss]
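pFP16Initializer, used here and in Examples #1 and #29, names the same class as PseudoFP16Initializer: older Caffe2 exposed the former, and later versions kept it as a backward-compatible alias. The imports these fp16 examples assume typically look like:

    from caffe2.python.modeling.initializers import (
        Initializer, PseudoFP16Initializer)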
Example #18
    def create_resnet50_model_ops(model, loss_scale):
        with brew.arg_scope([brew.conv, brew.fc],
                            WeightInitializer=Initializer,
                            BiasInitializer=Initializer,
                            enable_tensor_core=0):
            pred = resnet.create_resnet50(
                model,
                "data",
                num_input_channels=num_channels,
                num_labels=num_labels,
                no_bias=True,
                no_loss=True,
            )

        softmax, loss = model.SoftmaxWithLoss([pred, 'label'],
                                              ['softmax', 'loss'])
        loss = model.Scale(loss, scale=loss_scale)
        brew.accuracy(model, [softmax, "label"], "accuracy")
        return [loss]
Example #19
def AddAccuracy(model, softmax, label):
    '''Compute model classification accuracy.
    Args:
        model: model structure
        softmax: classification output (softmax probabilities)
        label: image labels
    Returns:
        accuracy: classification accuracy
    '''
    accuracy = brew.accuracy(model, [softmax, label], 'accuracy')
    return accuracy
Example #20
def AddLeNetModel(model, data, label=None, no_loss=False, embedding_size=2, class_num=10, margin=0):
    '''
    This part is the standard LeNet model: from data to the softmax prediction.
    For each convolutional layer we specify dim_in - the number of input channels -
    and dim_out - the number of output channels. Each Conv and MaxPool layer also
    changes the image size; for example, a kernel of size 5 reduces each side of
    an image by 4.

    When a MaxPool layer has kernel and stride sizes equal to 2, it divides
    each side in half.
    '''
    # Image size: 28 x 28 -> 24 x 24
    conv1 = brew.conv(model, data, 'conv1', dim_in=1, dim_out=32, kernel=5)
    prelu1 = brew.relu(model, conv1, "prelu1")

    # Image size: 24 x 24 -> 12 x 12
    pool1 = brew.max_pool(model, prelu1, 'pool1', kernel=2, stride=2)

    # Image size: 12 x 12 -> 8 x 8
    conv2 = brew.conv(model, pool1, 'conv2', dim_in=32, dim_out=64, kernel=5)
    prelu2 = brew.relu(model, conv2, "prelu2")

    # Image size: 8 x 8 -> 4 x 4
    pool2 = brew.max_pool(model, prelu2, 'pool2', kernel=2, stride=2)
    # 64 * 4 * 4 is dim_out from the previous layer multiplied by the 4 x 4 image size
    fc3 = brew.fc(model, pool2, 'fc3', dim_in=64 * 4 * 4, dim_out=256)
    prelu3 = brew.relu(model, fc3, "prelu3")

    embedding = brew.fc(model, prelu3, 'embedding', 256, embedding_size)    
    if no_loss:
        return embedding

    if label is not None:
        output = brew.lsoftmax(model, [embedding, label], "fc4",
                               embedding_size, class_num,
                               margin=margin,
                               base=float(10),       # 200
                               lambda_min=float(0))  # 5
        fc4 = output[0]
        softmax, loss = model.SoftmaxWithLoss([fc4, label], ['softmax', 'loss'])
        accuracy = brew.accuracy(model, [softmax, label], "accuracy")

        return [loss, accuracy]
    else:
        fc4 = brew.fc(model, embedding, 'fc4', embedding_size, class_num)
        return brew.softmax(model, fc4, "softmax")
Example #21
def add_accuracy(model, softmax, device_opts):
    with core.DeviceScope(device_opts):
        accuracy = brew.accuracy(model, [softmax, "label"], "accuracy")
        return accuracy
Example #22
def AddAccuracy(model, softmax, label):
    accuracy = brew.accuracy(model, [softmax, label], "accuracy")
    return accuracy
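A self-contained run of the pattern in these small helpers, with softmax and label blobs fed directly into the workspace (all names here are illustrative):

    import numpy as np
    from caffe2.python import brew, model_helper, workspace

    workspace.FeedBlob("softmax", np.random.rand(8, 10).astype(np.float32))
    workspace.FeedBlob("label", np.random.randint(10, size=8).astype(np.int32))

    model = model_helper.ModelHelper(name="accuracy_demo", init_params=False)
    brew.accuracy(model, ["softmax", "label"], "accuracy")
    workspace.RunNetOnce(model.net)
    print(workspace.FetchBlob("accuracy"))  # fraction of correct top-1 predictions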
Example #23
    # Load init and predict nets
    #
    test_model = model_helper.ModelHelper(name="test_model", arg_scope=arg_scope, init_params=False)
    data, _ = AddInputLayer(test_model, 1, TEST_LMDB, "lmdb")

    init_net_proto = caffe2_pb2.NetDef()
    with open(INIT_NET, "rb") as f:
        init_net_proto.ParseFromString(f.read())
    test_model.param_init_net = test_model.param_init_net.AppendNet(core.Net(init_net_proto))

    predict_net_proto = caffe2_pb2.NetDef()
    with open(PREDICT_NET, "rb") as f:
        predict_net_proto.ParseFromString(f.read())
    test_model.net = test_model.net.AppendNet(core.Net(predict_net_proto))

    accuracy = brew.accuracy(test_model, ["softmax", "label"], "accuracy")

    #
    # Test loop
    #
    workspace.RunNetOnce(test_model.param_init_net)
    workspace.CreateNet(test_model.net, overwrite=True)

    avg_accuracy = 0.0
    test_iters = 10000

    for i in range(test_iters):
        workspace.RunNet(test_model.net)
        acc = workspace.FetchBlob('accuracy')
        avg_accuracy += acc
        if (i+1) % 500 == 0:
            # Report the running average (assumed; the original snippet truncates here).
            print("Iter {}: avg accuracy {:.4f}".format(i + 1, avg_accuracy / (i + 1)))
Example #24
def AddAccuracy(model, softmax, label):
    """Adds an accuracy op to the model"""
    accuracy = brew.accuracy(model, [softmax, label], "accuracy")
    return accuracy
Example #25
 def Accuracy(self, *args, **kwargs):
     return brew.accuracy(self, *args, **kwargs)
Example #26
def CivilNet(name, train_test_deploy=0):
    arg_scope = {
        'order': 'NCHW',
        'use_cudnn': True,
        'cudnn_exhaustive_search': True,
        'ws_nbytes_limit': (64 * 1024 * 1024)
    }
    model = model_helper.ModelHelper(name=name, arg_scope=arg_scope)

    model._device_type = caffe2_pb2.CUDA
    model._device_prefix = "gpu"
    model._shared_model = False
    model._devices = [0]
    device_opt = core.DeviceOption(caffe2_pb2.CUDA, 0)

    #for deploy
    if train_test_deploy == 2:
        with core.DeviceScope(device_opt):
            with core.NameScope("{}_{}".format(model._device_prefix, 0)):
                with brew.arg_scope([brew.conv, brew.fc],
                                    WeightInitializer=Initializer,
                                    BiasInitializer=Initializer,
                                    enable_tensor_core=False,
                                    float16_compute=False):
                    resnet.create_resnet50(model,
                                           "data",
                                           num_input_channels=3,
                                           num_labels=args.num_labels,
                                           no_bias=True,
                                           no_loss=False)
        workspace.RunNetOnce(model.param_init_net)
        workspace.CreateNet(model.net)
        return model

    reader_name = "reader" if train_test_deploy == 0 else "test_reader"
    reader_data = args.train_data if train_test_deploy == 0 else args.test_data
    reader = model.CreateDB(reader_name,
                            db=reader_data,
                            db_type='lmdb',
                            num_shards=1,
                            shard_id=0)

    is_test = (train_test_deploy == 1)
    loss = None
    with core.DeviceScope(device_opt):
        with core.NameScope("{}_{}".format(model._device_prefix, 0)):
            AddImageInput(model, reader, batch_size=32, is_test=is_test)
            with brew.arg_scope([brew.conv, brew.fc],
                                WeightInitializer=Initializer,
                                BiasInitializer=Initializer,
                                enable_tensor_core=False,
                                float16_compute=False):
                pred = resnet.create_resnet50(model,
                                              "data",
                                              num_input_channels=3,
                                              num_labels=args.num_labels,
                                              no_bias=True,
                                              no_loss=True)
            softmax, loss = model.SoftmaxWithLoss([pred, 'label'],
                                                  ['softmax', 'loss'])
            brew.accuracy(model, [softmax, "label"], "accuracy")
    #for test
    if train_test_deploy == 1:
        workspace.RunNetOnce(model.param_init_net)
        workspace.CreateNet(model.net)
        return model

    #for train
    loss_grad = {}
    losses_by_gpu = {}
    losses_by_gpu[0] = [loss]

    #add grad
    def create_grad(lossp):
        return model.ConstantFill(lossp, str(lossp) + "_grad", value=1.0)

    # Explicitly need to create gradients on GPU 0
    device = core.DeviceOption(model._device_type, 0)
    with core.DeviceScope(device):
        for l in losses_by_gpu[0]:
            lg = create_grad(l)
            loss_grad[str(l)] = str(lg)

        model.AddGradientOperators(loss_grad)
        #end add grad
        optimizer.add_weight_decay(model, args.weight_decay)
        stepsz = int(30 * args.epoch_size / 32)
        opt = optimizer.build_multi_precision_sgd(model,
                                                  args.base_learning_rate,
                                                  momentum=0.9,
                                                  nesterov=1,
                                                  policy="step",
                                                  stepsize=stepsz,
                                                  gamma=0.1)
        model._optimizer = opt

    workspace.RunNetOnce(model.param_init_net)
    workspace.CreateNet(model.net)
    return model
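The stepsize arithmetic above converts "drop the learning rate every 30 epochs" into iterations: epoch_size images per epoch at batch size 32. With a hypothetical ImageNet-sized epoch_size of 1281167:

    print(int(30 * 1281167 / 32))  # -> 1201094 iterations between 10x LR drops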
Example #27
def AddAccuracy(model, softmax, label, device_opts):
    with core.DeviceScope(device_opts):
        accuracy = brew.accuracy(model, [softmax, label], "accuracy")
        return accuracy
Example #29
def AddForwardPassOps(model, loss_scale, dtype):
    """Add forward pass ops and return a list of losses."""
    initializer = (pFP16Initializer
                   if dtype == DataType.FLOAT16 else Initializer)
    with brew.arg_scope([brew.conv, brew.fc],
                        WeightInitializer=initializer,
                        BiasInitializer=initializer):
        conv1 = brew.conv(model,
                          'data',
                          'conv1',
                          3,
                          32,
                          5,
                          pad=2,
                          weight_init=('GaussianFill', {
                              'std': 0.0001,
                              'mean': 0.0
                          }))
        pool1 = brew.max_pool(model, conv1, 'pool1', kernel=3, stride=2)
        relu1 = brew.relu(model, pool1, 'relu1')
        conv2 = brew.conv(model,
                          relu1,
                          'conv2',
                          32,
                          32,
                          5,
                          pad=2,
                          weight_init=('GaussianFill', {
                              'std': 0.01
                          }))
        conv2 = brew.relu(model, conv2, conv2)
        pool2 = brew.average_pool(model, conv2, 'pool2', kernel=3, stride=2)
        conv3 = brew.conv(model,
                          pool2,
                          'conv3',
                          32,
                          64,
                          5,
                          pad=2,
                          weight_init=('GaussianFill', {
                              'std': 0.01
                          }))
        conv3 = brew.relu(model, conv3, conv3)
        pool3 = brew.average_pool(model, conv3, 'pool3', kernel=3, stride=2)
        fc1 = brew.fc(model,
                      pool3,
                      'fc1',
                      64 * 3 * 3,
                      64,
                      weight_init=('GaussianFill', {
                          'std': 0.1
                      }))
        fc2 = brew.fc(model,
                      fc1,
                      'fc2',
                      64,
                      10,
                      weight_init=('GaussianFill', {
                          'std': 0.1
                      }))

    if dtype == DataType.FLOAT16:
        fc2 = model.net.HalfToFloat(fc2, fc2 + '_fp32')
    softmax, loss = model.SoftmaxWithLoss([fc2, 'label'], ['softmax', 'loss'])
    loss = model.Scale(loss, loss, scale=loss_scale)
    brew.accuracy(model, [softmax, 'label'], 'accuracy')
    return [loss]
Example #30
    def create_model_ops_testable(model, loss_scale, is_test=False):
        initializer = (PseudoFP16Initializer
                       if args.dtype == 'float16' else Initializer)

        with brew.arg_scope([brew.conv, brew.fc],
                            WeightInitializer=initializer,
                            BiasInitializer=initializer,
                            enable_tensor_core=args.enable_tensor_core,
                            float16_compute=args.float16_compute):

            if args.model == "cifar10":
                if args.image_size != 32:
                    log.warn("Cifar10 expects a 32x32 image.")
                pred = models.cifar10.create_cifar10(
                    model,
                    "data",
                    image_channels=args.num_channels,
                    num_classes=args.num_labels,
                    image_height=args.image_size,
                    image_width=args.image_size,
                )
            elif args.model == "resnet32x32":
                if args.image_size != 32:
                    log.warn("ResNet32x32 expects a 32x32 image.")
                pred = models.resnet.create_resnet32x32(
                    model,
                    "data",
                    num_layers=args.num_layers,
                    num_input_channels=args.num_channels,
                    num_labels=args.num_labels,
                    is_test=is_test)
            elif args.model == "resnet":
                if args.image_size != 224:
                    log.warn(
                        "ResNet expects a 224x224 image. input image = %d" %
                        args.image_size)
                pred = resnet.create_resnet50(
                    #args.layers,
                    model,
                    "data",
                    num_input_channels=args.num_channels,
                    num_labels=args.num_labels,
                    no_bias=True,
                    no_loss=True,
                )
            elif args.model == "vgg":
                if args.image_size != 224:
                    log.warn("VGG expects a 224x224 image.")
                pred = vgg.create_vgg(model,
                                      "data",
                                      num_input_channels=args.num_channels,
                                      num_labels=args.num_labels,
                                      num_layers=args.num_layers,
                                      is_test=is_test)
            elif args.model == "googlenet":
                if args.image_size != 224:
                    log.warn("GoogLeNet expects a 224x224 image.")
                pred = googlenet.create_googlenet(
                    model,
                    "data",
                    num_input_channels=args.num_channels,
                    num_labels=args.num_labels,
                    is_test=is_test)
            elif args.model == "alexnet":
                if args.image_size != 224:
                    log.warn("Alexnet expects a 224x224 image.")
                pred = alexnet.create_alexnet(
                    model,
                    "data",
                    num_input_channels=args.num_channels,
                    num_labels=args.num_labels,
                    is_test=is_test)
            elif args.model == "alexnetv0":
                if args.image_size != 224:
                    log.warn("Alexnet v0 expects a 224x224 image.")
                pred = alexnet.create_alexnetv0(
                    model,
                    "data",
                    num_input_channels=args.num_channels,
                    num_labels=args.num_labels,
                    is_test=is_test)
            else:
                raise NotImplementedError("Network {} not found.".format(
                    args.model))

        if args.dtype == 'float16':
            pred = model.net.HalfToFloat(pred, pred + '_fp32')

        softmax, loss = model.SoftmaxWithLoss([pred, 'label'],
                                              ['softmax', 'loss'])
        loss = model.Scale(loss, scale=loss_scale)
        brew.accuracy(model, [softmax, "label"], "accuracy")
        return [loss]
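Builders with this (model, loss_scale) signature are normally handed to caffe2.python.data_parallel_model, which calls them once per device and supplies loss_scale (commonly 1.0 / device count). A sketch, where train_model, add_image_input, and add_optimizer are hypothetical objects defined elsewhere:

    from caffe2.python import data_parallel_model

    data_parallel_model.Parallelize(
        train_model,
        input_builder_fun=add_image_input,               # hypothetical
        forward_pass_builder_fun=create_model_ops_testable,
        optimizer_builder_fun=add_optimizer,             # hypothetical
        devices=[0, 1],
    )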
Example #31
def add_accuracy(model, probs, label):
    accuracy = brew.accuracy(model, [probs, label], "accuracy")
    return accuracy
Example #32
def AddLeNetModel(model, data):
    # Head reconstructed to match the identical LeNet layers in Example #11
    # (the original snippet is truncated here).
    conv1 = brew.conv(model, data, 'conv1', 1, 20, 5)
    pool1 = brew.max_pool(model, conv1, 'pool1', kernel=2, stride=2)
    conv2 = brew.conv(model, pool1, 'conv2', 20, 50, 5)
    pool2 = brew.max_pool(model, conv2, 'pool2', kernel=2, stride=2)
    fc3 = brew.fc(model, pool2, 'fc3', dim_in=50 * 4 * 4, dim_out=500)
    fc3 = brew.relu(model, fc3, fc3)
    pred = brew.fc(model, fc3, 'pred', 500, 10)
    softmax = brew.softmax(model, pred, 'softmax')
    return softmax


softmax = AddLeNetModel(train_model, data)

##################################################################################
#### Step 3: Add training operators to the model
# TODO: use the optimizer class here instead of doing sgd by hand

xent = train_model.LabelCrossEntropy(['softmax', 'label'], 'xent')
loss = train_model.AveragedLoss(xent, 'loss')
brew.accuracy(train_model, ['softmax', 'label'], 'accuracy')
train_model.AddGradientOperators([loss])
opt = optimizer.build_sgd(train_model, base_learning_rate=0.1)
for param in train_model.GetOptimizationParamInfo():
    opt(train_model.net, train_model.param_init_net, param)

#model.Checkpoint([ITER] + model.params, [], db="mnist_lenet_checkpoint_%05d.lmdb", db_type="lmdb", every=20)
ITER = brew.iter(train_model, "iter")
train_model.Checkpoint([ITER] + train_model.params, [],
                       db="mnist_lenet_checkpoint_%05d.lmdb",
                       db_type="lmdb",
                       every=checkpoint_iters)

##################################################################################
#### Run the training procedure
Example #33
                        load_all=1))

##### Externally initialize params so we can extract gradients
# for i,op in enumerate(init_net_proto.op):
# 	param_name = op.output[0]
# 	if param_name != 'data':
# 		print "param_name:", param_name
# 		assert(op.arg[0].name == "shape")
# 		tags = (ParameterTags.WEIGHT if param_name.endswith("_w") else ParameterTags.BIAS)
# 		model.create_param(param_name=op.output[0], shape=op.arg[0].ints, initializer=initializers.ExternalInitializer(), tags=tags)

# Add the "training operators" to the model
softmax = model_defs.Add_CNN_M(model, 'data', device_opts)
xent = model.LabelCrossEntropy([softmax, 'label'], 'xent')
loss = model.AveragedLoss(xent, "loss")
accuracy = brew.accuracy(model, [softmax, 'label'], "accuracy")
model.AddGradientOperators([loss])

# Instantiate the test_dataset object
test_dataset = jdh.Jester_Dataset(dictionary_file=test_dictionary, seq_size=10)

# Prime the workspace with some data so we can run init net once
for image, label in test_dataset.read(batch_size=1):
    workspace.FeedBlob("data", image)
    workspace.FeedBlob("label", label)
    break

# run the param init network once
workspace.RunNetOnce(model.param_init_net)
# create the network
workspace.CreateNet(model.net, overwrite=True)