Example #1
    def test_net_transformer_function(self):
        devices = [1, 2, 3]

        def add_input_ops(model):
            model.param_init_net.UniformFill([], ["data"], shape=[32, 8])

        def add_optimizer(model):
            optimizer.build_sgd(model, 0.1)

        def add_model_ops(model, loss_scale):
            fc1 = brew.fc(model, "data", "fc1", dim_in=8, dim_out=8)
            return [fc1]

        kwargs = {
            'input_builder_fun': add_input_ops,
            'forward_pass_builder_fun': add_model_ops,
            'devices': devices,
        }

        # assert that the transformer is called for both train and test cases
        transform = Mock()
        kwargs['net_transformer_fun'] = transform
        model = model_helper.ModelHelper(name="r", init_params=False)
        data_parallel_model.Parallelize_CPU(model, **kwargs)
        self.assertTrue(transform.called)
        self.assertEqual(transform.call_count, 1)

        transform = Mock()
        kwargs['net_transformer_fun'] = transform
        kwargs['optimizer_builder_fun'] = add_optimizer
        model = model_helper.ModelHelper(name="r", init_params=True)
        data_parallel_model.Parallelize_CPU(model, **kwargs)
        self.assertTrue(transform.called)
        self.assertEqual(transform.call_count, 1)
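Note: `transform` above is a `Mock`, so it accepts any call signature. As a minimal sketch of a real transformer, assuming the Caffe2 convention that the callback receives the model plus device information (verify the exact signature against your `data_parallel_model` version):

def transform_nets(model, num_devices, device_prefix, device_type):
    # Illustrative only: log every operator type in the parallelized net.
    for op in model.net.Proto().op:
        print(op.type)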
Example #2
    def test_checkpoint_params(self):
        def add_input_ops(model):
            pass

        def add_model_ops(model, loss_scale):
            model.NHWC2NCHW("data", "data_nchw")
            model.Conv("data_nchw",
                       'conv1',
                       3,
                       64,
                       weight_init=("MSRAFill", {}),
                       kernel=7,
                       stride=2,
                       pad=3,
                       no_bias=0)
            model.SpatialBN('conv1', 'conv1_spatbn_relu', 64, epsilon=1e-3)
            model.Relu('conv1_spatbn_relu', 'conv1_spatbn_relu')
            model.MaxPool('conv1_spatbn_relu', 'pool1', kernel=3, stride=2)
            model.FC('pool1', 'fc', dim_in=(64 * 56 * 56), dim_out=100)
            model.Sigmoid('fc', 'fc_sigm')
            model.Softmax('fc_sigm', 'softmax')
            model.LabelCrossEntropy(['softmax', 'label'], 'xent')
            loss = model.AveragedLoss('xent', 'loss')

            # Add a duplicate param init to ensure it does not cause issues
            model.param_init_net.ConstantFill([], ["fc_w"],
                                              shape=((64 * 56 * 56), 1000))
            return [loss]

        def add_optimizer(model):
            optimizer.build_sgd(model, 0.1, policy="fixed", momentum=0.9)

        model = cnn.CNNModelHelper(
            order="NHWC",
            name="test",
        )
        data_parallel_model.Parallelize_CPU(
            model,
            input_builder_fun=add_input_ops,
            forward_pass_builder_fun=add_model_ops,
            optimizer_builder_fun=add_optimizer,
            devices=[1, 2, 3],
        )

        # Only cpu_1 params should be returned (cpu_1 is the first device)
        checkpoint_params = data_parallel_model.GetCheckpointParams(model)
        for p in model.GetParams("cpu_1/"):
            self.assertTrue(p in checkpoint_params)
            self.assertTrue(p + "_momentum" in checkpoint_params)
        for p in model.GetParams("cpu_2/"):
            self.assertFalse(p in checkpoint_params)
        self.assertTrue(
            core.BlobReference("cpu_1/fc_w_momentum") in checkpoint_params)
        for c in model.GetComputedParams("cpu_1/"):
            self.assertTrue(c in checkpoint_params)
        for c in model.GetComputedParams("cpu_2/"):
            self.assertFalse(c in checkpoint_params)
        self.assertFalse(core.BlobReference("cpu_1/data") in checkpoint_params)
        self.assertTrue(
            core.BlobReference("optimizer_iteration") in checkpoint_params)
Example #3
    def test_net_conversion_and_append_net(self):
        other = model_helper.ModelHelper()
        fc1 = brew.fc(other,
                      "data",
                      "other_fc1",
                      dim_in=3 * 227 * 227,
                      dim_out=10)
        fc2 = brew.fc(other, fc1, "other_fc2", dim_in=10, dim_out=10)
        brew.fc(other, fc2, "other_fc3", dim_in=10, dim_out=10)

        def add_input_ops(model):
            model.net.UniformFill([], ["data"], shape=[4, 227, 227, 3])
            model.net.UniformFill([], ["label"], shape=[4])

        def add_model_ops(model, loss_scale):
            model.NHWC2NCHW("data", "data_nchw")
            model.Conv("data_nchw",
                       'conv1',
                       3,
                       64,
                       weight_init=("MSRAFill", {}),
                       kernel=7,
                       stride=2,
                       pad=3,
                       no_bias=0)
            model.SpatialBN('conv1', 'conv1_spatbn_relu', 64, epsilon=1e-3)
            model.Relu('conv1_spatbn_relu', 'conv1_spatbn_relu')
            model.MaxPool('conv1_spatbn_relu', 'pool1', kernel=3, stride=2)
            model.FC('pool1', 'fc', dim_in=(64 * 56 * 56), dim_out=10)

            # Append the net and param_init_net of the other model
            appendnet = data_parallel_model.ConvertNetForDevice(other.net)
            model.net.AppendNet(appendnet)

            model.param_init_net.AppendNet(
                data_parallel_model.ConvertNetForDevice(other.param_init_net))

            model.Sigmoid('fc', 'fc_sigm')
            model.Softmax('fc_sigm', 'softmax')
            loss = model.AveragedLoss('softmax', 'loss')
            return [loss]

        def add_optimizer(model):
            optimizer.build_sgd(model, 0.1, policy="fixed", momentum=0.9)

        model = cnn.CNNModelHelper(
            order="NCHW",
            name="test",
        )
        data_parallel_model.Parallelize_CPU(
            model,
            input_builder_fun=add_input_ops,
            forward_pass_builder_fun=add_model_ops,
            optimizer_builder_fun=add_optimizer,
            devices=range(4))

        # Just create and run net and confirm no exception is thrown
        workspace.RunNetOnce(model.param_init_net)
        workspace.CreateNet(model.net)
        workspace.RunNet(model.net)
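ConvertNetForDevice clones the other model's nets so their blob references can be remapped under each device's name scope when appended, which is what lets the same auxiliary net run once per device. A quick sanity check, as a sketch (the exact prefix format depends on your Caffe2 version):

converted = data_parallel_model.ConvertNetForDevice(other.net)
print(converted.Proto())  # blob names should carry a device-scoped prefix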
Example #4
        def run(comm_rank, comm_size, tmpdir):
            def add_input_ops(model):
                pass

            def add_model_ops(model, loss_scale):
                return []

            def add_optimizer(model):
                pass

            workspace.ResetWorkspace()
            store_handler = "store_handler"
            workspace.RunOperatorOnce(
                core.CreateOperator(
                    "FileStoreHandlerCreate",
                    [],
                    [store_handler],
                    path=tmpdir))
            rendezvous = dict(
                kv_handler=store_handler,
                shard_id=comm_rank,
                num_shards=comm_size,
                engine='GLOO',
            )

            model = cnn.CNNModelHelper(
                order="NHWC",
                name="test",
            )
            # Set network timeout to 2 seconds, and add a 3 seconds
            # sleep for 1 host.  Make sure there is no timeout on the
            # second RunNet.
            data_parallel_model._DEFAULT_TIMEOUT_SEC = 2
            data_parallel_model.Parallelize_CPU(
                model,
                input_builder_fun=add_input_ops,
                forward_pass_builder_fun=add_model_ops,
                optimizer_builder_fun=add_optimizer,
                devices=[1, 2, 3],
                rendezvous=rendezvous,
                barrier_net_timeout_sec=5
            )
            data_parallel_model.RunInitNet(model)
            data_parallel_model.RunNet(model, 2)
            if comm_rank == 0:
                time.sleep(data_parallel_model._DEFAULT_TIMEOUT_SEC)
            data_parallel_model.RunNet(model, 2)
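The run function above is meant to be invoked once per rank. A minimal sketch of launching it for two ranks in separate processes, assuming a shared temporary directory backs the file-store rendezvous:

import tempfile
from multiprocessing import Process

# Hypothetical two-rank launch; each process plays one shard.
with tempfile.TemporaryDirectory() as tmpdir:
    procs = [Process(target=run, args=(rank, 2, tmpdir)) for rank in range(2)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()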
Example #5
        def run(comm_rank, comm_size, tmpdir):
            def add_input_ops(model):
                pass

            def add_model_ops(model, loss_scale):
                return []

            def add_optimizer(model):
                pass

            store_handler = "store_handler"
            workspace.RunOperatorOnce(
                core.CreateOperator(
                    "FileStoreHandlerCreate",
                    [],
                    [store_handler],
                    path=tmpdir))
            rendezvous = dict(
                kv_handler=store_handler,
                shard_id=comm_rank,
                num_shards=comm_size,
                engine='GLOO',
            )

            model = cnn.CNNModelHelper(
                order="NHWC",
                name="test",
            )
            data_parallel_model.Parallelize_CPU(
                model,
                input_builder_fun=add_input_ops,
                forward_pass_builder_fun=add_model_ops,
                optimizer_builder_fun=add_optimizer,
                devices=[1, 2, 3],
                rendezvous=rendezvous
            )
            data_parallel_model.RunInitNet(model)

            for _ in range(2):
                data_parallel_model.Synchronize(model)
Example #6
# Imports assumed by this example (vgg19 is the author's local model module)
import os
import sys
import time

import numpy as np

from caffe2.python import brew, model_helper, workspace
from caffe2.python import data_parallel_model as dpm

import vgg19


def Train_Model(args):

    # Get the path of the current directory
    dir_path = os.path.dirname(os.path.realpath(__file__))
    # Build the dataset folder paths
    current_folder = os.path.join(dir_path, '../../', 'datasets',
                                  'blood-cells')
    data_folder = os.path.join(current_folder, 'dataset2-master')
    root_folder = current_folder

    db_missing = False

    # find the dataset folder
    if not os.path.exists(data_folder):
        os.makedirs(data_folder)
        print("Data folder not found; created: {}".format(data_folder))

    # Look for existing database: lmdb
    if os.path.exists(os.path.join(data_folder,'blood_cells_train_lmdb')):
        print("lmdb train db found!")
    else:
        db_missing = True
        
    if os.path.exists(os.path.join(data_folder,'blood_cells_test_lmdb')):
        print("lmdb test db found!")
    else:
        db_missing = True

    if db_missing:
        print("DB is not found. Please fix file paths")
        sys.exit()

    # prepare the databases for training
    train_data_db = os.path.join(data_folder,'blood_cells_train_lmdb')
    test_data_db = os.path.join(data_folder, 'blood_cells_test_lmdb')

    arg_scope = {
        "order": "NCHW",
        "use_cudnn": args.gpu,
    }
    train_model = model_helper.ModelHelper(name="vgg19_train", arg_scope=arg_scope)

    reader = train_model.CreateDB(
        "train_reader",
        db=train_data_db,
        db_type='lmdb',
    )

    # Training hyper-parameters
    devices = list(range(args.shards))

    train_data_count = 9893
    # Assumed test-set size; adjust to match your test LMDB
    test_data_count = 2487

    batch_per_device = 16
    total_batch_size = batch_per_device * len(devices)

    num_labels = 4

    base_learning_rate = .001 * total_batch_size

    weight_decay = (5 * 10**(-4))

    stepsize = int(2 * train_data_count / total_batch_size)

    def add_image_input_ops(model):
        '''
        The image input operator loads image and label data from the reader and
        applies transformations to the images (random cropping, mirroring, ...).
        '''
        data, label = brew.image_input(
            model,
            reader,
            ["data", "label"],
            batch_size=batch_per_device,
            # mean/std normalization is disabled here; uncomment to subtract a
            # common mean and divide by a fixed standard deviation
            # mean=128.,
            # std=128.,
            # rescale each image so its shorter side matches a common size
            scale=224,
            # crop each image to a square of exact dimensions
            crop=224,
            # not running in test mode
            is_test=False,
            # mirroring of the images will occur randomly
            mirror=1
        )
        # prevent back-propagation: optional performance improvement; may not be observable at small scale
        data = model.StopGradient(data, data)

    def create_vgg_model_ops(model, loss_scale=1.0):
        [softmax, loss] = vgg19.create_VGG19(model, "data", num_labels, "label")

        prefix = model.net.Proto().name
        loss = model.net.Scale(loss, prefix + "_loss", scale=loss_scale)
        brew.accuracy(model, [softmax, "label"], prefix + "_accuracy")
        return [loss]

    def add_parameter_update_ops(model):
        brew.add_weight_decay(model, weight_decay)
        iteration = brew.iter(model, "iter")  # avoid shadowing the builtin iter
        lr = model.net.LearningRate(
            [iteration],
            "lr",
            base_lr=base_learning_rate,
            policy="step",
            stepsize=stepsize,
            gamma=0.1,
        )
        for param in model.GetParams():
            param_grad = model.param_to_grad[param]
            param_momentum = model.param_init_net.ConstantFill(
                [param], param + '_momentum', value=0.0
            )

            # Update param_grad and param_momentum in place
            model.net.MomentumSGDUpdate(
                [param_grad, param_momentum, lr, param],
                [param_grad, param_momentum, param],
                momentum=0.9,
                # Nesterov momentum typically works slightly better than
                # standard momentum
                nesterov=1,
            )

    def accuracy(model):
        accuracies = []
        prefix = model.net.Proto().name
        device_prefix = 'gpu' if args.gpu else 'cpu'
        for device in model._devices:
            blob = workspace.FetchBlob(
                "{}_{}/{}_accuracy".format(device_prefix, device, prefix))
            accuracies.append(blob.item())  # np.asscalar is deprecated
        return np.average(accuracies)

    if args.gpu:
        dpm.Parallelize_GPU(
            train_model,
            input_builder_fun=add_image_input_ops,
            forward_pass_builder_fun=create_vgg_model_ops,
            param_update_builder_fun=add_parameter_update_ops,
            devices=devices,
            dynamic_memory_management=True,
        )
    else:
        dpm.Parallelize_CPU(
            train_model,
            input_builder_fun=add_image_input_ops,
            forward_pass_builder_fun=create_vgg_model_ops,
            param_update_builder_fun=add_parameter_update_ops,
            devices=devices,
            optimize_gradient_memory=True,
        )

    workspace.RunNetOnce(train_model.param_init_net)
    workspace.CreateNet(train_model.net)

    test_model = model_helper.ModelHelper(name="vgg_test", arg_scope=arg_scope, init_params=False)

    reader = test_model.CreateDB(
        "test_reader",
        db=test_data_db,
        db_type='lmdb',
    )

    if args.gpu:    
        # Validation is parallelized across devices as well
        dpm.Parallelize_GPU(
            test_model,
            input_builder_fun=add_image_input_ops,
            forward_pass_builder_fun=create_vgg_model_ops,
            param_update_builder_fun=None,
            devices=devices,
        )
    else:
        dpm.Parallelize_CPU(
            test_model,
            input_builder_fun=add_image_input_ops,
            forward_pass_builder_fun=create_vgg_model_ops,
            param_update_builder_fun=None,
            devices=devices,
        )

    workspace.RunNetOnce(test_model.param_init_net)
    workspace.CreateNet(test_model.net)

    # Start looping through epochs where we run the batches of images to cover the entire dataset
    # Usually you would want to run a lot more epochs to increase your model's accuracy
    num_epochs = 20
    for epoch in range(num_epochs):
        # Split up the images evenly: total images / batch size
        num_iters = int(train_data_count / total_batch_size)
        for i in range(num_iters):
            # Stopwatch start!
            t1 = time.time()
            # Run this iteration!
            workspace.RunNet(train_model.net.Proto().name)
            t2 = time.time()
            dt = t2 - t1
            
            # Stopwatch stopped! How'd we do?
            print((
                "Finished iteration {:>" + str(len(str(num_iters))) + "}/{}" +
                " (epoch {:>" + str(len(str(num_epochs))) + "}/{})" + 
                " ({:.2f} images/sec)").
                format(i + 1, num_iters, epoch + 1, num_epochs, total_batch_size / dt))
            
            # Get the average accuracy for the training model
            train_accuracy = accuracy(train_model)
        
        # Run the test model and assess accuracy
        test_accuracies = []
        for _ in range(test_data_count // total_batch_size):
            # Run the test model
            workspace.RunNet(test_model.net.Proto().name)
            test_accuracies.append(accuracy(test_model))
        test_accuracy = np.average(test_accuracies)

        print(
            "Train accuracy: {:.3f}, test accuracy: {:.3f}".
            format(train_accuracy, test_accuracy))
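Train_Model only reads args.gpu and args.shards, so any argparse namespace with those attributes works. A minimal sketch of a command-line entry point under that assumption (flag names are illustrative):

import argparse

if __name__ == '__main__':
    # Hypothetical CLI wrapper around Train_Model.
    parser = argparse.ArgumentParser(
        description="Train VGG19 on the blood-cells dataset")
    parser.add_argument("--gpu", action="store_true",
                        help="parallelize across GPUs with cuDNN")
    parser.add_argument("--shards", type=int, default=1,
                        help="number of devices to parallelize over")
    Train_Model(parser.parse_args())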