Example #1
def test_using_device_with_name(device_instance1, device_instance2):
    if device_instance1 == device_instance2:
        return

    device1 = device_instance1
    device2 = device_instance2

    chainerx.set_default_device(device1)
    with chainerx.using_device(device2.name) as scope:
        assert chainerx.get_default_device() == device2
        assert scope.device is device2

    with chainerx.using_device(device2.backend.name, device2.index) as scope:
        assert chainerx.get_default_device() == device2
        assert scope.device is device2
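These tests pin down the contract of chainerx.using_device: it accepts a device name, or a backend name plus device index, switches the default device for the duration of the with block, and exposes the resolved device on the returned scope. A minimal usage sketch, assuming the stock native backend (the device names here are illustrative):

import chainerx

chainerx.set_default_device('native:0')
with chainerx.using_device('native:1') as scope:
    # Arrays created inside the block land on the temporary device.
    a = chainerx.ones((2, 3))
    assert a.device is scope.device
# On exit the previous default is restored.
assert chainerx.get_default_device().name == 'native:0'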
Example #2
def test_using_device(device_instance1, device_instance2):
    if device_instance1 == device_instance2:
        return

    device1 = device_instance1
    device2 = device_instance2

    chainerx.set_default_device(device1)
    with chainerx.using_device(device2) as scope:
        assert chainerx.get_default_device() is device2
        assert scope.device is device2

    scope = chainerx.using_device(device2)
    assert chainerx.get_default_device() == device1
    assert scope.device is device2
    with scope:
        assert chainerx.get_default_device() == device2
        assert scope.device is device2
    assert chainerx.get_default_device() == device1
    assert scope.device is device2
Example #3
def main():
    parser = argparse.ArgumentParser(
        description='Learning convnet from ILSVRC2012 dataset')
    parser.add_argument('train', help='Path to training image-label list file')
    parser.add_argument('val', help='Path to validation image-label list file')
    parser.add_argument('--batchsize',
                        '-B',
                        type=int,
                        default=32,
                        help='Learning minibatch size')
    parser.add_argument('--epoch',
                        '-E',
                        type=int,
                        default=10,
                        help='Number of epochs to train')
    parser.add_argument(
        '--iteration',
        '-I',
        type=int,
        default=None,
        help='Number of iterations to train. Epoch is ignored if specified.')
    parser.add_argument('--loaderjob',
                        '-j',
                        type=int,
                        help='Number of parallel data loading processes')
    parser.add_argument('--mean',
                        '-m',
                        default='mean.npy',
                        help='Mean file (computed by compute_mean.py)')
    parser.add_argument('--root',
                        '-R',
                        default='.',
                        help='Root directory path of image files')
    parser.add_argument('--val_batchsize',
                        '-b',
                        type=int,
                        default=250,
                        help='Validation minibatch size')
    parser.set_defaults(test=False)
    parser.add_argument('--device',
                        '-d',
                        default='native',
                        help='Device to use')

    args = parser.parse_args()

    chx.set_default_device(args.device)
    batch_size = args.batchsize
    eval_size = args.val_batchsize

    # Prepare model
    model = resnet50.ResNet50()

    # Prepare datasets and mean file
    mean = np.load(args.mean)
    train = PreprocessedDataset(args.train, args.root, mean, model.insize)
    test = PreprocessedDataset(args.val, args.root, mean, model.insize, False)
    train_iter = chainer.iterators.MultiprocessIterator(
        train, batch_size, n_processes=args.loaderjob)
    test_iter = chainer.iterators.MultiprocessIterator(
        test, eval_size, n_processes=args.loaderjob)

    N = len(train)

    # Train
    model.require_grad()

    it = 0
    epoch = 0
    is_finished = False
    start = time.time()

    while not is_finished:

        for i in range(0, N // batch_size):
            x, t = get_imagenet(train_iter)
            y = model(x)
            loss = compute_loss(y, t)

            loss.backward()
            model.update(lr=0.01)

            it += 1
            if args.iteration is not None:
                x_test, t_test = get_imagenet(test_iter)
                mean_loss, accuracy = evaluate(model, x_test, t_test,
                                               eval_size, batch_size)
                elapsed_time = time.time() - start
                print(
                    'iteration {}... loss={},\taccuracy={},\telapsed_time={}'.
                    format(it, mean_loss, accuracy, elapsed_time))
                if it >= args.iteration:
                    is_finished = True
                    break

        epoch += 1
        if args.iteration is None:
            x_test, t_test = get_imagenet(test_iter)
            mean_loss, accuracy = evaluate(model, x_test, t_test, eval_size,
                                           batch_size)
            elapsed_time = time.time() - start
            print('epoch {}... loss={},\taccuracy={},\telapsed_time={}'.format(
                epoch, mean_loss, accuracy, elapsed_time))
            if epoch >= args.epoch:
                is_finished = True
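The loop above depends on a get_imagenet helper that the snippet does not include. A plausible sketch under that assumption: it draws one minibatch from a chainer iterator and converts it to chainerx arrays on the current default device (the original's conversion may differ in detail):

def get_imagenet(data_iter):
    # One minibatch of (image, label) pairs -> two chainerx arrays.
    batch = data_iter.next()
    images = chx.asarray(np.stack([pair[0] for pair in batch]))
    labels = chx.asarray(np.array([pair[1] for pair in batch], dtype=np.int64))
    return images, labels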
Example #4
def global_set_gpu():
    mxnet.test_utils.set_default_context(mxnet.gpu(0))
    chainerx.set_default_device('cuda:0')
Example #5
def global_set_cpu():
    mxnet.test_utils.set_default_context(mxnet.cpu())
    chainerx.set_default_device('native')
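A sketch of how these two helpers might be used in a test module that compares MXNet and ChainerX results (assumes both libraries are importable; the GPU variant additionally needs a visible CUDA device):

global_set_cpu()
x = chainerx.ones((2, 2))
assert x.device.name == 'native:0'  # 'native' resolves to device index 0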
Example #6
def main():
    parser = argparse.ArgumentParser(
        description='Train a neural network on MNIST dataset')
    parser.add_argument('--batchsize',
                        '-B',
                        type=int,
                        default=100,
                        help='Batch size')
    parser.add_argument('--epoch',
                        '-E',
                        type=int,
                        default=20,
                        help='Number of epochs to train')
    parser.add_argument(
        '--iteration',
        '-I',
        type=int,
        default=None,
        help='Number of iterations to train. Epoch is ignored if specified.')
    parser.add_argument(
        '--data',
        '-p',
        default='mnist',
        help='Path to the directory that contains MNIST dataset')
    parser.add_argument('--device',
                        '-d',
                        default='native',
                        help='Device to use')
    parser.add_argument(
        '--eval-size',
        default=None,
        type=int,
        help='Number of samples to use from the test set for evaluation. '
        'None to use all.')
    args = parser.parse_args()

    chx.set_default_device(args.device)

    # Prepare dataset
    X, Y = get_mnist(args.data, 'train')
    X_test, Y_test = get_mnist(args.data, 't10k')

    # Prepare model
    model = MLP()

    # Training
    N = X.shape[0]  # TODO(beam2d): implement len
    # TODO(beam2d): support int32 indexing
    all_indices_np = np.arange(N, dtype=np.int64)
    batch_size = args.batchsize
    eval_size = args.eval_size

    # Train
    model.require_grad()

    it = 0
    epoch = 0
    is_finished = False
    start = time.time()

    while not is_finished:
        # TODO(beam2d): not supported in chx
        np.random.shuffle(all_indices_np)
        all_indices = chx.array(all_indices_np)

        for i in range(0, N, batch_size):
            indices = all_indices[i:i + batch_size]
            x = X.take(indices, axis=0)
            t = Y.take(indices, axis=0)

            y = model.forward(x)
            loss = compute_loss(y, t)

            loss.backward()
            model.update(lr=0.01)

            it += 1
            if args.iteration is not None:
                mean_loss, accuracy = evaluate(model, X_test, Y_test,
                                               eval_size, batch_size)
                elapsed_time = time.time() - start
                print(
                    'iteration {}... loss={},\taccuracy={},\telapsed_time={}'.
                    format(it, mean_loss, accuracy, elapsed_time))
                if it >= args.iteration:
                    is_finished = True
                    break

        epoch += 1
        if args.iteration is None:  # stop based on epoch, instead of iteration
            mean_loss, accuracy = evaluate(model, X_test, Y_test, eval_size,
                                           batch_size)
            elapsed_time = time.time() - start
            print('epoch {}... loss={},\taccuracy={},\telapsed_time={}'.format(
                epoch, mean_loss, accuracy, elapsed_time))
            if epoch >= args.epoch:
                is_finished = True
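Both MNIST examples call a compute_loss helper that is not part of the snippet. One plausible definition in pure chainerx ops, mean softmax cross-entropy computed via a one-hot mask over log_softmax (the class count of 10 is MNIST-specific):

def compute_loss(y, t):
    # y: (N, 10) logits, t: (N,) integer labels.
    score = chx.log_softmax(y, axis=1)
    mask = (t[:, chx.newaxis] == chx.arange(
        10, dtype=t.dtype)).astype(score.dtype)
    # Keep only the log-probability of the correct class, then average.
    return -(score * mask).sum() * (1.0 / y.shape[0])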
Example #7
def use(self):
    chainerx.set_default_device(self.device)
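A use method like this normally lives on a device wrapper whose self.device holds a chainerx.Device. A self-contained sketch of that surrounding shape (the class name and constructor are illustrative, not the original's):

import chainerx

class DeviceWrapper:
    # Illustrative wrapper; the original class is not shown.
    def __init__(self, name):
        self.device = chainerx.get_device(name)

    def use(self):
        chainerx.set_default_device(self.device)

DeviceWrapper('native:0').use()
assert chainerx.get_default_device().name == 'native:0'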
Example #8
def main():
    parser = argparse.ArgumentParser(
        description='Train a neural network on MNIST dataset')
    parser.add_argument(
        '--batchsize', '-B', type=int, default=100, help='Batch size')
    parser.add_argument(
        '--epoch', '-E', type=int, default=20,
        help='Number of epochs to train')
    parser.add_argument(
        '--iteration', '-I', type=int, default=None,
        help='Number of iterations to train. Epoch is ignored if specified.')
    parser.add_argument(
        '--data', '-p', default='mnist',
        help='Path to the directory that contains MNIST dataset')
    parser.add_argument(
        '--device', '-d', default='native', help='Device to use')
    parser.add_argument(
        '--eval-size', default=None, type=int,
        help='Number of samples to use from the test set for evaluation. '
        'None to use all.')
    args = parser.parse_args()

    chx.set_default_device(args.device)

    # Prepare dataset
    X, Y = get_mnist(args.data, 'train')
    X_test, Y_test = get_mnist(args.data, 't10k')

    # Prepare model
    model = MLP()

    # Training
    N = X.shape[0]   # TODO(beam2d): implement len
    # TODO(beam2d): support int32 indexing
    all_indices_np = np.arange(N, dtype=np.int64)
    batch_size = args.batchsize
    eval_size = args.eval_size

    # Train
    model.require_grad()

    it = 0
    epoch = 0
    is_finished = False
    start = time.time()

    while not is_finished:
        # TODO(beam2d): not supported in chx
        np.random.shuffle(all_indices_np)
        all_indices = chx.array(all_indices_np)

        for i in range(0, N, batch_size):
            indices = all_indices[i:i + batch_size]
            x = X.take(indices, axis=0)
            t = Y.take(indices, axis=0)

            y = model.forward(x)
            loss = compute_loss(y, t)

            loss.backward()
            model.update(lr=0.01)

            it += 1
            if args.iteration is not None:
                mean_loss, accuracy = evaluate(
                    model, X_test, Y_test, eval_size, batch_size)
                elapsed_time = time.time() - start
                print(
                    'iteration {}... loss={},\taccuracy={},\telapsed_time={}'
                    .format(it, mean_loss, accuracy, elapsed_time))
                if it >= args.iteration:
                    is_finished = True
                    break

        epoch += 1
        if args.iteration is None:  # stop based on epoch, instead of iteration
            mean_loss, accuracy = evaluate(
                model, X_test, Y_test, eval_size, batch_size)
            elapsed_time = time.time() - start
            print(
                'epoch {}... loss={},\taccuracy={},\telapsed_time={}'
                .format(epoch, mean_loss, accuracy, elapsed_time))
            if epoch >= args.epoch:
                is_finished = True
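The evaluate helper is likewise assumed by both MNIST examples. A sketch consistent with how the loops call it, where an eval_size of None means the whole test set:

def evaluate(model, X_test, Y_test, eval_size, batch_size):
    N_test = X_test.shape[0] if eval_size is None else eval_size

    with chx.no_backprop_mode():
        total_loss = chx.array(0, dtype=chx.float32)
        num_correct = chx.array(0, dtype=chx.int64)
        for i in range(0, N_test, batch_size):
            x = X_test[i:min(i + batch_size, N_test)]
            t = Y_test[i:min(i + batch_size, N_test)]

            y = model.forward(x)
            total_loss += compute_loss(y, t) * len(t)
            num_correct += (y.argmax(axis=1).astype(t.dtype)
                            == t).astype(chx.int32).sum()

    mean_loss = float(total_loss) / N_test
    accuracy = int(num_correct) / N_test
    return mean_loss, accuracy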
Example #9
def test_default_device_with_name(device_instance1):
    device = device_instance1
    chainerx.set_default_device(device.name)
    assert chainerx.get_default_device() is device
Example #10
def restore_device():
    chainerx.set_default_device(device)
Example #11
def restore_context():
    chainerx.set_global_default_context(global_context)
    chainerx.set_default_context(context)
    chainerx.set_default_device(device)
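Helpers named restore_device and restore_context read like teardown closures: the enclosing fixture captures the current defaults before a test mutates them, then restores everything afterwards. A sketch of that pattern as a pytest fixture (the fixture name is illustrative):

import pytest
import chainerx

@pytest.fixture
def preserve_chainerx_defaults():
    global_context = chainerx.get_global_default_context()
    context = chainerx.get_default_context()
    device = chainerx.get_default_device()
    yield
    chainerx.set_global_default_context(global_context)
    chainerx.set_default_context(context)
    chainerx.set_default_device(device)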
Example #12
def main():
    parser = argparse.ArgumentParser(description='Compare chainer vs chainerx')
    parser.add_argument('--batchsize', '-b', type=int, default=100)
    parser.add_argument('--epoch', '-e', type=int, default=10)
    parser.add_argument('--gpu', '-g', type=int, default=0, choices=[-1, 0, 1, 2, 3])
    parser.add_argument('--chxon', '-c', type=int, default=1)
    args = parser.parse_args()

    # setup
    start = time.time()
    chx_available = args.chxon == 1
    batch_size = args.batchsize

    # get MNIST
    train, test = chainer.datasets.get_mnist()

    if chx_available:
        device_name = 'cuda:{}'.format(args.gpu)
        # data
        with chx.using_device(device_name):
            train_images, train_labels = map(chx.asarray, train._datasets)
            test_images, test_labels = map(chx.asarray, test._datasets)
        # model
        chx.set_default_device(device_name)
        model = MLP(n_units=1000, n_out=10)
        optimizer = SGD(lr=0.01)
    else:
        device_name = args.gpu
        # data
        train_iter = chainer.iterators.SerialIterator(train, batch_size)
        test_iter = chainer.iterators.SerialIterator(
            test, batch_size, repeat=False, shuffle=False)
        # model
        model = MLP_chain(n_units=1000, n_out=10)
        model.to_gpu()
        chainer.cuda.get_device_from_id(device_name).use()
        optimizer = chainer.optimizers.SGD(lr=0.01)

    optimizer.setup(model)

    N_train, N_test = len(train), len(test)
    all_indices_np = np.arange(N_train, dtype=np.int64)  # for chainerx
    epoch = 0

    while epoch < args.epoch:
        epoch += 1
        if chx_available:
            np.random.shuffle(all_indices_np)
            all_indices = chx.array(all_indices_np)

        for i in range(0, N_train, batch_size):
            # time 1
            if chx_available:
                indices = all_indices[i:i + batch_size]
                x = train_images.take(indices, axis=0)
                t = train_labels.take(indices, axis=0)
            else:
                batch = train_iter.next()
                x, t = convert.concat_examples(batch, device=device_name)

            y = model.forward(x)  # time 2

            # time 3
            if chx_available:
                loss = compute_loss(y, t)
            else:
                loss = F.softmax_cross_entropy(y, t)
                model.cleargrads()

            loss.backward()  # time 4
            optimizer.update()  # time 5

        if chx_available:
            with chx.no_backprop_mode():
                total_loss = chx.array(0, dtype=chx.float32)
                num_correct = chx.array(0, dtype=chx.int64)
                for i in range(0, N_test, batch_size):
                    x = test_images[i:min(i + batch_size, N_test)]
                    t = test_labels[i:min(i + batch_size, N_test)]

                    y = model.forward(x)
                    total_loss += compute_loss(y, t) * len(t)
                    num_correct += (y.argmax(axis=1).astype(t.dtype)
                                    == t).astype(chx.int32).sum()
        else:
            test_iter.reset()
            with chainer.using_config('enable_backprop', False):
                total_loss = 0
                num_correct = 0
                for batch in test_iter:
                    x, t = convert.concat_examples(batch, device=device_name)

                    y = model.forward(x)
                    total_loss += float(
                        F.softmax_cross_entropy(y, t).array) * len(t)
                    num_correct += float(F.accuracy(y, t).array) * len(t)

        mean_loss = float(total_loss) / N_test
        accuracy = float(num_correct) / N_test
        elapsed_time = time.time() - start
        print('epoch {} ... loss={}, accuracy={}, elapsed_time={}'.format(
            epoch, mean_loss, accuracy, elapsed_time))
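The chainerx branch above assumes an SGD wrapper with setup and update methods over a model exposing chainerx parameters; neither is shown. A sketch of the vanilla update such code implies (assumes model.params yields chainerx arrays with require_grad() enabled):

class SGD:
    # Illustrative vanilla SGD; the original class is not shown.
    def __init__(self, lr):
        self.lr = lr

    def setup(self, model):
        self.model = model

    def update(self):
        for param in self.model.params:
            # Step on a grad-stopped view so the update itself is not
            # recorded in the autograd graph, then clear the gradient.
            p = param.as_grad_stopped()
            p -= self.lr * param.grad.as_grad_stopped()
            param.cleargrad()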