Ejemplo n.º 1
0
class TestDarts(unittest.TestCase):
    """Unit tests for the Darts trie wrapper, backed by a bundled fixture."""

    def setUp(self):
        """Build the instance under test from the mock data file next to this test."""
        data_path = os.path.join(os.path.dirname(__file__), 'mock_data.darts')
        self._ins = Darts(data_path)

    def test_common_prefix_search(self):
        """Every stored key that is a prefix of the query is returned, shortest first."""
        # Fixed: nose's `eq_` came from the long-abandoned nose project;
        # use unittest's native assertEqual (same expected/actual semantics).
        self.assertEqual(['star'], self._ins.common_prefix_search('star'))
        self.assertEqual(['star', 'star wars'], self._ins.common_prefix_search('star wars'))

    def test_longest_match(self):
        """The longest stored key that prefixes the query is returned."""
        self.assertEqual('star', self._ins.longest_match('star'))
        self.assertEqual('star wars', self._ins.longest_match('star wars'))
Ejemplo n.º 2
0
def training_phase(args, device, genotype):
	"""Train a DARTS network with a fixed genotype on CIFAR-10.

	Each epoch: evaluate on the test split, run one training pass,
	checkpoint the weights under ``args.save``, and step a cosine
	learning-rate schedule.  The model is wrapped in ``nn.DataParallel``
	over the GPUs named in ``args.gpus`` ("all" selects every visible GPU).
	"""
	# cuDNN autotuner on; seed both CPU and CUDA RNGs for repeatability.
	cudnn.enabled = True
	cudnn.benchmark = True
	torch.manual_seed(args.manual_seed)
	torch.cuda.manual_seed(args.manual_seed)

	criterion = nn.CrossEntropyLoss()
	logging.info("args = %s", args)

	# "all" -> device_ids=None lets DataParallel use every GPU;
	# otherwise parse the comma-separated id list.
	if args.gpus == "all":
		device_ids = None
	else:
		device_ids = [int(g.strip()) for g in args.gpus.split(",")]

	train_transform, test_transform = utils.data_transform_cifar10()
	train_set = torchvision.datasets.CIFAR10(root=args.data, train=True, transform=train_transform, download=True)
	test_set = torchvision.datasets.CIFAR10(root=args.data, train=False, transform=test_transform)

	model = Darts(args.init_channels, args.num_cells, args.num_nodes,
				  args.num_classes, criterion, found_genotype=genotype)
	model = nn.DataParallel(model, device_ids=device_ids).to(device)

	sgd = torch.optim.SGD(model.parameters(),
						  lr=args.learning_rate,
						  momentum=args.momentum,
						  weight_decay=args.weight_decay)
	# Only the weight optimizer is set; the architecture optimizer slot is None
	# because the genotype is already fixed.
	model.module.set_optimizers(None, sgd)

	train_queue = torch.utils.data.DataLoader(
		train_set, batch_size=args.batch_size, shuffle=True, pin_memory=True)
	test_queue = torch.utils.data.DataLoader(
		test_set, batch_size=args.batch_size, shuffle=True, pin_memory=True)

	scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
		sgd, float(args.epochs), eta_min=args.learning_rate_min)

	for epoch in range(args.epochs):
		logging.info("EPOCH %d", epoch)
		logging.info('Learning rate %e', sgd.param_groups[0]["lr"])

		# Validation runs before the training pass, so epoch 0 logs the
		# pre-training state of the network.
		valid_acc, valid_loss = evaluate(test_queue, model, device)
		logging.info('Validation Accuracy: %.2f', valid_acc)
		logging.info("Validation Loss: %.4f", valid_loss)

		train_acc, train_loss = train(train_queue, model, device)
		logging.info('Train Accuracy: %.2f', train_acc)
		logging.info("Train Loss: %.4f", train_loss)

		# Checkpoint every epoch, then advance the cosine schedule.
		utils.save(model, os.path.join(args.save, 'weights%d.pt' % epoch))
		scheduler.step()
		logging.info("-------------------------------------------------------------------")
Ejemplo n.º 3
0
 def setUp(self):
     """Create the Darts instance under test from the bundled fixture file."""
     fixture = os.path.join(os.path.dirname(__file__), 'mock_data.darts')
     self._ins = Darts(fixture)