Code example #1
def main():
    args = arg_parser()

    if args.mode == "train":
        env = environment.make(args.env, args)
        if args.networks == "MLP":
            nn = MLP(env.observation_space.shape[0], env.action_space,
                     args.n_frames)
        elif args.networks == "CONV":
            nn = CONV(args.n_frames, env.action_space)

        # optimizer shared by all training workers
        optimizer = SharedAdam(nn.parameters())

        # a separate process runs the test loop on the shared model
        threads = []
        thread = mp.Process(target=test, args=(args, nn))
        thread.start()
        threads.append(thread)

        # training workers, all updating the same model with the shared optimizer
        for i in range(args.n_workers):
            thread = mp.Process(target=train, args=(i, args, nn, optimizer))
            thread.start()
            threads.append(thread)

        for thread in threads:
            thread.join()
    elif args.mode == "test":
        evaluate(args)
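The launcher above passes one model and one SharedAdam optimizer to every worker process; for updates made in one process to be visible in the others, the parameters have to live in shared memory. A minimal, self-contained sketch of that mechanism with `torch.multiprocessing` (the `TinyMLP` and `worker` below are hypothetical stand-ins, not the project's classes):

import torch
import torch.nn as nn
import torch.multiprocessing as mp


class TinyMLP(nn.Module):  # hypothetical stand-in for the project's MLP
    def __init__(self):
        super().__init__()
        self.fc = nn.Linear(4, 2)

    def forward(self, x):
        return self.fc(x)


def worker(rank, model):
    # every process sees the same underlying parameter storage
    with torch.no_grad():
        model.fc.weight.add_(1.0)


if __name__ == "__main__":
    model = TinyMLP()
    model.share_memory()  # place parameters in shared memory before starting workers
    procs = [mp.Process(target=worker, args=(r, model)) for r in range(2)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()
    print(model.fc.weight)  # reflects the in-place updates made by the workers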
Code example #2
# clf.avgpool = nn.AdaptiveAvgPool2d(1)
# for param in clf.parameters():
#     param.requires_grad = False
# clf.to(device)
# clf.eval()
# clf_norm = Normalization(IMAGENET_MEAN, IMAGENET_STD)
# clf_norm.to(device)
# clf_norm.eval()
"""fixed"""

net = MLP(n_layers=args.n_layers, width=args.width)
net.to(device)
net = nn.DataParallel(net)

criterion = nn.CrossEntropyLoss().to(device)
optimizer = optim.Adam(net.parameters(), lr=args.lr, betas=(args.momentum, 0.999), weight_decay=args.wd)
# scheduler = lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.1)

# ckpt = torch.load(args.checkpoint)
# net.load_state_dict(ckpt['model'])
# optimizer.load_state_dict(ckpt['optimizer'])
# train_loss = ckpt['train_loss']
# test_loss = ckpt['test_loss']
# test_acc = ckpt['test_acc']
# epochs = ckpt['epoch']

if not os.path.exists('logs/'):
    os.makedirs('logs/')
log_name = 'logs/' + args.name + '_' + str(args.seed) + '.csv'
if not os.path.exists(log_name):
    with open(log_name, 'w') as log_file:
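The snippet is cut off while opening the CSV log file. A typical continuation simply writes a header row once; the column names below are a guess based on the commented-out checkpoint keys above, not the project's actual code:

        # hypothetical continuation: write the CSV header once
        log_file.write('epoch,train_loss,test_loss,test_acc\n')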
Code example #3
import torch
from tqdm import trange

from loss import cross_entropy
from utils import visualize_3D
from torch.utils.tensorboard import SummaryWriter

writer = SummaryWriter()

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = MLP()
model.to(device)  # move the model to the same device as the input batches
print(model)
dataset = MyDataset()
trainloader = torch.utils.data.DataLoader(dataset,
                                          batch_size=128,
                                          shuffle=False)
criterion = cross_entropy
# criterion = FocalLoss(num_classes=1)

optimizer = torch.optim.SGD(model.parameters(), lr=1e-2, momentum=0.9)
# loop over the dataset multiple times

epochs = 50


def train():
    for epoch in trange(epochs):  # trange provides the epoch progress bar
        running_loss = 0.0
        for i, data in enumerate(trainloader, 0):
            inputs, labels = data
            inputs, labels = inputs.to(device), labels.to(device)
            # zero the parameter gradients
            optimizer.zero_grad()
            # forward + backward + optimize
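The loop body is truncated right after the `# forward + backward + optimize` comment. A minimal completion of that step, assuming `criterion` returns a scalar loss and reusing the `writer` created above:

            # hypothetical completion of the truncated step above
            outputs = model(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()

            running_loss += loss.item()
            writer.add_scalar('train/loss', loss.item(), epoch * len(trainloader) + i)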
Code example #4
def train_dpi(args):
    if "Custom-CartPole" in args.task:
        # env = make_CartPole_env()
        env = gym.make(args.task)
        state_shape = env.observation_space.shape or env.observation_space.n
        action_shape = 2
    else:
        env = make_minigrid_env(
            args.task, flatten=True)  # FIXME: set flatten=False when using a ConvNet
        state_shape = env.observation_space.shape or env.observation_space.n

        if "Empty" in args.task:
            action_shape = 3  # selecting Basic actions in minigrid
        else:
            action_shape = 6  # all except done

    print("Observations shape:", state_shape)
    print("Actions shape:", action_shape)

    # FIXME: seeding; the envs do not use the seed on reset
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)

    env.seed(args.seed)
    # train_envs.seed(args.seed)
    # test_envs.seed(args.seed)

    net = MLP(state_shape, action_shape)
    # net = ConvNet(state_shape, action_shape)

    optim = torch.optim.Adam(net.parameters(), lr=args.lr)

    # define policy
    policy = DPI(net,
                 optim,
                 discount_factor=0.99,
                 train_epochs=args.policy_epoch,
                 batch_size=args.batch_size)

    # load a previous policy
    if args.resume_path:
        policy.load_state_dict(
            torch.load(args.resume_path, map_location=args.device))
        print("Loaded agent from: ", args.resume_path)

    if "MiniGrid" in args.task:
        tabular_env = Tabular_Minigrid(env)
    else:
        tabular_env = Tabular_CartPole(env)

    print(
        f"Num states: {tabular_env.nr_states}, Q table entries: {tabular_env.q.numel()}"
    )

    # Training loop
    steps = args.step
    scores_steps = np.zeros(steps)
    for s in range(steps):
        print(f"STEP {s}")
        # collect qs
        tabular_env.__init_q_states__()
        tabular_env.travel_state_actions(policy)

        # net = MLP(state_shape, action_shape)
        # optim = torch.optim.Adam(net.parameters(), lr=args.lr)
        # policy = DPI(net, optim, discount_factor=0.99,train_epochs=args.policy_epoch,batch_size=args.batch_size)

        # learn new policy
        policy.learn(tabular_env)

        # evaluation
        scores = evaluate(policy, env, render=args.render)
        scores_steps[s] = scores.mean()
        print(
            f"Eval Score: {scores.mean():.2f} +- {scores.std():.2f} Total registered q: {tabular_env.q[tabular_env.q!=-1].sum().item()}\n"
        )

    plt.figure()
    plt.plot(scores_steps, label="score")
    plt.legend()
    plt.xlabel("step")
    plt.ylabel("score")
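The call to `evaluate(policy, env, render=args.render)` above returns an array of per-episode scores. A generic sketch of such a rollout helper (hypothetical name and action-selection callable; assumes the pre-0.26 gym reset/step API that the snippet already uses via `env.seed`):

import numpy as np


def evaluate_policy(select_action, env, n_episodes=10, render=False):
    """Roll out n_episodes and return the per-episode returns."""
    returns = np.zeros(n_episodes)
    for ep in range(n_episodes):
        obs = env.reset()
        done, total = False, 0.0
        while not done:
            if render:
                env.render()
            obs, reward, done, _ = env.step(select_action(obs))
            total += reward
        returns[ep] = total
    return returns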
Code example #5
File: recognition.py  Project: sky-lzy/DailyCode
def train_val(im_dir,
              train_file_path,
              val_file_path,
              hidden_size,
              n_layers,
              act_type,
              norm_size,
              n_epochs,
              batch_size,
              n_letters,
              lr,
              optim_type,
              momentum,
              weight_decay,
              valInterval,
              device='cpu'):
    '''
    The main training procedure
    ----------------------------
    :param im_dir: path to directory with images
    :param train_file_path: file list of training image paths and labels
    :param val_file_path: file list of validation image paths and labels
    :param hidden_size: a list of hidden size for each hidden layer
    :param n_layers: number of layers in the MLP
    :param act_type: type of activation function, can be none, sigmoid, tanh, or relu
    :param norm_size: image normalization size, (height, width)
    :param n_epochs: number of training epochs
    :param batch_size: batch size of training and validation
    :param n_letters: number of classes, in this task it is 26 English letters
    :param lr: learning rate
    :param optim_type: optimizer, can be 'sgd', 'adagrad', 'rmsprop', 'adam', or 'adadelta'
    :param momentum: only used if optim_type == 'sgd'
    :param weight_decay: the factor of L2 penalty on network weights
    :param valInterval: validation frequency, e.g., valInterval = 5 means validating after every 5 training epochs
    :param device: 'cpu' or 'cuda'; use 'cpu' if a CUDA-capable GPU is not available
    '''

    # training and validation data loader
    trainloader = dataLoader(im_dir, train_file_path, norm_size, batch_size)
    valloader = dataLoader(im_dir, val_file_path, norm_size, batch_size)

    # TODO 1: initialize the MLP model and loss function
    # what is the input size of the MLP?
    # hint 1: we convert an image to a vector as the input of the MLP,
    # each image has shape [norm_size[0], norm_size[1]]
    # hint 2: Input parameters for MLP: input_size, output_size, hidden_size, n_layers, act_type
    model = MLP(norm_size[0] * norm_size[1], n_letters, hidden_size, n_layers,
                act_type)
    # loss function
    cal_loss = CrossEntropyLoss.apply
    # End TODO 1
    # put the model on CPU or GPU
    model = model.to(device)

    # optimizer
    if optim_type == 'sgd':
        optimizer = optim.SGD(model.parameters(),
                              lr,
                              momentum=momentum,
                              weight_decay=weight_decay)
    elif optim_type == 'adagrad':
        optimizer = optim.Adagrad(model.parameters(),
                                  lr,
                                  weight_decay=weight_decay)
    elif optim_type == 'rmsprop':
        optimizer = optim.RMSprop(model.parameters(),
                                  lr,
                                  weight_decay=weight_decay)
    elif optim_type == 'adam':
        optimizer = optim.Adam(model.parameters(),
                               lr,
                               weight_decay=weight_decay)
    elif optim_type == 'adadelta':
        optimizer = optim.Adadelta(model.parameters(),
                                   lr,
                                   weight_decay=weight_decay)
    else:
        print(
            '[Error] optim_type should be one of sgd, adagrad, rmsprop, adam, or adadelta'
        )
        raise NotImplementedError

    # training
    # to save loss of each training epoch in a python "list" data structure
    losses = []

    for epoch in range(n_epochs):
        # set the model in training mode
        model.train()

        # to save total loss in one epoch
        total_loss = 0.

        #TODO 2: calculate losses and train the network using the optimizer
        for step, (ims,
                   labels) in enumerate(trainloader):  # get a batch of data

            # step 1: set data type and device
            ims = ims.to(device)
            labels = labels.to(device)
            # step 2: flatten each image into a vector as the input of the MLP
            # (ims.size(0) is used so that a smaller final batch also works)
            ims = ims.view(ims.size(0), norm_size[0] * norm_size[1])
            # hint: clear gradients in the optimizer
            optimizer.zero_grad()
            # step 3: run the model which is the forward process
            pred = model(ims)
            # step 4: compute the loss, and call backward propagation function
            loss = cal_loss(pred, labels)
            loss.backward()
            # step 5: accumulate the total loss; loss.item() returns the tensor's value
            # as a standard Python number (this operation is not differentiable)
            total_loss += loss.item()
            # step 6: call a function, optimizer.step(), to update the parameters of the model
            optimizer.step()
            # End TODO 2

        # average loss over all iterations in the epoch
        avg_loss = total_loss / len(trainloader)
        losses.append(avg_loss)
        print('Epoch {:02d}: loss = {:.3f}'.format(epoch + 1, avg_loss))

        # validation
        if (epoch + 1) % valInterval == 0:

            # set the model in evaluation mode
            model.eval()

            n_correct = 0.  # number of images that are correctly classified
            n_ims = 0.  # number of total images

            with torch.no_grad():  # no gradients are needed during validation

                # evaluate on the validation data; no parameter updates are made
                for ims, labels in valloader:
                    # set data type and device (labels stay integer class indices)
                    ims, labels = ims.to(device), labels.to(device)

                    # convert an image to a vector as the input of the MLP
                    input = ims.view(ims.size(0), -1)

                    # run the model which is the forward process
                    out = model(input)

                    # get the predicted value by the output using out.argmax(1)
                    predictions = out.argmax(1)

                    # sum up the number of images correctly recognized and the total image number
                    n_correct += (predictions == labels).sum().item()
                    n_ims += ims.size(0)

            # show prediction accuracy
            print('Epoch {:02d}: validation accuracy = {:.1f}%'.format(
                epoch + 1, 100 * n_correct / n_ims))

    # save model parameters in a file
    model_save_path = 'saved_models/recognition.pth'

    torch.save(
        {
            'state_dict': model.state_dict(),
            'configs': {
                'norm_size': norm_size,
                'output_size': n_letters,
                'hidden_size': hidden_size,
                'n_layers': n_layers,
                'act_type': act_type
            }
        }, model_save_path)
    print('Model saved in {}\n'.format(model_save_path))

    # draw the loss curve
    plot_loss(losses)
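`cal_loss = CrossEntropyLoss.apply` above refers to a custom `torch.autograd.Function` defined elsewhere in the project. A minimal sketch of what such a function can look like (an assumption, not the project's actual implementation):

import torch


class CrossEntropyLoss(torch.autograd.Function):
    """Hypothetical sketch: mean cross-entropy over a batch of logits."""

    @staticmethod
    def forward(ctx, logits, labels):
        # numerically stable log-softmax
        log_probs = logits - logits.logsumexp(dim=1, keepdim=True)
        ctx.save_for_backward(log_probs, labels)
        n = logits.size(0)
        return -log_probs[torch.arange(n), labels].mean()

    @staticmethod
    def backward(ctx, grad_output):
        log_probs, labels = ctx.saved_tensors
        n = log_probs.size(0)
        grad = log_probs.exp()                # softmax probabilities
        grad[torch.arange(n), labels] -= 1.0  # subtract the one-hot targets
        return grad_output * grad / n, None   # no gradient w.r.t. the labels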
Code example #6
File: run.py  Project: ChengJiacheng/LeNet-5
                      transforms.ToTensor()]))
data_train_loader = DataLoader(data_train, batch_size=256, shuffle=True, num_workers=8)
data_test_loader = DataLoader(data_test, batch_size=1024, num_workers=8)

# net = LeNet()
n_hidden = 20
net = MLP(n_hidden=n_hidden)


criterion = nn.CrossEntropyLoss()

net = net.cuda()
criterion = criterion.cuda()

# optimizer = optim.Adam(net.parameters(), lr=1e-1)
optimizer = optim.SGD(net.parameters(), lr=0.1, momentum=0.9)


cur_batch_win = None
cur_batch_win_opts = {
    'title': 'Epoch Loss Trace',
    'xlabel': 'Batch Number',
    'ylabel': 'Loss',
    'width': 1200,
    'height': 600,
}


def train(epoch):
    global cur_batch_win
    net.train()
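`MLP(n_hidden=n_hidden)` above is a project-specific class. A plausible minimal definition for MNIST (a sketch under assumed input/output sizes, not the repository's code):

import torch.nn as nn


class MLP(nn.Module):
    """Hypothetical single-hidden-layer MLP for flattened 28x28 MNIST images."""

    def __init__(self, n_hidden=20, n_in=28 * 28, n_out=10):
        super().__init__()
        self.net = nn.Sequential(
            nn.Flatten(),
            nn.Linear(n_in, n_hidden),
            nn.ReLU(),
            nn.Linear(n_hidden, n_out),
        )

    def forward(self, x):
        return self.net(x)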
Code example #7
File: train_paired.py  Project: Kozon2015/adv-attack
# clf.avgpool = nn.AdaptiveAvgPool2d(1)
# for param in clf.parameters():
#     param.requires_grad = False
# clf.to(device)
# clf.eval()
# clf_norm = Normalization(IMAGENET_MEAN, IMAGENET_STD)
# clf_norm.to(device)
# clf_norm.eval()
"""fixed"""

net = MLP(n_layers=args.n_layers, width=args.width)
net.to(device)
net = nn.DataParallel(net)

criterion = nn.CrossEntropyLoss().to(device)
optimizer = optim.Adam(net.parameters(),
                       lr=args.lr,
                       betas=(args.momentum, 0.999),
                       weight_decay=args.wd)
# scheduler = lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.1)

# ckpt = torch.load(args.checkpoint)
# net.load_state_dict(ckpt['model'])
# optimizer.load_state_dict(ckpt['optimizer'])
# train_loss = ckpt['train_loss']
# test_loss = ckpt['test_loss']
# test_acc = ckpt['test_acc']
# epochs = ckpt['epoch']

if not os.path.exists('logs/'):
    os.makedirs('logs/')
Code example #8
def main():
	parser = argparse.ArgumentParser(description='Pytorch example: MNIST')
	parser.add_argument('--batchsize', '-b', type=int, default=100,
						help='Number of images in each mini-batch')
	parser.add_argument('--epoch', '-e', type=int, default=20,
						help='Number of sweeps over the training data')
	parser.add_argument('--frequency', '-f', type=int, default=-1,
						help='Frequency of taking a snapshot')
	parser.add_argument('--gpu', '-g', type=int, default=-1,
						help='GPU ID (negative value indicates CPU)')
	parser.add_argument('--out', '-o', default='result',
						help='Directory to output the result')
	parser.add_argument('--resume', '-r', default='',
						help='Resume the training from snapshot')
	parser.add_argument('--unit', '-u', type=int, default=1000,
						help='Number of units')
	args = parser.parse_args()

	print('GPU: {}'.format(args.gpu))
	print('# unit: {}'.format(args.unit))
	print('# Minibatch-size: {}'.format(args.batchsize))
	print('# epoch: {}'.format(args.epoch))
	print('')

	# Set up a neural network to train
	net = MLP(args.unit, 28*28, 10)
	# Load designated network weight 
	if args.resume:
		net.load_state_dict(torch.load(args.resume))
	# Set model to GPU
	if args.gpu >= 0:
		# Make a specified GPU current
		device = 'cuda:' + str(args.gpu)
		net = net.to(device)

	# Setup a loss and an optimizer
	criterion = nn.CrossEntropyLoss()
	optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)

	# Load the MNIST

	transform = transforms.Compose([transforms.ToTensor()])

	trainvalset = datasets.MNIST(root='./data', train=True,
										download=True, transform=transform)
	# Split train/val
	n_samples = len(trainvalset)
	trainsize = int(n_samples * 0.9)
	valsize = n_samples - trainsize
	trainset, valset = torch.utils.data.random_split(trainvalset, [trainsize, valsize])

	trainloader = torch.utils.data.DataLoader(trainset, batch_size=args.batchsize,
										shuffle=True, num_workers=2)
	valloader = torch.utils.data.DataLoader(valset, batch_size=args.batchsize,
										shuffle=True, num_workers=2)
	# Setup result holder
	x = []
	ac_train = []
	ac_val = []
	# Train
	for ep in range(args.epoch):  # Loop over the dataset multiple times

		running_loss = 0.0
		correct_train = 0
		total_train = 0
		correct_val = 0
		total_val = 0

		for i, data in enumerate(trainloader, 0):
			# Get the inputs; data is a list of [inputs, labels]
			inputs, labels = data
			if args.gpu >= 0:
				inputs = inputs.to(device)
				labels = labels.to(device)
			# Reshape the input 
			inputs = inputs.view(-1, 28*28)
			# Reset the parameter gradients
			optimizer.zero_grad()

			# Forward
			outputs = net(inputs)
			# Predict the label
			_, predicted = torch.max(outputs, 1)
			# Check whether estimation is right
			correct_train += (predicted == labels).sum().item()
			total_train += labels.size(0)
			# Backward + Optimize
			loss = criterion(outputs, labels)
			loss.backward()
			optimizer.step()
			# Add loss
			running_loss += loss.item()

		# Report loss of the epoch
		print('[epoch %d] loss: %.3f' % (ep + 1, running_loss))

		# Save the model
		if args.frequency > 0 and (ep + 1) % args.frequency == 0:  # skip snapshots when frequency <= 0
			path = args.out + "/model_" + str(ep + 1)
			torch.save(net.state_dict(), path)

		# Validation
		with torch.no_grad():
			for data in valloader:
				images, labels = data
				if args.gpu >= 0:
					images = images.to(device)
					labels = labels.to(device)
				# Reshape the input
				images = images.view(-1, 28*28)
				# Forward
				outputs = net(images)
				# Predict the label
				_, predicted = torch.max(outputs, 1)
				# Check whether estimation is right
				correct_val += (predicted == labels).sum().item()
				total_val += labels.size(0)

		# Record result
		x.append(ep+1)
		ac_train.append(100 * correct_train / total_train)
		ac_val.append(100 * correct_val / total_val)

	print('Finished Training')
	path = args.out + "/model_final"
	torch.save(net.state_dict(), path)

	# Draw graph
	fig = plt.figure()
	ax = fig.add_subplot(1, 1, 1)
	ax.plot(x, ac_train, label='Training')
	ax.plot(x, ac_val, label='Validation')
	ax.legend()
	ax.set_xlabel("Epoch")
	ax.set_ylabel("Accuracy [%]")
	ax.set_ylim(80, 100)

	plt.savefig(args.out + '/accuracy_mnist_mlp.png')
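The function above is shown without its module-level imports or entry point; they are reconstructed here from the names it uses (the `MLP` class itself comes from the project):

import argparse

import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import datasets, transforms


if __name__ == '__main__':
	main()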