Example No. 1
import numpy as np
import torch
import torch.nn as nn
import torch.utils.data as Data


def m_embeding():
    # mid_result_path and AutoEncoder are assumed to be defined elsewhere in the project.
    M = np.loadtxt(mid_result_path + "M.txt", delimiter=',')
    EPOCH = 10
    BATCH_SIZE = 64
    LR = 0.005
    autoencoder = AutoEncoder(M.shape[1])
    M = torch.tensor(M, dtype=torch.float32)
    M_train = Data.DataLoader(dataset=M, batch_size=BATCH_SIZE, shuffle=True)

    optimizer = torch.optim.Adam(autoencoder.parameters(), lr=LR)
    loss_func = nn.MSELoss()
    for epoch in range(EPOCH):
        for step, x in enumerate(M_train):
            # The autoencoder reconstructs its own input, so x is both input and target.
            encoded, decoded = autoencoder(x)  # Variable wrappers are unnecessary in PyTorch >= 0.4
            loss = loss_func(decoded, x)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            print(loss.item())
    encoded_data, _ = autoencoder(M)
    np.savetxt(mid_result_path + "m_coder.txt", encoded_data.detach().numpy(), delimiter=',', fmt='%.4f')
    print(encoded_data)
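
The snippet assumes an AutoEncoder class defined elsewhere. A minimal sketch consistent with how it is used here, built from M.shape[1] and returning an (encoded, decoded) pair, might look like this; the hidden sizes are illustrative assumptions, not from the source:

import torch.nn as nn


class AutoEncoder(nn.Module):
    """Minimal sketch; the 128/64 hidden sizes are assumptions, not from the source."""

    def __init__(self, in_dim):
        super().__init__()
        self.encoder = nn.Sequential(
            nn.Linear(in_dim, 128), nn.Tanh(),
            nn.Linear(128, 64),
        )
        self.decoder = nn.Sequential(
            nn.Linear(64, 128), nn.Tanh(),
            nn.Linear(128, in_dim),
        )

    def forward(self, x):
        encoded = self.encoder(x)
        decoded = self.decoder(encoded)
        return encoded, decoded  # matches the (encoded, decoded) unpacking above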
Example No. 2
    # dataset = Cifar10Dataset(configuration.batch_size, dataset_path,
    #                          configuration.shuffle_dataset)  # replaced by ArnoDataset below

    print(f"batch_size: {configuration.batch_size}")

    dataset = ArnoDataset(batch_size=configuration.batch_size,
                          path="../../../datasets/arno_v1",
                          shuffle_dataset=configuration.shuffle_dataset,
                          num_workers=6,
                          im_size=128)

    auto_encoder = AutoEncoder(device, configuration).to(
        device)  # Create an AutoEncoder model using our GPU device

    optimizer = optim.Adam(auto_encoder.parameters(),
                           lr=configuration.learning_rate,
                           amsgrad=True)  # Create an Adam optimizer instance
    trainer = Trainer(device, auto_encoder, optimizer,
                      dataset)  # Create a trainer instance
    trainer.train(configuration.num_training_updates, results_path,
                  args)  # Train our model on the Arno dataset
    auto_encoder.save(results_path + os.sep +
                      args.model_name)  # Save our trained model
    trainer.save_loss_plot(results_path + os.sep +
                           args.loss_plot_name)  # Save the loss plot

    evaluator = Evaluator(
        device, auto_encoder,
        dataset)  # Create an Evaluator instance to evaluate our trained model
    evaluator.reconstruct()  # Reconstruct our images from the embedded space
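
ArnoDataset is project-specific and not shown. Judging only from the call site, a stand-in built on torchvision could look like the sketch below; everything beyond the constructor signature (the ImageFolder layout, the training_loader attribute name) is an assumption:

from torch.utils.data import DataLoader
from torchvision import datasets, transforms


class ArnoDataset:
    """Hypothetical stand-in; only the constructor arguments come from the call above."""

    def __init__(self, batch_size, path, shuffle_dataset, num_workers, im_size):
        transform = transforms.Compose([
            transforms.Resize((im_size, im_size)),
            transforms.ToTensor(),
        ])
        data = datasets.ImageFolder(path, transform=transform)
        # Attribute name is a guess at what Trainer/Evaluator might consume.
        self.training_loader = DataLoader(data, batch_size=batch_size,
                                          shuffle=shuffle_dataset,
                                          num_workers=num_workers)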
Example No. 3
tr_loader = NeighborSampler(tr_data,
                            size=[5] * num_step_message_passing,
                            num_hops=num_step_message_passing,
                            batch_size=batch_size,
                            bipartite=False,
                            shuffle=True)
val_loader = NeighborSampler(val_data,
                             size=[5] * num_step_message_passing,
                             num_hops=num_step_message_passing,
                             batch_size=batch_size,
                             bipartite=False)
ts_loader = NeighborSampler(ts_data,
                            size=[5] * num_step_message_passing,
                            num_hops=num_step_message_passing,
                            batch_size=batch_size,
                            bipartite=False)
optimizer = torch.optim.Adam(model.parameters(), lr=lr)


def train():
    l = []
    # Move the full-graph tensors to the device once; each sampled subgraph
    # indexes into them via n_id/e_id below.
    tr_x, tr_edge_index, tr_edge_attr = tr_data.x.to(
        dev), tr_data.edge_index.to(dev), tr_data.edge_attr.to(dev)
    model.train()

    for sub_data in tr_loader():
        sub_data = sub_data.to(dev)
        optimizer.zero_grad()
        # Scatter the encodings of the sampled nodes into a zero-initialised
        # buffer covering every node in the graph.
        z = tr_x.new_zeros(tr_x.size(0), node_hidden_dim)
        z[sub_data.n_id] = model.encode(tr_x[sub_data.n_id],
                                        sub_data.edge_index,
                                        tr_edge_attr[sub_data.e_id])
Example No. 4
import argparse
import os

import numpy as np
import torch
from torch.utils.data import DataLoader

# AutoEncoder, Fetch, and classes are assumed to come from the project's own modules.


def main(args=None):
	parser = argparse.ArgumentParser(description='Simple training script.')
	parser.add_argument('--cls_id', help='class id', type=int)
	parser.add_argument('--version', help='model version', type=float)
	parser.add_argument('--gamma', help='gamma for the SoftL1Loss', type=float, default=9.0)
	parser.add_argument('--lr', help='lr for optimization', type=float, default=1e-4)
	parser.add_argument('--epoches', help='number of epochs for optimization', type=int, default=4)
	parser.add_argument('--resume_epoch', help='trained model for resume', type=int, default=0)
	parser.add_argument('--batch_size', help='batch size for optimization', type=int, default=10)
	parser.add_argument('--checkpoints', help='checkpoints path', type=str, default='voc_checkpoints')
	parser = parser.parse_args(args)

	cls_name = classes[parser.cls_id]
	parser.checkpoints = '_'.join([parser.checkpoints, cls_name])
	if not os.path.isdir(parser.checkpoints):
		os.mkdir(parser.checkpoints)
	print('will save checkpoints in ' + parser.checkpoints)
	cls_dir = "../context_profile/voc_detection_{:s}_p10/"\
		.format(cls_name)
	batch_size = parser.batch_size
	print('[data prepare]....')
	dataloader_train = DataLoader(Fetch('train_benign', root_dir=cls_dir), batch_size=batch_size, num_workers=2, shuffle=True)

	print('[model prepare]....')
	use_gpu = torch.cuda.device_count() > 0

	model = AutoEncoder(parser.gamma)
	if use_gpu:
		model = torch.nn.DataParallel(model).cuda()
	optimizer = torch.optim.Adam(model.parameters(), lr=parser.lr)
	scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=2, verbose=True)
	if parser.resume_epoch > 0:
		checkpoint_name = os.path.join(parser.checkpoints, 'model_{:1.1f}_epoch{:d}.pt'.format(parser.version, parser.resume_epoch))
		if not os.path.isfile(checkpoint_name):
			raise ValueError('No checkpoint file {:s}'.format(checkpoint_name))
		model.load_state_dict(torch.load(checkpoint_name))
		print('model loaded from {:s}'.format(checkpoint_name))

	print('[model training]...')
	loss_hist = []
	epoch_loss = []
	num_iter = len(dataloader_train)
	for epoch_num in range(parser.resume_epoch, parser.epoches):
		model.train()
		for iter_num, sample in enumerate(dataloader_train):
			optimizer.zero_grad()
			if use_gpu:
				data = sample['data'].cuda().float()
			else:
				data = sample['data'].float()

			loss = model(data).mean()  # the model returns a per-sample loss; average it
			if bool(loss == 0):
				continue
			loss.backward()
			torch.nn.utils.clip_grad_norm_(model.parameters(), 0.1)  # clip gradients for stability
			optimizer.step()
			epoch_loss.append(float(loss))
			loss_hist.append(float(loss))
			if iter_num % 30 == 0:
				print('Epoch {:d}/{:d} | Iteration: {:d}/{:d} | loss: {:1.5f}'.format(
					epoch_num + 1, parser.epoches, iter_num + 1, num_iter, float(loss)))
			if iter_num % 3000 == 0:
				scheduler.step(np.mean(epoch_loss))
				epoch_loss = []
		if epoch_num < 1:
			continue
		checkpoint_name = os.path.join(parser.checkpoints, 'model_{:1.1f}_epoch{:d}.pt'.format(parser.version, epoch_num+1))
		torch.save(model.state_dict(), checkpoint_name)
		print('Model saved as {:s}'.format(checkpoint_name))

	np.save('loss_hist.npy', loss_hist)
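
Since main() forwards its args list straight to parse_args, the script can be driven programmatically as well as from the command line; a hypothetical invocation (the class id and version values are placeholders):

if __name__ == '__main__':
	# Placeholder values for illustration; omit the list to read sys.argv instead.
	main(['--cls_id', '0', '--version', '1.0'])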
Example No. 5
    parser.add_argument('--model_name', nargs='?', default='model.pth', type=str, help='The file name of trained model')
    parser.add_argument('--original_images_name', nargs='?', default='original_images.png', type=str, help='The file name of the original images used in evaluation')
    parser.add_argument('--validation_images_name', nargs='?', default='validation_images.png', type=str, help='The file name of the reconstructed images used in evaluation')
    args = parser.parse_args()

    # Dataset and model hyperparameters
    configuration = Configuration.build_from_args(args)

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') # Use GPU if cuda is available

    # Set the result path and create the directory if it doesn't exist
    results_path = '..' + os.sep + args.results_path
    if not os.path.isdir(results_path):
        os.mkdir(results_path)
    
    dataset_path = '..' + os.sep + args.data_path

    dataset = Cifar10Dataset(configuration.batch_size, dataset_path, configuration.shuffle_dataset) # Create an instance of CIFAR10 dataset
    auto_encoder = AutoEncoder(device, configuration).to(device) # Create an AutoEncoder model using our GPU device

    optimizer = optim.Adam(auto_encoder.parameters(), lr=configuration.learning_rate, amsgrad=True) # Create an Adam optimizer instance
    trainer = Trainer(device, auto_encoder, optimizer, dataset) # Create a trainer instance
    trainer.train(configuration.num_training_updates) # Train our model on the CIFAR10 dataset
    auto_encoder.save(results_path + os.sep + args.model_name) # Save our trained model
    trainer.save_loss_plot(results_path + os.sep + args.loss_plot_name) # Save the loss plot

    evaluator = Evaluator(device, auto_encoder, dataset) # Create an Evaluator instance to evaluate our trained model
    evaluator.reconstruct() # Reconstruct our images from the embedded space
    evaluator.save_original_images_plot(results_path + os.sep + args.original_images_name) # Save the original images for comparison purposes
    evaluator.save_validation_reconstructions_plot(results_path + os.sep + args.validation_images_name) # Reconstruct the decoded images and save them
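
Configuration.build_from_args is part of the project and not shown. A minimal stand-in that satisfies the attribute accesses in this snippet (batch_size, shuffle_dataset, learning_rate, num_training_updates), assuming args carries fields of the same names, would be:

class Configuration:
    """Hypothetical stand-in; only the attribute names are taken from this snippet."""

    def __init__(self, batch_size, shuffle_dataset, learning_rate, num_training_updates):
        self.batch_size = batch_size
        self.shuffle_dataset = shuffle_dataset
        self.learning_rate = learning_rate
        self.num_training_updates = num_training_updates

    @classmethod
    def build_from_args(cls, args):
        return cls(args.batch_size, args.shuffle_dataset,
                   args.learning_rate, args.num_training_updates)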