# Code example #1
def train_ae(dataloaders_dict, device=0, num_epochs=5):
    """Train an AutoEncoder to reconstruct its inputs with MSE loss.

    Args:
        dataloaders_dict: mapping with keys "train", "val", "test", each a
            DataLoader yielding ``(inputs, labels)`` pairs; labels are ignored.
        device: torch device (index or string) the model and batches move to.
        num_epochs: number of full passes over every phase.

    Returns:
        The trained AutoEncoder (left in eval mode after the last phase).
    """
    ae = AutoEncoder()
    ae = ae.to(device)
    distance = nn.MSELoss()
    optimizer = optim.Adam(ae.parameters(), lr=0.001)

    for epoch in range(num_epochs):
        print('\nEpoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)
        for phase in ["train", "val", "test"]:

            if phase == "train":
                ae.train()  # Set ae to training mode
            else:
                ae.eval()

            # Accumulate per-batch losses so we can report the epoch average.
            # The original printed only the LAST batch's loss, and crashed
            # with UnboundLocalError when a phase's dataloader was empty.
            running_loss = 0.0
            num_batches = 0
            for data in dataloaders_dict[phase]:
                inputs, _ = data
                inputs = inputs.to(device)
                if phase == "train":
                    # Only the train phase needs gradient bookkeeping.
                    optimizer.zero_grad()
                with torch.set_grad_enabled(phase == "train"):
                    output = ae(inputs)
                    loss = distance(output, inputs)
                    if phase == "train":
                        loss.backward()
                        optimizer.step()
                running_loss += loss.item()
                num_batches += 1
            epoch_loss = running_loss / num_batches if num_batches else float('nan')
            print("{} Loss: {:.4f}".format(phase, epoch_loss))
    return ae
# Code example #2
global args
args = parser.parse_args()

# Select which GPU this process is allowed to see.
os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpuid)

# Load the test split (ground truth is unused below but kept for parity).
Load_data = load_data()
test_data, test_gt = Load_data.test()

# Init the model and load pre-trained weights, then switch to eval mode.
autoencoder = AutoEncoder()
autoencoder.load_state_dict(torch.load(args.load_weight_dir))  # load pre-train
autoencoder.cuda()
autoencoder.eval()

loss_func = nn.L1Loss()
loss = 0

# no_grad saves memory: no gradient buffers are allocated during evaluation.
with torch.no_grad():
    for index in range(0, test_data.size()[0], 50):
        # BUG FIX: the slice end was `index + 49`, which silently dropped
        # one sample from every batch of 50 (Python slices are end-exclusive).
        x_in = torch.tensor(test_data[index:index + 50, :, :, :],
                            dtype=torch.float32).cuda()
        decoded = autoencoder(x_in)
        loss = loss + loss_func(decoded, x_in)   # accumulate L1 loss

        # Pick one sample per batch to eyeball reconstruction quality.
        plt.title('autoencoder input')
        plt.imshow(x_in[0, 0, :, :].data.cpu().numpy(), cmap='gray')
        plt.show()
        plt.title('autoencoder output')  # BUG FIX: typo "outoput"
        plt.imshow(decoded[0, 0, :, :].data.cpu().numpy(), cmap='gray')
        plt.show()  # BUG FIX: the output figure was never displayed

# BUG FIX: the original saved `model.state_dict()` and
# `optimizer.state_dict()`, but neither name is defined in this script
# (NameError at runtime), and an eval-only script has no optimizer state.
# Save the evaluated autoencoder's weights instead.
torch.save(autoencoder.state_dict(), 'conv_autoencoder_weight.pt')


data = []
print('Reading Images')
# Hoist the directory listing: the original called os.listdir() on every
# loop iteration (once for len(), once per index), which is both slow and
# race-prone if the directory changes mid-loop. Only the first 3 images
# are read — presumably a debugging cap; TODO confirm.
image_names = os.listdir(image_filepath)[0:3]
for img_name in tqdm(image_names):
    img = plt.imread(image_filepath + img_name)
    data.append(img)

CUDA = torch.cuda.is_available()

model = AutoEncoder()

# Load weights first, then switch to eval mode (the original called
# eval() before the weights existed — harmless but confusing).
model.load_state_dict(
    torch.load('conv_autoencoder_weight.pt', map_location='cpu'))
model.eval()

if CUDA:
    # BUG FIX: the original did `model = AutoEncoder().cuda()`, which
    # re-instantiated a FRESH model and silently discarded the weights
    # loaded above. Move the already-loaded model to the GPU instead.
    model = model.cuda()
# Training/inference loop over randomly sampled batches.
# NOTE(review): the loop body appears to continue beyond the visible end of
# this file chunk, so only the visible lines are documented here.
for epoch in (range(num_epochs)):
    # Draw batch_size distinct images from the pool loaded above
    # (sampling without replacement each epoch).
    dataset = random.sample(data, batch_size)
    for image in dataset:
        # prep_image is defined elsewhere; presumably resizes/pads to 256 —
        # TODO confirm its contract.
        image, orig_im, dim, w, h = prep_image(image, 256)
        # Variable is a no-op wrapper in modern PyTorch (deprecated).
        image = Variable(torch.tensor(image))
        # NOTE(review): view() only reinterprets memory layout; if the intent
        # is an HWC -> NCHW conversion, permute(2, 0, 1).unsqueeze(0) is
        # needed instead — confirm against prep_image's output layout.
        img = image.view(1, image.shape[2], image.shape[0], image.shape[1])