Example no. 1
import torch
from torchvision import datasets, transforms


# Model, train, test and the hyperparameter constants (BATCH_SIZE,
# TEST_BATCH_SIZE, LEARNING_RATE, GAMMA, EPOCHS) are assumed to be defined
# elsewhere in the source file.
def main():
    device = torch.device('cpu')
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,)),  # MNIST mean and std
    ])
    # download=True fetches MNIST on first run instead of erroring out.
    train_set = datasets.MNIST('mnist_data', train=True, download=True,
                               transform=transform)
    test_set = datasets.MNIST('mnist_data', train=False, download=True,
                              transform=transform)
    train_loader = torch.utils.data.DataLoader(train_set, batch_size=BATCH_SIZE,
                                               shuffle=True)
    test_loader = torch.utils.data.DataLoader(test_set,
                                              batch_size=TEST_BATCH_SIZE)

    model = Model()
    model = model.to(device)
    optimizer = torch.optim.Adadelta(model.parameters(), lr=LEARNING_RATE)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, gamma=GAMMA, step_size=1)
    for i in range(EPOCHS):
        train(model, train_loader, optimizer, device)
        scheduler.step()

    test(model, test_loader, device)
    torch.save(model.state_dict(), "parameters.pt")
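
The train and test helpers called above are not shown on this page. A minimal sketch of what they typically look like for this MNIST setup, assuming the model's forward pass ends in log_softmax (all of this is an assumption, not the original author's code):

import torch
import torch.nn.functional as F

def train(model, train_loader, optimizer, device):
    # One pass over the training set.
    model.train()
    for data, target in train_loader:
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        loss = F.nll_loss(model(data), target)
        loss.backward()
        optimizer.step()

def test(model, test_loader, device):
    # Average loss and accuracy on the held-out set.
    model.eval()
    correct, total_loss = 0, 0.0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            total_loss += F.nll_loss(output, target, reduction='sum').item()
            correct += (output.argmax(dim=1) == target).sum().item()
    n = len(test_loader.dataset)
    print('Test loss: %.4f, accuracy: %.4f' % (total_loss / n, correct / n))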
Example no. 2
import torch

# load_moon_mesh, build_renderer, Model and the constants OBJ_filename,
# DEVICE, LR and EPOCH are assumed to be defined elsewhere in the source file.
def fine_tuning(target_image, init_pos):
    image_size = target_image.shape[0]

    mesh = load_moon_mesh(OBJ_filename)
    renderer = build_renderer(image_size)

    model = Model(mesh=mesh,
                  renderer=renderer,
                  image_ref=target_image,
                  init_pos=init_pos).to(DEVICE)

    optimizer = torch.optim.Adam(model.parameters(), lr=LR)
    best_loss = float('inf')  # the original used a large magic number (1000000)
    best_position = []  # filled with [dist, elev, azim, p_x, p_y, p_z, u_x, u_y, u_z]

    for i in range(EPOCH):
        optimizer.zero_grad()
        loss = model()  # forward() renders the mesh and scores it against image_ref
        loss.backward()
        optimizer.step()

        if loss.item() < best_loss:
            # .item() already returns a plain Python float, so the original
            # deepcopy and explicit .cpu() calls are unnecessary.
            best_loss = loss.item()
            best_position = [
                model.dist.item(),
                model.elev.item(),
                model.azim.item(),
                model.p[0][0].item(),
                model.p[0][1].item(),
                model.p[0][2].item(),
                model.u[0][0].item(),
                model.u[0][1].item(),
                model.u[0][2].item(),
            ]
            # print("Best Loss:{}, Best Pos:{}".format(best_loss, best_position))

        if loss.item() < 0.05:  # ssim Loss
            break

    return best_position
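
A hypothetical call site for fine_tuning, assuming a square grayscale reference render loaded as a NumPy array (the file name and initial position are illustrative placeholders, not values from the original code):

import numpy as np

target = np.load('target_render.npy')          # hypothetical 256x256 reference
best = fine_tuning(target, init_pos=[0.0, 0.0, 3.0])
dist, elev, azim = best[:3]
print('dist=%.3f, elev=%.3f, azim=%.3f' % (dist, elev, azim))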
Example no. 3
import torch
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm

# The opening of this snippet is truncated on the source page; the visible
# keyword arguments suggest a DataLoader over the training set. The
# reconstruction below is an assumption (train, raw1, raw2, raw3 and Model
# come from the original file; the batch size is illustrative):
train_loader = torch.utils.data.DataLoader(train,
                                           batch_size=16,
                                           shuffle=True,
                                           num_workers=1,
                                           pin_memory=True)

writer = SummaryWriter()

device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
model = Model()
model = model.to(device)
imsize = 256, 256

print(f"TrainSize = {len(train)}")
print(f"Raw1Size = {len(raw1)}")
print(f"Raw2Size = {len(raw2)}")
print(f"Raw3Size = {len(raw3)}")

optimizer = torch.optim.Adam(model.parameters(), lr=1.0e-5)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 4, 0.8)

model.train()

for epoch in range(50):
    print(f"------ EPOCH {epoch} ------")
    # -------------------------- TRAIN --------------------------
    train_losses = []
    # Wrap the loader (not enumerate) so tqdm can display a total.
    for i_batch, sample_batched in enumerate(tqdm(train_loader)):
        optimizer.zero_grad()

        pred_images, next_images, event_images, _ = sample_batched

        pred_images = pred_images.to(device)
        next_images = next_images.to(device)
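
The loop body is truncated here on the source page. A sketch of how such a step could continue, assuming the model predicts the next frame from the current and event images and is trained with a per-pixel reconstruction loss (the forward signature and loss choice are assumptions, not the original code):

        event_images = event_images.to(device)

        # Hypothetical forward pass and reconstruction loss.
        outputs = model(pred_images, event_images)
        loss = torch.nn.functional.mse_loss(outputs, next_images)

        loss.backward()
        optimizer.step()
        train_losses.append(loss.item())

    scheduler.step()
    writer.add_scalar('loss/train', sum(train_losses) / len(train_losses), epoch)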
Example no. 4
import torch
from torch.utils.data import DataLoader

# Dataset, Model and weight_init are assumed to be defined elsewhere in
# the source file.
def main(args):
    # ------------ start to prepare dataset ------------
    tr_dataset = Dataset(list_dir=args.train_dir, cv=0)
    cv_dataset = Dataset(list_dir=args.valid_dir, cv=1)

    tr_loader = DataLoader(tr_dataset,
                           batch_size=args.batch_size,
                           shuffle=True,
                           num_workers=0)
    cv_loader = DataLoader(cv_dataset,
                           batch_size=2,
                           shuffle=False,
                           num_workers=0)
    # ------------------ model -----------------------
    model = Model(kernel_size=3, stride=1, dropout=0.1)
    print(model)
    model.apply(weight_init)

    if args.use_cuda and torch.cuda.is_available():
        device = torch.device("cuda")
        model = torch.nn.DataParallel(model)
    else:
        device = torch.device('cpu')

    model = model.to(device=device)

    # optimizer
    if args.optimizer == 'RMSprop':
        optimizer = torch.optim.RMSprop(model.parameters(), lr=args.lr)
    elif args.optimizer == 'Adam':
        optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
    else:
        # The original printed and *returned* the exception; raise it instead.
        raise RuntimeError('Unsupported optimizer: %s' % args.optimizer)

    # Loss
    # Loss = torch.nn.MSELoss()

    train_total_loss = []
    cv_total_loss = []
    best_loss = float("inf")
    no_improve_nums = 0
    # ---------------------------------- Training ------------------------
    for epoch in range(0, args.epochs):
        model.train()
        tr_loss = 0.0
        for i, (x, y) in enumerate(tr_loader):
            x = x.to(device=device, dtype=torch.float32)
            y = y.to(device=device, dtype=torch.long)
            est = model(x)
            loss = torch.nn.functional.cross_entropy(input=est, target=y)
            # loss = Loss(input=est, target=y)
            # Accumulate a plain float; summing the loss tensor itself would
            # keep every batch's computation graph alive.
            tr_loss += loss.item()
            optimizer.zero_grad()
            loss.backward()
            torch.nn.utils.clip_grad_norm_(parameters=model.parameters(),
                                           max_norm=5)
            optimizer.step()

        # Average over the batch count (the original divided by the last loop
        # index i, which is off by one).
        tr_loss = tr_loss / len(tr_loader)
        train_total_loss.append(tr_loss)
        print('-' * 80)
        print('Epoch %d End train with loss: %.3f' % (epoch, tr_loss))
        print('-' * 80)

        # ---------------------------- validation  ---------------------------
        model.eval()
        cv_loss = 0.0
        with torch.no_grad():
            for j, (x, y) in enumerate(cv_loader):
                x = x.to(device=device, dtype=torch.float32)
                y = y.to(device=device, dtype=torch.long)

                est = model(x)
                loss = torch.nn.functional.cross_entropy(input=est, target=y)
                # loss = Loss(input=est, target=y)
                cv_loss += loss.item()
                if j % 5 == 0:
                    print('Epoch %d, Iter: %d,  Loss: %.3f' % (epoch, j, loss))
            cv_loss = cv_loss / len(cv_loader)
            cv_total_loss.append(cv_loss)
            print('-' * 80)

            if cv_loss < best_loss:
                best_loss = cv_loss
                # model.module only exists under DataParallel; fall back to the
                # bare model on the CPU path.
                net = model.module if hasattr(model, 'module') else model
                torch.save(
                    net.serialize(net,
                                  optimizer,
                                  epoch + 1,
                                  tr_loss=tr_loss,
                                  cv_loss=cv_loss),
                    args.save_folder / args.save_name)
                print("Found best validation model, saving to %s" %
                      str(args.save_folder / args.save_name))
                no_improve_nums = 0
            else:
                print('no improve ...')
                no_improve_nums += 1
                if no_improve_nums >= 3:
                    # Halve the learning rate directly on the param groups.
                    for param_group in optimizer.param_groups:
                        param_group['lr'] /= 2.0
                    print('Reduce learning rate to lr: %.8f' %
                          optimizer.param_groups[0]['lr'])
                if no_improve_nums >= 6:
                    print('No improve for 6 epochs, stopping')
                    break
            print('Epoch %d End validation with loss: %.3f, best loss: %.3f' %
                  (epoch, cv_loss, best_loss))
            print('-' * 80)
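
main(args) implies an argparse front end. A minimal sketch of one, with flag names inferred from the attributes used above and purely illustrative defaults (save_folder must be a Path so that args.save_folder / args.save_name works):

import argparse
from pathlib import Path

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--train_dir', required=True)
    parser.add_argument('--valid_dir', required=True)
    parser.add_argument('--batch_size', type=int, default=8)
    parser.add_argument('--epochs', type=int, default=100)
    parser.add_argument('--lr', type=float, default=1e-3)
    parser.add_argument('--optimizer', default='Adam')
    parser.add_argument('--use_cuda', action='store_true')
    parser.add_argument('--save_folder', type=Path, default=Path('checkpoints'))
    parser.add_argument('--save_name', default='best.pth')
    main(parser.parse_args())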
Example no. 5
import cv2
import numpy as np
import torch

# `model` and the checkpoint path `resume` are assumed to be defined earlier
# in the source file.
checkpoint = torch.load(resume)
model.load_state_dict(checkpoint['state_dict'])

# final_convname = 'clasifier'

features_blobs = []


def hook_feature(module, input, output):
    features_blobs.append(output.data.cpu().numpy())


# model._modules.get(final_convname).register_forward_hook(hook_feature)
# print(model.state_dict())
# get the softmax weight
params = list(model.parameters())

# Weight of the final fully connected layer (params[-1] is its bias).
# .cpu() guards against the model living on the GPU.
weight_softmax = np.squeeze(params[-2].data.cpu().numpy())


def returnCAM(feature_conv, weight_softmax, class_idx):
    # generate the class activation maps, upsampled to 256x256
    size_upsample = (256, 256)
    bz, nc, h, w = feature_conv.shape
    output_cam = []
    for idx in class_idx:
        cam = weight_softmax[idx].dot(feature_conv.reshape((nc, h * w)))
        cam = cam.reshape(h, w)
        cam = cam - np.min(cam)
        cam_img = cam / np.max(cam)
        cam_img = np.uint8(255 * cam_img)
        # The snippet is cut off here on the source page; the canonical CAM
        # implementation resizes each map and returns the list:
        output_cam.append(cv2.resize(cam_img, size_upsample))
    return output_cam
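
A hypothetical end-to-end use of these pieces: register the hook on the final conv layer, run a forward pass, and overlay the resulting CAM on the input image. The layer name 'features', the input file name, and the preprocessing are assumptions and will differ per model:

model.eval()
model._modules.get('features').register_forward_hook(hook_feature)

img = cv2.resize(cv2.imread('input.jpg'), (256, 256))   # hypothetical input
img_tensor = torch.from_numpy(img).permute(2, 0, 1).float().unsqueeze(0) / 255.0

logit = model(img_tensor)                 # forward pass fills features_blobs
idx = logit.argmax(dim=1).item()          # top-1 class index

cams = returnCAM(features_blobs[0], weight_softmax, [idx])
heatmap = cv2.applyColorMap(cams[0], cv2.COLORMAP_JET)
overlay = np.uint8(heatmap * 0.3 + img * 0.5)           # blend heatmap and image
cv2.imwrite('cam_overlay.jpg', overlay)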