Example 1
# The snippet begins mid-call; the loader opening below is assumed from the
# parallel (unpermuted) test loader shown in Example 3.
testB_loader = torch.utils.data.DataLoader(datasets.MNIST(
    'MNIST_data',
    train=False,
    download=True,
    transform=transforms.Compose([
        transforms.Lambda(transform_permute),
        transforms.ToTensor(),
        transforms.Normalize((0.1307, ), (0.3081, ))
    ])),
                                           shuffle=False,
                                           **data_params)

# Model
model = NeuralNetwork(**nn_params)
model = model.to(model.device)
model.load("modelA.ckpt")
prev_opt_thetas = deepcopy(list(model.parameters()))

optimizer = optim.Adam(model.parameters(), **opt_params)
base_loss_fn = torch.nn.CrossEntropyLoss(reduction="mean")  # "elementwise_mean" is the removed pre-1.0 spelling of "mean"

# Generate permute indices
ind_permute = np.arange(0, 28)
np.random.seed(0)
np.random.shuffle(ind_permute)
np.save("permuteB.npy", ind_permute)

# Load the previous task's Fisher information matrix (FIM) and move each
# tensor to the model's device
fishers_cpu = torch.load("fisherA.pth")
fishers = [fisher.to(model.device) for fisher in fishers_cpu]
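
# For context: `prev_opt_thetas` and `fishers` are the ingredients of an
# elastic weight consolidation (EWC) penalty. A minimal sketch of how they
# are typically combined while training on the permuted task (the lambda
# value and the function name are illustrative, not the original code):
lam = 100.0  # assumed regularisation strength

def ewc_loss(output, target):
    loss = base_loss_fn(output, target)
    for theta, theta_star, fisher in zip(model.parameters(),
                                         prev_opt_thetas, fishers):
        loss = loss + (lam / 2.0) * (fisher * (theta - theta_star) ** 2).sum()
    return loss
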
Example 2
# The snippet begins mid-chain; the opening `if` below is assumed.
if model_type == 'NeuralNetwork':
    is_inception = False
    model_ft = NeuralNetwork(classes, image_size).to(device)
elif model_type == 'Inception3':
    is_inception = True
    model_ft = Inception3(num_classes=len(classes)).to(device)
elif model_type == 'Inception_transfer':
    is_inception = False
    model_ft = Inceptionnet(classes, aux_logits=True).to(device)
elif model_type == 'Resnet':
    is_inception = False  # added: otherwise undefined on this branch
    model_ft = Resnet(classes).to(device)
elif model_type == 'Alexnet':
    is_inception = False  # added: otherwise undefined on this branch
    model_ft = Alexnet(classes).to(device)
else:
    raise ValueError("unknown model_type: %s" % model_type)
# print("model:", model_ft)
# train the model
loss_fn = nn.CrossEntropyLoss()
optimizer_ft = torch.optim.SGD(model_ft.parameters(), lr=1e-3)

# set model: options [model, model_ft]
train_model = model_ft
train_optimizer = optimizer_ft


def train(dataloader, model, loss_fn, optimizer, is_inception=False):
    size = len(dataloader.dataset)
    print("train size", size)
    for batch, (X, y) in enumerate(dataloader):
        X, y = X.to(device), y.to(device)

        # compute prediction error (the source truncates here; the rest of
        # this body is a standard completion, not the original code)
        if is_inception:
            pred, aux_outputs = model(X)  # Inception3 also returns aux logits
            loss = loss_fn(pred, y) + 0.4 * loss_fn(aux_outputs, y)
        else:
            pred = model(X)
            loss = loss_fn(pred, y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
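
# Hypothetical invocation of the function above (the dataloader variable is
# an assumption; it does not appear in the snippet):
# train(train_dataloader, train_model, loss_fn, train_optimizer, is_inception)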

Example 3
# The snippet begins mid-call; the training-loader opening below is assumed
# from the parallel testA_loader that follows (train=True, shuffle=True).
trainA_loader = torch.utils.data.DataLoader(datasets.MNIST(
    'MNIST_data',
    train=True,
    download=True,
    transform=transforms.Compose(
        [transforms.ToTensor(),
         transforms.Normalize((0.1307, ), (0.3081, ))])),
                                            shuffle=True,
                                            **data_params)

testA_loader = torch.utils.data.DataLoader(datasets.MNIST(
    'MNIST_data',
    train=False,
    download=True,
    transform=transforms.Compose(
        [transforms.ToTensor(),
         transforms.Normalize((0.1307, ), (0.3081, ))])),
                                           shuffle=False,
                                           **data_params)

# Model
model = NeuralNetwork(**nn_params)
model = model.to(model.device)
optimizer = optim.Adam(model.parameters(), **opt_params)
loss_fn = torch.nn.CrossEntropyLoss(reduction="mean")  # "elementwise_mean" was removed in PyTorch 1.0

# Create callbacks
checkpoint = CheckPoint(model, "modelA.ckpt")
earlystop = EarlyStopping(**earlystop_params)

# Train and evaluate the model
flg_stop = False
for epoch in range(1, params["n_epochs"] + 1):
    print("\n[EPOCH %d]" % (epoch))
    loss_train = train(model,
                       trainA_loader,
                       optimizer,
                       loss_fn,
                       epoch)  # the call is truncated in the source; closed minimally here
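
    # Hypothetical continuation (the original loop body is truncated): an
    # evaluation pass plus the two callbacks created above. The `evaluate`
    # helper and both callback call signatures are assumptions, not the
    # project's actual API.
    loss_val = evaluate(model, testA_loader, loss_fn)
    checkpoint.backup(loss_val)           # assumed CheckPoint usage
    flg_stop = earlystop.check(loss_val)  # assumed EarlyStopping usage
    if flg_stop:
        print("Early stopping at epoch %d" % epoch)
        break
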
Example 4
# The snippet begins mid-listing: `train_features` and `valid_features` are
# loaded earlier in the original source (not shown here).
train_val = torch.load('./cnn_train_feature/train_cnn_val.pt').type(
    torch.LongTensor)
valid_val = torch.load('./cnn_train_feature/valid_cnn_val.pt').type(
    torch.LongTensor)
print("train_features", train_features.shape)
print("train_val", train_val.shape)
print("valid_features", valid_features.shape)
print("valid_val", valid_val.shape)

# model, optimizer, loss function
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
feature_size = 2048
model = NeuralNetwork(feature_size).to(device)
# note: loading the full pickled checkpoint replaces the freshly built
# network above; map_location keeps the tensors on the chosen device
model = torch.load("./best_cnn.pth", map_location=device)
optimizer = torch.optim.Adam(model.parameters(), lr=0.000025)
loss_function = nn.CrossEntropyLoss()

# some training parameters
batch_size = 64
num_epoch = 100
total_length = len(train_features)
max_accuracy = 0
logfile = open('log.txt', 'w')
now = datetime.datetime.now()
logfile.write("start training at: " + str(now) + "\n")
logfile.flush()

# start training
model.train()
train_loss = []
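
# The listing ends where the epoch loop would begin. A minimal sketch of the
# mini-batch loop implied by `batch_size`, `total_length`, and `train_loss`
# (the original continuation is not shown; this is an assumption):
for epoch in range(num_epoch):
    permutation = torch.randperm(total_length)
    for i in range(0, total_length, batch_size):
        idx = permutation[i:i + batch_size]
        X = train_features[idx].to(device)
        y = train_val[idx].to(device)

        optimizer.zero_grad()
        loss = loss_function(model(X), y)
        loss.backward()
        optimizer.step()
        train_loss.append(loss.item())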