Example #1
def train():
    with fluid.dygraph.guard():
        epoch_num = train_parameters["num_epochs"]
        net = DenseNet("densenet", layers=121, dropout_prob=train_parameters['dropout_prob'],
                       class_dim=train_parameters['class_dim'])
        optimizer = optimizer_rms_setting(net.parameters())
        file_list = os.path.join(train_parameters['data_dir'], "train.txt")
        train_reader = paddle.batch(reader.custom_image_reader(file_list, train_parameters['data_dir'], 'train'),
                                    batch_size=train_parameters['train_batch_size'],
                                    drop_last=True)
        test_reader = paddle.batch(reader.custom_image_reader(file_list, train_parameters['data_dir'], 'val'),
                                   batch_size=train_parameters['train_batch_size'],
                                   drop_last=True)
        if train_parameters["continue_train"]:
            model, _ = fluid.dygraph.load_dygraph(train_parameters["save_persistable_dir"])
            net.load_dict(model)

        best_acc = 0
        for epoch_id in range(epoch_num):

            for batch_id, data in enumerate(train_reader()):
                dy_x_data = np.array([x[0] for x in data]).astype('float32')
                y_data = np.array([x[1] for x in data]).astype('int')
                y_data = y_data[:, np.newaxis]

                img = fluid.dygraph.to_variable(dy_x_data)
                label = fluid.dygraph.to_variable(y_data)
                label.stop_gradient = True
                t1 = time.time()
                out, acc = net(img, label)
                t2 = time.time()
                forward_time = t2 - t1
                loss = fluid.layers.cross_entropy(out, label)
                avg_loss = fluid.layers.mean(loss)
                # dy_out = avg_loss.numpy()
                t3 = time.time()
                avg_loss.backward()
                t4 = time.time()
                backward_time = t4 - t3
                optimizer.minimize(avg_loss)
                net.clear_gradients()
                # print(forward_time, backward_time)

                # snapshot of parameter values (not used further in this example)
                dy_param_value = {}
                for param in net.parameters():
                    dy_param_value[param.name] = param.numpy()

                if batch_id % 40 == 0:
                    logger.info("Loss at epoch {} step {}: {}, acc: {}".format(
                        epoch_id, batch_id, avg_loss.numpy(), acc.numpy()))

            net.eval()
            epoch_acc = eval_net(test_reader, net)
            net.train()
            if epoch_acc > best_acc:
                fluid.dygraph.save_dygraph(net.state_dict(), train_parameters["save_persistable_dir"])
                fluid.dygraph.save_dygraph(optimizer.state_dict(), train_parameters["save_persistable_dir"])
                best_acc = epoch_acc
                logger.info("model saved at epoch {}, best accuracy is {}".format(epoch_id, best_acc))
        logger.info("Final loss: {}".format(avg_loss.numpy()))
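The loop above calls an eval_net helper that this example does not define. The sketch below is an assumption rather than the project's actual code: it reuses the batch-evaluation pattern from Example #2 and returns the mean accuracy over the reader, which is what the best_acc comparison expects.

# Minimal sketch of the assumed eval_net helper (not part of the original example).
import numpy as np
import paddle.fluid as fluid

def eval_net(reader, net):
    accs = []
    for data in reader():
        dy_x_data = np.array([x[0] for x in data]).astype('float32')
        y_data = np.array([x[1] for x in data]).astype('int')[:, np.newaxis]

        img = fluid.dygraph.to_variable(dy_x_data)
        label = fluid.dygraph.to_variable(y_data)
        label.stop_gradient = True

        # the network is assumed to return (logits, accuracy) as in Example #2
        _, acc = net(img, label)
        accs.append(acc.numpy()[0])
    return np.mean(accs)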
Example #2
def eval():

    file_list = os.path.join(train_parameters['data_dir'], "eval.txt")
    with fluid.dygraph.guard():
        model, _ = fluid.dygraph.load_dygraph(
            train_parameters["save_persistable_dir"])
        net = DenseNet("densenet",
                       layers=121,
                       dropout_prob=train_parameters['dropout_prob'],
                       class_dim=train_parameters['class_dim'])
        net.load_dict(model)
        net.eval()
        test_reader = paddle.batch(
            reader.custom_image_reader(file_list,
                                       train_parameters['data_dir'],
                                       'val'),
            batch_size=train_parameters['train_batch_size'],
            drop_last=True)
        accs = []
        for batch_id, data in enumerate(test_reader()):
            dy_x_data = np.array([x[0] for x in data]).astype('float32')
            y_data = np.array([x[1] for x in data]).astype('int')
            y_data = y_data[:, np.newaxis]

            img = fluid.dygraph.to_variable(dy_x_data)
            label = fluid.dygraph.to_variable(y_data)
            label.stop_gradient = True

            out, acc = net(img, label)
            lab = np.argsort(out.numpy())
            #print(batch_id, label.numpy()[0][0], lab[0][-1])
            accs.append(acc.numpy()[0])
    print(np.mean(accs))
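Examples #1 and #2 both rely on reader.custom_image_reader, which is not shown either. A minimal sketch of the assumed contract: each line of the file list holds a relative image path and an integer label, and the returned reader yields (CHW float image, label) pairs, shuffling only in 'train' mode. The real reader's list format, input size and augmentation almost certainly differ.

# Hypothetical custom_image_reader; the list format, image size and normalization
# are assumptions about the project's reader module, not taken from it.
import os
import random
import cv2
import numpy as np

def custom_image_reader(file_list, data_dir, mode, size=224):
    with open(file_list) as f:
        lines = [line.strip().split('\t') for line in f if line.strip()]

    def reader():
        if mode == 'train':
            random.shuffle(lines)
        for path, label in lines:
            img = cv2.imread(os.path.join(data_dir, path))
            img = cv2.resize(img, (size, size)).astype('float32') / 255.0
            yield img.transpose((2, 0, 1)), int(label)  # CHW image, int label

    return reader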
Example #3
def infer():
    with fluid.dygraph.guard():
        net = DenseNet("densenet", layers=121, dropout_prob=train_parameters['dropout_prob'],
                       class_dim=train_parameters['class_dim'])
        # load checkpoint
        model_dict, _ = fluid.dygraph.load_dygraph(train_parameters["save_persistable_dir"])
        net.load_dict(model_dict)
        print("checkpoint loaded")

        # start evaluate mode
        net.eval()
        
        label_dic = train_parameters["label_dict"]
        label_dic = {v: k for k, v in label_dic.items()}
        
        img_path = train_parameters['infer_img']
        img = read_img(img_path)
        
        results = net(fluid.dygraph.to_variable(img))
        lab = np.argsort(results.numpy())
        print("image {} Infer result is: {}".format(img_path, label_dic[lab[0][-1]]))
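infer() depends on a read_img helper that is also not shown. A minimal sketch under the usual assumptions for this kind of classifier (load with cv2, resize, scale to [0, 1], reorder to NCHW with a batch dimension); the project's real helper may resize and normalize differently.

# Hypothetical read_img helper; input size and normalization are assumptions.
import cv2
import numpy as np

def read_img(img_path, size=224):
    img = cv2.imread(img_path)              # BGR, HWC, uint8
    img = cv2.resize(img, (size, size))
    img = img.astype('float32') / 255.0     # scale to [0, 1]
    img = img.transpose((2, 0, 1))          # HWC -> CHW
    return img[np.newaxis, :]               # add batch dimension -> NCHW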
Example #4
import numpy as np
import openslide  # https://openslide.org/
import torch
import torch.optim as optim
import torch.utils.data as data

import imgaug  # https://github.com/aleju/imgaug
from imgaug import augmenters as iaa

import misc
import dataset
from net import DenseNet
from config import Config
import cv2

device = 'cuda'

net = DenseNet(3, 2)
net.eval()  # infer mode

viable_saved_state = torch.load('log/v1.0.0.1/model_net_46.pth')

new_saved_state = {}

# strip the 'module.' prefix (7 characters) left by DataParallel when the checkpoint was saved
for key, value in viable_saved_state.items():
    new_saved_state[key[7:]] = value

net.load_state_dict(new_saved_state)
net = torch.nn.DataParallel(net).to(device)

wsi_img = openslide.OpenSlide('01_01_0138.svs')
wsi_w, wsi_h = wsi_img.level_dimensions[0]

prediction = np.zeros((wsi_h, wsi_w))
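The snippet stops after allocating the full-resolution prediction map. A minimal sketch of how the sliding-window inference could continue, assuming a 256-pixel window, a softmax over the two output classes, and that the class-1 probability is written back per tile; the window size, stride, batching and stitching of the real pipeline are not taken from the original code.

# Hypothetical sliding-window inference over the slide; window size, stride and
# the per-tile write-back are assumptions, not part of the original snippet.
import torch.nn.functional as F

win = 256
with torch.no_grad():
    for y in range(0, wsi_h - win + 1, win):
        for x in range(0, wsi_w - win + 1, win):
            tile = wsi_img.read_region((x, y), 0, (win, win)).convert('RGB')
            tile = np.asarray(tile, dtype=np.float32).transpose(2, 0, 1) / 255.0  # CHW, [0, 1]
            tile = torch.from_numpy(tile)[None].to(device)                        # 1 x C x H x W
            prob = F.softmax(net(tile), dim=1)                                    # 1 x 2
            prediction[y:y + win, x:x + win] = prob[0, 1].item()                  # class-1 probability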
Example #5
	if 'best_loss' in state:
		if state['best_loss'] is None:
			continue
		print(state['best_loss'])

		if state['best_loss'] > 0.50 or state['best_loss'] < 0.47:
			continue
	if 'best_acc' in state:
		print(state['best_acc'])
		if state['best_acc'] is None or state['best_acc'] < 85.5:
			continue
	try:
		net.load_state_dict(state['state_dict'])
	except Exception:
		# skip checkpoints whose weights do not match the current model
		continue
	net.cuda()
	net.eval()
	#eval()
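The next fragment builds a submission from final_dict, which is expected to map each sample ID to a vector of class probabilities. A minimal sketch of how such a dictionary could be filled, following the commented-out block at the end of this example; test_dataloader, net on the GPU and the form of the IDs are assumptions from the surrounding script, with torch.no_grad() used in place of the older Variable API.

# Hypothetical construction of final_dict; mirrors the commented-out block below.
# Names are assumed from the surrounding script, not confirmed.
import torch
import torch.nn.functional as F

final_dict = {}
with torch.no_grad():
	for data, ids in test_dataloader:
		preds = F.softmax(net(data.cuda()), dim=1).cpu().numpy()
		for i, sample_id in enumerate(ids):
			final_dict[sample_id] = preds[i]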

import pickle
for ID in final_dict:
	pred = final_dict[ID]
	#pickle.dump(pred, file=open("den_pred", 'w'))
	#pred = pred_dict[ID]
	subm['predicted'][ID] = np.argmax(pred) + 1

'''
for (data, ids) in test_dataloader:
	data = Variable(data).cuda()
	preds = net(data)
	preds = F.softmax(preds, dim=1).cpu().data.numpy()
	for _ in range(preds.shape[0]):