Code Example #1
def prediction(model_fp, input_fp, output_fp, limit):
    # Restore the trained FCN weights and move the model to the GPU.
    model = FCN()
    model.load_state_dict(tor.load(model_fp))
    model.cuda()
    model.eval()    # inference mode: disables dropout / freezes batch-norm statistics

    dir_size = len(os.listdir(input_fp))
    limit = limit if limit else float("inf")

    for i in range(dir_size):
        if i < limit:
            # Load the i-th satellite image and convert it from HWC to CHW layout.
            file_name = os.path.join(input_fp, "{:0>4}_sat.jpg".format(i))
            img = plt.imread(file_name)
            img = np.moveaxis(img, 2, 0)
            img = tor.FloatTensor(np.array([img]))          # add the batch dimension
            img_var = Variable(img).type(tor.FloatTensor).cuda()

            # Forward pass, then take the argmax over the class dimension.
            pred_img = model(img_var)
            pred_img = tor.max(pred_img, 1)[1]
            pred_img = pred_img.cpu().data.numpy()
            pred_img = np.moveaxis(pred_img, 0, 2)

            # Map class indices back to an RGB mask and write it to disk
            # (scipy.misc.imsave requires scipy < 1.2; newer code would use imageio).
            output_img = img_recovery(pred_img)
            scipy.misc.imsave(
                os.path.join(output_fp, "{:0>4}_mask.png".format(i)),
                output_img)
        else:
            break
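A minimal sketch of how prediction() might be driven from the command line; the argparse flags and the default output directory are assumptions for illustration, not part of the original example.

# Hypothetical CLI wrapper around prediction(); flag names and defaults are assumed.
import argparse

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--model", required=True, help="path to the saved state_dict (.pkl)")
    parser.add_argument("--input", required=True, help="directory holding the *_sat.jpg images")
    parser.add_argument("--output", default="./predictions", help="directory for the *_mask.png outputs")
    parser.add_argument("--limit", type=int, default=0, help="0 means process every image")
    args = parser.parse_args()

    prediction(args.model, args.input, args.output, args.limit)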
Code Example #2
            noised = hisEqulColor(noised) / 255.       # noised -> 64x64x3
            Val_train_batch[i] = noised
            Val_ground_truth[i] = 1 - origin / 255.
            Val_label_batch[i] = Label[i][0]
    global generated_size
    for i in range(1000):     # pull training samples from the generate() method of the synth.py module
        id = generated_size % data_size
        img, origin, label = G.generate()     # G is a Generator instance from the synth module
        origin = 255. - cv2.cvtColor(origin, cv2.COLOR_RGB2GRAY)   # inverted grayscale ground truth
        Train_batch[id] = hisEqulColor(img) / 255.   # histogram-equalised input in [0, 1] (see the hisEqulColor sketch below)
        Ground_truth[id] = origin / 255.
        Label_batch[id] = label
        generated_size += 1
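The hisEqulColor helper is not shown in the excerpt; a common implementation (an assumption here, not taken from the original) equalises only the luma channel in YCrCb space so the colours are preserved:

# Plausible hisEqulColor() implementation; assumed, not from the original snippet.
import cv2

def hisEqulColor(img):
    ycrcb = cv2.cvtColor(img, cv2.COLOR_BGR2YCrCb)
    y, cr, cb = cv2.split(ycrcb)
    y = cv2.equalizeHist(y)                  # equalise only the luma channel
    ycrcb = cv2.merge((y, cr, cb))
    return cv2.cvtColor(ycrcb, cv2.COLOR_YCrCb2BGR)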
        
if use_cuda:
    Decoder = Decoder.cuda()
    Encoder = Encoder.cuda()

def train_bezier(x, img):       # Takes stroke parameters and an image, and regresses the image
                                # onto the strokes rasterised by draw()
    Decoder.train()
    x = x.reshape(-1, 9)        # 9 parameters per Bezier stroke
    bezier = []
    for i in range(x.shape[0]):
        bezier.append(draw(x[i]))               # reference rasterisation of each stroke
    bezier = torch.tensor(bezier).float()
    if use_cuda:
        bezier = bezier.cuda()
    optimizerD.zero_grad()
    loss = criterion(img, bezier)
    loss.backward()
    optimizerD.step()
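A possible driver loop for train_bezier(); that the Decoder maps a batch of 9-dimensional stroke parameters to rendered images is an assumption, not something shown in the snippet.

# Hypothetical training driver; the Decoder interface and batch size are assumed.
import numpy as np
import torch

for step in range(10000):
    x = np.random.uniform(0, 1, size=(64, 9)).astype(np.float32)   # random strokes
    x_t = torch.tensor(x)
    if use_cuda:
        x_t = x_t.cuda()
    img = Decoder(x_t)          # neural rendering of the strokes (assumed interface)
    train_bezier(x, img)        # fit the rendering to the draw() reference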
Code Example #3
        batch_size=1,
        shuffle=False,
        num_workers=2)

    numClass = 8
    numPlanes = 32
    levels = 5
    levelDepth = 2
    kernelSize = 3

    model = FCN(numPlanes, levels, levelDepth, numClass, kernelSize, 0.1)

    indices = []
    mapLoc = None if haveCuda else {'cuda:0': 'cpu'}
    if haveCuda:
        model = model.cuda()

    criterion = CrossEntropyLoss2d()

    epochs = 200
    lr = 1e-1
    weight_decay = 1e-3
    momentum = 0.5
    patience = 20

    optimizer = torch.optim.SGD(
        [{'params': model.parameters()}],
        lr=lr,
        weight_decay=weight_decay,
        momentum=momentum)
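CrossEntropyLoss2d is not part of torch.nn; a typical definition (an assumption here, the original class is not shown) wraps an NLL loss around log_softmax over the class dimension:

# Assumed CrossEntropyLoss2d definition for N x C x H x W outputs and N x H x W targets.
import torch.nn as nn
import torch.nn.functional as F

class CrossEntropyLoss2d(nn.Module):
    def __init__(self, weight=None):
        super(CrossEntropyLoss2d, self).__init__()
        self.nll_loss = nn.NLLLoss(weight)

    def forward(self, outputs, targets):
        # outputs: raw per-pixel class scores, targets: per-pixel class indices
        return self.nll_loss(F.log_softmax(outputs, dim=1), targets)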
Code Example #4
def train(data_loader, model_index, x_eval_train, y_eval_train):
    ### Model Initialization
    fcn = FCN()
    #print (fcn.b_1_conv_1[0].weight.data)

    d = tor.load("./models/vgg16_pretrained.pkl")
    fcn.vgg16_load(d)
    #d = tor.load("./models/fcn_model_1_1.pkl")
    #fcn.load_state_dict(d)
    fcn.cuda()
    #loss_func = tor.nn.CrossEntropyLoss(weight=w)
    loss_func = tor.nn.CrossEntropyLoss()
    #loss_func = tor.nn.MSELoss()
    #optim = tor.optim.SGD(fcn.parameters(), lr=LR, momentum=MOMENTUM)
    # Separate Adam optimizers for the newly attached head blocks (b_6_conv_1..3 and
    # b_7_trans_1); the optimizer over all parameters is only used to clear gradients.
    optim1 = tor.optim.Adam(fcn.b_6_conv_1.parameters(), lr=LR)
    optim2 = tor.optim.Adam(fcn.b_6_conv_2.parameters(), lr=LR)
    optim3 = tor.optim.Adam(fcn.b_6_conv_3.parameters(), lr=LR)
    optim4 = tor.optim.Adam(fcn.b_7_trans_1.parameters(), lr=LR)
    optim = tor.optim.Adam(fcn.parameters(), lr=LR)
    ### Training
    for epoch in range(EPOCH):
        print("|Epoch: {:>4} |".format(epoch + 1), end="")

        ### Training
        for step, (x_batch, y_batch) in enumerate(data_loader):
            x = Variable(x_batch).type(tor.FloatTensor).cuda()
            y = Variable(y_batch).type(tor.LongTensor).cuda()

            pred = fcn(x)
            optim1.zero_grad()
            optim2.zero_grad()
            optim3.zero_grad()
            optim4.zero_grad()
            optim.zero_grad()
            loss = loss_func(pred, y)
            loss.backward()
            optim1.step()
            optim2.step()
            optim3.step()
            optim4.step()
        print(pred[:2])                     # debug: raw class scores of the last batch
        print(tor.max(pred[:5], 1)[1])      # debug: predicted class indices
        ### Evaluation
        loss = float(loss.data)
        acc = evaluate(fcn, x_eval_train, y_eval_train)

        print("|Loss: {:<8} |Acc: {:<8}".format(loss, acc))

        ### Save model
        if epoch % RECORD_MODEL_PERIOD == 0:
            tor.save(
                fcn.state_dict(),
                os.path.join(MODEL_ROOT,
                             "fcn_model_{}_{}.pkl".format(model_index, epoch)))

        ### Record
        record_data = dict()
        if epoch == 0:
            record_data["model_name"] = "fcn_model_{}.pkl".format(model_index)
            record_data["data_size"] = AVAILABLA_SIZE
            record_data["batch_size"] = BATCHSIZE
            record_data["decay"] = str((LR_STEPSIZE, LR_GAMMA))
            record_data["lr_init"] = float(optim1.param_groups[0]["lr"])
            record_data["lr"] = float(optim1.param_groups[0]["lr"])
            record_data["record_epoch"] = RECORD_MODEL_PERIOD
            record_data["loss"] = loss
            record_data["acc"] = acc
        else:
            record_data["model_name"] = "fcn_model_{}.pkl".format(model_index)
            record_data["lr"] = float(optim1.param_groups[0]["lr"])
            record_data["loss"] = loss
            record_data["acc"] = acc

        record(RECORD_FP, record_data)
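The evaluate() helper called above is not shown; a minimal pixel-accuracy sketch, assuming x_eval_train is an N x C x H x W float tensor and y_eval_train an N x H x W label tensor (both assumptions):

# Hypothetical evaluate(): mean pixel accuracy of the argmax prediction.
def evaluate(model, x_eval, y_eval):
    model.eval()
    with tor.no_grad():
        x = Variable(x_eval).type(tor.FloatTensor).cuda()
        pred = tor.max(model(x), 1)[1].cpu()        # N x H x W predicted class indices
        acc = float((pred == y_eval.long()).sum()) / y_eval.numel()
    model.train()
    return acc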
Code Example #5
logger.addHandler(console)
def weights_init(m):
    # Initialise every Conv2d layer's weights and biases from N(0, 1).
    if isinstance(m, nn.Conv2d):
        torch.nn.init.normal_(m.weight.data, mean=0, std=1)
        torch.nn.init.normal_(m.bias.data, mean=0, std=1)

with open("/home/yuchen/Programs/cancer-prognosis/best.txt", 'r') as file:
    best = float(file.readline())
    epoch0 = int(file.readline())
print("Last Train: accu %f , epoch0 %d" % (best, epoch0))
try:
    # Resume from a previously saved model if a checkpoint exists ...
    net = torch.load('/home/yuchen/Programs/cancer-prognosis/seg_model.pkl')
except Exception:
    # ... otherwise start from a freshly initialised FCN.
    net = FCN()
    net.apply(weights_init)
net.cuda()


optimizer = torch.optim.Adam(net.parameters(),lr=LR, weight_decay=0.5)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=3, gamma=0.9)

# Heavily up-weight the rare positive class; NLLLoss2d expects log-probabilities
# (it is deprecated in recent PyTorch, where nn.NLLLoss handles 2-D targets directly).
loss_func = nn.NLLLoss2d(weight=torch.FloatTensor([1, 8000]).cuda())

data_loder = DataLoader()
zeros = np.zeros((512,512))
for epoch in range(epoch0, epoch0+EPOCH):
    scheduler.step()
    train_step = 0
    test_step = 0
    train_loss = 0
    test_loss = 0
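The excerpt stops before the inner loop; a minimal sketch of a single training step, assuming a 1-channel 512x512 input, an integer mask, and raw-score network outputs (all assumptions, since the DataLoader interface is not shown):

# Hypothetical single training step; shapes (1x1x512x512 input, 1x512x512 mask)
# and the explicit log_softmax are assumptions, not part of the original code.
import torch.nn.functional as F

def train_one_step(img, mask):
    img = torch.tensor(img, dtype=torch.float32).unsqueeze(0).unsqueeze(0).cuda()   # 1 x 1 x 512 x 512
    mask = torch.tensor(mask, dtype=torch.long).unsqueeze(0).cuda()                 # 1 x 512 x 512
    out = net(img)                                       # 1 x 2 x 512 x 512 raw scores
    loss = loss_func(F.log_softmax(out, dim=1), mask)    # NLLLoss2d wants log-probabilities
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    return float(loss.data)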