tumorSlice=False,
                  path='./data/Datalist-T2-bei3-withMask.csv',
                  dataAug=[False, False, False])

# Validate on the training split: run the model in eval mode over every
# full batch, collecting hard predictions, softmax score vectors, features
# (pnasnet5large only) and per-batch accuracy.
acc_val = []   # per-batch accuracy values
Preds = []     # argmax class prediction for every sample
Scores = []    # per-sample softmax probability vectors
Fts = []       # feature vectors from myforward (pnasnet5large only)
n_batch = dataset.sampleNum // BATCH_SIZE  # last partial batch is dropped

model.eval()
# Inference only: disable autograd bookkeeping to cut memory/compute.
with torch.no_grad():
    for batch_i in range(n_batch):
        print(batch_i)
        X_, y_ = dataset.generateDataBatch(batch_i, batchsize=BATCH_SIZE)
        X = torch.FloatTensor(X_).to(device)
        y = torch.LongTensor(y_).to(device)

        if model_name == 'pnasnet5large':
            # myforward also returns an intermediate feature tensor.
            ft, pred = model.myforward(X)
            # Fix: features were captured but never stored before.
            Fts.extend(ft.cpu().numpy())
        else:
            pred = model.forward(X)

        # Hard predictions: argmax over the class dimension.
        pred_np = pred.argmax(1).cpu().numpy()
        Preds.extend(pred_np)
        pred_array = pred.cpu().numpy()
        # SoftMax is a project helper -- presumably maps a logit vector to
        # probabilities; TODO confirm against its definition.
        for i in range(pred_array.shape[0]):
            Scores.append(SoftMax(pred_array[i, :]))
        # NOTE(review): argument order is (y_pred, y_true); accuracy is
        # symmetric so the result is unaffected.
        acc_val.append(accuracy_score(pred_np, y.cpu().numpy()))
# ---- Example #2 ----
# Validation dataset: deterministic order (no shuffle), tumor-slice mode
# enabled, and all augmentations switched off.
valset = DataSet(
    model,
    shuffle=False,
    tumorSlice=True,
    path=args.ValListPath,
    dataAug=[False] * 3,
)

# Training loop: optimise for EPOCHS epochs, recording the loss every 20
# iterations. Train_acc / Val_acc are presumably filled after this window.
Train_loss = []
Train_acc = []
Val_acc = []
# Batch count is invariant across epochs; hoisted out of the epoch loop.
n_batch = dataset.sampleNum // BATCH_SIZE
for epoch in range(EPOCHS):
    start = time.time()  # epoch timer (presumably reported later)
    model.train()
    for batch_i in range(n_batch):
        X_train, y_train = dataset.generateDataBatch(batch_i,
                                                     batchsize=BATCH_SIZE)
        X = torch.FloatTensor(X_train).to(device)
        y = torch.LongTensor(y_train).to(device)

        # Standard optimisation step: clear gradients, forward, loss,
        # backward, parameter update.
        optimizer.zero_grad()
        pred = model(X)
        loss = loss_func(pred, y)
        loss.backward()
        optimizer.step()

        if batch_i % 20 == 0:
            acc = ACC(pred, y)
            # loss.item() replaces the deprecated `.data` access and the
            # redundant .cpu().numpy() round-trip; it yields a plain float.
            loss_value = loss.item()
            Train_loss.append(loss_value)
            print('epoch:{},iter:{},loss:{},ACC={}'.format(
                epoch, batch_i, loss_value, acc))