Example #1
 def __init__(self):
     super(FCN_RNN, self).__init__()
     self.fcn = FCN()
     self.reshape = RESHAPE()
     self.encoder1 = Encoder(5000, 512)
     self.encoder2 = Encoder(512, 32)
     self.linear = nn.Linear(384, 1)
     self.sigmoid = nn.Sigmoid()
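 # Hypothetical forward pass for this FCN_RNN: a sketch that chains the modules
 # declared above in order. The chaining, the flatten step, and the 384-feature
 # input to the linear layer are assumptions, not part of the original snippet.
 def forward(self, x):
     x = self.fcn(x)                # fully convolutional feature extractor
     x = self.reshape(x)            # reshape features into encoder input
     x = self.encoder1(x)           # 5000 -> 512
     x = self.encoder2(x)           # 512 -> 32
     x = self.linear(x.flatten(1))  # flatten to 384 features -> single logit
     return self.sigmoid(x)         # probability in [0, 1]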
Example #2
def main(_):
    check_dir()
    print_config()
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.5)
    run_option = tf.ConfigProto(gpu_options=gpu_options)
    with tf.Session(config=run_option) as sess:
        fcn = FCN(config=FLAGS, sess=sess)
        fcn.build_model()
        if FLAGS.is_training:
            fcn.train_model()
        if FLAGS.is_testing:
            fcn.test_model()
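# Hypothetical flag definitions needed to run main(); FLAGS is defined elsewhere
# in the project, so the defaults and help strings below are assumptions based on
# how FLAGS.is_training and FLAGS.is_testing are used above (TensorFlow 1.x API).
flags = tf.app.flags
flags.DEFINE_boolean('is_training', True, 'run the training phase')
flags.DEFINE_boolean('is_testing', False, 'run the testing phase')
FLAGS = flags.FLAGS

if __name__ == '__main__':
    tf.app.run()  # parses flags and calls main(_)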
Example #3
    def __init__(self, proof):
        """Constructeur de la classe.

        :param proof: Une preuve
        :type proof: Proof

        :return: objet Demonstration
        :rtype: Demonstration"""
        super().__init__(proof)
        self.__fcn = FCN(proof)
        self.__clause_list = self.__fcn.clause_list
Example #4
def getCandidates(imageDir, FCN):
    mhd = re.compile(r".*\.mhd")
    files = [f for f in os.listdir(imageDir) if mhd.match(f) is not None]
    savedOut = [f[0:-4] for f in os.listdir("../Data/savedFCNOut/")]
    imageNames = []
    keptIndiciesList = []
    for f in files:
        image, orig, spacing = handler.load_itk_image("{}{}".format(
            imageDir, f))
        print("Analyzing {}".format(f[0:-4]))
        if f[0:-4] in savedOut:
            pred = np.load("../Data/savedFCNOut/{}.npy".format(f[0:-4]))
        else:
            pred, inputSize = FCN.predict(image, spacing, (1, 0.625, 0.625))
            np.save("../Data/savedFCNOut/{}".format(f[0:-4]), pred)

        print("FCN output size: {}".format(pred.shape))
        #pred = np.load("fullresnorm4.npy")
        #reshape
        #pred = pred.reshape(1, *pred.shape)
        yyes = pred[0, :, :, :, 1].reshape(1, pred.shape[1], pred.shape[2],
                                           pred.shape[3])
        ynot = pred[0, :, :, :, 0].reshape(1, pred.shape[1], pred.shape[2],
                                           pred.shape[3])
        pred = np.concatenate((ynot, yyes))
        pred = pred.reshape(1, 2, pred.shape[1], pred.shape[2], pred.shape[3])
        #handler.show_3d_img(pred[0][1])
        inputSize = (int(image.shape[0] * float(spacing[0])),
                     int(image.shape[1] * float(spacing[1]) / 0.625),
                     int(image.shape[2] * float(spacing[2]) / 0.625))
        print("input size: {}".format(inputSize))
        populatedArray, indicies = FCN.indexMapping(pred, inputSize)
        print("{} indicies found".format(len(indicies)))
        removedOverlap, keptIndicies = non_max_suppression(
            populatedArray, indicies)
        print("{} indicies kept".format(len(keptIndicies)))
        keptIndiciesList += [keptIndicies]
        imageNames += [f[0:-4]]
        #generateStats(imageNames, keptIndiciesList)

    return imageNames, keptIndiciesList
Example #5
def initialize_model(model_name,
                     feature_extract=False,
                     use_pretrained=False,
                     pre_model=None):
    # Initialize these variables, which will be set in the if/elif chain below.
    # Each of them is model specific; otherwise we would need to define the
    # structure ourselves, organizing a forward function with Module/Sequential.

    model_ft = None
    input_size = 0
    output_size = 0

    if model_name == "UNet_Adapted":
        """ UNet_Adapted
        """
        model_ft = UNet_Adapted(n_Channels, n_Classes)
        if use_pretrained:
            model_ft.load_state_dict(torch.load(pre_model))
        input_size = 512
        output_size = 512
        learningrate = 1e-1

    elif model_name == "U_Net":
        """ U_Net
        """
        model_ft = U_Net(n_Channels, n_Classes)
        if use_pretrained:
            model_ft.load_state_dict(torch.load(pre_model))
        input_size = 572
        output_size = 388
        learningrate = 1e-1

    elif model_name == "FCN":
        model_ft = models.resnet50(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        model_ft = FCN(model_ft, n_Classes, 224)

        input_size = 224
        output_size = 224
        learningrate = 1e-3
    else:
        print("Invalid model name, exiting...")
        exit()

    return model_ft, input_size, output_size, learningrate
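# Hypothetical usage of initialize_model(): wrap a pretrained ResNet-50 backbone
# with the project's FCN head and freeze the backbone for feature extraction.
# Assumes n_Channels and n_Classes are defined globally, as the function expects.
model_ft, input_size, output_size, learningrate = initialize_model(
    "FCN", feature_extract=True, use_pretrained=True)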
Example #6
def prediction(test_images, test_labels, weights, n_dim):
    """
    Run CNN on test_images.
    
    Input:
        test_images --> numpy array containing all test images
        test_labels --> numpy array containing the ground-truth labels for test_images
        weights --> str containing filename of weights to be loaded and tested
        n_dim --> height/width of test_images   
    
    Output:
        preds --> numpy array containing the predictions of each pixel on test_images
    """

    print("Loading model...")
    model = FCN((n_dim, n_dim, 3))
    print(model.summary())

    print("Loading weights...")
    model.load_weights(weights)
    model.compile(loss='binary_crossentropy',
                  optimizer=SGD(lr=0.1,
                                momentum=0.9,
                                decay=1e-6,
                                nesterov=True),
                  metrics=["accuracy"])

    print('Predicting...')
    preds = model.predict(test_images)

    print('Evaluating...')
    score = model.evaluate(test_images, test_labels, verbose=1)

    # Accuracy may not be the best metric because there are many more 0s than 1s (~50 million more)
    print("%s: %.2f%%" % (model.metrics_names[1], score[1] * 100))

    return preds
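# Hypothetical usage of prediction(); the weights filename, the 256x256 image
# size, and the test arrays are placeholders, not values from the original project.
preds = prediction(test_images, test_labels, "Weights/weights.h5", 256)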
Example #7
    def __init__(self, f, f_verbose=False, printlevel=0, pedantic=True, **kwds):
        """
        construct minuit object
        
        arguments of f are pased automatically by the following order
        1) using f.func_code.co_varnames,f.func_code.co_argcount (all python function has this)
        2) using f.__call__.func_code.co_varnames, f.__call__.co_argcount (with self docked off)
        3) using inspect.getargspec(for some rare builtin function)
        
        user can set limit on paramater by passing limit_<varname>=(min,max) keyword argument
        user can set initial value onparameter by passing <varname>=value keyword argument
        user can fix parameter by doing fix_<varname>=True
        user can set initial step by passing error_<varname>=initialstep keyword argument
        
        if f_verbose is set to True FCN will be built for verbosity printing value and argument for every function call
        """
        self.fcn = FCN(f, verbose=f_verbose)

        args = better_arg_spec(f)
        narg = len(args)

        self.fitarg = {}
        # maintain two dictionaries: position -> varname
        # and varname -> position
        self.varname = args
        self.pos2var = {i: k for i, k in enumerate(args)}
        self.var2pos = {k: i for i, k in enumerate(args)}

        self.tmin = ROOT.TMinuit(narg)
        self.set_printlevel(printlevel)
        self.prepare(**kwds)

        self.last_migrad_result = 0
        self.args, self.values, self.errors = None, None, None

        for vn in self.varname:
            if vn in kwds: self.fitarg[vn] = kwds[vn]
            if 'limit_' + vn in kwds: self.fitarg['limit_' + vn] = kwds['limit_' + vn]
            if 'fix_' + vn in kwds: self.fitarg['fix_' + vn] = kwds['fix_' + vn]

        if pedantic: self.pedantic(kwds)
Example #8
def main(argv):
    indir = args.indir
    mode = args.mode  # binary or multiclass or nonwear
    outdir = args.outdir

    if mode == 'multiclass':
        states = ['Wake', 'NREM 1', 'NREM 2', 'NREM 3', 'REM', 'Wake_ext']
    elif mode == 'binary':
        states = ['Wake', 'Sleep', 'Wake_ext']
        collate_states = ['NREM 1', 'NREM 2', 'NREM 3', 'REM']
    elif mode == 'nonwear':
        states = ['Wear', 'Nonwear']
        collate_states = ['Wake', 'NREM 1', 'NREM 2', 'NREM 3', 'REM']

    valid_states = [state for state in states if state != 'Wake_ext']
    num_classes = len(valid_states)

    if not os.path.exists(outdir):
        os.makedirs(outdir)

    resultdir = os.path.join(outdir, mode, 'models')
    if not os.path.exists(resultdir):
        os.makedirs(resultdir)

    # Read data from disk
    data = pd.read_csv(os.path.join(indir, 'features_30.0s.csv'))
    labels = data['label'].values
    users = data['user'].values
    if mode == 'binary':
        labels = np.array(
            ['Sleep' if lbl in collate_states else lbl for lbl in labels])
    elif mode == 'nonwear':
        labels = np.array(
            ['Wear' if lbl in collate_states else lbl for lbl in labels])

    # Read raw data
    shape_df = pd.read_csv(os.path.join(indir, 'datashape_30.0s.csv'))
    num_samples = shape_df['num_samples'].values[0]
    seqlen = shape_df['num_timesteps'].values[0]
    n_channels = shape_df['num_channels'].values[0]
    raw_data = np.memmap(os.path.join(indir, 'rawdata_30.0s.npz'),
                         dtype='float32',
                         mode='r',
                         shape=(num_samples, seqlen, n_channels))

    # Hyperparameters
    lr = args.lr  # learning rate
    num_epochs = args.num_epochs
    batch_size = args.batchsize
    max_seqlen = 1504
    num_channels = args.num_channels  # number of raw data channels
    feat_channels = args.feat_channels  # Add ENMO, z-angle and LIDS as additional channels

    # Use nested cross-validation based on users
    # Outer CV
    unique_users = list(set(users))
    random.shuffle(unique_users)
    cv_splits = 5
    user_cnt = Counter(users[np.isin(labels, valid_states)]).most_common()
    samp_per_fold = len(users) // cv_splits

    # Get users to be used in test for each fold such that each fold has similar
    # number of samples
    fold_users = [[] for i in range(cv_splits)]
    fold_cnt = [[] for i in range(cv_splits)]
    for user, cnt in user_cnt:
        idx = -1
        maxdiff = 0
        for j in range(cv_splits):
            if (samp_per_fold - sum(fold_cnt[j])) > maxdiff:
                maxdiff = samp_per_fold - sum(fold_cnt[j])
                idx = j
        fold_users[idx].append(user)
        fold_cnt[idx].append(cnt)

    predictions = []
    if mode != 'nonwear':
        wake_idx = states.index('Wake')
        wake_ext_idx = states.index('Wake_ext')
    for fold in range(cv_splits):
        print('Evaluating fold %d' % (fold + 1))
        test_users = fold_users[fold]
        trainval_users = [(key, val) for key, val in user_cnt
                          if key not in test_users]
        random.shuffle(trainval_users)
        # validation data is approximately 10% of total samples
        val_samp = 0.1 * sum([tup[1] for tup in user_cnt])
        nval = 0
        val_sum = 0
        while (val_sum < val_samp):
            val_sum += trainval_users[nval][1]
            nval += 1
        val_users = [key for key, val in trainval_users[:nval]]
        train_users = [key for key, val in trainval_users[nval:]]
        print('#users: Train = {:d}, Val = {:d}, Test = {:d}'.format(
            len(train_users), len(val_users), len(test_users)))

        # Create partitions
        # make a copy to change wake_ext for this fold
        fold_labels = np.array(
            [states.index(lbl) if lbl in states else -1 for lbl in labels])
        train_indices = get_partition(raw_data,
                                      fold_labels,
                                      users,
                                      train_users,
                                      states,
                                      mode,
                                      is_train=True)
        val_indices = get_partition(raw_data, fold_labels, users, val_users,
                                    states, mode)
        test_indices = get_partition(raw_data, fold_labels, users, test_users,
                                     states, mode)
        nsamples = len(train_indices) + len(val_indices) + len(test_indices)
        print('Train: {:0.2f}%, Val: {:0.2f}%, Test: {:0.2f}%'\
                .format(len(train_indices)*100.0/nsamples, len(val_indices)*100.0/nsamples,\
                        len(test_indices)*100.0/nsamples))

        if mode != 'nonwear':
            chosen_indices = train_indices[
                fold_labels[train_indices] != wake_ext_idx]
        else:
            chosen_indices = train_indices
        class_wts = class_weight.compute_class_weight(
            class_weight='balanced',
            classes=np.unique(fold_labels[chosen_indices]),
            y=fold_labels[chosen_indices])

        # Rename wake_ext as wake for training samples
        if mode != 'nonwear':
            rename_indices = train_indices[fold_labels[train_indices] ==
                                           wake_ext_idx]
            fold_labels[rename_indices] = wake_idx

        print('Train', Counter(np.array(fold_labels)[train_indices]))
        print('Val', Counter(np.array(fold_labels)[val_indices]))
        print('Test', Counter(np.array(fold_labels)[test_indices]))

        # Data generators for computing statistics
        stat_gen = DataGenerator(train_indices, raw_data, fold_labels, valid_states, partition='stat',\
                                  batch_size=batch_size, seqlen=seqlen, n_channels=num_channels, feat_channels=feat_channels,\
                                  n_classes=num_classes, shuffle=True)
        mean, std = stat_gen.fit()
        np.savez(os.path.join(resultdir, 'Fold' + str(fold + 1) + '_stats'),
                 mean=mean,
                 std=std)

        # Data generators for train/val/test
        train_gen = DataGenerator(train_indices, raw_data, fold_labels, valid_states, partition='train',\
                                  batch_size=batch_size, seqlen=seqlen, n_channels=num_channels, feat_channels=feat_channels,\
                                  n_classes=num_classes, shuffle=True, augment=True, aug_factor=0.75, balance=True,
                                  mean=mean, std=std)
        val_gen = DataGenerator(val_indices, raw_data, fold_labels, valid_states, partition='val',\
                                batch_size=batch_size, seqlen=seqlen, n_channels=num_channels, feat_channels=feat_channels,\
                                n_classes=num_classes, mean=mean, std=std)
        test_gen = DataGenerator(test_indices, raw_data, fold_labels, valid_states, partition='test',\
                                 batch_size=batch_size, seqlen=seqlen, n_channels=num_channels, feat_channels=feat_channels,\
                                 n_classes=num_classes, mean=mean, std=std)

        # Create model
        # Use batchnorm as first step since computing mean and std
        # across entire dataset is time-consuming
        model = FCN(input_shape=(seqlen, num_channels + feat_channels),
                    max_seqlen=max_seqlen,
                    num_classes=len(valid_states),
                    norm_max=args.maxnorm)
        #print(model.summary())
        model.compile(optimizer=Adam(lr=lr),
                      loss=focal_loss(),
                      metrics=['accuracy', macro_f1])

        # Train model
        # Use callback to compute F-scores over entire validation data
        metrics_cb = Metrics(val_data=val_gen, batch_size=batch_size)
        # Use early stopping and model checkpoints to handle overfitting and save best model
        model_checkpt = ModelCheckpoint(os.path.join(resultdir,'fold'+str(fold+1)+'_'+mode+'-{epoch:02d}-{val_f1:.4f}.h5'),\
                                                     monitor='val_f1',\
                                                     mode='max', save_best_only=True)
        batch_renorm_cb = BatchRenormScheduler(len(train_gen))
        history = model.fit(
            train_gen,
            epochs=num_epochs,
            validation_data=val_gen,
            verbose=1,
            shuffle=False,
            callbacks=[batch_renorm_cb, metrics_cb, model_checkpt],
            workers=2,
            max_queue_size=20,
            use_multiprocessing=False)

        # Plot training history
        plot_results(fold+1, history.history['loss'], history.history['val_loss'],\
                     os.path.join(resultdir,'Fold'+str(fold+1)+'_'+mode+'_loss.jpg'), metric='Loss')
        plot_results(fold+1, history.history['accuracy'], history.history['val_accuracy'],\
                     os.path.join(resultdir,'Fold'+str(fold+1)+'_'+mode+'_accuracy.jpg'), metric='Accuracy')
        plot_results(fold+1, history.history['macro_f1'], metrics_cb.val_f1,\
                     os.path.join(resultdir,'Fold'+str(fold+1)+'_'+mode+'_macro_f1.jpg'), metric='Macro F1')

        # Predict probability on validation data using best model
        best_model_file, epoch, val_f1 = get_best_model(resultdir, fold + 1)
        print('Predicting with model saved at Epoch={:d} with val_f1={:0.4f}'.
              format(epoch, val_f1))
        model.load_weights(os.path.join(resultdir, best_model_file))
        probs = model.predict(test_gen)
        y_pred = probs.argmax(axis=1)
        y_true = fold_labels[test_indices]
        predictions.append(
            (users[test_indices], data.iloc[test_indices]['timestamp'],
             data.iloc[test_indices]['filename'], test_indices, y_true, probs))

        # Save user report
        cv_save_classification_result(
            predictions,
            valid_states,
            os.path.join(
                resultdir, 'fold' + str(fold + 1) + '_deeplearning_' + mode +
                '_results.csv'),
            method='dl')
        cv_get_classification_report(predictions,
                                     mode,
                                     valid_states,
                                     method='dl')

    cv_get_classification_report(predictions, mode, valid_states, method='dl')

    # Save user report
    cv_save_classification_result(predictions,
                                  valid_states,
                                  os.path.join(
                                      resultdir,
                                      'deeplearning_' + mode + '_results.csv'),
                                  method='dl')
Example #9
import torch as t
import torch.nn as nn
from torch import optim
from torch.utils.data import DataLoader

import data_augmentation
from FCN import FCN  # load the model definition

device = t.device('cuda') if t.cuda.is_available() else t.device(
    'cpu')  # select the device used for training

BATCH_SIZE = 2
train_data = DataLoader(data_augmentation.Cam_train,
                        batch_size=BATCH_SIZE,
                        shuffle=True,
                        num_workers=4)
val_data = DataLoader(data_augmentation.Cam_val,
                      batch_size=BATCH_SIZE,
                      shuffle=True,
                      num_workers=4)

net = FCN(12)
net = net.to(device)
criterion = nn.NLLLoss().to(device)
optimizer = optim.Adam(net.parameters(), lr=1e-4)

eval_miou_list = []
best = [0]
print('-----------------------train-----------------------')

for epoch in range(500):
    if epoch % 50 == 0 and epoch != 0:
        for group in optimizer.param_groups:
            group['lr'] *= 0.5

    train_loss = 0
    train_acc = 0
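    # Hypothetical continuation of the truncated epoch loop, modeled on the other
    # CamVid examples on this page (log_softmax + NLLLoss); it assumes
    # torch.nn.functional is imported as F. Not part of the original listing.
    net.train()
    for sample in train_data:
        img = sample['img'].to(device)
        label = sample['label'].to(device)

        out = F.log_softmax(net(img), dim=1)  # NLLLoss expects log-probabilities
        loss = criterion(out, label)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        train_loss += loss.item()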
Example #10
def imshow(inp, title=None):
    """Imshow for Tensor."""
    inp = inp.numpy().transpose((1, 2, 0))
    inp = np.clip(inp, 0, 1)
    plt.imshow(inp)
    if title is not None:
        plt.title(title)
    plt.pause(0.001)  # pause a bit so that plots are updated


if __name__ == '__main__':
    # Get a batch of training data
    inputs, classes = next(iter(dataloaders['train']))
    out = torchvision.utils.make_grid(inputs)
    imshow(out)
    print(classes)

    # Define the model
    model_ft = FCN()
    model_ft = model_ft.to(device)
    train_model(model_ft,
                torch.nn.CrossEntropyLoss(),
                torch.optim.SGD(params=model_ft.parameters(),
                                lr=0.0001,
                                momentum=0.9),
                num_epochs=2,
                dataloaders=dataloaders,
                device=device,
                dataset_sizes=dataset_sizes)
    test_model(model_ft, dataloaders, device, dataset_sizes)
Example #11
File: test.py  Project: liu8526/FCN
import torch as t
import torch.nn.functional as F
from torch.autograd import Variable
from torch.utils.data import DataLoader

from evalution_segmentaion import eval_semantic_segmentation
from dataset import CamvidDataset
from FCN import FCN
import cfg

device = t.device('cuda') if t.cuda.is_available() else t.device('cpu')
t.cuda.set_device('cuda:1')

BATCH_SIZE = 4
miou_list = [0]

Cam_test = CamvidDataset([cfg.TEST_ROOT, cfg.TEST_LABEL], cfg.crop_size)
test_data = DataLoader(Cam_test, batch_size=BATCH_SIZE, shuffle=True, num_workers=0)

net = FCN(12)
net.to(device)
net.load_state_dict(t.load('1.pth'))

train_acc = 0
train_miou = 0
train_class_acc = 0
train_mpa = 0
error = 0

for i, sample in enumerate(test_data):
    data = Variable(sample['img']).to(device)
    label = Variable(sample['label']).to(device)
    out = net(data)
    out = F.log_softmax(out, dim=1)
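    # Hypothetical continuation (not in the original listing): convert the
    # log-probabilities to per-pixel labels and accumulate metrics with the
    # imported eval_semantic_segmentation helper. The returned key names
    # ('miou', 'mean_class_accuracy') are assumptions.
    pre_label = out.max(dim=1)[1].data.cpu().numpy()
    true_label = label.data.cpu().numpy()
    eval_metric = eval_semantic_segmentation(pre_label, true_label)
    train_acc += eval_metric['mean_class_accuracy']
    train_miou += eval_metric['miou']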
Example #12
CHECKPOINT_INTERVAL = 5  # number of epochs between checkpoints (save model and loss curve)
NUM_TEST_SAMPLES = 30  # for generating test samples at the end

# GENERATE SAVE DIRECTORY PATH
timestamp = '{:%Y%m%d_%H%M%S}'.format(datetime.datetime.now())  # for save_dir
if not os.path.exists('output'):
    os.makedirs('output')
save_dir = 'output/' + timestamp + '_' + str(EPOCHS) + 'epochs_' + str(
    REG) + 'reg'
if DEBUG:
    save_dir += '_debug'
if not os.path.exists(save_dir):
    os.makedirs(save_dir)

if TEST_ONLY:
    model = FCN(use_6_channels=USE_6_CHANNELS)
    model.load_state_dict(torch.load(MODEL_PATH))
    dlo = DataLoader(BATCH_SIZE, use_6_channels=USE_6_CHANNELS, debug=DEBUG)
else:
    # TRAIN AND SAVE MODEL AND OPTIMIZER
    if CONTINUE_TRAINING:
        model = FCN(use_6_channels=USE_6_CHANNELS)
        model.load_state_dict(torch.load(MODEL_PATH))

        if os.path.exists(OPTIMIZER_PATH):
            optimizer = torch.optim.Adam(model.parameters())
            optimizer.load_state_dict(torch.load(OPTIMIZER_PATH))
        else:
            optimizer = None

        model, optimizer, losses, dlo = trainer.train(
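        # Hypothetical completion of the truncated call above, based on the
        # train() signature shown in Example #13; the chosen arguments are
        # assumptions.
        #     model, optimizer, losses, dlo = trainer.train(
        #         save_dir, model=model, optimizer=optimizer,
        #         use_6_channels=USE_6_CHANNELS, debug=DEBUG)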
Example #13
def train(save_dir, model=None, optimizer=None,
          epochs=EPOCHS, batch_size=BATCH_SIZE, lr=LR, reg=REG,
          checkpoint_interval=5, use_6_channels=True, debug=False, use_cross_entropy_loss=True):

    if model is None:
        model = FCN(use_6_channels=use_6_channels)

    if optimizer is None:
        optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=reg)

    if use_cross_entropy_loss:
        loss_function = nn.CrossEntropyLoss()
    else:
        loss_function = nn.BCEWithLogitsLoss()

    dlo = DataLoader(batch_size, use_6_channels=use_6_channels, debug=debug)
    training_set_size = dlo.get_training_set_size()
    iters_per_epoch = int(np.ceil(training_set_size / batch_size))
    losses = np.zeros([epochs * iters_per_epoch,])

    start_time = time.time()
    for e in range(epochs):
        model.train()  # to make sure components are in 'train' mode; use model.eval() at 'test' time
        dlo.shuffle_training_set()
        for i in range(iters_per_epoch):
            model.zero_grad()

            x, y = dlo.get_next_training_batch()
            x = torch.autograd.Variable(torch.FloatTensor(x))
            if use_cross_entropy_loss:
                y = torch.autograd.Variable(torch.LongTensor(y))
            else:
                y = torch.autograd.Variable(torch.FloatTensor(y))
            output = model(x)
            loss = loss_function(output, y)
            losses[i + e * iters_per_epoch] = loss.item()
            loss.backward()
            optimizer.step()
            del x, y, output, loss

            print('{:3}%  Time: {:21}  Epoch: {:3}  Iter: {:3}  Loss: {}'.format(
                int((i + 1 + iters_per_epoch * e) / (iters_per_epoch * epochs) * 100),
                time_since(start_time, (i + 1 + iters_per_epoch * e) / (iters_per_epoch * epochs)),
                str(e + 1), str(i + 1), losses[i + e * iters_per_epoch]))
            sys.stdout.flush()

        # after every checkpoint_interval epochs: save checkpoint model, save loss curve, display test error
        if (e + 1) % checkpoint_interval == 0:
            torch.save(model.state_dict(), save_dir+'/model_after_epoch_'+str(e+1)+'.pth')
            torch.save(optimizer.state_dict(), save_dir+'/optimizer_after_epoch_'+str(e+1)+'.pth')
            np.save(save_dir+'/losses_after_epoch_'+str(e+1), losses)

            # save loss curve so far
            plt.plot(np.arange(losses.shape[0]) + 1, losses)
            plt.xlabel('Iterations')
            plt.ylabel('Loss')
            plt.ylim(ymin=0)
            plt.tight_layout()
            plt.savefig(save_dir+'/loss_curve_after_epoch_'+str(e+1)+'.png')
            plt.close()

            # display test error
            _, test_acc, test_iou = test(model, dlo)
            print('Test accuracies:')
            print('Per-pixel classification: {0:.3f}%'.format(test_acc * 100))
            print('Intersection-over-Union:  {0:.3f}%'.format(test_iou * 100))

    return model, optimizer, losses, dlo
Example #14
device = t.device("cuda") if t.cuda.is_available() else t.device("cpu")

Cam_train = CamvidDataset([cfg.TRAIN_ROOT, cfg.TRAIN_LABEL], cfg.crop_size)
Cam_val = CamvidDataset([cfg.VAL_ROOT, cfg.VAL_LABEL], cfg.crop_size)

train_data = DataLoader(Cam_train,
                        batch_size=cfg.BATCH_SIZE,
                        shuffle=True,
                        num_workers=4)
val_data = DataLoader(Cam_val,
                      batch_size=cfg.BATCH_SIZE,
                      shuffle=True,
                      num_workers=4)

# The argument 12 is the number of classes in the dataset
fcn = FCN(12)
fcn = fcn.to(device)
criterion = nn.NLLLoss().to(device)
optimizer = optim.Adam(fcn.parameters(), lr=1e-4)


def train(model):
    best = [0]
    net = model.train()
    # number of training epochs
    for epoch in range(cfg.EPOCH_NUMBER):
        print("Epoch is [{}/{}]".format(epoch + 1, cfg.EPOCH_NUMBER))
        if epoch % 50 == 0 and epoch != 0:
            for group in optimizer.param_groups:
                group["lr"] *= 0.5
Example #15
import numpy as np
import pandas as pd
import torch as t
import torch.nn.functional as F
from torch.utils.data import DataLoader
from PIL import Image
from dataset import CamvidDataset
from FCN import FCN
import cfg

device = t.device('cuda') if t.cuda.is_available() else t.device('cpu')
t.cuda.set_device('cuda:1')

Cam_test = CamvidDataset([cfg.TEST_ROOT, cfg.TEST_LABEL], cfg.crop_size)
test_data = DataLoader(Cam_test, batch_size=1, shuffle=True, num_workers=0)

net = FCN(12).to(device)
net.load_state_dict(t.load("1.pth"))
net.eval()

pd_label_color = pd.read_csv('./CamVid/class_dict.csv', sep=',')
name_value = pd_label_color['name'].values
num_class = len(name_value)
colormap = []
for i in range(num_class):
    tmp = pd_label_color.iloc[i]
    color = [tmp['r'], tmp['g'], tmp['b']]
    colormap.append(color)

cm = np.array(colormap).astype('uint8')

basePath = './results/'
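# Hypothetical continuation (the original listing stops here): colorize each
# prediction with the colormap built above and save it under basePath. The loop
# body and the file naming are assumptions.
for i, sample in enumerate(test_data):
    data = sample['img'].to(device)
    with t.no_grad():
        out = F.log_softmax(net(data), dim=1)
    pre_label = out.max(1)[1].squeeze().cpu().numpy()  # (H, W) class indices
    pre = cm[pre_label]                                 # (H, W, 3) uint8 colors
    Image.fromarray(pre).save(basePath + str(i) + '.png')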
Example #16
input_size = 224
output_size = 224
n_Classes = 1
input_data_transforms = transforms.Compose([
            transforms.Resize((input_size, input_size)),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
            ])

image = input_data_transforms(Image.open(image_path))
#image.save('./test_mask.png')

image = image.unsqueeze(0)  # add a batch dimension so the input is 4-D

model_ft = models.resnet50(pretrained=None)
model = FCN(model_ft, n_Classes, input_size) # UNet_Adapted(3, n_Classes)
model.load_state_dict(torch.load('../model/FCN_model_task2.pkl'))

model.eval()

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
image = image.to(device)
# Send the model to the selected device
model = model.to(device)

# Output 1*1*388*388

mask_pred = model(image)
output_transforms = transforms.Compose([
                transforms.ToPILImage(),
                transforms.Resize(output_size),
Example #17
def train_model(train_images, train_labels, n_dim,valid=False, numepochs = 20, folds = 5, shuffle = True,  \
                sgd = SGD(lr=0.1, momentum=0.9, decay=1e-6, nesterov=False), \
                metrics_list = ['accuracy'], callbacks_list = [], sample_weights = None):
    """
    Train CNN
    """

    print("Setting up model...")
    model = FCN((n_dim, n_dim, 3))
    plot_model(model, to_file='model.png')
    print(model.summary())

    print("Training...")

    if valid:
        # Set up K-fold cross-validation due to lack of data
        kf = KFold(n_splits=folds, shuffle=shuffle)
        fold = 1

        for train_index, valid_index in kf.split(train_images):
            # K-1 folds used for training, the remaining fold used for testing/validation
            data_train, data_valid = train_images[train_index], train_images[
                valid_index]
            labels_train, labels_valid = train_labels[
                train_index], train_labels[valid_index]

            # Compile model

            model.compile(loss='binary_crossentropy',
                          optimizer=sgd,
                          metrics=metrics_list,
                          sample_weight_mode="temporal")
            # Fit the model
            if callbacks_list != []:
                history = model.fit(data_train,
                                    labels_train,
                                    epochs=numepochs,
                                    verbose=1,
                                    callbacks=callbacks_list,
                                    validation_data=(data_valid, labels_valid),
                                    sample_weight=sample_weights)
            else:
                history = model.fit(data_train,
                                    labels_train,
                                    epochs=numepochs,
                                    verbose=1,
                                    validation_data=(data_valid, labels_valid),
                                    sample_weight=sample_weights)
            model.save("Fold%s.h5" % fold)
            fold += 1

    else:
        model.compile(loss='binary_crossentropy',
                      optimizer=sgd,
                      metrics=metrics_list,
                      sample_weight_mode="temporal")
        if callbacks_list != []:
            history = model.fit(train_images,
                                train_labels,
                                epochs=numepochs,
                                verbose=1,
                                callbacks=callbacks_list,
                                sample_weight=sample_weights)
        else:
            history = model.fit(train_images,
                                train_labels,
                                epochs=numepochs,
                                verbose=1,
                                sample_weight=sample_weights)
        model.save("Weights/weights.h5")

    with open('trainHistoryDict', 'wb') as file_pi:
        pickle.dump(history.history, file_pi)
Example #18
def train(epo_num=50, show_vgg_params=False):
    vis = visdom.Visdom()

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    vgg_model = VGGNet(requires_grad=True, show_params=show_vgg_params)
    fcn_model = FCN(pretrained_net=vgg_model, n_class=2)
    fcn_model = fcn_model.to(device)
    criterion = nn.BCELoss().to(device)
    optimizer = optim.SGD(fcn_model.parameters(), lr=1e-2, momentum=0.7)

    all_train_iter_loss = []
    all_test_iter_loss = []

    #start timing
    prev_time = datetime.now()
    for epo in range(epo_num):
        train_loss = 0
        fcn_model.train()

        for index, (bag, bagmsk) in enumerate(train_dataloader):
            #bag shape is torch.Size([4,3,160,160])
            #bag_msk.shape is torch.Size([4,2,160,160])

            bag = bag.to(device)
            bagmsk = bagmsk.to(device)

            optimizer.zero_grad()
            output = fcn_model(bag)
            output = torch.sigmoid(
                output)  #output.shape is torch.Size([4,2,160,160])
            loss = criterion(output, bagmsk)
            loss.backward()
            iter_loss = loss.item()
            all_train_iter_loss.append(iter_loss)
            train_loss += iter_loss
            optimizer.step()

            output_np = output.cpu().detach().numpy().copy(
            )  #out_put_np.shape = (4,2,160,160)
            output_np = np.argmin(output_np, axis=1)
            bag_mask_np = bagmsk.cpu().detach().numpy().copy()
            bag_mask_np = np.argmin(bag_mask_np, axis=1)

            if np.mod(index, 15) == 0:
                print('epoch {}, {}/{}, train loss is {}'.format(
                    epo, index, len(train_dataloader), iter_loss))
                #vis.close()
                vis.images(output_np[:, None, :, :],
                           win='train_pred',
                           opts=dict(title='train prediction'))
                vis.images(bag_mask_np[:, None, :, :],
                           win='train_label',
                           opts=dict(title='label'))
                vis.line(all_train_iter_loss,
                         win='train_iter_loss',
                         opts=dict(title='train iter loss'))

            #plt.subplot(1, 2, 1)
            #plt.imshow(np.squeeze(bag_mask_np[0, ...]), 'gray')
            #plt.subplot(1, 2, 2)
            #plt.imshow(np.squeeze(output_np[0, ...]),'gray')
            #plt.pause(0.5)

        test_loss = 0
        fcn_model.eval()
        with torch.no_grad():
            for index, (bag, bagmsk) in enumerate(test_dataloader):
                bag = bag.to(device)
                bagmsk = bagmsk.to(device)

                optimizer.zero_grad()
                output = fcn_model(bag)
                output = torch.sigmoid(
                    output)  #output.shape is torch.Size([4,2,160,160])
                loss = criterion(output, bagmsk)
                iter_loss = loss.item()
                all_test_iter_loss.append(iter_loss)
                test_loss += iter_loss

                output_np = output.cpu().detach().numpy().copy()
                output_np = np.argmin(output_np, axis=1)  # index of the minimum along the class axis
                bag_mask_np = bagmsk.cpu().detach().numpy().copy()
                bag_mask_np = np.argmin(bag_mask_np, axis=1)

                if np.mod(index, 15) == 0:
                    print(
                        r'Testing... Open http://localhost:8097/ to see test result'
                    )
                    #vis.close()
                    vis.images(output_np[:, None, :, :],
                               win='test_pred',
                               opts=dict(title='test prediction'))
                    vis.images(bag_mask_np[:, None, :, :],
                               win='test_label',
                               opts=dict(title='label'))
                    vis.line(all_test_iter_loss,
                             win='test_iter_loss',
                             opts=dict(title='test iter loss'))

                #plt.subplot(1, 2, 1)
                #plt.imshow(np.squeeze(bag_mask_np[0, ...]), 'gray')
                #plt.subplot(1, 2, 2)
                #plt.imshow(np.squeeze(output_np[0, ...]),'gray')
                #plt.pause(0.5)
        cur_time = datetime.now()
        h, remainder = divmod((cur_time - prev_time).seconds, 3600)
        m, s = divmod(remainder, 60)
        time_str = "Time%02d:%02:%02d" % (h, m, s)
        prev_time = cur_time

        print('epoch train loss = %f, epoch test loss = %f, %s' %
              (train_loss / len(train_dataloader),
               test_loss / len(test_dataloader), time_str))

        if np.mod(epo, 5) == 0:
            torch.save(fcn_model, 'checkpoint/fcn_model_{}.pt'.format(epo))
            print('saving checkpoint/fcn_model_{}.pt'.format(epo))
Example #19
        if ind[1] == 1:
            noduleCount = noduleCount + 1
            if correctIndicies[ind] == 1:
                nodulesFound = nodulesFound + 1
        #If we found it
        if correctIndicies[ind] == 1:
            truePositives = truePositives + 1
        if correctIndicies[ind] == 0:
            falseNegatives = falseNegatives + 1

    numPositives = len(correctIndicies)
    return truePositives, numPositives, falsePositives, falseNegatives, noduleCount, nodulesFound


handler = DataHandler()
FCN = FCN(2, "../Data/model/", "model/8")
imageNames, keptIndiciesList = getCandidates("../Data/toPredict/", FCN)
#print(keptIndiciesList)
generateStats(imageNames, keptIndiciesList)
'''
y , orig, spacing = handler.load_itk_image("../Data/data/1.3.6.1.4.1.14519.5.2.1.6279.6001.220596530836092324070084384692.mhd")
#pred = predFull(FCN, y, spacing, (1, 0.625, 0.625))
#print(pred.shape)
cand = np.load("../Data/fullresnorm5.npy")
print(cand.shape)
handler.save_slices(y, 10, "../Data/images/CTscan/")

xs, ys = handler.load_samples("../Data/sampless/subset{}/".format(i), (10,16,16,1))
print(xs.shape)
print(ys.shape)
trainSize = int(xs.shape[0]*0.2)
Example #20
import torch as t
import torch.nn.functional as F
from torch.autograd import Variable
from torch.utils.data import DataLoader

from dataset import CamvidDataset
from FCN import FCN
import cfg

device = t.device('cuda') if t.cuda.is_available() else t.device('cpu')

BATCH_SIZE = 4
miou_list = [0]

Cam_test = CamvidDataset([cfg.TEST_ROOT, cfg.TEST_LABEL], cfg.crop_size)
test_data = DataLoader(Cam_test,
                       batch_size=BATCH_SIZE,
                       shuffle=True,
                       num_workers=0)

net = FCN(12)
net.eval()
net.to(device)
net.load_state_dict(t.load('xxx.pth'))

train_acc = 0
train_miou = 0
train_class_acc = 0
train_mpa = 0
error = 0

for i, sample in enumerate(test_data):
    data = Variable(sample['img']).to(device)
    label = Variable(sample['label']).to(device)
    out = net(data)
    out = F.log_softmax(out, dim=1)
Example #21
File: predict.py  Project: Lxtjx/cv
import torch as t
from torch.utils.data import DataLoader

from dataset import CamvidDataset
from FCN import FCN
import cfg
import pandas as pd
import numpy as np
from PIL import Image

"""
Prediction builds on the test script: it converts the numeric outputs into images.
"""
device = t.device('cuda') if t.cuda.is_available() else t.device('cpu')

Cam_test = CamvidDataset([cfg.test_root, cfg.test_label], cfg.crop_size)
test_data = DataLoader(Cam_test, batch_size=cfg.batch_size, shuffle=True, num_workers=0)

net = FCN(12)

net.to(device)
net.load_state_dict(t.load('0.pth'))  # a trained model checkpoint must be loaded
net.eval()
pd_label_color = pd.read_csv(cfg.class_dict_path, sep=',')
name_value = pd_label_color['name'].values
num_class = len(name_value)
colormap = []
for i in range(num_class):
    tmp = pd_label_color.iloc[i]  # iloc reads one row at a time
    color = [tmp['r'], tmp['g'], tmp['b']]
    colormap.append(color)

cm = np.array(colormap).astype('uint8')
"""