def test_small():
    tf.logging.set_verbosity(tf.logging.INFO)
    config = utils.load_config()
    with tf.Graph().as_default():
        dermis = inputs.SkinData(config['data_dir'], 'dermis')
        dermis = prep(dermis)
        image, label = dermis.images[0], dermis.labels[0]
        images = tf.constant(image[None], dtype=tf.float32)
        labels = tf.constant(label[None], dtype=tf.float32)
        global_step = tf.train.get_or_create_global_step()
        net = model.FCN(images, labels, net_params=config['net_params'])
        saver = tf.train.Saver()
        with tf.Session() as sess:
            saver.restore(sess,
                          tf.train.latest_checkpoint(config['train_dir']))
            logger.info('Model-%d restored successfully!' %
                        sess.run(global_step))

            logits = np.squeeze(sess.run(net.endpoints['conv4']))
            out = np.zeros_like(logits, dtype=np.uint8)
            out[logits > 0] = 1
            correct = np.array(label == out, dtype=np.float32)
            accuracy = np.mean(correct)
            logger.info('accuracy: %.3f' % accuracy)
            plt.subplot(121)
            plt.imshow(label, cmap='gray')
            plt.subplot(122)
            plt.imshow(out, cmap='gray')
            plt.show()
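Thresholding the raw logits at 0 is equivalent to thresholding sigmoid probabilities at 0.5, since sigmoid(0) = 0.5. The mask-and-accuracy step above can be factored into a small numpy helper; a sketch (mask_and_accuracy is not part of the original code):

import numpy as np

def mask_and_accuracy(logits, label):
    # Binary mask from raw logits; equivalent to sigmoid(logits) > 0.5.
    out = (logits > 0).astype(np.uint8)
    # Mean pixel accuracy against a binary ground-truth mask.
    return out, float(np.mean(out == label))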
Example #2
def get_model(self):
    if self.model_type == 'fcn':
        self.input_length = 29 * 16000
        return Model.FCN()
    elif self.model_type == 'musicnn':
        self.input_length = 3 * 16000
        return Model.Musicnn(dataset=self.dataset)
    elif self.model_type == 'crnn':
        self.input_length = 29 * 16000
        return Model.CRNN()
    elif self.model_type == 'sample':
        self.input_length = 59049
        return Model.SampleCNN()
    elif self.model_type == 'se':
        self.input_length = 59049
        return Model.SampleCNNSE()
    elif self.model_type == 'short':
        self.input_length = 59049
        return Model.ShortChunkCNN()
    elif self.model_type == 'short_res':
        self.input_length = 59049
        return Model.ShortChunkCNN_Res()
    elif self.model_type == 'attention':
        self.input_length = 15 * 16000
        return Model.CNNSA()
    elif self.model_type == 'hcnn':
        self.input_length = 5 * 16000
        return Model.HarmonicCNN()
    else:
        print(
            'model_type has to be one of [fcn, musicnn, crnn, sample, se, short, short_res, attention, hcnn]'
        )
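A hypothetical call site for this factory; the Solver class name and its constructor are assumptions, only get_model() and the attributes it reads (model_type, dataset, input_length) come from the snippet above:

solver = Solver(model_type='musicnn', dataset='mtat')  # hypothetical harness
net = solver.get_model()
# get_model() also sets input_length as a side effect:
# 3 s of 16 kHz audio -> 48000 samples for 'musicnn'.
print(solver.input_length)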
def train_small():
    tf.logging.set_verbosity(tf.logging.INFO)
    config = utils.load_config()
    with tf.Graph().as_default():
        dermis = inputs.SkinData(config['data_dir'], 'dermis')
        dermis = prep(dermis)
        image, label = dermis.images[0], dermis.labels[0]
        images = tf.constant(image[None], dtype=tf.float32)
        labels = tf.constant(label[None], dtype=tf.float32)
        net = model.FCN(images,
                        labels,
                        net_params=config['net_params'],
                        lr=config['learning_rate'])
        net.train_from_scratch(config)
def train():
    tf.logging.set_verbosity(tf.logging.INFO)
    config = utils.load_config()
    with tf.Graph().as_default():
        dermis = inputs.SkinData(config['data_dir'], 'dermis')
        batch_images, batch_labels = dermis.train_batch_v1(
            config['batch_size'],
            config['input_size'],
            seed=config['split_seed'])
        net = model.FCN(batch_images,
                        batch_labels,
                        net_params=config['net_params'],
                        reg=config['reg'],
                        lr=config['learning_rate'],
                        class_weights=config['class_weights'])
        net.train_from_scratch(config)
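Both training entry points read the same utils.load_config() result; the keys consumed above imply a layout like the following (all values are illustrative placeholders, not taken from the original):

config = {
    'data_dir': '/path/to/dermis',       # passed to inputs.SkinData
    'train_dir': '/path/to/checkpoints', # read by test_small for restore
    'batch_size': 8,
    'input_size': 224,
    'split_seed': 0,
    'net_params': {},                    # architecture spec for model.FCN
    'reg': 1e-4,                         # weight regularization strength
    'learning_rate': 1e-3,
    'class_weights': [1.0, 1.0],
}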
Example #5
def get_model(self):
    if self.model_type == 'fcn':
        return Model.FCN()
    elif self.model_type == 'musicnn':
        return Model.Musicnn(dataset=self.dataset)
    elif self.model_type == 'crnn':
        return Model.CRNN()
    elif self.model_type == 'sample':
        return Model.SampleCNN()
    elif self.model_type == 'se':
        return Model.SampleCNNSE()
    elif self.model_type == 'short':
        return Model.ShortChunkCNN()
    elif self.model_type == 'short_res':
        return Model.ShortChunkCNN_Res()
    elif self.model_type == 'attention':
        return Model.CNNSA()
    elif self.model_type == 'hcnn':
        return Model.HarmonicCNN()
Example #6
def train(train_loader, val_loader, train_param, data_param, loc_param, _log, _run):
    writer = SummaryWriter()
    model_dir, _ = create_dir(writer.file_writer.get_logdir())
    sig = nn.Sigmoid()

    if train_param['model'] == "FCN":
        train_model = model.FCN(data_param['feature_num']).cuda()
    elif train_param['model'] == 'FCNwPool':
        train_model = model.FCNwPool(data_param['feature_num'], data_param['pix_res']).cuda()
    elif train_param['model'] == 'UNet':
        train_model = UNet(data_param['feature_num'], 1).cuda()
    elif train_param['model'] == 'FCNwBottleneck':
        train_model = model.FCNwBottleneck(data_param['feature_num'], data_param['pix_res']).cuda()
    elif train_param['model'] == 'SimplerFCNwBottleneck':
        train_model = model.SimplerFCNwBottleneck(data_param['feature_num']).cuda()
    elif train_param['model'] == 'Logistic':
        train_model = model.Logistic(data_param['feature_num']).cuda()
    elif train_param['model'] == 'PolyLogistic':
        train_model = model.PolyLogistic(data_param['feature_num']).cuda()
    
    if th.cuda.device_count() > 1:
        train_model = nn.DataParallel(train_model)
    
    if loc_param['load_model']:
        train_model.load_state_dict(th.load(loc_param['load_model']))
    _log.info('[{}] model is initialized ...'.format(ctime()))
    
    if train_param['optim'] == 'Adam':
        optimizer = to.Adam(train_model.parameters(), lr=train_param['lr'], weight_decay=train_param['decay'])
    else:
        optimizer = to.SGD(train_model.parameters(), lr=train_param['lr'], weight_decay=train_param['decay'])
    
    scheduler = ReduceLROnPlateau(optimizer, mode='min', patience=train_param['patience'], verbose=True, factor=0.5)
    criterion = nn.BCEWithLogitsLoss(pos_weight=th.Tensor([train_param['pos_weight']]).cuda())

    valatZero = validate(train_model, val_loader, data_param, train_param, _log)
    _log.info('[{}] validation loss before training: {}'.format(ctime(), valatZero))
    _run.log_scalar('training.val_loss', valatZero, 0)
    trainatZero = validate(train_model, train_loader, data_param, train_param, _log)
    _log.info('[{}] train loss before training: {}'.format(ctime(), trainatZero))
    _run.log_scalar('training.loss_epoch', trainatZero, 0)
    
    loss_ = 0
    prune = data_param['prune']
    for epoch in range(train_param['n_epochs']):
        running_loss = 0
        train_iter = iter(train_loader)
        for iter_ in range(len(train_iter)):
            optimizer.zero_grad()

            batch_sample = next(train_iter)  # Python 3 iterator protocol
            data, gt = batch_sample['data'].cuda(), batch_sample['gt'].cuda()
            # Crop the pruned border; calling the module invokes forward()
            # for every model type, so no per-model branch is needed.
            prds = train_model(data)[:, :, prune:-prune, prune:-prune]
            indices = gt>=0
            loss = criterion(prds[indices], gt[indices])
            running_loss += loss.item()
            loss_ += loss.item()
            loss.backward()
            optimizer.step()

            _run.log_scalar("training.loss_iter", loss.item(), epoch*len(train_iter)+iter_+1)
            _run.log_scalar("training.max_prob", th.max(sig(prds)).item(), epoch*len(train_iter)+iter_+1)
            _run.log_scalar("training.min_prob", th.min(sig(prds)).item(), epoch*len(train_iter)+iter_+1)

            writer.add_scalar("loss/train_iter", loss.item(), epoch*len(train_iter)+iter_+1)
            writer.add_scalars(
                "probRange",
                {'min': th.min(sig(prds)), 'max': th.max(sig(prds))},
                epoch*len(train_iter)+iter_+1
            )
            if (epoch*len(train_iter)+iter_+1) % 20 == 0:
                _run.log_scalar("training.loss_20", loss_/20, epoch*len(train_iter)+iter_+1)
                writer.add_scalar("loss/train_20", loss_/20, epoch*len(train_iter)+iter_+1)
                _log.info(
                    '[{}] loss at [{}/{}]: {}'.format(
                        ctime(),
                        epoch*len(train_iter)+iter_+1,
                        train_param['n_epochs']*len(train_iter),
                        loss_/20
                    )
                )
                loss_ = 0
        
        v_loss = validate(train_model, val_loader, data_param, train_param, _log)
        scheduler.step(v_loss)
        _log.info('[{}] validation loss at [{}/{}]: {}'.format(ctime(), epoch+1, train_param['n_epochs'], v_loss))
        _run.log_scalar('training.val_loss', v_loss, epoch+1)
        _run.log_scalar('training.loss_epoch', running_loss/len(train_iter), epoch+1)
        writer.add_scalars(
            "loss/grouped",
            {'test': v_loss, 'train': running_loss/len(train_iter)},
            epoch+1
        )
        del data, gt, prds, indices
        if (epoch+1) % loc_param['save'] == 0:
            th.save(train_model.cpu().state_dict(), model_dir+'model_{}.pt'.format(str(epoch+1)))
            train_model = train_model.cuda()
    
    writer.export_scalars_to_json(model_dir+'loss.json')
    th.save(train_model.cpu().state_dict(), model_dir+'trained_model.pt')
    save_config(writer.file_writer.get_logdir()+'/config.txt', train_param, data_param)
    _log.info('[{}] model has been trained and config file has been saved.'.format(ctime()))
    
    return v_loss
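The validate() helper called above is not shown; a minimal sketch consistent with its call sites (the signature matches the calls above, the internals are assumptions):

def validate(model, loader, data_param, train_param, _log):
    # Sketch: mean BCE-with-logits loss over a loader, mirroring the
    # border pruning and gt >= 0 masking of the training loop above.
    criterion = nn.BCEWithLogitsLoss(
        pos_weight=th.Tensor([train_param['pos_weight']]).cuda())
    prune = data_param['prune']
    model.eval()
    total, batches = 0.0, 0
    with th.no_grad():
        for batch in loader:
            data, gt = batch['data'].cuda(), batch['gt'].cuda()
            prds = model(data)[:, :, prune:-prune, prune:-prune]
            mask = gt >= 0
            total += criterion(prds[mask], gt[mask]).item()
            batches += 1
    model.train()
    return total / max(batches, 1)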
Example #7
EPOCH_NUM = 1000
BATCH_SIZE = 6
NUM_WORKERS = 2
USE_GPU = True
USE_PRE_TRAIN = True
PROPORTION_OF_DATA = 0.5
LEARNING_RATE = 1e-3  # not defined in this excerpt; illustrative value
WEIGHT_DECAY = 1e-4  # not defined in this excerpt; illustrative value
CHECKPOINT = 1  # when the epoch number is divisible by CHECKPOINT,
# training pauses and you can choose whether to continue

if __name__ == '__main__':

    if USE_PRE_TRAIN:
        net = torch.load('./bak/model.pkl')
    else:
        net = model.FCN(34)  # 34 classes for the Cityscapes dataset

    if USE_GPU:
        net = net.cuda()

    optimizer = optim.Adam(net.parameters(),
                           lr=LEARNING_RATE,
                           weight_decay=WEIGHT_DECAY)
    criterion = nn.CrossEntropyLoss()
    evaluator = evaluation.Evaluation(34)

    transformed_data = data_loader.CityScape(rand=PROPORTION_OF_DATA)
    dataloaders = DataLoader(transformed_data,
                             batch_size=BATCH_SIZE,
                             shuffle=True,
                             num_workers=NUM_WORKERS)
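The excerpt stops after building the DataLoader; a minimal continuation sketch (the 'image'/'label' batch keys and the checkpoint path reuse are assumptions, not shown in the original):

    for epoch in range(EPOCH_NUM):
        for batch in dataloaders:
            images, labels = batch['image'], batch['label']
            if USE_GPU:
                images, labels = images.cuda(), labels.cuda()
            optimizer.zero_grad()
            outputs = net(images)              # (N, 34, H, W) class scores
            loss = criterion(outputs, labels)  # labels: (N, H, W) class ids
            loss.backward()
            optimizer.step()
        if (epoch + 1) % CHECKPOINT == 0:
            torch.save(net, './bak/model.pkl')  # matches the load path above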