def train(self):
  data_manager = DataManager(self.batch_size, logger=self.logger,
                             is_many_to_one=self.is_many_to_one,
                             data_file_count=self.data_file_count,
                             pretrained_file=self.pre_train)
  if self.is_many_to_one:
    net = RNN_M2O(len(data_manager.word_list), self.embedding_len,
                  self.hidden_size, self.learning_rate, self.num_hidden_layer,
                  self.drop_rate, use_adam=True, use_cuda=self.use_cuda,
                  pretrained_emb=data_manager.pretrained_embeddings())
  else:
    net = RNN_M2M(len(data_manager.word_list), self.embedding_len,
                  self.hidden_size, self.learning_rate, self.num_hidden_layer,
                  self.drop_rate, use_adam=True, use_cuda=self.use_cuda,
                  pretrained_emb=data_manager.pretrained_embeddings())
  self._train(net, data_manager)

def test(self, id):
  # Recover the hyper-parameters encoded in the model id (see the note
  # after this example for the expected id format).
  _, lr, hs, nh = re.search(r'M2(M|O)_([0-9]+)_([0-9]+)_([0-9]+)_?', id).groups()
  lr, hs, nh = float('0.' + lr[1:]), int(hs), int(nh)

  data_manager = DataManager(self.batch_size, logger=self.logger,
                             is_many_to_one=self.is_many_to_one,
                             data_file_count=self.data_file_count,
                             pretrained_file=self.pre_train, is_test=True)
  if self.is_many_to_one:
    model = RNN_M2O
  else:
    model = RNN_M2M
  net = model(len(data_manager.word_list), self.embedding_len,
              hs, lr, nh, self.drop_rate, use_adam=True, use_cuda=self.use_cuda,
              pretrained_emb=data_manager.pretrained_embeddings())
  status, _epoch_index, _perplexity_history, _min_perplexity = self._load(net, id)
  if not status:
    raise AssertionError('Model file not found!')
  loss_fn = net.get_loss()

  # Testing
  test_losses = 0.
  test_acc = 0.
  test_counter = 0

  net.eval()
  for data, label in data_manager.test_loader():
    data = T.autograd.Variable(T.LongTensor(data))
    label = T.autograd.Variable(T.LongTensor(label))
    if self.use_cuda:
      data = data.cuda()
      label = label.cuda()
    output, predicted = net(data)
    # Accumulate the loss and accuracy, weighted by batch size
    test_losses += loss_fn(output.view(-1, len(data_manager.word_list)),
                           label.view(-1)).data.cpu()[0] * data.size(0)
    test_acc += (label.squeeze() == predicted).float().mean().data.cpu()[0] * data.size(0)
    test_counter += data.size(0)
  mean_test_loss = test_losses / test_counter
  mean_test_acc = test_acc / test_counter
  # Perplexity is the exponential of the mean cross-entropy loss
  perplexity = np.exp(mean_test_loss)
  self.logger.i('Loss: %.4f, Acc: %.4f, Perp: %.4f' %
                (mean_test_loss, mean_test_acc, perplexity))
  return mean_test_loss, mean_test_acc, perplexity
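
The id format that `test` parses is not documented in the excerpt; from the regex, it appears to encode the learning rate with its decimal point stripped, followed by the hidden size and the number of hidden layers. A quick self-contained check of that reading (the sample id below is illustrative, not from the original project):

import re

_, lr, hs, nh = re.search(r'M2(M|O)_([0-9]+)_([0-9]+)_([0-9]+)_?',
                          'M2O_001_512_2').groups()
# '001' -> 0.01, '512' -> 512, '2' -> 2
assert (float('0.' + lr[1:]), int(hs), int(nh)) == (0.01, 512, 2)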
Example #3
        output_mu, output_logSigma2, latent_mu, latent_logSigma2 = CVAE(
            test_minibatch)
        loss = loss_function(output_mu, output_logSigma2, test_minibatch,
                             latent_mu, latent_logSigma2)
        test_loss += loss.data[0]
        #print("Test: batch_index: ", batch_index, " Loss: ", loss)
    test_loss /= test_set.size()[0]
    print('====> Test set loss: {:.4f} at Epoch: {:}'.format(test_loss, epoch))
    return test_loss
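
`loss_function` itself is not part of the excerpt. A minimal sketch that matches this call signature, assuming a Gaussian decoder and the standard closed-form KL term; this is an illustration, not the original implementation:

import torch

def loss_function(output_mu, output_logSigma2, x, latent_mu, latent_logSigma2):
    # Reconstruction term: negative log-likelihood of x under the
    # Gaussian decoder N(output_mu, exp(output_logSigma2))
    rec = 0.5 * torch.sum(output_logSigma2
                          + (x - output_mu).pow(2) / output_logSigma2.exp())
    # KL(q(z|x) || N(0, I)) in closed form
    kld = -0.5 * torch.sum(1 + latent_logSigma2
                           - latent_mu.pow(2) - latent_logSigma2.exp())
    return rec + kld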


image_path = 'data/processed'
mask_path = 'data/ISIC-2017_Training_Part1_GroundTruth'
label_file = 'data/ISIC-2017_Training_Part3_GroundTruth.csv'

mymanager = DataManager(image_path, mask_path, label_file)
# get only the melanoma images
images = mymanager.get_melanoma(as_tensor=True, normalize=False)
# shuffle before splitting into train and test sets
images = mymanager.shuffle_withoutLabels(images, replace=False)
# split into train and test sets
train_set, test_set = mymanager.datasplit_withoutLabels(images, train_size=0.7)

seed = 1
batch_size = 5
epochs = 400
on_cuda = False
lr = 1e-4
# global parameters for early stopping are prefixed with ES_
ES_Refepoches = 5  # stop training if the test error has not decreased for this many epochs
ES_epocheCounter = 0
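
The excerpt ends before the loop that uses these counters. A minimal sketch of the patience check they imply, assuming `test(epoch)` returns the mean test loss as above; `train(epoch)` is a hypothetical training step not shown in the excerpt:

best_test_loss = float('inf')
for epoch in range(1, epochs + 1):
    train(epoch)  # hypothetical training step
    test_loss = test(epoch)
    if test_loss < best_test_loss:
        best_test_loss = test_loss
        ES_epocheCounter = 0  # reset the patience counter on improvement
    else:
        ES_epocheCounter += 1
        if ES_epocheCounter >= ES_Refepoches:
            print('Early stopping at epoch {}'.format(epoch))
            break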
Example #4

                final_scores[m] = s
        else:
            final_scores[method] = score
    return final_scores


if __name__ == '__main__':
    # get configs
    args = get_args()
    print(args)

    writer = SummaryWriter(log_dir=os.path.join(args.savedir, args.exp_name))
    save_args(args, os.path.join(args.savedir, args.exp_name))

    # load data
    dm = DataManager(args)

    # prepare model
    model = init_model(args, dm)
    model = model.cuda()

    # prepare training
    optimizer = Adam(lr=args.lr, params=model.parameters())
    schedule = ExponentialLR(optimizer, args.lr_decay)
    loss_fn = NLLLossWithLength(ignore_index=dm.word2idx['<pad>'])
    # loss_fn = nn.NLLLoss(ignore_index=dm.word2idx['<pad>'])

    # split data
    train_data, val_data, test_data = dm.split()
    train_loader = DataLoader(train_data, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers,
                              collate_fn=partial(collate_fn, split='train', padding_idx=dm.word2idx['<pad>']))
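
The training loop itself is cut off here. A minimal sketch of how these objects typically fit together; the batch layout, the `args.epochs` flag, and the `NLLLossWithLength` call signature are all assumptions, since none of them appears in the excerpt:

    for epoch in range(args.epochs):  # 'args.epochs' is an assumed flag
        model.train()
        for src, tgt, lengths in train_loader:  # batch layout assumed
            src, tgt = src.cuda(), tgt.cuda()
            log_probs = model(src)  # assumed to return log-probabilities
            loss = loss_fn(log_probs, tgt, lengths)  # call signature assumed
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        schedule.step()  # decay the learning rate once per epoch
        writer.add_scalar('train/loss', loss.item(), epoch)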
Example #5

import os
from functools import partial

import torch
from torch.optim.lr_scheduler import ExponentialLR
from torch.utils.data import DataLoader
from tensorboardX import SummaryWriter
from pycocoevalcap.meteor.meteor import Meteor

from models import init_model
from utils import NLLLossWithLength
from opts import get_eval_args
from train import evaluate
# DataManager and collate_fn are used below but were missing from the
# original imports; their module path is an assumption.
from dataloader import DataManager, collate_fn

if __name__ == '__main__':
    # get configs
    args = get_eval_args()
    print(args)

    # load data
    dm = DataManager(args)

    # prepare model
    model = init_model(args, dm)
    model = model.cuda()
    model.load_state_dict(torch.load(args.model_path))

    # split data
    _, _, test_data = dm.split()
    test_loader = DataLoader(test_data,
                             batch_size=args.batch_size,
                             num_workers=args.num_workers,
                             collate_fn=partial(
                                 collate_fn,
                                 split='test',
                                 padding_idx=dm.word2idx['<pad>']))
Example #6
from collections import Counter
import pickle

from dataloader import DataManager
import numpy as np

if __name__ == '__main__':
    data_manager = DataManager()
    data = data_manager.train_dataset.data
    if data_manager.word_counter is None:
        with open('data/word_counter', 'rb') as f:
            word_counter = pickle.load(f)
    else:
        word_counter = data_manager.word_counter

    print('Number of sentences: %d' % (len(data)))
    print('Number of words: %d' % (sum(word_counter.values())))
    print(
        'Number of unique words: %d (w/ min freq 3), %d (w/o min freq)' %
        (len([k
              for k, v in word_counter.items() if v >= 3]), len(word_counter)))
    unk_rate = word_counter['<unk>'] / sum(word_counter.values()) * 100.
    print(
        'Coverage of your limited vocabulary: %.2f%%, UNK token rate: %.2f%%' %
        (100. - unk_rate, unk_rate))
    print('Top 10 most frequent words: ', word_counter.most_common(10))
    len_list = [len(line) for line in data]
    print('Maximum sentence length: %d' % (np.max(len_list)))
    print('Minimum sentence length: %d' % (np.min(len_list)))
    print('Average sentence length: %.2f' % (np.mean(len_list)))
    print('Sentence length standard deviation: %.2f' % (np.std(len_list)))
    # print('Distribution of classes: ', class_counter)
Example #7
    load = False
    train_model = True
    save = False
    test_model = True

    # Parameter
    train_size = 0.9
    validation_size = 0.1

    # image_path = 'data/ISIC-2017_Training_Data'
    image_path = 'data/processed'
    mask_path = 'data/ISIC-2017_Training_Part1_GroundTruth'
    label_file = 'data/ISIC-2017_Training_Part3_GroundTruth.csv'

    # instantiate the DataManager
    mymanager = DataManager(image_path, mask_path, label_file)

    # instantiate MainResNet
    r = MainResNet()

    # Load images and labels
    r.load_data()

    # Generate dataloaders for train, test, validation
    r.dataloader()

    # Initialize the model
    model = rs.resnet101(pretrained=False)
    save_name = 'resNet101_1.pt'

Example #8

    # Load model
    # Load / Safe
    load = True
    safe = True

    # Parameter
    train_size = 0.7
    validation_size = 0.1

    # image_path = 'data/ISIC-2017_Training_Data'
    image_path = 'data/processed'
    mask_path = 'data/ISIC-2017_Training_Part1_GroundTruth'
    label_file = 'data/ISIC-2017_Training_Part3_GroundTruth.csv'

    # instantiate the DataManager
    mymanager = DataManager(image_path, mask_path, label_file)

    X_train, y_train, X_test, y_test, X_val, y_val = load_data()

    # Move the tensors to the GPU when one is available
    if torch.cuda.is_available():
        X_train = X_train.cuda()
        y_train = mymanager.convert_labels(y_train).cuda()
        X_test = X_test.cuda()
        y_test = mymanager.convert_labels(y_test).cuda()
    else:
        y_train = mymanager.convert_labels(y_train)  #.type(torch.LongTensor)
        y_test = mymanager.convert_labels(y_test)  #.type(torch.LongTensor)
Example #9
    best_prec1 = 0.0
    torch.cuda.set_device(1)  # use the second GPU

    # Parameter
    train_size = 0.7
    validation_size = 0.1

    start = time.time()

    # image_path = 'data/ISIC-2017_Training_Data'
    image_path = 'data/processed'
    mask_path = 'data/ISIC-2017_Training_Part1_GroundTruth'
    label_file = 'data/ISIC-2017_Training_Part3_GroundTruth.csv'

    # instantiate the DataManager
    mymanager = DataManager(image_path, mask_path, label_file)

    # load images and labels (the smaller seborrheic subset was used during
    # development because the full set takes too long to load)
    images = mymanager.get_images(as_tensor=True)
    labels = mymanager.get_labels(as_tensor=True)

    # check that images and labels have the same length
    if len(images) != len(labels):
        raise Exception('Error: images and labels do not have equal length')

    # shuffle images and labels
    images, labels = mymanager.shuffle(images, labels)

    # split data in train, test and validation subset
    X_train, y_train, X_test, y_test, X_val, y_val = mymanager.datasplit(
        images, labels, train_size=train_size, validation_size=validation_size)
Example #10
    std = [x / 255 for x in [63.0, 62.1, 66.7]]

    train_transform = transforms.Compose(
        [transforms.RandomHorizontalFlip(), transforms.RandomCrop(32, padding=4), transforms.ToTensor(),
         transforms.Normalize(mean, std)])
    test_transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean, std)])

    if args.dataset == 'melanoma':

        # Path
        image_path = './data/processed'
        mask_path = './data/ISIC-2017_Training_Part1_GroundTruth'
        label_file = './data/ISIC-2017_Training_Part3_GroundTruth.csv'

        # Generate a DataManager
        mymanager = DataManager(image_path, mask_path, label_file)

        # Load the data
        X_train, y_train, X_test, y_test, X_val, y_val = load_data()
        y_train = mymanager.convert_labels(y_train)
        y_test = mymanager.convert_labels(y_test)

        train_data = torch.utils.data.TensorDataset(X_train, y_train)
        test_data = torch.utils.data.TensorDataset(X_test, y_test)

        # Number of labels
        nlabels = 3
    else:
        train_data = dset.CIFAR100(args.data_path, train=True, transform=train_transform, download=True)
        test_data = dset.CIFAR100(args.data_path, train=False, transform=test_transform, download=True)
        nlabels = 100