Example #1
def main():
    # SET THE PARAMETERS
    parser = argparse.ArgumentParser()
    parser.add_argument('--lr', type=float, default=1e-3,
                        help='Initial learning rate (default: 1e-3)')
    parser.add_argument('--epochs', type=int, default=100,
                        help='Maximum number of epochs (default: 100)')
    parser.add_argument('--patience', type=int, default=10,
                        help='lr scheduler patience (default: 10)')
    parser.add_argument('--batch', type=int, default=4,
                        help='Batch size (default: 4)')
    parser.add_argument('--name', type=str, default='Prueba',
                        help='Name of the current test (default: Prueba)')

    parser.add_argument('--load_model', type=str, default='best_acc',
                        help='Weights to load (default: best_acc)')
    parser.add_argument('--test', action='store_false', default=True,
                        help='Only test the model')
    parser.add_argument('--resume', action='store_true', default=False,
                        help='Continue training a model')
    parser.add_argument('--load_path', type=str, default=None,
                        help='Name of the folder with the pretrained model')
    parser.add_argument('--ft', action='store_true', default=False,
                        help='Fine-tune a model')
    parser.add_argument('--psFactor', type=float, default=1,
                        help='Scaling factor for the patch size (default: 1)')

    parser.add_argument('--gpu', type=str, default='0',
                        help='GPU(s) to use (default: 0)')
    args = parser.parse_args()

    training = args.test  # --test uses store_false, so passing it disables training
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu

    if args.ft:
        args.resume = True

    args.patch_size = [int(128*args.psFactor), int(128*args.psFactor), int(96*args.psFactor)]
    args.num_classes = 2

    # PATHS AND DIRS
    save_path = os.path.join('TRAIN', args.name)
    out_path = os.path.join(save_path, 'Val')
    load_path = save_path
    if args.load_path is not None:
        load_path = os.path.join('TRAIN/', args.load_path)

    root = '../../Data/Heart'
    train_file = 'train_paths.csv'
    test_file = 'val_paths.csv'

    os.makedirs(save_path, exist_ok=True)
    os.makedirs(out_path, exist_ok=True)

    # SEEDS
    np.random.seed(12345)
    torch.manual_seed(12345)

    cudnn.deterministic = False
    cudnn.benchmark = True

    # CREATE THE NETWORK ARCHITECTURE
    model = GNet(num_classes=args.num_classes, backbone='xception')
    print('---> Number of params: {}'.format(
        sum([p.data.nelement() for p in model.parameters()])))
    model = model.cuda()

    optimizer = optim.Adam(model.parameters(), lr=args.lr,
                           weight_decay=1e-5, amsgrad=True)

    model, optimizer = amp.initialize(model, optimizer, opt_level="O1")

    annealing = optim.lr_scheduler.ReduceLROnPlateau(
        optimizer, verbose=True, patience=args.patience, threshold=0.001,
        factor=0.5, threshold_mode="abs")

    criterion = utils.segmentation_loss(alpha=1)
    metrics = utils.Evaluator(args.num_classes)

    # LOAD A MODEL IF NEEDED (TESTING OR CONTINUE TRAINING)
    args.epoch = 0
    best_acc = 0
    if args.resume or not training:
        name = 'epoch_' + args.load_model + '.pth.tar'
        checkpoint = torch.load(
            os.path.join(load_path, name),
            map_location=lambda storage, loc: storage)
        args.epoch = checkpoint['epoch']
        best_acc = checkpoint['best_acc']
        args.lr = checkpoint['lr']

        print('Loading model and optimizer {}.'.format(args.epoch))

        amp.load_state_dict(checkpoint['amp'])
        model.load_state_dict(checkpoint['state_dict'], strict=(not args.ft))
        if not args.ft:
            optimizer.load_state_dict(checkpoint['optimizer'])

    # DATALOADERS
    train_loader = Read_data.MRIdataset(True, train_file, root,
                                        args.patch_size)
    val_loader = Read_data.MRIdataset(True, test_file, root, args.patch_size,
                                      val=True)
    test_loader = Read_data.MRIdataset(False, test_file, root, args.patch_size)

    train_loader = DataLoader(train_loader, shuffle=True, sampler=None,
                              batch_size=args.batch, num_workers=10)
    val_loader = DataLoader(val_loader, shuffle=False, sampler=None,
                            batch_size=args.batch * 2, num_workers=10)
    test_loader = DataLoader(test_loader, shuffle=False, sampler=None,
                             batch_size=1, num_workers=0)

    # TRAIN THE MODEL
    is_best = True
    if training:
        torch.cuda.empty_cache()
        out_file = open(os.path.join(save_path, 'progress.csv'), 'a+')

        for epoch in range(args.epoch + 1, args.epochs + 1):
            args.epoch = epoch
            lr = utils.get_lr(optimizer)
            print('--------- Starting Epoch {} --> {} ---------'.format(
                epoch, time.strftime("%H:%M:%S")))
            print('Learning rate:', lr)

            train_loss = train(args, model, train_loader, optimizer, criterion)
            val_loss, acc = val(args, model, val_loader, criterion, metrics)

            acc = acc.item()
            out_file.write('{},{},{},{}\n'.format(
                args.epoch, train_loss, val_loss, acc))
            out_file.flush()

            annealing.step(val_loss)
            save_graph(save_path)

            is_best = best_acc < acc
            best_acc = max(best_acc, acc)

            state = {
                'epoch': epoch,
                'state_dict': model.state_dict(),
                'optimizer': optimizer.state_dict(),
                'amp': amp.state_dict(),
                'loss': [train_loss, val_loss],
                'lr': lr,
                'acc': acc,
                'best_acc': best_acc}

            checkpoint = epoch % 20 == 0
            utils.save_epoch(state, save_path, epoch,
                             checkpoint=checkpoint, is_best=is_best)

            if lr <= (args.lr / (10 ** 3)):
                print('Stopping training: learning rate is too small')
                break
        out_file.close()

    # TEST THE MODEL (reload the best checkpoint unless the weights already in memory are the best)
    if not is_best:
        checkpoint = torch.load(
            os.path.join(save_path, 'epoch_best_acc.pth.tar'),
            map_location=lambda storage, loc: storage)
        model.load_state_dict(checkpoint['state_dict'])
        args.epoch = checkpoint['epoch']
        print('Testing epoch with best dice ({}: acc {})'.format(
            args.epoch, checkpoint['acc']))

    test(args, model, test_loader, out_path, test_file)
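
The utils.save_epoch helper called above is project-specific and not included in this listing. A minimal sketch, assuming it behaves consistently with the call save_epoch(state, save_path, epoch, checkpoint=..., is_best=...) and with the epoch_best_acc.pth.tar file that the test branch reloads (the file names and the snapshot behaviour are assumptions):

import os
import shutil
import torch

def save_epoch(state, save_path, epoch, checkpoint=False, is_best=False):
    # Hypothetical sketch: always overwrite the most recent checkpoint
    last_path = os.path.join(save_path, 'epoch_last.pth.tar')
    torch.save(state, last_path)
    # Keep a numbered snapshot whenever `checkpoint` is True (every 20 epochs in main())
    if checkpoint:
        torch.save(state, os.path.join(save_path, 'epoch_{}.pth.tar'.format(epoch)))
    # Keep a separate copy of the best-accuracy model, which main() reloads for testing
    if is_best:
        shutil.copyfile(last_path, os.path.join(save_path, 'epoch_best_acc.pth.tar'))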
Example #2
def train(batch_size=32,
          vocab_threshold=5,
          vocab_from_file=True,
          embed_size=256,
          hidden_size=512,
          num_epochs=10,
          latest_model=None,
          cocoapi_dir="./Coco/"):
    # Keep track of the training losses by epoch
    train_losses = []

    # Define a transform to pre-process the training images
    transform_train = transforms.Compose([
        transforms.Resize(256),
        transforms.RandomCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
    ])

    # Build data loader, applying the transforms
    train_loader = get_loader(transform=transform_train,
                              mode='train',
                              batch_size=batch_size,
                              vocab_threshold=vocab_threshold,
                              vocab_from_file=vocab_from_file,
                              cocoapi_loc=cocoapi_dir)

    # The size of the vocabulary
    vocab_size = len(train_loader.dataset.vocab)

    # Initialize the encoder and decoder
    checkpoint = None
    if latest_model:
        checkpoint = torch.load(latest_model)
    start_epoch = 1
    if checkpoint:
        train_losses = checkpoint['train_losses']
        val_losses = checkpoint['val_losses']
        start_epoch = checkpoint['epoch']
    encoder = EncoderCNN(embed_size)
    decoder = DecoderRNN(embed_size, hidden_size, vocab_size)
    if checkpoint:
        encoder.load_state_dict(checkpoint['encoder'])
        decoder.load_state_dict(checkpoint['decoder'])

    # Move models to GPU if CUDA is available
    if torch.cuda.is_available():
        torch.cuda.set_device(1)
        encoder.cuda()
        decoder.cuda()

    # Define the loss function
    loss = nn.CrossEntropyLoss()
    if torch.cuda.is_available():
        loss = loss.cuda()

    # Specify the learnable parameters of the model
    params = list(decoder.parameters()) + list(
        encoder.embed.parameters()) + list(encoder.bn.parameters())

    # Define the optimizer
    optimizer = torch.optim.Adam(params=params, lr=0.001)
    if checkpoint:
        optimizer.load_state_dict(checkpoint['optimizer'])

    # Set the total number of training and validation steps per epoch
    total_train_step = math.ceil(
        len(train_loader.dataset.caption_lengths) /
        train_loader.batch_sampler.batch_size)

    start_time = time.time()
    for epoch in range(start_epoch, num_epochs + 1):
        train_loss = train_one(train_loader, encoder, decoder, loss, optimizer,
                               vocab_size, epoch, total_train_step)
        train_losses.append(train_loss)
        # Save the entire model anyway, regardless of being the best model so far or not
        filename = os.path.join("./models", "model-{}.pkl".format(epoch))
        save_epoch(filename, encoder, decoder, optimizer, train_losses, epoch)
        print("Epoch [%d/%d] took %ds" %
              (epoch, num_epochs, time.time() - start_time))
        start_time = time.time()
Example #3
for epoch in range(1, num_epochs + 1):
    train_loss = train(train_loader, encoder, decoder, criterion, optimizer,
                       vocab_size, epoch, total_train_step)
    train_losses.append(train_loss)
    val_loss, val_bleu = validate(val_loader, encoder, decoder, criterion,
                                  train_loader.dataset.vocab, epoch,
                                  total_val_step)
    val_losses.append(val_loss)
    val_bleus.append(val_bleu)
    if val_bleu > best_val_bleu:
        print("Validation Bleu-4 improved from {:0.4f} to {:0.4f}, "
              "saving model to best-model.pkl".format(best_val_bleu, val_bleu))
        best_val_bleu = val_bleu
        filename = os.path.join("./models", "best-model.pkl")
        save_epoch(filename, encoder, decoder, optimizer, train_losses,
                   val_losses, val_bleu, val_bleus, epoch)
    else:
        print("Validation Bleu-4 did not improve, saving model to model-{}.pkl"
              .format(epoch))
    # Save the entire model anyway, regardless of being the best model so far or not
    filename = os.path.join("./models", "model-{}.pkl".format(epoch))
    save_epoch(filename, encoder, decoder, optimizer, train_losses, val_losses,
               val_bleu, val_bleus, epoch)
    print("Epoch [%d/%d] took %ds" %
          (epoch, num_epochs, time.time() - start_time))
    if epoch > 5:
        # Stop if the validation Bleu doesn't improve for 3 epochs
        if early_stopping(val_bleus, 3):
            break
    start_time = time.time()
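
The early_stopping helper used above is not part of the listing either. A minimal sketch matching the call early_stopping(val_bleus, 3) and the comment about stopping after three epochs without improvement (the exact improvement criterion is an assumption):

def early_stopping(val_bleus, patience=3):
    # Hypothetical sketch: stop when none of the last `patience` Bleu-4 scores
    # improves on the best score obtained before them
    if len(val_bleus) <= patience:
        return False
    return max(val_bleus[-patience:]) <= max(val_bleus[:-patience])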
Example #4
def testing_data_routine():
    for s_id, sbj_name in enumerate(f_names_test):

        epoch_filepath = os.path.join(epoch_test_loc, f'{sbj_name}_epoch.npy')
        features_test_filepath = os.path.join(features_test_folder, f'{sbj_name}_features.npy')

        # TODO: Fix the way the training set is obtained within the test area
        csp_filepath = os.path.join(csp_folder, f'{f_names_train[s_id]}_Wcsp.npy')

        if os.path.exists(epoch_filepath):
            X = np.load(epoch_filepath, allow_pickle=True).item()

        else:
            X = dict()

            for sbj_idx in range(n_runs):
                # Load the raw file and the set of events associated with it
                raw, eve = utils.pick_file(raw_fif_folder, sbj_name, sbj_idx + 1)

                # Split the file into epochs and apply ICA
                x_temp = utils.epoch_raw_data(
                    raw, eve, e_dict, t_start, t_end, ica_start, ica_end
                )

                # Add the current epochs to the dictionary, creating each entry on the first run
                for i in e_classes:
                    if sbj_idx == 0:
                        X[i] = x_temp[i]
                    else:
                        X[i].add_epoch(x_temp[i].data)

            utils.save_epoch(epoch_filepath, X)
            del x_temp, raw, eve

        Wfb = np.load(csp_filepath, allow_pickle=True).item()

        # Check whether a test features file already exists
        if not os.path.exists(features_test_filepath):
            # If it does not exist, create it
            f = dict()

            for k, (i, j) in product(X, combinations(e_classes, 2)):
                # k - classes present in the data dictionary X
                # i, j - every possible CSP pair of the classes in e_classes
                if k not in e_classes:
                    continue

                # Loop over every trial (signal) in the set of matrices
                for n in range(X[k].n_trials):

                    # Compute the feature vectors using the current class of W and of X
                    f_temp = np.append(
                        Wfb[f'{i}{j}'].csp_feature(X[k].data[:, :, n]).transpose(),
                        [[k_id for k_id in e_dict if e_dict[k_id] == k]], axis=1
                    )

                    # Try to append this feature vector to the feature matrix
                    try:
                        f[f'{i}{j}'] = np.append(f[f'{i}{j}'], f_temp, axis=0)
                    except KeyError:
                        f[f'{i}{j}'] = f_temp

            utils.save_csp(features_test_filepath, f)
Example #5
def training_data_routine():
    for sbj_name in f_names_train:

        epoch_filepath = os.path.join(epoch_train_loc, f'{sbj_name}_epoch.npy')
        csp_filepath = os.path.join(csp_folder, f'{sbj_name}_Wcsp.npy')
        features_filepath = os.path.join(features_train_folder, f'{sbj_name}_features.npy')

        if os.path.exists(epoch_filepath):
            X = Epochs.dict_from_filepath(epoch_filepath)

        else:
            X = dict()

            for sbj_idx in range(n_runs):

                # Load the raw file and the set of events associated with it
                raw, eve = utils.pick_file(raw_fif_folder, sbj_name, sbj_idx + 1)
                # From the raw file, return the epochs after ICA has been applied
                x_temp = utils.epoch_raw_data(
                    raw, eve, e_dict, t_start, t_end, ica_start, ica_end)

                # Store the epoched data in the epochs dictionary X
                for i in e_classes:
                    if sbj_idx == 0:
                        X[i] = x_temp[i]
                    else:
                        X[i].add_epoch(x_temp[i].data)

            utils.save_epoch(epoch_filepath, X)
            del x_temp, raw, eve

        # %% ============== Computation of the spatial projection matrices =====================

        if os.path.exists(csp_filepath):
            W = np.load(csp_filepath, allow_pickle=True).item()

        else:
            # Compute the spatial projection matrix for a single subject
            print('Computing the spatial projection matrices for subject {}'.format(sbj_name))
            W = dict()

            for i, j in combinations(e_classes, 2):
                W[f'{i}{j}'] = FBCSP(X[i], X[j], m=m, filterbank=fb_freqs)

            utils.save_csp(csp_filepath, W)

        # ====================== Construction of the feature vectors ==================

        if not os.path.exists(features_filepath):
            # If the files do not exist, compute them and save them afterwards
            print('Creating the feature vectors for subject {}'.format(sbj_name))
            features = dict()

            # Run CSP and extract features for every possible pair of classes
            for i, j in combinations(e_classes, 2):
                # Extract the features for every combination of classes
                features[f'{i}{j}'] = W[f'{i}{j}'].generate_train_features(
                    X[i], X[j], dict(zip(e_dict.values(), e_dict.keys()))
                )

            utils.save_csp(features_filepath, features)
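
In Examples #4 and #5 the dictionaries written by utils.save_epoch and utils.save_csp are later restored with np.load(..., allow_pickle=True).item() (or Epochs.dict_from_filepath), which suggests both helpers are thin wrappers around np.save. A minimal sketch under that assumption:

import numpy as np

def save_epoch(filepath, epochs_dict):
    # Hypothetical sketch: persist the dict of Epochs objects so it can be
    # restored later with np.load(filepath, allow_pickle=True).item()
    np.save(filepath, epochs_dict, allow_pickle=True)

def save_csp(filepath, csp_dict):
    # Same pattern for the dicts of CSP projection matrices and feature vectors
    np.save(filepath, csp_dict, allow_pickle=True)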