Example #1
def train():
    iterators, dataset = data_loaders(batch_size)
    model = RNN(input_size, hidden_size, num_layers, dropout, n_classes,
                dataset.get_class_weights())
    optimizer = optim.SGD(model.parameters(), lr)
    log = train_procedure(model, iterators, n_epochs, optimizer)
    save_log(log)
Example #2
def train(args):
    train_path = args.dataset / args.train_dir
    val_path = args.dataset / args.val_dir
    logs_dir = args.logs_dir / args.name
    logs_dir.mkdir(exist_ok=True, parents=True)

    # determine number of classes
    try:
        with open(args.dataset / 'classes.json') as f:
            labels = json.load(f)
            num_classes = len(labels.keys())
    except FileNotFoundError:
        num_classes = int(input("Number of distinct classes in the dataset: "))

    train_loader, val_loader = data_loaders(args.dataset,
                                            args.dataset_sampling,
                                            batch_size=cfg.batch_size,
                                            num_workers=args.num_workers,
                                            pin_memory=True)

    # infer the input point-feature dimension from the first batch
    d_in = next(iter(train_loader))[0].size(-1)

    model = RandLANet(d_in,
                      num_classes,
                      num_neighbors=args.neighbors,
                      decimation=args.decimation,
                      device=args.gpu)

    print('Computing weights...', end='\t')
    samples_per_class = np.array(cfg.class_weights)
    # weight = samples_per_class / float(sum(samples_per_class))
    # class_weights = 1 / (weight + 0.02)
    # effective = 1.0 - np.power(0.99, samples_per_class)
    # class_weights = (1 - 0.99) / effective
    # class_weights = class_weights / (np.sum(class_weights) * num_classes)
    # class_weights = class_weights / float(sum(class_weights))
    # weights = torch.tensor(class_weights).float().to(args.gpu)
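    # chosen scheme: inverse class-frequency weights, smoothed by a small constant to avoid extreme values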
    n_samples = torch.tensor(cfg.class_weights,
                             dtype=torch.float,
                             device=args.gpu)
    ratio_samples = n_samples / n_samples.sum()
    weights = 1 / (ratio_samples + 0.02)
    #weights = F.softmin(n_samples)
    # weights = (1/ratio_samples) / (1/ratio_samples).sum()

    print('Done.')
    print('Weights:', weights)
    criterion = nn.CrossEntropyLoss(weight=weights)

    optimizer = torch.optim.Adam(model.parameters(), lr=args.adam_lr)
    scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer,
                                                       args.scheduler_gamma)

    first_epoch = 1
    if args.load:
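        # resume from the lexicographically last checkpoint file in the run directory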
        path = max(list((args.logs_dir / args.load).glob('*.pth')))
        print(f'Loading {path}...')
        checkpoint = torch.load(path)
        first_epoch = checkpoint['epoch'] + 1
        model.load_state_dict(checkpoint['model_state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        scheduler.load_state_dict(checkpoint['scheduler_state_dict'])

    with SummaryWriter(logs_dir) as writer:
        for epoch in range(first_epoch, args.epochs + 1):
            print(f'=== EPOCH {epoch:d}/{args.epochs:d} ===')
            t0 = time.time()
            # Train
            model.train()

            # metrics
            losses = []
            accuracies = []
            ious = []

            # iterate over dataset
            for points, labels in tqdm(train_loader,
                                       desc='Training',
                                       leave=False):
                points = points.to(args.gpu)
                labels = labels.to(args.gpu)
                optimizer.zero_grad()

                scores = model(points)

                # scores are assumed to be class probabilities; converting them to
                # log-probabilities makes CrossEntropyLoss act as a negative log-likelihood on them
                logp = torch.distributions.utils.probs_to_logits(
                    scores, is_binary=False)
                loss = criterion(logp, labels)
                # logpy = torch.gather(logp, 1, labels)
                # loss = -(logpy).mean()

                loss.backward()

                optimizer.step()

                losses.append(loss.cpu().item())
                accuracies.append(accuracy(scores, labels))
                ious.append(intersection_over_union(scores, labels))

            scheduler.step()

            accs = np.nanmean(np.array(accuracies), axis=0)
            ious = np.nanmean(np.array(ious), axis=0)

            val_loss, val_accs, val_ious = evaluate(model, val_loader,
                                                    criterion, args.gpu)

            loss_dict = {
                'Training loss': np.mean(losses),
                'Validation loss': val_loss
            }
            acc_dicts = [{
                'Training accuracy': acc,
                'Validation accuracy': val_acc
            } for acc, val_acc in zip(accs, val_accs)]
            iou_dicts = [{
                'Training IoU': iou,
                'Validation IoU': val_iou
            } for iou, val_iou in zip(ious, val_ious)]

            # acc_dicts = [
            #     {
            #         f'{i:02d}_train_acc':    acc,
            #         f'{}':  val_acc
            #     }
            #     for i, (acc, val_accs) in enumerate(zip(accs, val_accs))
            # ]

            t1 = time.time()
            d = t1 - t0
            # Display results
            for k, v in loss_dict.items():
                print(f'{k}: {v:.7f}', end='\t')
            print()

            print('Accuracy     ',
                  *[f'{i:>5d}' for i in range(num_classes)],
                  '   OA',
                  sep=' | ')
            print('Training:    ',
                  *[
                      f'{acc:.3f}' if not np.isnan(acc) else '  nan'
                      for acc in accs
                  ],
                  sep=' | ')
            print('Validation:  ',
                  *[
                      f'{acc:.3f}' if not np.isnan(acc) else '  nan'
                      for acc in val_accs
                  ],
                  sep=' | ')

            print('IoU          ',
                  *[f'{i:>5d}' for i in range(num_classes)],
                  ' mIoU',
                  sep=' | ')
            print('Training:    ',
                  *[
                      f'{iou:.3f}' if not np.isnan(iou) else '  nan'
                      for iou in ious
                  ],
                  sep=' | ')
            print('Validation:  ',
                  *[
                      f'{iou:.3f}' if not np.isnan(iou) else '  nan'
                      for iou in val_ious
                  ],
                  sep=' | ')

            print(
                'Time elapsed:', '{:.0f} s'.format(d)
                if d < 60 else '{:.0f} min {:02.0f} s'.format(*divmod(d, 60)))

            # send results to tensorboard
            writer.add_scalars('Loss', loss_dict, epoch)

            for i in range(num_classes):
                writer.add_scalars(f'Per-class accuracy/{i+1:02d}',
                                   acc_dicts[i], epoch)
                writer.add_scalars(f'Per-class IoU/{i+1:02d}', iou_dicts[i],
                                   epoch)
            writer.add_scalars('Per-class accuracy/Overall', acc_dicts[-1],
                               epoch)
            writer.add_scalars('Per-class IoU/Mean IoU', iou_dicts[-1], epoch)

            if epoch % args.save_freq == 0:
                torch.save(
                    dict(epoch=epoch,
                         model_state_dict=model.state_dict(),
                         optimizer_state_dict=optimizer.state_dict(),
                         scheduler_state_dict=scheduler.state_dict()),
                    args.logs_dir / args.name / f'checkpoint_{epoch:02d}.pth')
Example #3
import time
from pathlib import Path

import torch
import torch.nn as nn

from data import data_loaders
from model import RandLANet
from utils.ply import read_ply, write_ply

t0 = time.time()

path = Path('datasets') / 's3dis' / 'subsampled' / 'test'

device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

print('Loading data...')
loader, _ = data_loaders(path)

print('Loading model...')

d_in = 6
num_classes = 14

model = RandLANet(d_in, num_classes, 16, 4, device)
model.load_state_dict(
    torch.load('runs/2020-04-11_17:03/checkpoint_10.pth')['model_state_dict'])
model.eval()

points, labels = next(iter(loader))

print('Predicting labels...')
with torch.no_grad():
    # assumed completion: forward pass over the loaded batch (the source snippet is cut off here)
    scores = model(points.to(device))
    predictions = scores.argmax(dim=1)
Example #4
def main():

    #parsing the arguments
    args, _ = parse_arguments()

    #setup logging
    #output_dir = Path('/content/drive/My Drive/image-captioning/output')
    output_dir = Path(args.output_directory)
    output_dir.mkdir(parents=True, exist_ok=True)
    logfile_path = Path(output_dir / "output.log")
    setup_logging(logfile=logfile_path)

    #setup and read config.ini
    #config_file = Path('/content/drive/My Drive/image-captioning/config.ini')
    config_file = Path('../config.ini')
    reading_config(config_file)

    #tensorboard
    tensorboard_logfile = Path(output_dir / 'tensorboard')
    tensorboard_writer = SummaryWriter(tensorboard_logfile)

    #load dataset
    #dataset_dir = Path('/content/drive/My Drive/Flickr8k_Dataset')
    dataset_dir = Path(args.dataset)
    images_path = Path(dataset_dir / Config.get("images_dir"))
    captions_path = Path(dataset_dir / Config.get("captions_dir"))
    training_loader, validation_loader, testing_loader = data_loaders(
        images_path, captions_path)

    #load the model (encoder, decoder, optimizer)
    embed_size = Config.get("encoder_embed_size")
    hidden_size = Config.get("decoder_hidden_size")
    batch_size = Config.get("training_batch_size")
    epochs = Config.get("epochs")
    feature_extraction = Config.get("feature_extraction")
    raw_captions = read_captions(captions_path)
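    # build vocabulary maps; the vocabulary size drives the decoder output layer and the pretrained embedding lookup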
    id_to_word, word_to_id = dictionary(raw_captions, threshold=5)
    vocab_size = len(id_to_word)
    encoder = Encoder(embed_size, feature_extraction)
    decoder = Decoder(embed_size, hidden_size, vocab_size, batch_size)

    #load pretrained embeddings
    #pretrained_emb_dir = Path('/content/drive/My Drive/word2vec')
    pretrained_emb_dir = Path(args.pretrained_embeddings)
    pretrained_emb_file = Path(pretrained_emb_dir /
                               Config.get("pretrained_emb_path"))
    pretrained_embeddings = load_pretrained_embeddings(pretrained_emb_file,
                                                       id_to_word)

    #load the optimizer
    learning_rate = Config.get("learning_rate")
    optimizer = adam_optimizer(encoder, decoder, learning_rate)

    # loss function
    criterion = cross_entropy

    #load checkpoint
    checkpoint_file = Path(output_dir / Config.get("checkpoint_file"))
    checkpoint_captioning = load_checkpoint(checkpoint_file)
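    # load_checkpoint is expected to return None when no checkpoint file exists yet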

    #using available device(gpu/cpu)
    encoder = encoder.to(Config.get("device"))
    decoder = decoder.to(Config.get("device"))
    pretrained_embeddings = pretrained_embeddings.to(Config.get("device"))

    start_epoch = 1
    if checkpoint_captioning is not None:
        start_epoch = checkpoint_captioning['epoch'] + 1
        encoder.load_state_dict(checkpoint_captioning['encoder'])
        decoder.load_state_dict(checkpoint_captioning['decoder'])
        optimizer.load_state_dict(checkpoint_captioning['optimizer'])
        logger.info(
            'Initialized encoder, decoder and optimizer from loaded checkpoint'
        )

    del checkpoint_captioning

    #image captioning model
    model = ImageCaptioning(encoder, decoder, optimizer, criterion,
                            training_loader, validation_loader, testing_loader,
                            pretrained_embeddings, output_dir,
                            tensorboard_writer)

    #training and testing the model
    if args.training:
        validate_every = Config.get("validate_every")
        model.train(epochs, validate_every, start_epoch)
    elif args.testing:
        model.testing(id_to_word, images_path)
Example #5
def train(args):
    train_path = args.dataset / args.train_dir
    val_path = args.dataset / args.val_dir
    logs_dir = args.logs_dir / args.name
    logs_dir.mkdir(exist_ok=True, parents=True)

    num_classes = len(cfg.class_weights)

    train_loader, val_loader = data_loaders(
        args.dataset,
        args.dataset_sampling,
        batch_size=args.batch_size,
        num_workers=args.num_workers,
        pin_memory=True,
    )

    d_in = next(iter(train_loader))[0].size(-1)

    model = RandLANet(
        d_in,
        num_classes,
        num_neighbors=args.neighbors,
        decimation=args.decimation,
        device=args.gpu,
    )

    print("Computing weights...", end="\t")
    samples_per_class = np.array(cfg.class_weights)

    n_samples = torch.tensor(cfg.class_weights,
                             dtype=torch.float,
                             device=args.gpu)
    ratio_samples = n_samples / n_samples.sum()
    weights = 1 / (ratio_samples + 0.02)

    print("Done.")
    print("Weights:", weights)
    criterion = nn.CrossEntropyLoss(weight=weights)

    optimizer = torch.optim.Adam(model.parameters(), lr=args.adam_lr)
    scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer,
                                                       args.scheduler_gamma)

    first_epoch = 1
    if args.load:
        path = max(list((args.logs_dir / args.load).glob("*.pth")))
        print(f"Loading {path}...")
        checkpoint = torch.load(path)
        first_epoch = checkpoint["epoch"] + 1
        model.load_state_dict(checkpoint["model_state_dict"])
        optimizer.load_state_dict(checkpoint["optimizer_state_dict"])
        scheduler.load_state_dict(checkpoint["scheduler_state_dict"])

    with SummaryWriter(logs_dir) as writer:
        for epoch in range(first_epoch, args.epochs + 1):
            print(f"=== EPOCH {epoch:d}/{args.epochs:d} ===")
            t0 = time.time()
            # Train
            model.train()

            # metrics
            losses = []
            accuracies = []
            ious = []

            # iterate over dataset
            for points, labels in tqdm(train_loader,
                                       desc="Training",
                                       leave=False):
                points = points.to(args.gpu)
                labels = labels.to(args.gpu)
                optimizer.zero_grad()

                scores = model(points)

                logp = torch.distributions.utils.probs_to_logits(
                    scores, is_binary=False)
                loss = criterion(logp, labels)
                # logpy = torch.gather(logp, 1, labels)
                # loss = -(logpy).mean()

                loss.backward()

                optimizer.step()

                losses.append(loss.cpu().item())
                accuracies.append(accuracy(scores, labels))
                ious.append(intersection_over_union(scores, labels))

            scheduler.step()

            accs = np.nanmean(np.array(accuracies), axis=0)
            ious = np.nanmean(np.array(ious), axis=0)

            val_loss, val_accs, val_ious = evaluate(model, val_loader,
                                                    criterion, args.gpu)

            loss_dict = {
                "Training loss": np.mean(losses),
                "Validation loss": val_loss
            }
            acc_dicts = [{
                "Training accuracy": acc,
                "Validation accuracy": val_acc
            } for acc, val_acc in zip(accs, val_accs)]
            iou_dicts = [{
                "Training IoU": iou,
                "Validation IoU": val_iou
            } for iou, val_iou in zip(ious, val_ious)]

            t1 = time.time()
            d = t1 - t0
            # Display results
            for k, v in loss_dict.items():
                print(f"{k}: {v:.7f}", end="\t")
            print()

            print(
                "Accuracy     ",
                *[f"{i:>5d}" for i in range(num_classes)],
                "   OA",
                sep=" | ",
            )
            print(
                "Training:    ",
                *[
                    f"{acc:.3f}" if not np.isnan(acc) else "  nan"
                    for acc in accs
                ],
                sep=" | ",
            )
            print(
                "Validation:  ",
                *[
                    f"{acc:.3f}" if not np.isnan(acc) else "  nan"
                    for acc in val_accs
                ],
                sep=" | ",
            )

            print(
                "IoU          ",
                *[f"{i:>5d}" for i in range(num_classes)],
                " mIoU",
                sep=" | ",
            )
            print(
                "Training:    ",
                *[
                    f"{iou:.3f}" if not np.isnan(iou) else "  nan"
                    for iou in ious
                ],
                sep=" | ",
            )
            print(
                "Validation:  ",
                *[
                    f"{iou:.3f}" if not np.isnan(iou) else "  nan"
                    for iou in val_ious
                ],
                sep=" | ",
            )

            print(f"Time elapsed: {d:.0f} s")

            # send results to tensorboard
            writer.add_scalars("Loss", loss_dict, epoch)

            for i in range(num_classes):
                writer.add_scalars(f"Per-class accuracy/{i+1:02d}",
                                   acc_dicts[i], epoch)
                writer.add_scalars(f"Per-class IoU/{i+1:02d}", iou_dicts[i],
                                   epoch)
            writer.add_scalars("Per-class accuracy/Overall", acc_dicts[-1],
                               epoch)
            writer.add_scalars("Per-class IoU/Mean IoU", iou_dicts[-1], epoch)

            if epoch % args.save_freq == 0:
                torch.save(
                    dict(
                        epoch=epoch,
                        model_state_dict=model.state_dict(),
                        optimizer_state_dict=optimizer.state_dict(),
                        scheduler_state_dict=scheduler.state_dict(),
                    ),
                    args.logs_dir / args.name / f"checkpoint_{epoch:02d}.pth",
                )
Example #6
def main():
    #output directory
    output_dir = Path('../output/')

    #setup logging
    output_dir.mkdir(parents=True, exist_ok=True)
    logfile_path = Path(output_dir / "output.log")
    setup_logging(logfile=logfile_path)

    #reading the config file
    config_file = Path('../config.ini')
    reading_config(config_file)

    #dataset paths
    rworldnews_path = Path(Config.get("rworldnews"))
    millionnews_path = Path(Config.get("millionnews"))

    #loading the dataset
    raw_data = load_data(rworldnews_path)
    #raw_data = load_data(millionnews_path)

    dates, labels, news = clean_data(raw_data)
    id_to_word, word_to_id = dictionary(news, threshold=5)
    training_loader, validation_loader, testing_loader = data_loaders(rworldnews_path)

    #tensorboard

    #loading pretrained embeddings
    pretrained_emb_file = Path(Config.get("pretrained_emb_path"))
    pretrained_embeddings, emb_dim = load_pretrained_embeddings(pretrained_emb_file, id_to_word)

    #text classification model
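    # two output classes (presumably whether the market moves up or down)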
    num_classes = 2
    model = TextClassifier(pretrained_embeddings, emb_dim, num_classes)

    #load the optimizer
    learning_rate = Config.get("learning_rate")
    optimizer = adam_optimizer(model, learning_rate)

    #load the loss function
    criterion = cross_entropy

    #load checkpoint
    checkpoint_file = Path(output_dir / Config.get("checkpoint_file"))
    checkpoint_stocks = load_checkpoint(checkpoint_file)

    #using available device(gpu/cpu)
    model = model.to(Config.get("device"))
    pretrained_embeddings = pretrained_embeddings.to(Config.get("device"))

    # initializing the model and optimizer from the saved checkpoint
    start_epoch = 1
    if checkpoint_stocks is not None:
        start_epoch = checkpoint_stocks['epoch'] + 1
        model.load_state_dict(checkpoint_stocks['model'])
        optimizer.load_state_dict(checkpoint_stocks['optimizer'])
        logger.info('Initialized model and the optimizer from loaded checkpoint.')
    
    del checkpoint_stocks

    #stock prediction model
    model = StockPrediction(model, optimizer, criterion, 
                            training_loader, validation_loader, testing_loader, output_dir)

    #training and testing the model
    epochs = Config.get("epochs")
    validate_every = Config.get("validate_every")
    model.train(epochs, validate_every, start_epoch)
Example #7
def evaluate():
    iterators, dataset = data_loaders(batch_size)
    model = RNN(input_size, hidden_size, num_layers, dropout, n_classes,
                dataset.get_class_weights())
    model.load_state_dict(load('model.pt'))
    print(model.measure(iterators[1]))
Example #8
def main(args):
    # create paths to useful directories
    exp_dir = args.runs_dir / args.name
    checkpoints_dir = exp_dir / 'checkpoints'
    logs_dir = exp_dir / 'logs'
    results_dir = exp_dir / 'results'
    for directory in [checkpoints_dir, logs_dir, results_dir]:
        directory.mkdir(exist_ok=True, parents=True)

    print('Loading dataset...', end='\t', flush=True)
    train_loader, val_loader = data_loaders(args)
    print('Done.')

    print('Initializing model...', end='\t', flush=True)
    # choose how to initialize bias layer
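    # nfft // 2 + 1 = number of frequency bins of a one-sided spectrum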
    length = args.nfft // 2 + 1
    if args.init_bias == 'constant':
        init_bias = (torch.zeros(length), torch.ones(length))
    elif args.init_bias == 'mean':
        stats_path = args.dataset / 'stats'
        bias = load(stats_path / f'mean_fft{args.nfft:d}-hop{args.nhop:d}.npy')
        scale = load(stats_path / f'std_fft{args.nfft:d}-hop{args.nhop:d}.npy')
        init_bias = (torch.from_numpy(bias), torch.from_numpy(scale))
    elif args.init_bias == 'random':
        init_bias = (torch.randn(length), torch.exp(torch.randn(length)))
    else:
        raise NotImplementedError

    model = MSS(init_bias,
                n_fft=args.nfft,
                n_hop=args.nhop,
                context_frames=args.context_frames,
                window=args.window).to(args.device)
    print('Done.')

    # initialize optimizer
    optimizer = torch.optim.Adam(model.parameters(), args.lr)

    # choose criterion
    criterion = nn.MSELoss()
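    # keep a handle to the model's inverse STFT; it is used later to turn the predicted spectrogram back into audio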
    ISTFT = model.stft.istft

    # eventually load a previous model
    first_epoch = 1
    if args.load_model:
        path = max(list(checkpoints_dir.glob('*.pth')))
        print(f'Loading {path}...')
        checkpoint = torch.load(path)
        first_epoch = checkpoint['epoch'] + 1
        model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])

    # main loop
    with Writer(args.epochs, logs_dir) as writer:
        for epoch in range(first_epoch, args.epochs + 1):
            writer.epoch = epoch

            t0 = time.time()

            train_loss = train(model,
                               train_loader,
                               criterion,
                               optimizer,
                               args.device,
                               desc=writer.epoch_desc + 'Training')
            example, val_loss = val(model,
                                    val_loader,
                                    criterion,
                                    args.device,
                                    desc=writer.epoch_desc + 'Validation')

            # TODO: scheduler
            t1 = time.time()

            loss_dict = {
                'Training loss': train_loss,
                'Validation loss': val_loss
            }
            # print results
            writer.display_results(loss_dict, t1 - t0)

            # add results to tensorboard
            writer.add_scalars('Reconstruction loss', loss_dict)

            if epoch % args.save_freq == 0:
                t0 = time.time()
                print(f'Saving model in {exp_dir}...', end='\t', flush=True)

                # compute reconstructed audio
                audio_ex = ISTFT(example).squeeze(0)

                # save it in a file
                torchaudio.save(
                    str(results_dir / f'{args.instrument}_{epoch:03d}.wav'),
                    audio_ex, 44100)

                # save it to tensorboard
                writer.add_audio(args.instrument.capitalize(),
                                 audio_ex,
                                 start=5,
                                 duration=30,
                                 subsampling=2)

                # save checkpoint
                torch.save(
                    dict(epoch=epoch,
                         state_dict=model.state_dict(),
                         optimizer=optimizer.state_dict()),
                    checkpoints_dir / f'checkpoint_{epoch:03d}.pth')

                t1 = time.time()
                print(f'Done ({t1-t0:.0f} s).')