Example #1
            options['gamma'] = 0
        elif options['type'] == 'cls_clust_LELMEL':
            options['gamma'] = 0
            options['mu'] = 0
        elif options['type'] == 'cls_clust_LEL':
            options['gamma'] = 0
        elif options['type'] == 'only_clust':
            options['gamma'] = 0
            #TODO
            # Make classification weight zero too

        print 'Start training...'
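        # Training loop: validate each epoch and checkpoint whenever average precision improves.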
        for epoch in range(options['start_epoch'], options['epochs']):
            # Validate
            if options['val_on']:
                avg_prec = validate_model(val_loader, model, criterion_cls,
                                          options)
                is_best = avg_prec > best_avg_prec
                if is_best:
                    print 'Best model till now: ', epoch
                    best_avg_prec = max(avg_prec, best_avg_prec)
                    print 'Saving checkpoint after ', epoch, ' epochs...'
                    save_checkpoint(
                        {
                            'epoch': epoch + 1,
                            'base_arch': options['base_arch'],
                            'state_dict': model.state_dict(),
                            'best_avg_prec': best_avg_prec
                        },
                        filename=options['save_dir'] +
                        'checkpoint_{}_epoch_{}.pth.tar'.format(
                            options['type'], epoch),
Example #2
def main():
    parser = get_parser()
    args = parser.parse_args()
    name = get_name(parser, args)
    print(f"Experiment {name}")

    if args.musdb is None and args.rank == 0:
        print(
            "You must provide the path to the MusDB dataset with the --musdb flag. "
            "To download the MusDB dataset, see https://sigsep.github.io/datasets/musdb.html.",
            file=sys.stderr)
        sys.exit(1)

    eval_folder = args.evals / name
    eval_folder.mkdir(exist_ok=True, parents=True)
    args.logs.mkdir(exist_ok=True)
    metrics_path = args.logs / f"{name}.json"
    args.checkpoints.mkdir(exist_ok=True, parents=True)
    args.models.mkdir(exist_ok=True, parents=True)

    if args.device is None:
        device = "cpu"
        if th.cuda.is_available():
            device = "cuda"
    else:
        device = args.device

    th.manual_seed(args.seed)
    # Prevents too many threads from being started when running `museval`, which can be
    # quite inefficient on NUMA architectures.
    os.environ["OMP_NUM_THREADS"] = "1"

    if args.world_size > 1:
        if device != "cuda" and args.rank == 0:
            print("Error: distributed training is only available with cuda device", file=sys.stderr)
            sys.exit(1)
        th.cuda.set_device(args.rank % th.cuda.device_count())
        distributed.init_process_group(backend="nccl",
                                       init_method="tcp://" + args.master,
                                       rank=args.rank,
                                       world_size=args.world_size)

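    # Keep a rolling checkpoint; saving to a .tmp file and renaming avoids corrupting it if training crashes mid-write.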
    checkpoint = args.checkpoints / f"{name}.th"
    checkpoint_tmp = args.checkpoints / f"{name}.th.tmp"
    if args.restart and checkpoint.exists():
        checkpoint.unlink()

    if args.tasnet:
        model = ConvTasNet(audio_channels=args.audio_channels, X=args.X).to(device)
    else:
        model = Demucs(
            audio_channels=args.audio_channels,
            channels=args.channels,
            context=args.context,
            depth=args.depth,
            glu=args.glu,
            growth=args.growth,
            kernel_size=args.kernel_size,
            lstm_layers=args.lstm_layers,
            rescale=args.rescale,
            rewrite=args.rewrite,
            sources=4,
            stride=args.conv_stride,
            upsample=args.upsample,
        ).to(device)
    if args.show:
        print(model)
        size = sizeof_fmt(4 * sum(p.numel() for p in model.parameters()))
        print(f"Model size {size}")
        return

    optimizer = th.optim.Adam(model.parameters(), lr=args.lr)

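    # Resume model and optimizer state from the checkpoint when one exists.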
    try:
        saved = th.load(checkpoint, map_location='cpu')
    except IOError:
        saved = SavedState()
    else:
        model.load_state_dict(saved.last_state)
        optimizer.load_state_dict(saved.optimizer)

    if args.save_model:
        if args.rank == 0:
            model.to("cpu")
            model.load_state_dict(saved.best_state)
            save_model(model, args.models / f"{name}.th")
        return

    if args.rank == 0:
        done = args.logs / f"{name}.done"
        if done.exists():
            done.unlink()

    if args.augment:
        augment = nn.Sequential(FlipSign(), FlipChannels(), Shift(args.data_stride),
                                Remix(group_size=args.remix_group_size)).to(device)
    else:
        augment = Shift(args.data_stride)

    if args.mse:
        criterion = nn.MSELoss()
    else:
        criterion = nn.L1Loss()

    # Set the number of samples so that all convolution windows are full.
    # This prevents a hard-to-debug mistake where the prediction is shifted
    # relative to the input mixture.
    samples = model.valid_length(args.samples)
    print(f"Number of training samples adjusted to {samples}")

    if args.raw:
        train_set = Rawset(args.raw / "train",
                           samples=samples + args.data_stride,
                           channels=args.audio_channels,
                           streams=[0, 1, 2, 3, 4],
                           stride=args.data_stride)

        valid_set = Rawset(args.raw / "valid", channels=args.audio_channels)
    else:
        if not args.metadata.is_file() and args.rank == 0:
            build_musdb_metadata(args.metadata, args.musdb, args.workers)
        if args.world_size > 1:
            distributed.barrier()
        metadata = json.load(open(args.metadata))
        duration = Fraction(samples + args.data_stride, args.samplerate)
        stride = Fraction(args.data_stride, args.samplerate)
        train_set = StemsSet(get_musdb_tracks(args.musdb, subsets=["train"], split="train"),
                             metadata,
                             duration=duration,
                             stride=stride,
                             samplerate=args.samplerate,
                             channels=args.audio_channels)
        valid_set = StemsSet(get_musdb_tracks(args.musdb, subsets=["train"], split="valid"),
                             metadata,
                             samplerate=args.samplerate,
                             channels=args.audio_channels)

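    # When resuming, replay metrics from earlier epochs to recover the best validation loss so far.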
    best_loss = float("inf")
    for epoch, metrics in enumerate(saved.metrics):
        print(f"Epoch {epoch:03d}: "
              f"train={metrics['train']:.8f} "
              f"valid={metrics['valid']:.8f} "
              f"best={metrics['best']:.4f} "
              f"duration={human_seconds(metrics['duration'])}")
        best_loss = metrics['best']

    if args.world_size > 1:
        dmodel = DistributedDataParallel(model,
                                         device_ids=[th.cuda.current_device()],
                                         output_device=th.cuda.current_device())
    else:
        dmodel = model

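    # One epoch: train, validate, remember the best model state, dump metrics, and checkpoint.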
    for epoch in range(len(saved.metrics), args.epochs):
        begin = time.time()
        model.train()
        train_loss = train_model(epoch,
                                 train_set,
                                 dmodel,
                                 criterion,
                                 optimizer,
                                 augment,
                                 batch_size=args.batch_size,
                                 device=device,
                                 repeat=args.repeat,
                                 seed=args.seed,
                                 workers=args.workers,
                                 world_size=args.world_size)
        model.eval()
        valid_loss = validate_model(epoch,
                                    valid_set,
                                    model,
                                    criterion,
                                    device=device,
                                    rank=args.rank,
                                    split=args.split_valid,
                                    world_size=args.world_size)

        duration = time.time() - begin
        if valid_loss < best_loss:
            best_loss = valid_loss
            saved.best_state = {
                key: value.to("cpu").clone()
                for key, value in model.state_dict().items()
            }
        saved.metrics.append({
            "train": train_loss,
            "valid": valid_loss,
            "best": best_loss,
            "duration": duration
        })
        if args.rank == 0:
            json.dump(saved.metrics, open(metrics_path, "w"))

        saved.last_state = model.state_dict()
        saved.optimizer = optimizer.state_dict()
        if args.rank == 0:
            th.save(saved, checkpoint_tmp)
            checkpoint_tmp.rename(checkpoint)

        print(f"Epoch {epoch:03d}: "
              f"train={train_loss:.8f} valid={valid_loss:.8f} best={best_loss:.4f} "
              f"duration={human_seconds(duration)}")

    del dmodel
    model.load_state_dict(saved.best_state)
    if args.eval_cpu:
        device = "cpu"
        model.to(device)
    model.eval()
    evaluate(model,
             args.musdb,
             eval_folder,
             rank=args.rank,
             world_size=args.world_size,
             device=device,
             save=args.save,
             split=args.split_valid,
             shifts=args.shifts,
             workers=args.eval_workers)
    model.to("cpu")
    save_model(model, args.models / f"{name}.th")
    if args.rank == 0:
        print("done")
        done.write_text("done")
Example #3
            print 'Creating data loaders...'
            if 'reg' in options['type']:
                train_loader, val_loader = dataLoader.regularized_weighted_loaders(options)
            else:
                train_loader, val_loader = dataLoader.weighted_loaders(options)
            print 'Created data loaders'

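            # A fresh SGD optimizer (Nesterov momentum, weight decay) is created for each run.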
            optimizer = torch.optim.SGD(model.parameters(), options['learning_rate'], nesterov=True,
                    momentum=0.9, dampening=0, weight_decay=0.0001)

            print 'Start training for run: ', run
            for epoch in range(options['start_epoch'], options['epochs']):
                # Validate
                if options['val_on']:
                    avg_prec, avg_err = validate_model(val_loader, model, criterion_cls, options)
                    is_best = avg_prec > best_avg_prec
                    if is_best:
                        print 'Best model till now: ', epoch
                        best_avg_prec = max(avg_prec, best_avg_prec)
                        best_avg_err = min(avg_err, best_avg_err)
                        print 'Saving checkpoint after ', epoch, ' epochs...'
                        save_dir = os.path.join(options['save_dir'], 'run_{}'.format(run))
                        if not os.path.exists(save_dir):
                            os.makedirs(save_dir)
                        save_checkpoint({'epoch': epoch+1,
                                         'base_arch': options['base_arch'],
                                         'state_dict': model.state_dict(),
                                         'best_avg_prec': best_avg_prec},
                                        filename = os.path.join(save_dir,
                                            'checkpoint_{}_epoch_{}.pth.tar'.format(options['type'],epoch)),
Example #4
if use_gpu:
    print("CUDA is available, hooray!")

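# Dispatch on the CLI flags: train, run standalone validation, or predict on the test set.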
if args.train:
    train_model(train_iter,
                val_iter,
                val_iter_bs1,
                encoder,
                attn_decoder,
                optimizer,
                criterion,
                DE,
                EN,
                max_norm=1.0,
                num_epochs=args.num_epochs,
                logger=logger,
                beam_width=args.beam_width)
elif args.val:
    validate_model(val_iter,
                   val_iter_bs1,
                   encoder,
                   attn_decoder,
                   criterion,
                   DE,
                   EN,
                   logger=None,
                   beam_width=args.beam_width,
                   compute_bleu=True)
else:
    predict("source_test.txt", "predictions.txt", encoder, attn_decoder, DE,
            EN)
Example #5
from train import train
from train import validate_model
from nltk.tokenize import word_tokenize
import csv

# `preprocess` and `get_data` used below are assumed to be local helpers of this
# project; they are not defined in this snippet.

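# Preprocess the raw dataset into a simple <label>,<text> CSV before training.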
with open('dataset.csv') as f:
    reader = csv.reader(f, delimiter=',')
    #Skip first row
    next(reader)
    with open('processed.csv', 'w', encoding='UTF-8') as new_file:
        for row in reader:
            for messages in row[1:-3]:
                # Drop any non-ASCII characters from the message
                encoded_string = messages.encode("ascii", "ignore")
                messages = encoded_string.decode()
                # Call word tokenize for each row of messages
                text = preprocess(word_tokenize(messages))
            for classification in row[:1]:
                classify = classification
            # Write each classification and its processed text to the new file
            new_file.write(classify + "," + text + "\n")

X_train, X_test, y_train, y_test = get_data()
model = train(X_train, y_train)
validate_model(model, X_test, y_test)