Example #1
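These snippets are excerpted mid-function, so imports are not shown. The standard PyTorch imports they rely on would be the following; Trainer and the model constructors (cpic, polarity, focal_mechanism, Cpic40) come from the surrounding project.

import torch
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, random_split, Subset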
    train_length = int(len(dset) * 0.8)
    val_length = len(dset) - train_length
    train_set, val_set = random_split(dset, [train_length, val_length])

    # Prepare dataloaders
    train_loader = DataLoader(train_set,
                              batch_size=5000,
                              shuffle=True,
                              num_workers=4)
    val_loader = DataLoader(val_set,
                            batch_size=10000,
                            shuffle=False,
                            num_workers=4)

    # Prepare trainer
    #trainer = Trainer(cpic(), CrossEntropyLoss(), lr=0.1)
    trainer = Trainer(polarity(), CrossEntropyLoss(), lr=0.01)

    # Train model over training dataset
    trainer.train(train_loader, val_loader, epochs=50, print_freq=100)
    # To resume from a checkpoint, pass resume='checkpoint_best.pth.tar' to train().

    # Save training results to disk
    trainer.results(path='scsn_polarity_results.pth.tar')

    # Validate saved model
    results = torch.load('scsn_polarity_results.pth.tar')
    #model = cpic()
    model = polarity()
    model.load_state_dict(results['model'])
    trainer = Trainer(model, CrossEntropyLoss(), lr=0.1)
    trainer.validate(val_loader, print_freq=100)
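For reference, a minimal inference sketch with the saved polarity model. It assumes polarity() returns a torch.nn.Module (as in the snippet above) and uses a placeholder input shape; the real window length depends on the dataset.

import torch
from torch.nn import functional as F

results = torch.load('scsn_polarity_results.pth.tar')
model = polarity()                       # model constructor from the project
model.load_state_dict(results['model'])
model.eval()

with torch.no_grad():
    batch = torch.randn(8, 1, 600)       # placeholder batch; shape is hypothetical
    probs = F.softmax(model(batch), dim=1)
    preds = probs.argmax(dim=1)          # predicted polarity class per trace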
Example #2

    train_length = int(len(dset) * 0.8)
    val_length = len(dset) - train_length
    train_set, val_set = random_split(dset, [train_length, val_length])

    # Prepare dataloaders
    train_loader = DataLoader(train_set,
                              batch_size=20,
                              shuffle=True,
                              num_workers=4)
    val_loader = DataLoader(val_set,
                            batch_size=20,
                            shuffle=False,
                            num_workers=4)

    # Prepare trainer
    #trainer = Trainer(cpic(), CrossEntropyLoss(), lr=0.1)
    #trainer = Trainer(polarity(), CrossEntropyLoss(), lr=0.1)
    trainer = Trainer(focal_mechanism(), CrossEntropyLoss(), lr=0.0001)

    # Train model over training dataset
    trainer.train(train_loader, val_loader, epochs=100, print_freq=10)
    # To resume from a checkpoint, pass resume='checkpoint_best.pth.tar' to train().

    # Save training results to disk
    trainer.results(path='taiwan_focal_mechanism_results.pth.tar')

    # Validate saved model
    results = torch.load('taiwan_focal_mechanism_results.pth.tar')
    #model = cpic()
    #model = polarity()
    model = focal_mechanism()
    model.load_state_dict(results['model'])
    trainer = Trainer(model, CrossEntropyLoss(), lr=0.1)
    trainer.validate(val_loader, print_freq=10)
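The Trainer used throughout these examples comes from the surrounding project and its implementation is not shown. Below is a simplified sketch of the interface the snippets assume (train, validate, results, and an optional resume keyword), not the project's actual code.

import torch

class Trainer:
    def __init__(self, model, criterion, lr=0.1):
        self.model = model
        self.criterion = criterion
        self.optimizer = torch.optim.SGD(model.parameters(), lr=lr)

    def train(self, train_loader, val_loader, epochs=1, print_freq=100, resume=None):
        if resume:  # optionally restore a checkpoint saved by an earlier run
            state = torch.load(resume)
            self.model.load_state_dict(state['model'])
        for epoch in range(epochs):
            self.model.train()
            for i, (x, y) in enumerate(train_loader):
                loss = self.criterion(self.model(x), y)
                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()
                if i % print_freq == 0:
                    print(f'epoch {epoch} batch {i} loss {loss.item():.4f}')
            self.validate(val_loader, print_freq)

    def validate(self, val_loader, print_freq=100):
        self.model.eval()
        correct = total = 0
        with torch.no_grad():
            for x, y in val_loader:
                preds = self.model(x).argmax(dim=1)
                correct += (preds == y).sum().item()
                total += y.numel()
        print(f'validation accuracy: {correct / total:.4f}')

    def results(self, path):
        # persist weights in the {'model': state_dict} format the examples load
        torch.save({'model': self.model.state_dict()}, path)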
Example #3
    # (Snippet begins mid-way through the dataset construction; the dataset
    # class and the waveform_transform / target_transform callables are
    # defined upstream.)
                   sample_transform=waveform_transform,
                   target_transform=target_transform)
    train_length = int(len(dset) * 0.8)
    val_length = len(dset) - train_length
    train_set, val_set = random_split(dset, [train_length, val_length])
    # Uncomment for chronological split
    # train_set = Subset(dset, range(train_length))
    # val_set = Subset(dset, range(train_length, len(dset)))

    # Dataloaders
    train_loader = DataLoader(train_set,
                              batch_size=512,
                              shuffle=True,
                              num_workers=4)
    val_loader = DataLoader(val_set,
                            batch_size=10000,
                            shuffle=False,
                            num_workers=8)

    # CNN model
    model = Cpic40()

    # Trainer
    trainer = Trainer(model, CrossEntropyLoss(), lr=0.1)

    # Training process
    trainer.train(train_loader, val_loader, epochs=200, print_freq=1000)

    # Save training results to disk
    trainer.results(path='ok_results.pth.tar')
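Example #3 passes waveform_transform and target_transform into the dataset without showing them. A minimal sketch of plausible definitions, assuming samples arrive as NumPy arrays of shape (channels, samples) and targets as integer labels; the project's real transforms may differ.

import numpy as np
import torch

def waveform_transform(waveform: np.ndarray) -> torch.Tensor:
    # demean and scale each channel to unit peak amplitude, then convert to tensor
    waveform = waveform - waveform.mean(axis=-1, keepdims=True)
    peak = np.abs(waveform).max(axis=-1, keepdims=True)
    waveform = waveform / np.where(peak == 0, 1.0, peak)
    return torch.from_numpy(waveform).float()

def target_transform(label) -> int:
    # map a raw label to the integer class index expected by CrossEntropyLoss
    return int(label)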
Example #4
    train_length = int(len(dset) * 0.8)
    val_length = len(dset) - train_length
    train_set, val_set = random_split(dset, [train_length, val_length])

    # Prepare dataloaders
    train_loader = DataLoader(train_set,
                              batch_size=100,
                              shuffle=True,
                              num_workers=4)
    val_loader = DataLoader(val_set,
                            batch_size=1000,
                            shuffle=False,
                            num_workers=8)

    # Prepare trainer
    trainer = Trainer(cpic(), CrossEntropyLoss(), lr=0.1)

    # Train model over training dataset
    trainer.train(train_loader, val_loader, epochs=100, print_freq=100)
    # To resume from a checkpoint, pass resume='checkpoint_best.pth.tar' to train().

    # Save training results to disk
    trainer.results(path='wenchuan_results.pth.tar')

    # Validate saved model
    results = torch.load('wenchuan_results.pth.tar')
    model = cpic()
    model.load_state_dict(results['model'])
    trainer = Trainer(model, CrossEntropyLoss(), lr=0.1)
    trainer.validate(val_loader, print_freq=100)
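Several snippets carry a commented-out resume='checkpoint_best.pth.tar' argument. Assuming train() accepts such a keyword to restore a previously saved checkpoint, resuming Example #4's run would look like:

trainer = Trainer(cpic(), CrossEntropyLoss(), lr=0.1)
trainer.train(train_loader, val_loader,
              epochs=100,
              print_freq=100,
              resume='checkpoint_best.pth.tar')  # assumed keyword, per the comments above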