Example #1
def test(model_path,
         submit_csv=hparams.submit_csv,
         submit_file=hparams.submit_file,
         best_thresh=None):

    test_dataset = AudioData(data_csv=submit_csv, data_file=submit_file, ds_type='submit',
                        transform=transforms.Compose([
                            transforms.ToTensor(),
                        ]))

    test_loader = DataLoader(test_dataset, batch_size=hparams.batch_size,
                            shuffle=False, num_workers=2)


    discriminator = Discriminator().to(hparams.gpu_device)
    if hparams.cuda:
        discriminator = nn.DataParallel(discriminator, device_ids=hparams.device_ids)
    checkpoint = torch.load(model_path, map_location=hparams.gpu_device)
    discriminator.load_state_dict(checkpoint['discriminator_state_dict'])

    discriminator = discriminator.eval()
    # print('Model loaded')

    Tensor = torch.cuda.FloatTensor if hparams.cuda else torch.FloatTensor

    print('Testing model on {0} examples. '.format(len(test_dataset)))

    with torch.no_grad():
        pred_logits_list = []
        labels_list = []
        img_names_list = []
        # for _ in range(hparams.repeat_infer):
        for (inp, labels, img_names) in tqdm(test_loader):
            inp = Variable(inp.float(), requires_grad=False)
            labels = Variable(labels.long(), requires_grad=False)

            inp = inp.to(hparams.gpu_device)
            labels = labels.to(hparams.gpu_device)

            # Reshape the spectrogram to (N, 1, 640, 64) and replicate the
            # single channel three times for an RGB-style backbone.
            inp = inp.view(-1, 1, 640, 64)
            inp = torch.cat([inp] * 3, dim=1)

            pred_logits = discriminator(inp)

            pred_logits_list.append(pred_logits)
            labels_list.append(labels)
            img_names_list.append(img_names)

        pred_logits = torch.cat(pred_logits_list, dim=0)
        labels = torch.cat(labels_list, dim=0)

        pred_labels = pred_logits.max(1)[1]

        # The snippet is truncated here in the source; a plausible completion
        # that writes the predicted labels to a submission CSV (the file name
        # and column names are assumptions):
        all_names = [name for batch in img_names_list for name in batch]
        with open(hparams.result_dir + 'submission.csv', 'w') as f:
            f.write('fname,label\n')
            for name, label in zip(all_names, pred_labels.tolist()):
                f.write('{},{}\n'.format(name, hparams.id_to_class[label]))
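
A minimal usage sketch, assuming a checkpoint produced by the training loop in Example #5 (which saves to hparams.model + '.best'):

test(hparams.model + '.best')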
Example #2
"""
This script should be used to coordinate the training of a PyTorch model. 
"""

# This import must be maintained in order for the script to work on Paperspace
from os import path

# Imports implied by the code below (the module for get_config_from_json is an
# assumption; the others match Example #7):
from torch.utils.data import DataLoader
from data import AudioData
from model import BidirectionalLSTM
from utils import get_config_from_json

# Any parameters that may change from run-to-run
RUN_CONFIG_FILE = "config_1.json"

# Run Configs
model_configs, _ = get_config_from_json(path.join('./configs',
                                                  RUN_CONFIG_FILE))

# Training Data
train_data = AudioData(configs=model_configs)
train_loader = DataLoader(dataset=train_data,
                          batch_size=model_configs.batch_size,
                          shuffle=True,
                          num_workers=4)

# Test Data
test_data = AudioData(configs=model_configs, training_data=False)
test_loader = DataLoader(dataset=test_data,
                         batch_size=model_configs.batch_size,
                         shuffle=False,
                         num_workers=4)

# Model
audio_model = BidirectionalLSTM(model_configs=model_configs)
audio_model.cuda()
Example #3
if args.cuda:
    model = DataParallel(model)
    print("move model to gpu")
    model.cuda()

print('model: ', model)
print('input size: ', model.input_size)
print('output size: ', model.output_size)
print('parameter count: ', str(sum(p.numel() for p in model.parameters())))

writer = SummaryWriter(args.log_dir)

### DATASET
train_audio_data = AudioData(os.path.join(args.preprocessed_dataset_dir,
                                          "audio_train.hdf5"),
                             sr=args.sr,
                             channels=1)
if train_audio_data.is_empty():
    train_audio_data.add(
        glob.glob(os.path.join(args.dataset_dir, "train", "*.*")))
test_audio_data = AudioData(os.path.join(args.preprocessed_dataset_dir,
                                         "audio_test.hdf5"),
                            sr=args.sr,
                            channels=1)
if test_audio_data.is_empty():
    test_audio_data.add(
        glob.glob(os.path.join(args.dataset_dir, "test", "*.*")))

transform = lambda x: audio_to_onehot(x, model.output_size, NUM_CLASSES)
# The call is truncated in the source; the remaining arguments are assumptions
# modelled on the analogous AudioDataset call in Example #8:
train_data = AudioDataset(train_audio_data,
                          input_size=model.output_size,
                          audio_transform=transform)
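
audio_to_onehot is not defined in these snippets; a minimal sketch of what such a helper typically does, assuming mu-law quantization of [-1, 1] audio followed by one-hot encoding (the real implementation may differ):

import math
import torch

def audio_to_onehot(audio, length, num_classes):
    # Assumed behaviour: mu-law compand to num_classes discrete levels,
    # then one-hot encode the last `length` samples.
    mu = num_classes - 1
    companded = torch.sign(audio) * torch.log1p(mu * audio.abs()) / math.log1p(mu)
    quantized = ((companded + 1) / 2 * mu + 0.5).long().clamp(0, mu)
    onehot = torch.zeros(num_classes, length)
    onehot[quantized[-length:], torch.arange(length)] = 1.0
    return onehot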
Example #4
def test(model_path,
         data=(hparams.valid_csv, hparams.dev_file),
         plot_auc='valid',
         plot_path=hparams.result_dir + 'valid',
         best_thresh=None):

    test_dataset = AudioData(data_csv=data[0],
                             data_file=data[1],
                             ds_type='valid',
                             augment=True,
                             transform=transforms.Compose([
                                 transforms.ToTensor(),
                             ]))

    test_loader = DataLoader(test_dataset,
                             batch_size=hparams.batch_size,
                             shuffle=True,
                             num_workers=2)

    discriminator = Discriminator().to(hparams.gpu_device)
    if hparams.cuda:
        discriminator = nn.DataParallel(discriminator,
                                        device_ids=hparams.device_ids)
    checkpoint = torch.load(model_path, map_location=hparams.gpu_device)
    discriminator.load_state_dict(checkpoint['discriminator_state_dict'])

    discriminator = discriminator.eval()
    # print('Model loaded')

    Tensor = torch.cuda.FloatTensor if hparams.cuda else torch.FloatTensor

    print('Testing model on {0} examples. '.format(len(test_dataset)))

    with torch.no_grad():
        pred_logits_list = []
        labels_list = []
        img_names_list = []
        # for _ in range(hparams.repeat_infer):
        for (inp, labels, img_names) in tqdm(test_loader):
            inp = Variable(inp.float(), requires_grad=False)
            labels = Variable(labels.long(), requires_grad=False)

            inp = inp.to(hparams.gpu_device)
            labels = labels.to(hparams.gpu_device)

            if hparams.dim3:
                inp = inp.view(-1, 1, 640, 64)
                inp = torch.cat([inp] * 3, dim=1)

            pred_logits = discriminator(inp)

            pred_logits_list.append(pred_logits)
            labels_list.append(labels)
            img_names_list.append(img_names)

        pred_logits = torch.cat(pred_logits_list, dim=0)
        labels = torch.cat(labels_list, dim=0)

        auc, f1, acc, conf_mat = accuracy_metrics(labels,
                                                  pred_logits,
                                                  plot_auc=plot_auc,
                                                  plot_path=plot_path,
                                                  best_thresh=best_thresh)

        fig = plot_cf(conf_mat)
        plt.savefig(hparams.result_dir + 'test_conf_mat.png')
        res = ' -- avg_acc - {0:.4f}'.format(acc['avg'])
        for it in range(10):
            res += ', acc_{}'.format(
                hparams.id_to_class[it]) + ' - {0:.4f}'.format(acc[it])
        print('== Test on -- ' + model_path + res)
        # print('== Test on -- '+model_path+' == \n\
        #     auc_{0} - {10:.4f}, auc_{1} - {11:.4f}, auc_{2} - {12:.4f}, auc_{3} - {13:.4f}, auc_{4} - {14:.4f}, auc_{5} - {15:.4f}, auc_{6} - {16:.4f}, auc_{7} - {17:.4f}, auc_{8} - {18:.4f}, auc_{9} - {19:.4f}, auc_micro - {20:.4f}, auc_macro - {21:.4f},\n\
        #     acc_{0} - {22:.4f}, acc_{1} - {23:.4f}, acc_{2} - {24:.4f}, acc_{3} - {25:.4f}, acc_{4} - {26:.4f}, acc_{5} - {27:.4f}, acc_{6} - {28:.4f}, acc_{7} - {29:.4f}, acc_{8} - {30:.4f}, acc_{9} - {31:.4f}, acc_avg - {32:.4f},\n\
        #     f1_{0} - {33:.4f}, f1_{1} - {34:.4f}, f1_{2} - {35:.4f}, f1_{3} - {36:.4f}, f1_{4} - {37:.4f}, f1_{5} - {38:.4f}, f1_{6} - {39:.4f}, f1_{7} - {40:.4f}, f1_{8} - {41:.4f}, f1_{9} - {42:.4f}, f1_micro - {42:.4f}, f1_macro - {43:.4f}, =='.\
        #     format([hparams.id_to_class[it] for it in range(10)]+[auc[it] for it in range(10)]+[auc['micro'], auc['macro']]+[acc[it] for it in range(10)]+[acc['avg']]+[f1[it] for it in range(10)]+[f1['micro'], f1['macro']]))
    return acc['avg']
Example #5
def train(resume=False):

    writer = SummaryWriter('../runs/' + hparams.exp_name)

    for k, v in hparams.__dict__.items():
        writer.add_text(str(k), str(v))

    train_dataset = AudioData(
        data_csv=hparams.train_csv,
        data_file=hparams.dev_file,
        ds_type='train',  # augment=False,
        transform=transforms.Compose([
            transforms.ToTensor(),
        ]))

    validation_dataset = AudioData(data_csv=hparams.valid_csv,
                                   data_file=hparams.dev_file,
                                   ds_type='valid',
                                   augment=False,
                                   transform=transforms.Compose([
                                       transforms.ToTensor(),
                                   ]))

    # train_sampler = WeightedRandomSampler()

    train_loader = DataLoader(train_dataset,
                              batch_size=hparams.batch_size,
                              shuffle=True,
                              num_workers=2)

    validation_loader = DataLoader(validation_dataset,
                                   batch_size=hparams.batch_size,
                                   shuffle=True,
                                   num_workers=2)

    print('loaded train data of length : {}'.format(len(train_dataset)))

    adversarial_loss = torch.nn.CrossEntropyLoss().to(hparams.gpu_device)
    discriminator = Discriminator().to(hparams.gpu_device)

    if hparams.cuda:
        discriminator = nn.DataParallel(discriminator,
                                        device_ids=hparams.device_ids)

    params_count = 0
    for param in discriminator.parameters():
        params_count += np.prod(param.size())
    print('Model has {0} trainable parameters'.format(params_count))

    if not hparams.pretrained:
        discriminator.apply(weights_init_normal)

    optimizer_D = torch.optim.Adam(discriminator.parameters(),
                                   lr=hparams.learning_rate)

    scheduler_D = ReduceLROnPlateau(optimizer_D,
                                    mode='min',
                                    factor=0.3,
                                    patience=4,
                                    verbose=True,
                                    cooldown=0)

    Tensor = torch.cuda.FloatTensor if hparams.cuda else torch.FloatTensor

    def validation(discriminator, send_stats=False, epoch=0):
        print('Validating model on {0} examples. '.format(
            len(validation_dataset)))
        discriminator_ = discriminator.eval()

        with torch.no_grad():
            pred_logits_list = []
            labels_list = []

            for (inp, labels, imgs_names) in tqdm(validation_loader):
                inp = Variable(inp.float(), requires_grad=False)
                labels = Variable(labels.long(), requires_grad=False)

                if hparams.dim3:
                    inp = inp.view(-1, 1, 640, 64)
                    inp = torch.cat([inp] * 3, dim=1)

                inp = inp.to(hparams.gpu_device)
                labels = labels.to(hparams.gpu_device)

                pred_logits = discriminator_(inp)

                pred_logits_list.append(pred_logits)
                labels_list.append(labels)

            pred_logits = torch.cat(pred_logits_list, dim=0)
            labels = torch.cat(labels_list, dim=0)

            val_loss = adversarial_loss(pred_logits, labels)

        return accuracy_metrics(
            labels.long(), pred_logits
        ), val_loss  #, plot_auc='train_val_'+str(epoch+1), plot_path=hparams.result_dir+'train_val_{}_'.format(epoch)), val_loss

    print('Starting training.. (log saved in:{})'.format(hparams.exp_name))
    start_time = time.time()
    best_valid_acc = 0

    # print(model)
    for epoch in range(hparams.num_epochs):
        train_logits = []
        train_labels = []
        for batch, (inp, labels, imgs_name) in enumerate(tqdm(train_loader)):

            inp = Variable(inp.float(), requires_grad=False)
            labels = Variable(labels.long(), requires_grad=False)

            inp = inp.to(hparams.gpu_device)
            labels = labels.to(hparams.gpu_device)

            if hparams.dim3:
                inp = inp.view(-1, 1, 640, 64)
                inp = torch.cat([inp] * 3, dim=1)

            # ---------------------
            #  Train Discriminator
            # ---------------------
            optimizer_D.zero_grad()

            pred_logits = discriminator(inp)
            train_logits.append(pred_logits)
            train_labels.append(labels)

            d_loss = adversarial_loss(pred_logits, labels)

            d_loss.backward()
            optimizer_D.step()

            writer.add_scalar('d_loss',
                              d_loss.item(),
                              global_step=batch + epoch * len(train_loader))

            # if batch % hparams.print_interval == 0:
            #     pred_labels = (pred_logits >= hparams.thresh)
            #     pred_labels = pred_labels.float()
            #     auc, f1, acc, _, _ = accuracy_metrics(pred_labels, labels.long(), pred_logits)
            #     print('[Epoch - {0:.1f}, batch - {1:.3f}, d_loss - {2:.6f}, acc - {3:.4f}, f1 - {4:.5f}, auc - {5:.4f}]'.\
            #     format(1.0*epoch, 100.0*batch/len(train_loader), d_loss.item(), acc['avg'], f1[hparams.avg_mode], auc[hparams.avg_mode]))

        (val_auc, val_f1, val_acc,
         val_conf_mat), val_loss = validation(discriminator, epoch=epoch)

        train_logits = torch.cat(train_logits, dim=0)
        train_labels = torch.cat(train_labels, dim=0)

        train_auc, train_f1, train_acc, train_conf_mat = accuracy_metrics(
            train_labels.long(), train_logits)

        fig = plot_cf(val_conf_mat)
        writer.add_figure('val_conf', fig, global_step=epoch)
        plt.close(fig)
        for lbl in range(hparams.num_classes):
            writer.add_scalar('val_f1_{}'.format(hparams.id_to_class[lbl]),
                              val_f1[lbl],
                              global_step=epoch)
            writer.add_scalar('val_auc_{}'.format(hparams.id_to_class[lbl]),
                              val_auc[lbl],
                              global_step=epoch)
            writer.add_scalar('val_acc_{}'.format(hparams.id_to_class[lbl]),
                              val_acc[lbl],
                              global_step=epoch)
        writer.add_scalar('val_f1_micro', val_f1['micro'], global_step=epoch)
        writer.add_scalar('val_auc_micro', val_auc['micro'], global_step=epoch)
        writer.add_scalar('val_f1_macro', val_f1['macro'], global_step=epoch)
        writer.add_scalar('val_auc_macro', val_auc['macro'], global_step=epoch)
        writer.add_scalar('val_loss', val_loss, global_step=epoch)
        writer.add_scalar('val_f1',
                          val_f1[hparams.avg_mode],
                          global_step=epoch)
        writer.add_scalar('val_auc',
                          val_auc[hparams.avg_mode],
                          global_step=epoch)
        writer.add_scalar('val_acc', val_acc['avg'], global_step=epoch)
        scheduler_D.step(val_loss)
        writer.add_scalar('learning_rate',
                          optimizer_D.param_groups[0]['lr'],
                          global_step=epoch)

        # torch.save({
        #     'epoch': epoch,
        #     'discriminator_state_dict': discriminator.state_dict(),
        #     'optimizer_D_state_dict': optimizer_D.state_dict(),
        #     }, hparams.model+'.'+str(epoch))
        if best_valid_acc <= val_acc['avg']:
            best_valid_acc = val_acc['avg']
            fig = plot_cf(val_conf_mat)
            writer.add_figure('best_val_conf', fig, global_step=epoch)
            plt.close(fig)
            torch.save(
                {
                    'epoch': epoch,
                    'discriminator_state_dict': discriminator.state_dict(),
                    'optimizer_D_state_dict': optimizer_D.state_dict(),
                }, hparams.model + '.best')
            print('best model on validation set saved.')

        print('[Epoch - {0:.1f} ---> train_acc - {1:.4f}, current_lr - {2:.6f}, val_loss - {3:.4f}, best_val_acc - {4:.4f}, val_acc - {5:.4f}, val_f1 - {6:.4f}] - time - {7:.1f}'\
            .format(1.0*epoch, train_acc['avg'], optimizer_D.param_groups[0]['lr'], val_loss, best_valid_acc, val_acc['avg'], val_f1[hparams.avg_mode], time.time()-start_time))
        start_time = time.time()
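
The resume flag of train() is accepted but never used; a sketch of how resumption could look, given the checkpoint format this script saves (an assumption, not part of the original):

if resume:
    checkpoint = torch.load(hparams.model + '.best',
                            map_location=hparams.gpu_device)
    discriminator.load_state_dict(checkpoint['discriminator_state_dict'])
    optimizer_D.load_state_dict(checkpoint['optimizer_D_state_dict'])
    start_epoch = checkpoint['epoch'] + 1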
Example #6
	parser.add_argument('--train-teacher', action='store_true', help='Train teacher')
	parser.add_argument('--train-student', action='store_true', help='Train student')

	parser.add_argument('--test-teacher-fast', action='store_true', help='Test teacher (fast generation)')
	parser.add_argument('--test-teacher-slow', action='store_true', help='Test teacher (slow generation)')
	parser.add_argument('--test-student', action='store_true', help='Test student')

	args = parser.parse_args()

	batch_size = 1
	num_steps = 200000
	print_steps = 100

	last_checkpoint_time = time.time()

	audio_data = AudioData()
	num_samples = audio_data.num_samples
	num_classes = audio_data.classes

	quantization_channels = 256

	# The dataset-derived values above are overridden with fixed ones here:
	num_samples = 5120
	num_classes = 10
	quantization_channels = 256


	dilations = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512,
	             1, 2, 4, 8, 16, 32, 64, 128, 256, 512,
	             1, 2, 4, 8, 16, 32, 64, 128, 256, 512]

    # input_size, condition_size, output_size, dilations, filter_width=2, encoder_channels=128, dilation_channels=32, skip_channels=256, 
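
With filter_width=2, each dilated layer widens the receptive field by its dilation, so a stack's receptive field is the sum of its dilations plus one; the three 1-512 stacks above therefore cover 3 * 1023 + 1 = 3070 samples. A quick check:

dilations = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512] * 3
receptive_field = sum(dilations) + 1  # filter_width == 2
print(receptive_field)  # 3070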
Example #7
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader
from model import BidirectionalLSTM
from trainer import AudioTrainer
from logger import AudioLogger
from torch.optim import Adam
from data import AudioData
from os.path import join

# Assumed module for the config helper used below:
from utils import get_config_from_json

# Any parameters that may change from run-to-run
RUN_CONFIG_FILE = "config_1.json"

# Run Configs
model_configs, _ = get_config_from_json(join('./configs', RUN_CONFIG_FILE))

# Data
audio_data = AudioData(configs=model_configs)
train_loader = DataLoader(dataset=audio_data,
                          batch_size=model_configs.batch_size,
                          shuffle=True,
                          num_workers=4)

# Model
audio_model = BidirectionalLSTM(model_configs=model_configs)
audio_model.cuda()

# Training Params
loss_fn = CrossEntropyLoss()
optimizer = Adam(audio_model.parameters(), lr=model_configs.learning_rate)

# Train Model
# The call is truncated in the source; the remaining arguments are assumptions
# based on the objects constructed above:
trainer = AudioTrainer(model_configs,
                       model=audio_model,
                       data_loader=train_loader,
                       loss_fn=loss_fn,
                       optimizer=optimizer,
                       logger=AudioLogger(model_configs))
Example #8
else:
    raise NotImplementedError("Could not find model " + str(args.model))

if args.cuda:
    model = DataParallel(model)
    print("move model to gpu")
    model.cuda()
load_latest_model_from(model, None, args.snapshot_dir)

print('model: ', model)
print('input length: ', model.input_size)
print('output length: ', model.output_size)
print('parameter count: ', str(sum(p.numel() for p in model.parameters())))

if args.conditional:
    test_audio_data = AudioData(os.path.join(args.preprocessed_dataset_dir,
                                             "audio_test.hdf5"),
                                sr=args.sr, channels=1)
    transform = lambda x: audio_to_onehot(x, model.output_size, NUM_CLASSES)
    data = AudioDataset(test_audio_data, input_size=1,
                        context_front=max(int(args.conditional_duration * args.sr),
                                          model.input_size),
                        hop_size=20 * args.sr, random_hops=True,
                        audio_transform=transform)

    print('the dataset has ' + str(len(data)) + ' items')

for i in range(args.num_batches):
    if args.conditional:
        idx = np.random.choice(len(data), args.batch_size)
        # Index with a separate variable so the outer loop counter is not shadowed.
        start_data = [torch.max(data[j][0], 0)[1] for j in idx]
        start_data = torch.stack(start_data)
    else:
        start_data = None
Example #9
                        help='Train a classifier wavenet')
    parser.add_argument('--siamese',
                        action='store_true',
                        help='Train a siamese wavenet')
    parser.add_argument('--test', action='store_true', help='Test mode')
    args = parser.parse_args()

    batch_size = 1
    num_steps = 100000
    print_steps = 100

    last_checkpoint_time = time.time()

    if args.classifier:

        audio_data = AudioData()
        num_samples = audio_data.num_samples
        num_classes = audio_data.classes

        dilations = [
            1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1, 2, 4, 8, 16, 32, 64, 128,
            256, 512
        ]

        network = WaveNet(num_samples,
                          num_classes,
                          dilations,
                          dilation_channels=32,
                          skip_channels=128,
                          output_channels=num_classes,
                          learning_rate=0.001)