Code example #1
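# Assumed imports for this snippet; `utils`, `L` (losses) and `M` (metrics)
# are project-local modules from the original repository.
import torch.nn.functional as F
import torch.optim as optim
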
def train(net, train_loader):
    ckpt = utils.Checkpoint(iters_per_epoch=40,
                            model_dir="../models/",
                            model_name="psp_full")
    optimizer = optim.SGD(filter(lambda p: p.requires_grad, net.parameters()),
                          lr=1e-2,
                          momentum=0.9)
    for i, data in enumerate(train_loader, 0):
        optimizer.zero_grad()

        images, impulses, instance_masks, cat_ids = utils.cudify_data(data)

        # high resolution and low resolution mask logits
        big, small = net([images, impulses])
        big_targets = instance_masks
        small_targets = (F.interpolate(
            instance_masks, scale_factor=0.25, mode='bilinear') > 0.5).float()
        big_loss = L.soft_iou(big, big_targets)
        small_loss = L.soft_iou(small, small_targets)

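        # combined objective: the high-resolution loss is down-weighted by 0.2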
        loss = (small_loss + 0.2 * big_loss).mean()

        eval_dict = M.evaluate_metrics(big, instance_masks)
        eval_dict["small_loss"] = small_loss
        eval_dict["big_loss"] = big_loss

        ckpt.update(eval_dict, net)

        loss.backward()
        optimizer.step()
Code example #2
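# Assumed imports; `utils` and `M` (metrics) are project-local modules.
import torch
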
def val_stats(net, val_loader):
    running_stats = utils.RunningStats(iters_per_stats=50)
    for i, data in enumerate(val_loader, 0):
        images, impulses, instance_masks, cat_ids = utils.cudify_data(data)
        with torch.no_grad():
            mask_logits = net([images, impulses])

        eval_dict = M.evaluate_metrics(mask_logits, instance_masks)
        running_stats.update(eval_dict)
Code example #3
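# Assumed imports; `workflow`, `architecture`, `datastream`, `metrics` and
# `evaluator` are project-local, and TensorboardLogger is presumably
# pytorch-ignite's contrib handler.
import torch
from ignite.contrib.handlers import TensorboardLogger
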
def evaluate(config):
    device = torch.device('cuda' if config['use_cuda'] else 'cpu')

    model = architecture.Model().to(device)

    train_state = dict(model=model)

    print('Loading model checkpoint')
    workflow.ignite.handlers.ModelCheckpoint.load(
        train_state, 'model/checkpoints', device
    )

    @workflow.ignite.decorators.evaluate(model)
    def evaluate_batch(engine, examples):
        predictions = model.predictions(
            architecture.FeatureBatch.from_examples(examples)
        )
        loss = predictions.loss(examples)
        return dict(
            examples=examples,
            predictions=predictions.cpu().detach(),
            loss=loss,
        )

    evaluate_data_loaders = {
        f'evaluate_{name}': datastream.data_loader(
            batch_size=config['eval_batch_size'],
            num_workers=config['n_workers'],
            collate_fn=tuple,
        )
        for name, datastream in datastream.evaluate_datastreams().items()
    }

    tensorboard_logger = TensorboardLogger(log_dir='tb')

    for description, data_loader in evaluate_data_loaders.items():
        engine = evaluator(
            evaluate_batch, description,
            metrics.evaluate_metrics(),
            tensorboard_logger,
        )
        engine.run(data=data_loader)
Code example #4
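# Assumed imports; `architecture`, `datastream`, `metrics` and `tools` are
# project-local modules, `lantern` is the pytorch-lantern package.
from pathlib import Path

import lantern
import torch
import torch.utils.tensorboard
from tqdm import tqdm
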
def evaluate(config):
    torch.set_grad_enabled(False)
    device = torch.device("cuda" if config["use_cuda"] else "cpu")

    model = architecture.Model().to(device)

    if Path("model").exists():
        print("Loading model checkpoint")
        model.load_state_dict(torch.load("model/model.pt"))

    evaluate_data_loaders = {
        f"evaluate_{name}": (
            datastream.map(architecture.StandardizedImage.from_example).data_loader(
                batch_size=config["eval_batch_size"],
                collate_fn=tools.unzip,
                num_workers=config["n_workers"],
            )
        )
        for name, datastream in datastream.evaluate_datastreams().items()
        if "mini" not in name
    }

    tensorboard_logger = torch.utils.tensorboard.SummaryWriter()
    evaluate_metrics = {
        name: metrics.evaluate_metrics() for name in evaluate_data_loaders
    }

    for name, data_loader in evaluate_data_loaders.items():
        for examples, standardized_images in tqdm(data_loader, desc=name, leave=False):
            with lantern.module_eval(model):
                predictions = model.predictions(standardized_images)
                loss = predictions.loss(examples)

            evaluate_metrics[name]["loss"].update_(loss)
            evaluate_metrics[name]["accuracy"].update_(examples, predictions)

        for metric_name, metric in evaluate_metrics[name].items():
            metric.log(tensorboard_logger, name, metric_name)

        print(lantern.MetricTable(name, evaluate_metrics[name]))

    tensorboard_logger.close()
Code example #5
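# Assumed imports; `metrics` and `constants` are project-local modules.
import time
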
def run_validation(self, epoch, val_data, neg_val_data, w2v):
    """
    Runs validation every configured number of epochs and saves the model
    if it gives a better validation metric.
    """
    if epoch % self.val_after_epoch == 0:
        start = time.time()
        print('Starting with validation')
        res = metrics.evaluate_metrics(self,
                                       'Model2Deeper',
                                       w2v,
                                       val_data,
                                       neg_val_data,
                                       k=500,
                                       metrics=['hits@k'])
        hits_res = res['hits@k']
        if hits_res[0] > self.best_val:
            self.best_val = hits_res[0]
            file_name = constants.RUN_ID + '_model.pth'
            self.save(file_name)
        end = time.time()
        print('validation time:', end - start)
Code example #6
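# Assumed imports; `workflow`, `architecture`, `datastream`, `metrics` and
# the helpers (LearningRateScheduler, starcompose, warmup, cyclical,
# worker_init, set_seeds, log_examples) are project-local.
from functools import partial
from pathlib import Path

import ignite
import torch
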
def train(config):

    set_seeds(config['seed'])

    device = torch.device('cuda' if config['use_cuda'] else 'cpu')

    model = architecture.Model().to(device)
    optimizer = torch.optim.Adam(
        model.parameters(), lr=config['learning_rate']
    )

    train_state = dict(model=model, optimizer=optimizer)

    if Path('model').exists():
        print('Loading model checkpoint')
        workflow.ignite.handlers.ModelCheckpoint.load(
            train_state, 'model/checkpoints', device
        )

        workflow.torch.set_learning_rate(optimizer, config['learning_rate'])

    n_parameters = sum([
        p.shape.numel() for p in model.parameters() if p.requires_grad
    ])
    print(f'n_parameters: {n_parameters:,}')

    def process_batch(examples):
        predictions = model.predictions(
            architecture.FeatureBatch.from_examples(examples)
        )
        loss = predictions.loss(examples)
        return predictions, loss

    @workflow.ignite.decorators.train(model, optimizer)
    def train_batch(engine, examples):
        predictions, loss = process_batch(examples)
        loss.backward()
        return dict(
            examples=examples,
            predictions=predictions.cpu().detach(),
            loss=loss,
        )

    @workflow.ignite.decorators.evaluate(model)
    def evaluate_batch(engine, examples):
        predictions, loss = process_batch(examples)
        return dict(
            examples=examples,
            predictions=predictions.cpu().detach(),
            loss=loss,
        )

    evaluate_data_loaders = {
        f'evaluate_{name}': datastream.data_loader(
            batch_size=config['eval_batch_size'],
            num_workers=config['n_workers'],
            collate_fn=tuple,
        )
        for name, datastream in datastream.evaluate_datastreams().items()
    }

    trainer, evaluators, tensorboard_logger = workflow.ignite.trainer(
        train_batch,
        evaluate_batch,
        evaluate_data_loaders,
        metrics=dict(
            progress=metrics.progress_metrics(),
            train=metrics.train_metrics(),
            **{
                name: metrics.evaluate_metrics()
                for name in evaluate_data_loaders.keys()
            }
        ),
        optimizers=optimizer,
    )

    workflow.ignite.handlers.ModelScore(
        lambda: -evaluators['evaluate_early_stopping'].state.metrics['loss'],
        train_state,
        {
            name: metrics.evaluate_metrics()
            for name in evaluate_data_loaders.keys()
        },
        tensorboard_logger,
        config,
    ).attach(trainer, evaluators)

    tensorboard_logger.attach(
        trainer,
        log_examples('train', trainer),
        ignite.engine.Events.EPOCH_COMPLETED,
    )
    tensorboard_logger.attach(
        evaluators['evaluate_compare'],
        log_examples('evaluate_compare', trainer),
        ignite.engine.Events.EPOCH_COMPLETED,
    )

    if config.get('search_learning_rate', False):
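        # learning-rate sweep: the multiplier grows exponentially, reaching
        # 1 / minimum_learning_rate after n_batches steps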

        def search(config):
            def search_(step, multiplier):
                return (
                    step,
                    (1 / config['minimum_learning_rate'])
                    ** (step / config['n_batches'])
                )
            return search_

        LearningRateScheduler(
            optimizer,
            search(config),
        ).attach(trainer)

    else:
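        # default schedule: warmup for the first 150 steps composed with a
        # cyclical schedule of length 500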
        LearningRateScheduler(
            optimizer,
            starcompose(
                warmup(150),
                cyclical(length=500),
            ),
        ).attach(trainer)

    trainer.run(
        data=(
            datastream.GradientDatastream()
            .data_loader(
                batch_size=config['batch_size'],
                num_workers=config['n_workers'],
                n_batches_per_epoch=config['n_batches_per_epoch'],
                worker_init_fn=partial(worker_init, config['seed'], trainer),
                collate_fn=tuple,
            )
        ),
        max_epochs=config['max_epochs'],
    )
Code example #7
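# Assumed imports; get_config, get_network, the fast_* STFT helpers, the
# drawing utilities, evaluate_metrics and JSON_DUMP_PARAMS are project-local.
# Note that this snippet targets the pre-0.8 librosa API (positional resample
# arguments and librosa.output.write_wav).
import json
import os
from collections import OrderedDict

import cv2
import librosa
import torch
import torch.nn as nn
from tqdm import tqdm
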
def evaluate(args,
             data_list_info,
             save_individual_results=True,
             save_stat=True):
    print('========== Evaluating ==========')
    data_list = data_list_info[0]
    data_info = data_list_info[1]
    data_info['snr'] = args.snr
    # get config
    config = get_config(args)

    # get network
    net = get_network(config)
    name = (args.ckpt if args.ckpt in ('latest', 'best_acc')
            else "ckpt_epoch{}".format(args.ckpt))
    load_path = os.path.join(config.model_dir, "{}.pth".format(name))
    print('Load saved model: {}'.format(load_path))
    net.load_state_dict(torch.load(load_path)['model_state_dict'])

    if torch.cuda.device_count() > 1:
        print('Multi-GPUs available')
        net = nn.DataParallel(net.cuda())  # For multi-GPU
    else:
        print('Single-GPU available')
        net = net.cuda()  # For single-GPU

    # evaluate
    net.eval()
    stat = []
    print('Save individual results:', save_individual_results)
    print('Save overall results:', save_stat)

    for i, data in enumerate(tqdm(data_list)):
        mixed_stft = data['mixed'].cuda()
        noise_stft = data['noise'].cuda()
        if not args.unknown_clean_signal:
            clean_stft = data['clean'].cuda()  # (B, 2, 257, L)
            full_noise_stft = data['full_noise'].numpy()[0].transpose(
                (1, 2, 0))
        with torch.no_grad():
            pred_noise_stft, output_mask = net(mixed_stft, noise_stft)
            # output_mask = F.interpolate(output_mask, size=(257, output_mask.size(3)))

        mixed_stft = mixed_stft.detach().cpu().numpy()[0].transpose((1, 2, 0))
        noise_stft = noise_stft.detach().cpu().numpy()[0].transpose((1, 2, 0))
        if not args.unknown_clean_signal:
            clean_stft = clean_stft.detach().cpu().numpy()[0].transpose(
                (1, 2, 0))
        pred_noise_stft = pred_noise_stft.detach().cpu().numpy()[0].transpose(
            (1, 2, 0))
        output_mask = output_mask.detach().cpu().numpy()[0].transpose(
            (1, 2, 0))

        output_stft = fast_icRM_sigmoid(mixed_stft, output_mask)

        mixed_sig = fast_istft(mixed_stft)
        noise_sig = fast_istft(noise_stft)
        output_sig = fast_istft(output_stft)

        # check if mask has sporadic 0/1's
        # groups = []
        # for k, g in groupby(data['mask']):
        #     groups.append((str(int(k)), len(list(g))))      # Store group iterator as a list
        # print(groups)

        # TEST: suppress silent intervals to be truly silent
        # output_sig = interpolate_waveform(output_sig, len(data['mask']))  # make output_sig same length as mask
        # output_sig = np.multiply(output_sig, 1 - data['mask'])  # make all silent intervals silent
        # TEST done

        ##### TEST: for non-silent intervals, get their low and high frequency bands #####
        # output_sig = butter_bandpass_filter(output_sig, 300, 3400, data['sr'])
        ##### TEST done #####

        pred_noise_sig = fast_istft(pred_noise_stft)
        if not args.unknown_clean_signal:
            clean_sig = fast_istft(clean_stft)
            # output_sig = interpolate_waveform(output_sig, len(clean_sig))  # from TEST: make output_sig same length as clean_sig
            full_noise_sig = fast_istft(full_noise_stft)[:len(mixed_sig)]

        if not args.unknown_clean_signal:
            info = OrderedDict([
                ('id', str(data['id'])),
                ('path', str(data['path'])),
                ('clean_audio_path', data['clean_audio_path']),
                ('mixed_audio_path', data['mixed_audio_path']),
                ('full_noise_path', data['full_noise_path']),
                ('bitstream', data['bitstream']),
                ('sr', data['sr']),
                ('snr', data['snr']),
            ])

            # resample audios to 16000 for metrics calculations
            output_sig_16k = librosa.resample(output_sig, data['sr'], 16000)
            clean_sig_16k = librosa.resample(clean_sig, data['sr'], 16000)

            # calculate metrics
            info.update(
                evaluate_metrics(output_sig_16k, clean_sig_16k, sr=16000))
        else:
            info = OrderedDict([
                ('id', str(data['id'])),
                ('path', str(data['path'])),
                ('mixed_audio_path', data['mixed_audio_path']),
                ('bitstream', data['bitstream']),
                ('sr', data['sr']),
                ('snr', data['snr']),
            ])

        # save results
        if save_individual_results:
            save_dir = os.path.join(os.path.abspath(args.outputs),
                                    convert_snr_to_suffix2(args.snr)[1:],
                                    str(data['id']))
            ensure_dir(save_dir)

            if not args.unknown_clean_signal:
                waveform = draw_waveform([mixed_sig, noise_sig, full_noise_sig, pred_noise_sig, clean_sig, output_sig], sr=data['sr'],\
                    titles=['Noisy Input', 'Noise Intervals', 'Ground Truth Full Noise', 'Predicted Full Noise', 'Ground Truth Clean Input', 'Denoised Output'])
                spectrum = draw_spectrum([mixed_sig, noise_sig, full_noise_sig, pred_noise_sig, clean_sig, output_sig], sr=data['sr'],\
                    titles=['Noisy Input', 'Noise Intervals', 'Ground Truth Full Noise', 'Predicted Full Noise', 'Ground Truth Clean Input', 'Denoised Output'])
                waveform_path = os.path.join(save_dir, 'waveform.png')
                cv2.imwrite(waveform_path, waveform)
                spectrum_path = os.path.join(save_dir, 'spectrum.png')
                cv2.imwrite(spectrum_path, spectrum)
                info['waveform'] = waveform_path
                info['spectrum'] = spectrum_path

                # draw_waveform_animated_faster(os.path.join(save_dir, 'waveform.mp4'),\
                #     [mixed_sig, noise_sig, full_noise_sig, pred_noise_sig, clean_sig, output_sig], sr=data['sr'],\
                #         titles=['Noisy Input', 'Noise Intervals', 'Ground Truth Full Noise', 'Predicted Full Noise', 'Ground Truth Clean Input', 'Denoised Output'],\
                #             fps=30)
            else:
                waveform = draw_waveform([mixed_sig, noise_sig, pred_noise_sig, output_sig], sr=data['sr'],\
                    titles=['Noisy Input', 'Noise Intervals', 'Predicted Full Noise', 'Denoised Output'])
                spectrum = draw_spectrum([mixed_sig, noise_sig, pred_noise_sig, output_sig], sr=data['sr'],\
                    titles=['Noisy Input', 'Noise Intervals', 'Predicted Full Noise', 'Denoised Output'])
                waveform_path = os.path.join(save_dir, 'waveform.png')
                cv2.imwrite(waveform_path, waveform)
                spectrum_path = os.path.join(save_dir, 'spectrum.png')
                cv2.imwrite(spectrum_path, spectrum)
                info['waveform'] = waveform_path
                info['spectrum'] = spectrum_path

                # draw_waveform_animated_faster(os.path.join(save_dir, 'waveform.mp4'),\
                #     [mixed_sig, noise_sig, pred_noise_sig, output_sig], sr=data['sr'],\
                #         titles=['Noisy Input', 'Noise Intervals', 'Predicted Full Noise', 'Denoised Output'],\
                #             fps=30)

            noisy_input_path = os.path.join(save_dir, 'noisy_input.wav')
            librosa.output.write_wav(noisy_input_path, mixed_sig, data['sr'])
            noise_intervals_path = os.path.join(save_dir,
                                                'noise_intervals.wav')
            librosa.output.write_wav(noise_intervals_path, noise_sig,
                                     data['sr'])
            predicted_full_noise_path = os.path.join(
                save_dir, 'predicted_full_noise.wav')
            librosa.output.write_wav(predicted_full_noise_path, pred_noise_sig,
                                     data['sr'])
            denoised_output_path = os.path.join(save_dir,
                                                'denoised_output.wav')
            librosa.output.write_wav(denoised_output_path, output_sig,
                                     data['sr'])
            info['noisy_input'] = noisy_input_path
            info['noise_intervals'] = noise_intervals_path
            info['predicted_full_noise'] = predicted_full_noise_path
            info['denoised_output'] = denoised_output_path

            if not args.unknown_clean_signal:
                gt_full_noise_path = os.path.join(
                    save_dir, 'ground_truth_full_noise.wav')
                librosa.output.write_wav(gt_full_noise_path, full_noise_sig,
                                         data['sr'])
                gt_clean_input_path = os.path.join(
                    save_dir, 'ground_truth_clean_input.wav')
                librosa.output.write_wav(gt_clean_input_path, clean_sig,
                                         data['sr'])
                info['ground_truth_full_noise'] = gt_full_noise_path
                info['ground_truth_clean_input'] = gt_clean_input_path

            with open(os.path.join(save_dir, 'stat.json'), 'w') as fp:
                json.dump(info, fp, **JSON_DUMP_PARAMS)

        stat.append(info)

    if save_stat:
        if not args.unknown_clean_signal:
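            # average the per-file metrics over the whole evaluation set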
            avg_cost_l1 = sum([item['l1'] for item in stat]) / len(stat)
            avg_cost_stoi = sum([item['stoi'] for item in stat]) / len(stat)
            avg_cost_csig = sum([item['csig'] for item in stat]) / len(stat)
            avg_cost_cbak = sum([item['cbak'] for item in stat]) / len(stat)
            avg_cost_covl = sum([item['covl'] for item in stat]) / len(stat)
            avg_cost_pesq = sum([item['pesq'] for item in stat]) / len(stat)
            avg_cost_ssnr_regular = sum(
                [item['ssnr_regular'] for item in stat]) / len(stat)
            avg_cost_ssnr_shift = sum([item['ssnr_shift']
                                       for item in stat]) / len(stat)
            avg_cost_ssnr_clip = sum([item['ssnr_clip']
                                      for item in stat]) / len(stat)
            avg_cost_ssnr_exsi = sum([item['ssnr_exsi']
                                      for item in stat]) / len(stat)
            avg_cost_overall_snr = sum([item['overall_snr']
                                        for item in stat]) / len(stat)

            data_info['denoise_statistics'] = OrderedDict([
                ('avg_l1', avg_cost_l1), ('avg_stoi', avg_cost_stoi),
                ('avg_csig', avg_cost_csig), ('avg_cbak', avg_cost_cbak),
                ('avg_covl', avg_cost_covl), ('avg_pesq', avg_cost_pesq),
                ('avg_ssnr_regular', avg_cost_ssnr_regular),
                ('avg_ssnr_shift', avg_cost_ssnr_shift),
                ('avg_ssnr_clip', avg_cost_ssnr_clip),
                ('avg_ssnr_exsi', avg_cost_ssnr_exsi),
                ('avg_overall_snr', avg_cost_overall_snr)
            ])

        data_info['files'] = stat

        suffix = convert_threshold_to_suffix(args.threshold)
        nsuffix = convert_snr_to_suffix2(args.snr)
        save_stat_path = os.path.join(
            os.path.abspath(args.outputs),
            'eval_results' + suffix + nsuffix + '.json')
        with open(save_stat_path, 'w') as fp:
            json.dump(data_info, fp, **JSON_DUMP_PARAMS)
Code example #8
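# Assumed imports; `architecture`, `datastream`, `metrics`, `tools` and the
# helpers (set_seeds, worker_init_fn, log_examples) are project-local,
# `lantern` is the pytorch-lantern package.
from pathlib import Path

import lantern
import torch
import torch.utils.tensorboard
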
def train(config):
    torch.set_grad_enabled(False)
    device = torch.device("cuda" if config["use_cuda"] else "cpu")
    set_seeds(config["seed"])

    model = architecture.Model().to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=config["learning_rate"])

    if Path("model").exists():
        print("Loading model checkpoint")
        model.load_state_dict(torch.load("model/model.pt"))
        optimizer.load_state_dict(torch.load("model/optimizer.pt"))
        lantern.set_learning_rate(optimizer, config["learning_rate"])

    train_data_loader = (
        datastream.TrainDatastream()
        .map(architecture.StandardizedImage.from_example)
        .data_loader(
            batch_size=config["batch_size"],
            n_batches_per_epoch=config["n_batches_per_epoch"],
            collate_fn=tools.unzip,
            num_workers=config["n_workers"],
            worker_init_fn=worker_init_fn(config["seed"]),
            persistent_workers=(config["n_workers"] >= 1),
        )
    )

    evaluate_data_loaders = {
        f"evaluate_{name}": (
            datastream.map(architecture.StandardizedImage.from_example).data_loader(
                batch_size=config["eval_batch_size"],
                collate_fn=tools.unzip,
                num_workers=config["n_workers"],
            )
        )
        for name, datastream in datastream.evaluate_datastreams().items()
        if "mini" in name
    }

    tensorboard_logger = torch.utils.tensorboard.SummaryWriter(log_dir="tb")
    early_stopping = lantern.EarlyStopping(tensorboard_logger=tensorboard_logger)
    train_metrics = metrics.train_metrics()

    for epoch in lantern.Epochs(config["max_epochs"]):

        for examples, standardized_images in lantern.ProgressBar(
            train_data_loader, "train", train_metrics
        ):
            with lantern.module_train(model), torch.enable_grad():
                predictions = model.predictions(standardized_images)
                loss = predictions.loss(examples)
                loss.backward()
            optimizer.step()
            optimizer.zero_grad()

            train_metrics["loss"].update_(loss)
            train_metrics["accuracy"].update_(examples, predictions)

            for name, metric in train_metrics.items():
                metric.log(tensorboard_logger, "train", name, epoch)

        print(lantern.MetricTable("train", train_metrics))
        log_examples(tensorboard_logger, "train", epoch, examples, predictions)

        evaluate_metrics = {
            name: metrics.evaluate_metrics() for name in evaluate_data_loaders
        }

        for name, data_loader in evaluate_data_loaders.items():
            for examples, standardized_images in lantern.ProgressBar(data_loader, name):
                with lantern.module_eval(model):
                    predictions = model.predictions(standardized_images)
                    loss = predictions.loss(examples)

                evaluate_metrics[name]["loss"].update_(loss)
                evaluate_metrics[name]["accuracy"].update_(examples, predictions)

            for metric_name, metric in evaluate_metrics[name].items():
                metric.log(tensorboard_logger, name, metric_name, epoch)

            print(lantern.MetricTable(name, evaluate_metrics[name]))
            log_examples(tensorboard_logger, name, epoch, examples, predictions)

        early_stopping = early_stopping.score(
            evaluate_metrics["evaluate_mini_early_stopping"]["accuracy"].compute()
        )
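        # scores_since_improvement == 0 means the latest score is the best so far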
        if early_stopping.scores_since_improvement == 0:
            torch.save(model.state_dict(), "model.pt")
            torch.save(optimizer.state_dict(), "optimizer.pt")
        elif early_stopping.scores_since_improvement > config["patience"]:
            break
        early_stopping.log(epoch).print()

    tensorboard_logger.close()