Code Example #1
def test_neural_net(trained_network_filename, testing_data_filename, results_filename):
    num_input_nodes, num_hidden_nodes, num_output_nodes, weights = utils.load_neural_net(trained_network_filename)
    num_features, num_outputs, test_dataset = utils.load_data(testing_data_filename)
    network = create_network(num_hidden_nodes, num_output_nodes, weights)

    # confusion_matrices[output_idx][predicted][expected]
    confusion_matrices = [
        [
            [0, 0],
            [0, 0],
        ]
        for _ in range(num_outputs)
    ]

    for row in test_dataset:
        prediction = predict(network, row)[-num_outputs:]
        expected = row[-num_outputs:]
        print("==" * 10)
        print(prediction)
        print(expected)

        for i in range(num_outputs):
            confusion_matrices[i][int(round(prediction[i]))][int(expected[i])] += 1.0

    outputs_metrics = utils.calc_metrics(confusion_matrices)
    utils.save_metrics(results_filename, outputs_metrics)
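For reference, `utils.calc_metrics` itself is not shown in this example. A minimal sketch consistent with the `confusion_matrices[output_idx][predicted][expected]` layout built above might look like the following (the helper's exact name and the per-output dictionary return format are assumptions):

def calc_metrics(confusion_matrices):
    """Sketch: derive accuracy/precision/recall/F1 from per-output 2x2
    confusion matrices indexed as matrix[predicted][expected]."""
    metrics = []
    for m in confusion_matrices:
        tp, fp = m[1][1], m[1][0]  # predicted 1, expected 1 / 0
        tn, fn = m[0][0], m[0][1]  # predicted 0, expected 0 / 1
        total = tp + tn + fp + fn
        precision = tp / (tp + fp) if (tp + fp) else 0.0
        recall = tp / (tp + fn) if (tp + fn) else 0.0
        f1 = (2 * precision * recall / (precision + recall)
              if (precision + recall) else 0.0)
        metrics.append({
            'accuracy': (tp + tn) / total if total else 0.0,
            'precision': precision,
            'recall': recall,
            'f1': f1,
        })
    return metrics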
Code Example #2
File: predict.py Project: barwe/dental
def pred():
    batch_size = 32
    output_dir = ps.output
    output_dir = output_dir[:-1] if output_dir.endswith(os.sep) else output_dir
    if not os.path.exists(output_dir): os.mkdir(output_dir)
    rm_top = ps.rm_top

    data = load_data_from_tfr()
    num_samples = data['num_samples']
    num_batches = num_samples // batch_size
    batch_left = num_samples % batch_size
    if batch_left != 0: num_batches += 1

    models = get_model_list()
    num_models = len(models)
    res = model(is_train=False, show_layers_info=False)
    predictions = np.zeros((num_models, num_samples))  #(5, 80)
    for idx in range(num_models):
        with tf.Session() as sess:
            load_and_assign_npz(sess, models[idx], res['net'])
            for batchIdx in range(num_batches):
                if batchIdx == num_batches - 1:
                    batch_length = batch_left if batch_left else batch_size
                    batch_X = data['X'][batchIdx * batch_size:]
                else:
                    batch_length = batch_size
                    batch_X = data['X'][batchIdx * batch_size:(batchIdx + 1) *
                                        batch_size]
                prediction = sess.run([res['prediction']],
                                      feed_dict={res['x']: batch_X})
                predictions[idx, batchIdx * batch_size:batchIdx * batch_size +
                            batch_length] = np.squeeze(prediction)

    # predictions = np.mean(predictions, axis=0)
    predictions = cond_mean(predictions, rm_top=rm_top)
    predictions = predictions * 100

    TFRecordFileBuilder._write_dict(
        "{}{}metrics-results.csv".format(output_dir, os.sep),
        calc_metrics(data['y'], predictions))

    write2file("{}{}predictions-results.csv".format(output_dir, os.sep),
               data['y'], predictions)
Code Example #3
    def __metrics_init(self, fixed, moving):
        step = 0
        self.writer.set_step(step)

        residuals = self.losses['data']['loss'].map(fixed['im'], moving['im'])
        residuals_masked = residuals[fixed['mask']]

        log_hist_res(self.writer,
                     residuals_masked,
                     self.losses['data']['loss'],
                     model='VI')
        # no transformation has been applied yet, so the moving image also stands in for the warped image
        log_images(self.writer, fixed['im'], moving['im'], moving['im'])

        # metrics
        ASD, DSC = calc_metrics(fixed['seg'], moving['seg'],
                                self.structures_dict, self.im_spacing)

        for structure_idx, structure in enumerate(self.structures_dict):
            ASD_val, DSC_val = ASD[0][structure_idx], DSC[0][structure_idx]
            self.metrics.update(f'VI/train/ASD/{structure}', ASD_val)
            self.metrics.update(f'VI/train/DSC/{structure}', DSC_val)
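`calc_metrics` here returns average surface distance (ASD) and Dice similarity coefficient (DSC) per anatomical structure. A sketch of the DSC half, assuming `structures_dict` maps structure names to integer labels in the segmentation tensors (ASD, which requires surface extraction and `im_spacing`, is omitted):

import torch

def calc_dsc(seg_fixed, seg_moving, structures_dict):
    """Sketch: Dice score per structure, 2|A∩B| / (|A| + |B|)."""
    dsc = torch.zeros(1, len(structures_dict))
    for idx, label in enumerate(structures_dict.values()):
        a = (seg_fixed == label)
        b = (seg_moving == label)
        denom = (a.sum() + b.sum()).clamp(min=1)
        dsc[0, idx] = 2.0 * (a & b).sum() / denom
    return dsc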
Code Example #4
def test(test_loader, model, criterion, step, device):
    model.eval()
    test_len = len(test_loader)
    rand = random.randint(0, test_len - 1)  # index of one random batch whose reconstructions get logged

    outputs, targets = [], []
    START_FLAG = True
    with torch.no_grad():
        for batch_idx, (data, target) in enumerate(test_loader):
            data, target = data.to(device), target.to(device)
            output, reconstructions = model(data)
            if START_FLAG:
                outputs = output
                targets = target
                START_FLAG = False
            else:
                outputs = torch.cat([outputs, output])
                targets = torch.cat([targets, target])

            if batch_idx == rand and args.add_decoder:
                log_reconstruction_sample(test_writer, data, reconstructions,
                                          step)

    act_loss, rec_loss, tot_loss = criterion(outputs, targets, None, None)
    test_loss = act_loss.item()
    acc, rec, f1, hamm, emr = calc_metrics(outputs, targets)

    # Logging
    test_writer.add_scalar('Loss/BCE', test_loss, step)
    test_writer.add_scalar('Metrics/Precision', acc, step)
    test_writer.add_scalar('Metrics/Recall', rec, step)
    test_writer.add_scalar('Metrics/F1-score', f1, step)
    test_writer.add_scalar('Metrics/HammingScore', hamm, step)
    test_writer.add_scalar('Metrics/ExactMatchRatio', emr, step)
    log_heatmap(test_writer, outputs, targets, step)

    print(
        '\nTest set: Average loss: {:.6f}, F1-Score: {:.6f}, ExactMatch: {:.6f} \n'
        .format(test_loss, f1, emr))
    return test_loss, f1
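`calc_metrics(outputs, targets)` here returns precision, recall, F1, Hamming score, and exact-match ratio for a multi-label problem (note the first value is logged as 'Metrics/Precision' despite the `acc` name). A minimal sketch, assuming `outputs` are probabilities in [0, 1], `targets` are multi-hot tensors, and a 0.5 decision threshold:

import torch

def calc_metrics(outputs, targets, threshold=0.5):
    """Sketch: example-based multi-label metrics averaged over the batch."""
    preds = (outputs > threshold).float()
    targets = targets.float()
    tp = (preds * targets).sum(dim=1)
    precision = (tp / preds.sum(dim=1).clamp(min=1)).mean().item()
    recall = (tp / targets.sum(dim=1).clamp(min=1)).mean().item()
    f1 = 2 * precision * recall / max(precision + recall, 1e-8)
    # Hamming score: |intersection| / |union| per sample
    union = ((preds + targets) > 0).float().sum(dim=1)
    hamm = (tp / union.clamp(min=1)).mean().item()
    # exact-match ratio: every label of a sample must be correct
    emr = (preds == targets).all(dim=1).float().mean().item()
    return precision, recall, f1, hamm, emr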
Code Example #5
def single_target_training(cfg, X_train, y_train, X_test, logger):
    model_name = list(cfg["model"].keys())[0]

    metric = utils.get_metric(cfg)
    metric_name = cfg["metric"]["name"]

    experiment_id = utils.get_experiment_id(cfg)
    run_name = cfg["mlflow"]["run_name"]

    logger.info(f"experiment config: {run_name}")
    logger.info(
        f"CV method: {cfg['split']['name']} {cfg['split']['params']['n_splits']}-Fold"
    )

    with mlflow.start_run(run_name=run_name, experiment_id=experiment_id):
        trainer = get_trainer(cfg, model_name, X_train, y_train, X_test)
        y_oof, models, y_pred = training_step(trainer)
        metrics = utils.calc_metrics(cfg, metric, y_train.values, y_oof)
        fig = utils.plot_feature_importance(models, X_train, model_name)

        logger.info(f"CV score : {metrics[metric_name]}")
        utils.mlflow_logger(cfg, metrics, fig, targets=None)

    return y_pred
Code Example #6
def multi_target_training(cfg, X_train, y_trains, X_test, logger):
    model_name = list(cfg["model"].keys())[0]

    targets = cfg["training"]["targets"]
    y_oof = np.zeros((len(X_train), len(targets)))
    y_pred = np.zeros((len(X_test), len(targets)))
    metric = utils.get_metric(cfg)
    metric_name = cfg["metric"]["name"]
    figs = []

    experiment_id = utils.get_experiment_id(cfg)
    run_name = cfg["mlflow"]["run_name"]

    logger.info(f"experiment config: {run_name}")
    logger.info(
        f"CV method: {cfg['split']['name']} {cfg['split']['params']['n_splits']}-Fold"
    )

    with mlflow.start_run(run_name=run_name, experiment_id=experiment_id):
        for i, target in enumerate(targets):
            logger.info(f"Training for {target}")
            y_train = y_trains[target]
            trainer = get_trainer(cfg, model_name, X_train, y_train, X_test)
            y_oof_, models, y_pred_ = training_step(trainer)
            y_oof[:, i] = y_oof_
            y_pred[:, i] = y_pred_

            fig = utils.plot_feature_importance(models, X_train, model_name,
                                                target)
            figs.append(fig)

        metrics = utils.calc_metrics(cfg, metric, y_trains.values, y_oof)
        logger.info(f"CV score : {metrics[metric_name]}")
        utils.mlflow_logger(cfg, metrics, figs, targets)

    return y_pred
Code Example #7
    def _test_VI(self, fixed, moving, var_params_q_v):
        """
        metrics
        """

        samples = torch.zeros([self.no_samples_VI_test, 3,
                               *self.dims])  # samples used in the evaluation

        for test_sample_no in range(1, self.no_samples_VI_test + 1):
            self.writer.set_step(test_sample_no)

            v_sample = sample_q_v(var_params_q_v, no_samples=1)
            v_sample_smoothed = SobolevGrad.apply(v_sample, self.S,
                                                  self.padding)
            transformation, displacement = self.transformation_module(
                v_sample_smoothed)
            samples[test_sample_no - 1] = displacement.clone().cpu()

            no_non_diffeomorphic_voxels, log_det_J = calc_no_non_diffeomorphic_voxels(
                transformation, self.diff_op)
            self.metrics.update('VI/test/no_non_diffeomorphic_voxels',
                                no_non_diffeomorphic_voxels)

            im_moving_warped = self.registration_module(
                moving['im'], transformation)
            seg_moving_warped = self.registration_module(
                moving['seg'], transformation)

            ASD, DSC = calc_metrics(fixed['seg'], seg_moving_warped,
                                    self.structures_dict, self.im_spacing)

            for structure_idx, structure in enumerate(self.structures_dict):
                ASD_val, DSC_val = ASD[0][structure_idx], DSC[0][structure_idx]
                self.metrics.update(f'VI/test/ASD/{structure}', ASD_val)
                self.metrics.update(f'VI/test/DSC/{structure}', DSC_val)

            save_sample(self.save_dirs, self.im_spacing, test_sample_no,
                        im_moving_warped, displacement, log_det_J, 'VI')

        self.logger.info(
            '\nsaving the displacement and warped moving image corresponding to the mean of the approximate variational posterior..'
        )

        mu_v = var_params_q_v['mu']
        mu_v_smoothed = SobolevGrad.apply(mu_v, self.S, self.padding)
        transformation, displacement = self.transformation_module(
            mu_v_smoothed)
        im_moving_warped = self.registration_module(moving['im'],
                                                    transformation)

        save_variational_posterior_mean(self.save_dirs, self.im_spacing,
                                        im_moving_warped, displacement)

        self.logger.info(
            '\ncalculating sample standard deviation of the displacement..')

        mean, std_dev = calc_posterior_statistics(samples)
        log_displacement_mean_and_std_dev(self.writer, mean, std_dev, 'VI')
        save_displacement_mean_and_std_dev(self.logger, self.save_dirs,
                                           self.im_spacing, mean, std_dev,
                                           moving['mask'], 'VI')
        """
        speed
        """

        no_samples_VI_speed_test = 100
        start = datetime.now()

        for VI_test_sample_no in range(1, no_samples_VI_speed_test + 1):
            v_sample = sample_q_v(var_params_q_v, no_samples=1)
            v_sample_smoothed = SobolevGrad.apply(v_sample, self.S,
                                                  self.padding)

            transformation, displacement = self.transformation_module(
                v_sample_smoothed)
            im_moving_warped = self.registration_module(
                moving['im'], transformation)
            seg_moving_warped = self.registration_module(
                moving['seg'], transformation)

        stop = datetime.now()
        VI_sampling_speed = no_samples_VI_speed_test / (stop -
                                                        start).total_seconds()
        self.logger.info(
            f'\nVI sampling speed: {VI_sampling_speed:.2f} samples/sec')
Code Example #8
def main():
    
    # setting arguments
    parser = argparse.ArgumentParser(description='Test arguments')
    parser.add_argument('--opt', type=str, required=True, help='path to test yaml file')
    parser.add_argument('--name', type=str, required=True, help='test log file name')
    parser.add_argument('--dataset_name', type=str, default=None)
    parser.add_argument('--scale', type=int, required=True)
    parser.add_argument('--gpu_ids', type=str, default=None, help='which gpu to use')
    parser.add_argument('--which_model', type=str, required=True, help='which pretrained model')
    parser.add_argument('--pretrained', type=str, required=True, help='pretrained path')

    args = parser.parse_args()
    args, lg = test_parse(args)
    pn = 50
    lg.info('\n' + '-'*pn + 'General INFO' + '-'*pn)

    # create test dataloader
    test_loader_list = []
    for i in range(len(args['dataset_list'])):
        # get single dataset and dataloader
        single_dataset_args = copy.deepcopy(args)
        single_dataset_args['datasets']['test']['dataroot_HR'] = single_dataset_args['datasets']['test']['dataroot_HR'][i]
        single_dataset_args['datasets']['test']['dataroot_LR'] = single_dataset_args['datasets']['test']['dataroot_LR'][i]
   
        test_dataset = create_dataset(single_dataset_args['datasets']['test'])
        test_loader = create_loader(test_dataset, args['datasets']['test'])
        test_loader_list.append(test_loader)

    # create model
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    model = create_model(args['networks']).to(device)
    lg.info('Create model: [{}]'.format(args['networks']['which_model']))
    scale = args['scale']

    # calc number of parameters
    in_ = torch.randn(1, 3, round(720/scale), round(1280/scale)).to(device)
    params, GFlops = summary(model, in_)
    lg.info('Total parameters: [{:.3f}M], GFlops: [{:.4f}G]'.format(params / 1e6, GFlops / 1e9))

    # load pretrained model
    state_dict = torch.load(args['networks']['pretrained'])
    new_state_dict = {}
    for k, v in state_dict.items():
        if k[:7] == 'module.':
            new_state_dict[k[7:]] = v
        else:
            new_state_dict[k] = v
    model.load_state_dict(new_state_dict, strict=True)
    lg.info('Load pretrained from: [{}]'.format(args['networks']['pretrained']))

    for i, test_loader in enumerate(test_loader_list):
        dataset_name = args['dataset_list'][i]
        pad = (12 - len(dataset_name)) // 2
        extra = len(dataset_name) % 2
        lg.info('\n\n' + '-'*(pn+pad) + dataset_name + '-'*(pn+pad+extra))
        lg.info('Number of [{}] images: [{}]'.format(dataset_name, len(test_loader)))

        # calculate cuda time
        avg_test_time = 0.0
        if args['calc_cuda_time'] and 'Set5' in dataset_name:
            lg.info('Start calculating cuda time...')
            avg_test_time = calc_cuda_time(test_loader, model)
            lg.info('Average cuda time on [{}]: [{:.5f}ms]'.format(dataset_name, avg_test_time))
    
        # calculate psnr and ssim
        psnr_list = []
        ssim_list = []

        model.eval()
        for idx, data in enumerate(test_loader):
            lr = data['LR'].to(device)
            hr = data['HR']
        
            # calculate evaluation metrics
            with torch.no_grad():
                sr = model(lr)
            #save(args['networks']['which_model'], dataset_name, data['filename'][0], tensor2np(sr))
            psnr, ssim = calc_metrics(tensor2np(sr), tensor2np(hr), crop_border=scale, test_Y=True)
            psnr_list.append(psnr)
            ssim_list.append(ssim)

            lg.info('[{:03d}/{:03d}] || PSNR/SSIM: {:.2f}/{:.4f} || {}'.format(idx+1, len(test_loader), psnr, ssim, data['filename']))


        avg_psnr = sum(psnr_list) / len(psnr_list)
        avg_ssim = sum(ssim_list) / len(ssim_list)

        if avg_test_time > 0:
            lg.info('Average PSNR: {:.2f}  Average SSIM: {:.4f}, Average time: {:.5f}ms'.format(avg_psnr, avg_ssim, avg_test_time))
        else:
            lg.info('Average PSNR: {:.2f}  Average SSIM: {:.4f}'.format(avg_psnr, avg_ssim))
        
    lg.info('\n' + '-'*pn + '---Finish---' + '-'*pn)
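`calc_metrics(sr, hr, crop_border=scale, test_Y=True)` above returns PSNR and SSIM for super-resolution evaluation. A PSNR-only sketch consistent with that signature, assuming uint8 HWC RGB inputs and the BT.601 Y-channel convention common in SR benchmarks (SSIM omitted for brevity):

import numpy as np

def calc_psnr(sr, hr, crop_border=0, test_Y=True):
    """Sketch: PSNR between two images, optionally on the Y channel only."""
    sr, hr = sr.astype(np.float64), hr.astype(np.float64)
    if test_Y:  # RGB -> Y channel of YCbCr (ITU-R BT.601)
        coeffs = np.array([65.481, 128.553, 24.966]) / 255.0
        sr = sr @ coeffs + 16.0
        hr = hr @ coeffs + 16.0
    if crop_border > 0:
        sr = sr[crop_border:-crop_border, crop_border:-crop_border]
        hr = hr[crop_border:-crop_border, crop_border:-crop_border]
    mse = np.mean((sr - hr) ** 2)
    return float('inf') if mse == 0 else 20 * np.log10(255.0 / np.sqrt(mse))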
Code Example #9
def train(model,
          writer,
          seed,
          k=100,
          alpha=0.6,
          lr=0.002,
          num_epochs=150,
          batch_size=64,
          n_classes=10,
          max_epochs=80,
          max_val=1.):
    # prepare data
    transform = transforms.Compose(
        [transforms.ToTensor(),
         transforms.Normalize(cfg['m'], cfg['s'])])
    train_dataset, val_dataset = prepare_mnist(root='~/datasets/MNIST',
                                               transform=transform)
    ntrain = len(train_dataset)

    # build model and feed to GPU
    model.cuda()

    # make data loaders
    train_loader, val_loader, indices = utils.sample_train(train_dataset,
                                                           val_dataset,
                                                           batch_size,
                                                           k,
                                                           n_classes,
                                                           seed,
                                                           shuffle_train=False)

    # setup param optimization
    optimizer = torch.optim.Adam(model.parameters(), lr=lr, betas=(0.9, 0.99))

    # model.train()

    Z = torch.zeros(ntrain, n_classes).float().cuda()  # intermediate values
    z = torch.zeros(ntrain, n_classes).float().cuda()  # temporal outputs
    outputs = torch.zeros(ntrain, n_classes).float().cuda()  # current outputs

    losses = []
    suplosses = []
    unsuplosses = []
    best_loss = 30.0
    for epoch in range(num_epochs):
        t = timer()
        print('\nEpoch: {}'.format(epoch + 1))
        model.train()
        # evaluate unsupervised cost weight
        w = utils.weight_scheduler(epoch, max_epochs, max_val, 5, k, 60000)

        w = torch.tensor(w, requires_grad=False).cuda()
        print('---------------------')

        # targets change only once per epoch
        for i, (images, labels) in enumerate(train_loader):
            # offsets into z/outputs use the configured batch size; only the
            # final batch may be smaller since drop_last is False
            start = i * batch_size
            cur_bs = images.size(0)
            images = images.cuda()
            labels = labels.requires_grad_(False).cuda()

            optimizer.zero_grad()
            out = model(images)
            zcomp = z[start:start + cur_bs]
            zcomp.requires_grad_(False)
            loss, suploss, unsuploss, nbsup = utils.temporal_losses(
                out, zcomp, w, labels)

            # save outputs
            outputs[start:start + cur_bs] = out.clone().detach()
            losses.append(loss.item())
            suplosses.append(nbsup * suploss.item())
            unsuplosses.append(unsuploss.item())

            # backprop
            loss.backward()
            optimizer.step()

            # print loss every 100 steps
            # if (i + 1) % 100 == 0:
            #     print('Step [%d/%d], Loss: %.6f, Time: %.2f s' % (i+1, len(train_dataset) // batch_size,
            #                                                       float(np.mean(losses)), timer()-t))

        loss_mean = np.mean(losses)
        supl_mean = np.mean(suplosses)
        unsupl_mean = np.mean(unsuplosses)

        writer.add_scalar('total loss', loss_mean, (epoch + 1) * ntrain)
        print(
            'Epoch [%d/%d], Loss: %.6f, Supervised Loss: %.6f, Unsupervised Loss: %.6f, Time: %.2f'
            % (epoch + 1, num_epochs, float(loss_mean), float(supl_mean),
               float(unsupl_mean), timer() - t))
        writer.add_scalar('supervised loss', supl_mean, (epoch + 1) * ntrain)
        writer.add_scalar('unsupervised loss', unsupl_mean,
                          (epoch + 1) * ntrain)

        Z = alpha * Z + (1. - alpha) * outputs
        z = Z * (1. / (1. - alpha**(epoch + 1)))

        if loss_mean < best_loss:
            best_loss = loss_mean
            torch.save({'state_dict': model.state_dict()}, 'model_best.pth')

        model.eval()
        acc = utils.calc_metrics(model, val_loader)
        writer.add_scalar('Acc', acc, (epoch + 1) * ntrain)
        print('Acc : %.2f' % acc)

    # test best model
    checkpoint = torch.load('model_best.pth')
    model.load_state_dict(checkpoint['state_dict'])
    model.eval()
    acc_best = utils.calc_metrics(model, val_loader)
    print('Acc (best model): %.2f' % acc_best)
Code Example #10
def train(model,
          seed,
          k=100,
          alpha=0.6,
          lr=0.002,
          beta2=0.99,
          num_epochs=150,
          batch_size=100,
          drop=0.5,
          std=0.15,
          fm1=16,
          fm2=32,
          divide_by_bs=False,
          w_norm=False,
          data_norm='pixelwise',
          early_stop=None,
          c=300,
          n_classes=10,
          max_epochs=80,
          max_val=30.,
          ramp_up_mult=-5.,
          n_samples=60000,
          print_res=True,
          **kwargs):

    writer = SummaryWriter(comment='Semi-supervised')
    # retrieve data
    train_dataset, test_dataset = prepare_mnist()
    ntrain = len(train_dataset)

    # build model
    model.cuda()

    # make data loaders
    train_loader, test_loader, indices = sample_train(train_dataset,
                                                      test_dataset,
                                                      batch_size,
                                                      k,
                                                      n_classes,
                                                      seed,
                                                      shuffle_train=False)

    # setup param optimization
    optimizer = torch.optim.Adam(model.parameters(), lr=lr, betas=(0.9, 0.99))

    # train
    model.train()
    losses = []
    sup_losses = []
    unsup_losses = []
    best_loss = 20.

    Z = torch.zeros(ntrain, n_classes).float().cuda()  # intermediate values
    z = torch.zeros(ntrain, n_classes).float().cuda()  # temporal outputs
    outputs = torch.zeros(ntrain, n_classes).float().cuda()  # current outputs

    for epoch in range(num_epochs):
        t = timer()

        # evaluate unsupervised cost weight
        w = weight_schedule(epoch, max_epochs, max_val, ramp_up_mult, k,
                            n_samples)

        if (epoch + 1) % 10 == 0:
            print('unsupervised loss weight : {}'.format(w))

        # turn it into a usable pytorch tensor
        w = torch.tensor([w], dtype=torch.float32, device='cuda')

        l = []
        supl = []
        unsupl = []
        for i, (images, labels) in enumerate(train_loader):
            images = images.cuda()
            labels = labels.cuda()

            # get output and calculate loss
            optimizer.zero_grad()
            out = model(images)
            zcomp = z[i * batch_size:(i + 1) * batch_size].detach()
            loss, suploss, unsuploss, nbsup = temporal_loss(
                out, zcomp, w, labels)

            # save outputs and losses
            outputs[i * batch_size:(i + 1) * batch_size] = out.detach().clone()
            l.append(loss.item())
            supl.append(nbsup * suploss.item())
            unsupl.append(unsuploss.item())

            # backprop
            loss.backward()
            optimizer.step()

            # print loss
            if (epoch + 1) % 1 == 0:
                if i + 1 == 2 * c:
                    print(
                        'Epoch [%d/%d], Step [%d/%d], Loss: %.6f, Time (this epoch): %.2f s'
                        % (epoch + 1, num_epochs, i + 1, len(train_dataset) //
                           batch_size, np.mean(l), timer() - t))
                elif (i + 1) % c == 0:
                    print('Epoch [%d/%d], Step [%d/%d], Loss: %.6f' %
                          (epoch + 1, num_epochs, i + 1,
                           len(train_dataset) // batch_size, np.mean(l)))

        # update temporal ensemble
        Z = alpha * Z + (1. - alpha) * outputs
        z = Z * (1. / (1. - alpha**(epoch + 1)))

        # handle metrics, losses, etc.
        eloss = np.mean(l)
        losses.append(eloss)
        sup_losses.append(
            (1. / k) *
            np.sum(supl))  # division by 1/k to obtain the mean supervised loss
        unsup_losses.append(np.mean(unsupl))

        writer.add_scalar('Train_loss', losses[epoch], epoch)
        writer.add_scalar('Supervised_loss', sup_losses[epoch], epoch)
        writer.add_scalar('Unsupervised_loss', unsup_losses[epoch], epoch)

        # saving model
        if eloss < best_loss:
            best_loss = eloss
            torch.save({'state_dict': model.state_dict()},
                       'model_best.pth.tar')

    writer.add_graph(model, (images, ))
    writer.close()
    # test
    model.eval()
    acc = calc_metrics(model, test_loader)
    if print_res:
        print('Accuracy of the network on the 10000 test images: %.2f %%' % acc)

    # test best model
    checkpoint = torch.load('model_best.pth.tar')
    model.load_state_dict(checkpoint['state_dict'])
    model.eval()
    acc_best = calc_metrics(model, test_loader)
    if print_res:
        print('Accuracy of the network (best model) on the 10000 test images: %.2f %%' % acc_best)

    return acc, acc_best, losses, sup_losses, unsup_losses, indices
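In both training loops above (Code Examples #9 and #10), the pair of updates `Z = alpha * Z + (1. - alpha) * outputs` and `z = Z * (1. / (1. - alpha**(epoch + 1)))` is the temporal-ensembling target update of Laine & Aila (2017): an exponential moving average of per-sample predictions with a startup bias correction (the same `1 - alpha^t` divisor Adam uses), needed because `Z` is initialised to zero. As a standalone sketch:

def update_ensemble_targets(Z, outputs, epoch, alpha=0.6):
    """Sketch: one temporal-ensembling step on per-sample output tensors."""
    Z = alpha * Z + (1.0 - alpha) * outputs  # EMA accumulator
    z = Z / (1.0 - alpha ** (epoch + 1))     # correct the zero-init bias
    return Z, z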
Code Example #11
File: train.py Project: zc280330/amt-wavenet
def main():
    args = get_arguments()

    with open(args.model_params, 'r') as f:
        model_params = json.load(f)

    with open(args.training_params, 'r') as f:
        train_params = json.load(f)

    try:
        directories = validate_directories(args)
    except ValueError as e:
        print('Some arguments are wrong:')
        print(str(e))
        return

    logdir = directories['logdir']
    restore_from = directories['restore_from']

    # Even if we restored the model, we will treat it as new training
    # if the trained model is written into an arbitrary location.
    is_overwritten_training = logdir != restore_from

    receptive_field = WaveNetModel.calculate_receptive_field(
        model_params['filter_width'], model_params['dilations'],
        model_params['initial_filter_width'])
    # Save arguments and model params into file
    save_run_config(args, receptive_field, STARTED_DATESTRING, logdir)

    # Create coordinator.
    coord = tf.train.Coordinator()

    # Create data loader.
    with tf.name_scope('create_inputs'):
        reader = WavMidReader(data_dir=args.data_dir_train,
                              coord=coord,
                              audio_sample_rate=model_params['audio_sr'],
                              receptive_field=receptive_field,
                              velocity=args.velocity,
                              sample_size=args.sample_size,
                              queues_size=(10, 10 * args.batch_size))
        data_batch = reader.dequeue(args.batch_size)

    # Create model.
    net = WaveNetModel(
        batch_size=args.batch_size,
        dilations=model_params['dilations'],
        filter_width=model_params['filter_width'],
        residual_channels=model_params['residual_channels'],
        dilation_channels=model_params['dilation_channels'],
        skip_channels=model_params['skip_channels'],
        output_channels=model_params['output_channels'],
        use_biases=model_params['use_biases'],
        initial_filter_width=model_params['initial_filter_width'])

    input_data = tf.placeholder(dtype=tf.float32,
                                shape=(args.batch_size, None, 1))
    input_labels = tf.placeholder(dtype=tf.float32,
                                  shape=(args.batch_size, None,
                                         model_params['output_channels']))

    loss, probs = net.loss(input_data=input_data,
                           input_labels=input_labels,
                           pos_weight=train_params['pos_weight'],
                           l2_reg_str=train_params['l2_reg_str'])
    optimizer = optimizer_factory[args.optimizer](
        learning_rate=train_params['learning_rate'],
        momentum=train_params['momentum'])
    trainable = tf.trainable_variables()
    optim = optimizer.minimize(loss, var_list=trainable)

    # Set up logging for TensorBoard.
    writer = tf.summary.FileWriter(logdir)
    writer.add_graph(tf.get_default_graph())
    run_metadata = tf.RunMetadata()
    summaries = tf.summary.merge_all()
    histograms = tf.summary.merge_all(key=HKEY)

    # Separate summary ops for validation, since they are
    # calculated only once per evaluation cycle.
    with tf.name_scope('validation_summaries'):

        metric_summaries = metrics_empty_dict()
        metric_value = tf.placeholder(tf.float32)
        for name in metric_summaries.keys():
            metric_summaries[name] = tf.summary.scalar(name, metric_value)

        images_buffer = tf.placeholder(tf.string)
        images_batch = tf.stack([
            tf.image.decode_png(images_buffer[0], channels=4),
            tf.image.decode_png(images_buffer[1], channels=4),
            tf.image.decode_png(images_buffer[2], channels=4)
        ])
        images_summary = tf.summary.image('estim', images_batch)

        audio_data = tf.placeholder(tf.float32)
        audio_summary = tf.summary.audio('input', audio_data,
                                         model_params['audio_sr'])

    # Set up session
    sess = tf.Session(config=tf.ConfigProto(log_device_placement=False))
    init = tf.global_variables_initializer()
    sess.run(init)

    # Saver for storing checkpoints of the model.
    saver = tf.train.Saver(var_list=tf.trainable_variables(),
                           max_to_keep=args.max_checkpoints)

    # Trainer for keeping best validation-performing model
    # and optional early stopping.
    trainer = Trainer(sess, logdir, train_params['early_stop_limit'], 0.999)

    try:
        saved_global_step = load(saver, sess, restore_from)
        if is_overwritten_training or saved_global_step is None:
            # The first training step will be saved_global_step + 1,
            # therefore we put -1 here for new or overwritten trainings.
            saved_global_step = -1

    except:
        print('Something went wrong while restoring checkpoint. '
              'Training will be terminated to avoid accidentally '
              'overwriting the previous model.')
        raise

    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    reader.start_threads(sess)

    step = None
    last_saved_step = saved_global_step
    try:
        for step in range(saved_global_step + 1, train_params['num_steps']):
            waveform, pianoroll = sess.run([data_batch[0], data_batch[1]])
            feed_dict = {input_data: waveform, input_labels: pianoroll}
            # Reload switches from file on each step
            with open(RUNTIME_SWITCHES, 'r') as f:
                switch = json.load(f)

            start_time = time.time()
            if switch['store_meta'] and step % switch['store_every'] == 0:
                # Slow run that stores extra information for debugging.
                print('Storing metadata')
                run_options = tf.RunOptions(
                    trace_level=tf.RunOptions.FULL_TRACE)
                summary, loss_value, _ = sess.run([summaries, loss, optim],
                                                  feed_dict=feed_dict,
                                                  options=run_options,
                                                  run_metadata=run_metadata)
                writer.add_summary(summary, step)
                writer.add_run_metadata(run_metadata,
                                        'step_{:04d}'.format(step))
                tl = timeline.Timeline(run_metadata.step_stats)
                timeline_path = os.path.join(logdir, 'timeline.trace')
                with open(timeline_path, 'w') as f:
                    f.write(tl.generate_chrome_trace_format(show_memory=True))
            else:
                summary, loss_value, _ = sess.run([summaries, loss, optim],
                                                  feed_dict=feed_dict)
                writer.add_summary(summary, step)

            duration = time.time() - start_time
            print('step {:d} - loss = {:.3f}, ({:.3f} sec/step)'.format(
                step, loss_value, duration))

            if step % switch['checkpoint_every'] == 0:
                save(saver, sess, logdir, step)
                last_saved_step = step

            # Evaluate model performance on validation data
            if step % switch['evaluate_every'] == 0:
                if switch['histograms']:
                    hist_summary = sess.run(histograms)
                    writer.add_summary(hist_summary, step)
                print('evaluating...')
                stats = 0, 0, 0, 0, 0, 0
                est = np.empty([0, model_params['output_channels']])
                ref = np.empty([0, model_params['output_channels']])

                b_data = np.empty((0, args.sample_size + receptive_field - 1, 1))
                b_labels = np.empty((0, model_params['output_channels']))
                b_cntr = args.batch_size

                # if (batch_size * sample_size > valid_data) single_pass() again
                while est.size == 0:  # and ref.size == 0 and sum(stats) == 0 ...

                    for data, labels in reader.single_pass(
                            sess, args.data_dir_valid):

                        # cumulate batch
                        if b_cntr > 1:
                            b_data, b_labels, decr = cumulateBatch(
                                data, labels, b_data, b_labels)
                            b_cntr -= decr
                            continue
                        elif args.batch_size > 1:
                            b_data, b_labels, decr = cumulateBatch(
                                data, labels, b_data, b_labels)
                            if not decr:
                                continue
                            data = b_data
                            labels = b_labels
                            # reset batch accumulation variables
                            b_data = np.empty((0, args.sample_size + receptive_field - 1, 1))
                            b_labels = np.empty((0, model_params['output_channels']))
                            b_cntr = args.batch_size

                        predictions = sess.run(probs,
                                               feed_dict={input_data: data})
                        # Aggregate sums for metrics calculation
                        stats_chunk = calc_stats(predictions, labels,
                                                 args.threshold)
                        stats = tuple(
                            [sum(x) for x in zip(stats, stats_chunk)])
                        est = np.append(est, predictions, axis=0)
                        ref = np.append(ref, labels, axis=0)

                metrics = calc_metrics(None, None, None, stats=stats)
                write_metrics(metrics, metric_summaries, metric_value, writer,
                              step, sess)
                trainer.check(metrics['f1_measure'])

                # Render evaluation results
                if switch['log_image'] or switch['log_sound']:
                    sub_fac = int(model_params['audio_sr'] / switch['midi_sr'])
                    est = roll_subsample(est.T, sub_fac)
                    ref = roll_subsample(ref.T, sub_fac)
                if switch['log_image']:
                    write_images(est, ref, switch['midi_sr'], args.threshold,
                                 (8, 6), images_summary, images_buffer, writer,
                                 step, sess)
                if switch['log_sound']:
                    write_audio(est, ref, switch['midi_sr'],
                                model_params['audio_sr'], 0.007, audio_summary,
                                audio_data, writer, step, sess)

    except KeyboardInterrupt:
        # Introduce a line break after ^C is displayed so save message
        # is on its own line.
        print()
    finally:
        if step is not None and step > last_saved_step:
            save(saver, sess, logdir, step)
        coord.request_stop()
        coord.join(threads)
        flush_n_close(writer, sess)
Code Example #12
def generate_best(dir_list, border=4):
    img_list_dict = dict()

    # get sorted img names in corresponding model
    for dir_name in dir_list:
        img_list_dict[dir_name] = sorted(os.listdir(dir_name))
    
    length = len(dir_list)
    for i in range(100):  # for every Urban100 image
        # load hr img
        hr_basename = img_list_dict[dir_list[0]][i]
        hr_path = osp.join(dir_list[0], hr_basename)
        hr_img = imageio.imread(hr_path, pilmode='RGB')
        h, w = hr_img.shape[:-1]
        h_step, w_step = h // 20, w // 20
        img_psnrs, img_ssims = [], []

        # get metrics of different models for this img
        for k in range(length-1):
            basename = img_list_dict[dir_list[k+1]][i]
            path = osp.join(dir_list[k+1], basename)
            img = imageio.imread(path, pilmode='RGB')
            if dir_list[k+1] == 'IDN':
                img = cv2.copyMakeBorder(img, 4, 4, 4, 4, cv2.BORDER_REPLICATE)
            psnr, ssim = calc_metrics(hr_img, img, crop_border=border, test_Y=True)
            img_psnrs.append(psnr)
            img_ssims.append(ssim)
        str_img_psnrs = ['{:.2f}'.format(x) for x in img_psnrs]
        print('full img[{:03d}] | {}'.format((i+1), str_img_psnrs))

        # skip this image unless ours (the last model in dir_list) scores best on both PSNR and SSIM
        if np.argmax(np.array(img_psnrs)) < length-2 or np.argmax(np.array(img_ssims)) < length-2:
            continue

        # fixed stride for different location, get 64*64*3 patch
        for y in range(0, h-64, h_step):
            for x in range(0, w-64, w_step):
                imgs, psnrs, ssims = [], [], []
                # plot rectangle on hr img
                hr_img1 = hr_img.copy()
                cv2.rectangle(hr_img1, (x, y), (x+63, y+63), (255, 0, 0), 2)
                left, right, top, bottom = get_direction(x+32, y+32, w, h)
                imgs.append(hr_img1[top:bottom+1, left:right+1, :]) # 513 * 513 * 3
                hr_patch = hr_img[y:y+64, x:x+64, :]
                imgs.append(hr_patch)
                
                # for different model, get corresponding patch
                for k in range(length-1):
                    basename = img_list_dict[dir_list[k+1]][i]
                    path = osp.join(dir_list[k+1], basename)
                    img = imageio.imread(path, pilmode='RGB')
                    if dir_list[k+1] == 'IDN':
                        img = cv2.copyMakeBorder(img, 4, 4, 4, 4, cv2.BORDER_REPLICATE)
                    img_patch = img[y:y+64, x:x+64, :]
                    imgs.append(img_patch)

                    # calculate psnr and ssim
                    psnr, ssim = calc_metrics(hr_patch, img_patch)
                    psnrs.append(psnr)
                    ssims.append(ssim)
                str_psnrs = ['{:.2f}'.format(p) for p in psnrs]
                print('[{:03d}] | ({}/{}, {}/{}) | {}'.format((i+1), y, h, x, w, str_psnrs))
                if np.argmax(np.array(psnrs)) == length-2 and np.argmax(np.array(ssims)) == length-2:
                    print('Saving...')
                    plot_compare(imgs, img_psnrs, img_ssims, i+1, '{}_{}'.format(y, x), dir_list)
Code Example #13
def train(train_loader, model, criterion, optimizer, epoch, device):
    batch_time = AverageMeter()
    data_time = AverageMeter()

    model.train()
    train_len = len(train_loader)
    scores = {
        'acc': 0,
        'rec': 0,
        'f1': 0,
        'hamm': 0,
        'emr': 0,
    }
    end = time.time()

    for batch_idx, (data, target) in enumerate(train_loader):
        data_time.update(time.time() - end)

        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        act_output, reconstructions = model(data)
        act_loss, reconstruction_loss, total_loss = criterion(
            act_output, target, data, reconstructions)

        acc, rec, f1, hamm, emr = calc_metrics(act_output, target)
        scores.update(acc=scores['acc'] + acc,
                      rec=scores['rec'] + rec,
                      f1=scores['f1'] + f1,
                      hamm=scores['hamm'] + hamm,
                      emr=scores['emr'] + emr)

        global_step = (batch_idx + 1) + (epoch - 1) * len(train_loader)
        # change the learning rate exponentially
        exp_lr_decay(optimizer=optimizer, global_step=global_step)

        total_loss.backward()
        optimizer.step()

        batch_time.update(time.time() - end)
        end = time.time()

        if batch_idx % args.log_interval == 0:
            # Logging
            train_writer.add_scalar('Loss/BCE', act_loss.item(), global_step)
            if args.add_decoder:
                train_writer.add_scalar('Loss/Reconstruction',
                                        reconstruction_loss.item(),
                                        global_step)
                log_reconstruction_sample(train_writer, data, reconstructions,
                                          global_step)
                train_writer.add_scalar('Loss/Total', total_loss.item(),
                                        global_step)
            train_writer.add_scalar('Metrics/Precision', acc, global_step)
            train_writer.add_scalar('Metrics/Recall', rec, global_step)
            train_writer.add_scalar('Metrics/F1-score', f1, global_step)
            train_writer.add_scalar('Metrics/HammingScore', hamm, global_step)
            train_writer.add_scalar('Metrics/ExactMatchRatio', emr,
                                    global_step)

            # Console output
            print('Train Epoch: {}\t[{}/{} ({:.0f}%)]\t'
                  'Loss: {:.6f}\tF1-Score: {:.6f}\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})'.format(
                      epoch,
                      batch_idx * len(data),
                      len(train_loader.dataset),
                      100. * batch_idx / len(train_loader),
                      total_loss.item(),
                      f1,
                      batch_time=batch_time,
                      data_time=data_time))

    scores.update(acc=scores['acc'] / train_len,
                  rec=scores['rec'] / train_len,
                  f1=scores['f1'] / train_len,
                  hamm=scores['hamm'] / train_len,
                  emr=scores['emr'] / train_len)
    return scores
Code Example #14
    def forward(self,
                data_batch,
                epoch,
                use_second_order,
                use_multi_step_loss_optimization,
                num_steps,
                training_phase,
                do_evaluation=False):
        """
        Runs a forward outer loop pass on the batch of tasks using the MAML/++ framework.
        :param data_batch: A data batch containing the support and target sets.
        :param epoch: Current epoch's index
        :param use_second_order: A boolean saying whether to use second order derivatives.
        :param use_multi_step_loss_optimization: Whether to optimize on the outer loop using just the last step's
        target loss (True) or whether to use multi step loss which improves the stability of the system (False)
        :param num_steps: Number of inner loop steps.
        :param training_phase: Whether this is a training phase (True) or an evaluation phase (False)
        :return: A dictionary with the collected losses of the current outer forward propagation.
        """
        frames = data_batch

        total_losses = []
        loss_accumulator = {'total': utils.AverageMeter()}
        metrics = {'psnr': utils.AverageMeter(), 'ssim': utils.AverageMeter()}
        per_task_target_preds = [[] for _ in range(len(frames[0]))]
        self.net.zero_grad()

        for task_id in range(len(frames[0])):  # loop over batch dimension
            task_losses = []
            per_step_loss_importance_vectors = self.get_per_step_loss_importance_vector()

            names_weights_copy = self.get_inner_loop_parameter_dict(
                self.net.named_parameters())
            names_weights_copy = {
                name.replace('module.', ''): value
                for name, value in names_weights_copy.items()
            }

            # inner loop
            support_idxs = self.support_idxs  # frame indices: input[0, 2, 4, 6] --> output[3]
            target_idx = self.target_idxs
            self.inner_loop_optimizer.initialize_state()

            # Attenuate the initialization for L2F
            if self.args.attenuate:
                task_embeddings = self.get_task_embeddings(
                    frames, task_id, names_weights_copy)
                names_weights_copy = self.attenuate_init(
                    task_embeddings=task_embeddings,
                    names_weights_copy=names_weights_copy)

            for num_step in range(num_steps):
                support_loss = 0
                for ind in support_idxs:
                    _loss, _ = self.net_forward(
                        frame0=frames[ind[0]][task_id].unsqueeze(0),
                        frame1=frames[ind[2]][task_id].unsqueeze(0),
                        target=frames[ind[1]][task_id].unsqueeze(0),
                        weights=names_weights_copy,
                        backup_running_statistics=(num_step == 0),
                        training=True,
                        num_step=num_step)
                    support_loss = support_loss + _loss['total']

                names_weights_copy = self.apply_inner_loop_update(
                    loss=support_loss,
                    names_weights_copy=names_weights_copy,
                    use_second_order=use_second_order,
                    current_step_idx=num_step)

                kwargs = {
                    'backup_running_statistics': False,
                    'training': True,
                    'num_step': num_step
                }
                if use_multi_step_loss_optimization and training_phase and epoch < self.args.multi_step_loss_num_epochs:
                    target_loss, target_preds = self.net_forward(
                        frame0=frames[target_idx[0]][task_id].unsqueeze(0),
                        frame1=frames[target_idx[2]][task_id].unsqueeze(0),
                        target=frames[target_idx[1]][task_id].unsqueeze(0),
                        weights=names_weights_copy,
                        **kwargs)

                    task_losses.append(
                        per_step_loss_importance_vectors[num_step] *
                        target_loss['total'])
                    self.update_loss_metrics(loss_accumulator, target_loss)

            if not training_phase:
                kwargs = {
                    'backup_running_statistics': False,
                    'training': True,
                    'num_step': num_steps
                }
                with torch.no_grad():
                    target_loss, target_preds = self.net_forward(
                        frame0=frames[target_idx[0]][task_id].unsqueeze(0),
                        frame1=frames[target_idx[2]][task_id].unsqueeze(0),
                        target=frames[target_idx[1]][task_id].unsqueeze(0),
                        weights=names_weights_copy,
                        **kwargs)
                task_losses.append(target_loss['total'])
                self.update_loss_metrics(loss_accumulator, target_loss)
            elif not (use_multi_step_loss_optimization and training_phase
                      and epoch < self.args.multi_step_loss_num_epochs):
                kwargs = {
                    'backup_running_statistics': False,
                    'training': True,
                    'num_step': num_steps
                }
                target_loss, target_preds = self.net_forward(
                    frame0=frames[target_idx[0]][task_id].unsqueeze(0),
                    frame1=frames[target_idx[2]][task_id].unsqueeze(0),
                    target=frames[target_idx[1]][task_id].unsqueeze(0),
                    weights=names_weights_copy,
                    **kwargs)
                task_losses.append(target_loss['total'])
                self.update_loss_metrics(loss_accumulator, target_loss)

            if self.args.model == 'superslomo':
                per_task_target_preds[task_id] = self.revNormalize(
                    target_preds.detach().squeeze(0)).unsqueeze(0)
            elif self.args.model == 'voxelflow':
                per_task_target_preds[task_id] = (
                    (target_preds.detach().squeeze(0) * self.std + self.mean) /
                    255.0).unsqueeze(0)
            else:
                per_task_target_preds[task_id] = target_preds.detach()  # shape: (1, C, H, W)

            if do_evaluation:
                if self.args.model == 'superslomo':
                    output = self.revNormalize(target_preds.squeeze(0))
                    target = self.revNormalize(frames[target_idx[1]][task_id])
                elif self.args.model == 'voxelflow':
                    output = (target_preds.squeeze(0) * self.std +
                              self.mean) / 255.0
                    target = (frames[target_idx[1]][task_id] * self.std +
                              self.mean) / 255.0
                else:
                    output = target_preds.squeeze(0)
                    target = frames[target_idx[1]][task_id]
                output = output.detach()
                target = target.detach()
                psnr, ssim = utils.calc_metrics(output, target)
                # print(psnr, ssim)
                metrics['psnr'].update(psnr)
                metrics['ssim'].update(ssim)

            task_losses = torch.sum(torch.stack(task_losses))
            total_losses.append(task_losses)

            if not training_phase:
                self.net.restore_backup_stats()

        losses = self.get_across_task_loss_metrics(
            total_losses=total_losses, specific_losses=loss_accumulator)

        for idx, item in enumerate(per_step_loss_importance_vectors):
            losses['loss_importance_vector_{}'.format(
                idx)] = item.detach().cpu().numpy()

        return losses, per_task_target_preds, metrics
Code Example #15
File: main.py Project: YuanshuaiHuang/dacl
def validate(valid_loader, model, criterion, epoch, cfg):
    losses = {
        'softmax': AverageMeter(),
        'center': AverageMeter(),
        'total': AverageMeter()
    }
    accs = AverageMeter()
    y_pred, y_true, y_scores = [], [], []

    # switch to evaluate mode
    model.eval()

    with tqdm(total=int(len(valid_loader.dataset) /
                        cfg['batch_size'])) as pbar:
        with torch.no_grad():
            for i, (images, target) in enumerate(valid_loader):

                images = images.to(device)
                target = target.to(device)

                # compute output
                feat, output, A = model(images)
                l_softmax = criterion['softmax'](output, target)
                l_center = criterion['center'](feat, A, target)
                l_total = l_softmax + cfg['lamb'] * l_center

                # measure accuracy and record loss
                acc, pred = accuracy(output, target)
                losses['softmax'].update(l_softmax.item(), images.size(0))
                losses['center'].update(l_center.item(), images.size(0))
                losses['total'].update(l_total.item(), images.size(0))
                accs.update(acc.item(), images.size(0))

                # collect for metrics
                y_pred.append(pred)
                y_true.append(target)
                y_scores.append(output.data)

                # progressbar
                pbar.set_description(
                    f'VALIDATING [{epoch:03d}/{cfg["epochs"]}]')
                pbar.update(1)

    metrics = calc_metrics(y_pred, y_true, y_scores)
    progress = (f'[-] VALID [{epoch:03d}/{cfg["epochs"]}] | '
                f'L={losses["total"].avg:.4f} | '
                f'Ls={losses["softmax"].avg:.4f} | '
                f'Lsc={losses["center"].avg:.4f} | '
                f'acc={accs.avg:.4f} | '
                f'rec={metrics["rec"]:.4f} | '
                f'f1={metrics["f1"]:.4f} | '
                f'aucpr={metrics["aucpr"]:.4f} | '
                f'aucroc={metrics["aucroc"]:.4f}')
    print(progress)

    # save model checkpoints for best valid
    if accs.avg > best_valid['acc']:
        save_checkpoint(epoch, model, cfg, tag='best_valid_acc.pth')
    if metrics['rec'] > best_valid['rec']:
        save_checkpoint(epoch, model, cfg, tag='best_valid_rec.pth')

    best_valid['acc'] = max(best_valid['acc'], accs.avg)
    best_valid['rec'] = max(best_valid['rec'], metrics['rec'])
    best_valid['f1'] = max(best_valid['f1'], metrics['f1'])
    best_valid['aucpr'] = max(best_valid['aucpr'], metrics['aucpr'])
    best_valid['aucroc'] = max(best_valid['aucroc'], metrics['aucroc'])
    write_log(losses, accs.avg, metrics, epoch, tag='valid')
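`calc_metrics(y_pred, y_true, y_scores)` here consumes the per-batch predictions, labels, and raw outputs collected above and returns a dict with at least 'rec', 'f1', 'aucpr', and 'aucroc'. A sketch using scikit-learn, assuming `y_pred` holds class indices, the problem is multiclass, and macro averaging (all assumptions):

import numpy as np
import torch
from sklearn.metrics import (average_precision_score, f1_score,
                             recall_score, roc_auc_score)
from sklearn.preprocessing import label_binarize

def calc_metrics(y_pred, y_true, y_scores):
    """Sketch: classification metrics from lists of per-batch tensors."""
    y_pred = torch.cat(y_pred).cpu().numpy()
    y_true = torch.cat(y_true).cpu().numpy()
    y_scores = torch.cat(y_scores).softmax(dim=1).cpu().numpy()
    y_true_bin = label_binarize(y_true, classes=np.arange(y_scores.shape[1]))
    return {
        'rec': recall_score(y_true, y_pred, average='macro'),
        'f1': f1_score(y_true, y_pred, average='macro'),
        'aucpr': average_precision_score(y_true_bin, y_scores, average='macro'),
        'aucroc': roc_auc_score(y_true_bin, y_scores, average='macro'),
    }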
Code Example #16
    def _run_VI(self, fixed, moving, var_params_q_v):
        self.__init_optimizer_q_v(var_params_q_v)
        data_loss = self.losses['data']['loss']
        reg_loss = self.losses['reg']['loss']
        entropy_loss = self.losses['entropy']

        # save the input images and masks as .nii.gz/.vtk files
        with torch.no_grad():
            save_fixed_im(self.save_dirs, self.im_spacing, fixed['im'])
            save_fixed_mask(self.save_dirs, self.im_spacing, fixed['mask'])
            save_moving_im(self.save_dirs, self.im_spacing, moving['im'])
            save_moving_mask(self.save_dirs, self.im_spacing, moving['mask'])

        for iter_no in range(self.start_iter_VI, self.no_iters_VI + 1):
            # needed to calculate the maximum update in terms of the L2 norm
            var_params_q_v_prev = {
                k: v.detach().clone()
                for k, v in var_params_q_v.items()
            }
            v_sample1_unsmoothed, v_sample2_unsmoothed = sample_q_v(
                var_params_q_v, no_samples=2)

            loss_terms1, output, aux = self.__calc_sample_loss_VI(
                data_loss, reg_loss, entropy_loss, fixed, moving,
                var_params_q_v, v_sample1_unsmoothed)
            loss_terms2, _, _ = self.__calc_sample_loss_VI(
                data_loss, reg_loss, entropy_loss, fixed, moving,
                var_params_q_v, v_sample2_unsmoothed)

            # data
            data_term = (loss_terms1['data'] + loss_terms2['data']) / 2.0
            data_term -= self.losses['data']['scale_prior'](
                data_loss.log_scales).sum()
            data_term -= self.losses['data']['proportion_prior'](
                data_loss.log_proportions).sum()

            # regularisation
            reg_term = (loss_terms1['reg'] + loss_terms2['reg']) / 2.0

            if reg_loss.learnable:
                if reg_loss.__class__.__name__ == 'RegLoss_LogNormal':
                    reg_term -= (loss_terms1['reg_loc_prior'] +
                                 loss_terms2['reg_loc_prior']) / 2.0
                    reg_term -= self.losses['reg']['scale_prior'](
                        reg_loss.log_scale).sum()
                elif reg_loss.__class__.__name__ == 'RegLoss_L2':
                    reg_term -= (loss_terms1['w_reg_prior'] +
                                 loss_terms2['w_reg_prior']) / 2.0

            # entropy
            entropy_term = (loss_terms1['entropy'] +
                            loss_terms2['entropy']) / 2.0
            entropy_term += entropy_loss(log_var=var_params_q_v['log_var'],
                                         u=var_params_q_v['u']).sum()

            # total loss
            loss = data_term + reg_term - entropy_term

            if reg_loss.learnable:
                self.optimizer_reg.zero_grad()

            self.optimizer_q_v.zero_grad()

            loss.backward()  # backprop

            if reg_loss.learnable:
                self.optimizer_reg.step()

            self.optimizer_q_v.step()
            """
            tensorboard and logging
            """

            with torch.no_grad():
                self.writer.set_step(iter_no)

                # model parameters
                for idx in range(data_loss.no_components):
                    self.metrics.update(f'VI/train/GMM/scale_{idx}',
                                        data_loss.scales[idx].item())
                    self.metrics.update(f'VI/train/GMM/proportion_{idx}',
                                        data_loss.proportions[idx].item())

                if reg_loss.learnable:
                    if reg_loss.__class__.__name__ == 'RegLoss_LogNormal':
                        self.metrics.update('VI/train/reg/loc',
                                            reg_loss.loc.item())
                        self.metrics.update('VI/train/reg/scale',
                                            reg_loss.scale.item())
                    elif reg_loss.__class__.__name__ == 'RegLoss_L2':
                        self.metrics.update('VI/train/reg/w_reg',
                                            reg_loss.log_w_reg.exp().item())

                if self.virtual_decimation:
                    self.metrics.update('VI/train/VD/alpha',
                                        aux['alpha'].item())

                # losses
                self.metrics.update('VI/train/data_term', data_term.item())
                self.metrics.update('VI/train/reg_term', reg_term.item())
                self.metrics.update('VI/train/entropy_term',
                                    entropy_term.item())
                self.metrics.update('VI/train/total_loss', loss.item())

                # other
                self.metrics.update('VI/train/reg/energy',
                                    aux['reg_energy'].item())
                self.metrics.update('VI/train/no_non_diffeomorphic_voxels',
                                    aux['no_non_diffeomorphic_voxels'].item())

                for key in var_params_q_v:
                    max_update, max_update_idx = max_field_update(
                        var_params_q_v_prev[key], var_params_q_v[key])
                    self.metrics.update(f'VI/train/max_updates/{key}',
                                        max_update.item())

                if iter_no % self.log_period_VI == 0 or iter_no == self.no_iters_VI:
                    # metrics
                    seg_moving_warped = self.registration_module(
                        moving['seg'], output['transformation'])
                    ASD, DSC = calc_metrics(fixed['seg'], seg_moving_warped,
                                            self.structures_dict,
                                            self.im_spacing)

                    for structure_idx, structure in enumerate(
                            self.structures_dict):
                        ASD_val, DSC_val = ASD[0][structure_idx], DSC[0][
                            structure_idx]
                        self.metrics.update(f'VI/train/ASD/{structure}',
                                            ASD_val)
                        self.metrics.update(f'VI/train/DSC/{structure}',
                                            DSC_val)

                    # visualisation in tensorboard
                    var_params_q_v_smoothed = self.__get_var_params_smoothed(
                        var_params_q_v)

                    log_hist_res(self.writer,
                                 aux['residuals'],
                                 data_loss,
                                 model='VI')
                    log_images(self.writer, fixed['im'], moving['im'],
                               output['im_moving_warped'])
                    log_fields(self.writer, var_params_q_v_smoothed,
                               output['displacement'], output['log_det_J'])
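
The `max_field_update` helper called in the logging block is not shown in this snippet. A minimal sketch consistent with the comment at the top of the loop ("maximum update in terms of the L2 norm"), assuming the variational parameters are `(N, 3, D, H, W)` tensors, might look like this (hypothetical, not the repository's implementation):

import torch

def max_field_update(field_prev, field_curr):
    """Largest voxel-wise L2-norm change between two vector fields.

    Sketch only: returns the maximum update magnitude and its flat voxel index.
    """
    diff_norm = (field_curr - field_prev).pow(2).sum(dim=1).sqrt()  # (N, D, H, W)
    max_update, max_update_idx = diff_norm.view(-1).max(dim=0)
    return max_update, max_update_idx
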
Code example #17
File: main.py Project: TaoStarlit/Minus-Plus-Network
    def evaluate(self, loader):
        print('Evaluating at {} epochs...'.format(self.epoch))
        torch.set_grad_enabled(False)

        # remove previous viz results
        makedirs(self.args.vis, remove=True)

        self.netwrapper.eval()

        # initialize meters
        loss_meter = AverageMeter()
        sdr_mix_meter = AverageMeter()
        sdr_meter = AverageMeter()
        sir_meter = AverageMeter()
        sar_meter = AverageMeter()

        # initialize HTML header
        visualizer = HTMLVisualizer(os.path.join(self.args.vis, 'index.html'))
        header = ['Filename', 'Input Mixed Audio']
        for n in range(1, self.args.num_mix + 1):
            header += [
                'Video {:d}'.format(n), 'Predicted Audio {:d}'.format(n),
                'GroundTruth Audio {}'.format(n),
                'Predicted Mask {}'.format(n), 'GroundTruth Mask {}'.format(n)
            ]
        header += ['Loss weighting']
        visualizer.add_header(header)
        vis_rows = []
        eval_num = 0
        valid_num = 0

        #for i, batch_data in enumerate(self.loader['eval']):
        for i, batch_data in enumerate(loader):
            # forward pass
            eval_num += batch_data['mag_mix'].shape[0]
            with torch.no_grad():
                err, outputs = self.netwrapper.forward(batch_data, self.args)
                err = err.mean()

            if self.mode == 'train':
                self.writer.add_scalar('data/val_loss', err,
                                       self.args.epoch_iters * self.epoch + i)

            loss_meter.update(err.item())
            print('[Eval] iter {}, loss: {:.4f}'.format(i, err.item()))

            # calculate metrics
            sdr_mix, sdr, sir, sar, cur_valid_num = calc_metrics(
                batch_data, outputs, self.args)
            print("sdr_mix, sdr, sir, sar: ", sdr_mix, sdr, sir, sar)
            sdr_mix_meter.update(sdr_mix)
            sdr_meter.update(sdr)
            sir_meter.update(sir)
            sar_meter.update(sar)
            valid_num += cur_valid_num
            '''
            # output visualization
            if len(vis_rows) < self.args.num_vis:
                output_visuals(vis_rows, batch_data, outputs, self.args)
            '''
        metric_output = '[Eval Summary] Epoch: {}, Loss: {:.4f}, ' \
            'SDR_mixture: {:.4f}, SDR: {:.4f}, SIR: {:.4f}, SAR: {:.4f}'.format(
                self.epoch, loss_meter.average(),
                sdr_mix_meter.sum_value()/eval_num,
                sdr_meter.sum_value()/eval_num,
                sir_meter.sum_value()/eval_num,
                sar_meter.sum_value()/eval_num
        )
        if valid_num / eval_num < 0.8:
            metric_output += ' ---- Invalid ---- '

        print(metric_output)
        learning_rate = ' lr_sound: {}, lr_frame: {}'.format(
            self.args.lr_sound, self.args.lr_frame)
        with open(self.args.log, 'a') as F:
            F.write(metric_output + learning_rate + '\n')

        self.history['val']['epoch'].append(self.epoch)
        self.history['val']['err'].append(loss_meter.average())
        self.history['val']['sdr'].append(sdr_meter.sum_value() / eval_num)
        self.history['val']['sir'].append(sir_meter.sum_value() / eval_num)
        self.history['val']['sar'].append(sar_meter.sum_value() / eval_num)
        '''
        print('Plotting html for visualization...')
        visualizer.add_rows(vis_rows)
        visualizer.write_html()
        '''
        # Plot figure
        if self.epoch > 0:
            print('Plotting figures...')
            plot_loss_metrics(self.args.ckpt, self.history)
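
The snippet relies on an `AverageMeter` exposing `update`, `average` and `sum_value`; the exact class is project-specific, but a minimal sketch compatible with the calls above could be:

class AverageMeter:
    """Running sum/count tracker; sketch matching the update()/average()/sum_value() calls above."""

    def __init__(self):
        self.sum = 0.0
        self.count = 0

    def update(self, value, n=1):
        self.sum += value * n
        self.count += n

    def average(self):
        return self.sum / self.count if self.count else 0.0

    def sum_value(self):
        return self.sum
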
Code example #18
    def evaluation_iteration(self, val_sample, total_losses, pbar_val, phase):
        """
        Runs a validation iteration, updates the progress bar and returns the total and current epoch val losses.
        :param val_sample: A sample from the data provider
        :param total_losses: The current total losses dictionary to be updated.
        :param pbar_val: The progress bar of the val stage.
        :return: The updated val_losses, total_losses
        """
        images, metadata = val_sample

        def _eval_iter(frames):
            H, W = frames[0].shape[-2:]
            if H * W > 5e5 or (
                    self.args.model == 'rrin' and H * W > 3e5
            ):  # or (self.args.model == 'dain' and H * W > 1e5):
                print(H, W)
                if H > W:
                    images_0 = [im[:, :, :H // 2, :] for im in frames]
                    images_1 = [im[:, :, H // 2:, :] for im in frames]
                else:
                    images_0 = [im[:, :, :, :W // 2] for im in frames]
                    images_1 = [im[:, :, :, W // 2:] for im in frames]
                losses_0, outputs_0, metrics_0 = _eval_iter(images_0)
                losses_1, outputs_1, metrics_1 = _eval_iter(images_1)
                outputs = [
                    torch.cat([outputs_0[i], outputs_1[i]],
                              dim=2 if H > W else 3)
                    for i in range(len(outputs_0))
                ]
                losses = losses_0
                for k, v in losses_1.items():
                    losses[k] = (v + losses[k]) / 2
                    if k == 'loss':
                        losses[k] = losses[k].detach()
                #metrics = metrics_0
                #for k, v in metrics_1.items():
                #    metrics[k].update(val=v.avg, n=v.count)
                del losses_0, losses_1, outputs_0, outputs_1  #, metrics_0, metrics_1
            else:
                losses, outputs, metrics = self.model.run_validation_iter(
                    data_batch=frames)
                losses['loss'] = losses['loss'].detach()
            return losses, outputs, None  #metrics

        losses, outputs, _ = _eval_iter(images)

        output = outputs[0].squeeze(0).detach()
        target = images[3][0].detach().cuda()
        if self.args.model == 'voxelflow':
            target = (target * self.model.std + self.model.mean) / 255.0
        elif self.args.model == 'superslomo':
            target = self.model.revNormalize(target)
        metrics = {'psnr': utils.AverageMeter(), 'ssim': utils.AverageMeter()}
        psnr, ssim = utils.calc_metrics(output, target)
        metrics['psnr'].update(psnr)
        metrics['ssim'].update(ssim)

        val_output_update = self.build_loss_summary_string(losses, metrics)

        pbar_val.update(1)
        pbar_val.set_description("val_phase {} -> {}".format(
            self.epoch, val_output_update))

        return losses, outputs, metrics
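
`utils.calc_metrics(output, target)` is expected to return a `(psnr, ssim)` pair. A minimal sketch using scikit-image (an assumption, not this repository's implementation) for CHW tensors with values in [0, 1]:

from skimage.metrics import peak_signal_noise_ratio, structural_similarity

def calc_metrics(output, target):
    """PSNR/SSIM for a pair of CHW image tensors in [0, 1] (sketch only)."""
    out = output.clamp(0, 1).cpu().numpy().transpose(1, 2, 0)
    tgt = target.clamp(0, 1).cpu().numpy().transpose(1, 2, 0)
    psnr = peak_signal_noise_ratio(tgt, out, data_range=1.0)
    ssim = structural_similarity(tgt, out, channel_axis=-1, data_range=1.0)
    return psnr, ssim
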
Code example #19
File: train.py Project: lmartak/amt-wavenet
def main():
    args = get_arguments()

    with open(args.model_params, 'r') as f:
        model_params = json.load(f)

    with open(args.training_params, 'r') as f:
        train_params = json.load(f)

    try:
        directories = validate_directories(args)
    except ValueError as e:
        print('Some arguments are wrong:')
        print(str(e))
        return

    logdir = directories['logdir']
    restore_from = directories['restore_from']

    # Even if we restored the model, we will treat it as new training
    # if the trained model is written into an arbitrary location.
    is_overwritten_training = logdir != restore_from

    receptive_field = WaveNetModel.calculate_receptive_field(
        model_params['filter_width'],
        model_params['dilations'],
        model_params['initial_filter_width'])
    # Save arguments and model params into file
    save_run_config(args, receptive_field, STARTED_DATESTRING, logdir)

    # Create coordinator.
    coord = tf.train.Coordinator()

    # Create data loader.
    with tf.name_scope('create_inputs'):
        reader = WavMidReader(data_dir=args.data_dir_train,
                              coord=coord,
                              audio_sample_rate=model_params['audio_sr'],
                              receptive_field=receptive_field,
                              velocity=args.velocity,
                              sample_size=args.sample_size,
                              queues_size=(10, 10*args.batch_size))
        data_batch = reader.dequeue(args.batch_size)

    # Create model.
    net = WaveNetModel(
        batch_size=args.batch_size,
        dilations=model_params['dilations'],
        filter_width=model_params['filter_width'],
        residual_channels=model_params['residual_channels'],
        dilation_channels=model_params['dilation_channels'],
        skip_channels=model_params['skip_channels'],
        output_channels=model_params['output_channels'],
        use_biases=model_params['use_biases'],
        initial_filter_width=model_params['initial_filter_width'])

    input_data = tf.placeholder(dtype=tf.float32,
                                shape=(args.batch_size, None, 1))
    input_labels = tf.placeholder(dtype=tf.float32,
                                  shape=(args.batch_size, None,
                                         model_params['output_channels']))

    loss, probs = net.loss(input_data=input_data,
                           input_labels=input_labels,
                           pos_weight=train_params['pos_weight'],
                           l2_reg_str=train_params['l2_reg_str'])
    optimizer = optimizer_factory[args.optimizer](
                    learning_rate=train_params['learning_rate'],
                    momentum=train_params['momentum'])
    trainable = tf.trainable_variables()
    optim = optimizer.minimize(loss, var_list=trainable)

    # Set up logging for TensorBoard.
    writer = tf.summary.FileWriter(logdir)
    writer.add_graph(tf.get_default_graph())
    run_metadata = tf.RunMetadata()
    summaries = tf.summary.merge_all()
    histograms = tf.summary.merge_all(key=HKEY)

    # Separate summary ops for validation, since they are
    # calculated only once per evaluation cycle.
    with tf.name_scope('validation_summaries'):

        metric_summaries = metrics_empty_dict()
        metric_value = tf.placeholder(tf.float32)
        for name in metric_summaries.keys():
            metric_summaries[name] = tf.summary.scalar(name, metric_value)

        images_buffer = tf.placeholder(tf.string)
        images_batch = tf.stack(
            [tf.image.decode_png(images_buffer[0], channels=4),
             tf.image.decode_png(images_buffer[1], channels=4),
             tf.image.decode_png(images_buffer[2], channels=4)])
        images_summary = tf.summary.image('estim', images_batch)

        audio_data = tf.placeholder(tf.float32)
        audio_summary = tf.summary.audio('input', audio_data,
                                         model_params['audio_sr'])

    # Set up session
    sess = tf.Session(config=tf.ConfigProto(log_device_placement=False))
    init = tf.global_variables_initializer()
    sess.run(init)

    # Saver for storing checkpoints of the model.
    saver = tf.train.Saver(var_list=tf.trainable_variables(),
                           max_to_keep=args.max_checkpoints)

    # Trainer for keeping best validation-performing model
    # and optional early stopping.
    trainer = Trainer(sess, logdir, train_params['early_stop_limit'], 0.999)

    try:
        saved_global_step = load(saver, sess, restore_from)
        if is_overwritten_training or saved_global_step is None:
            # The first training step will be saved_global_step + 1,
            # therefore we put -1 here for new or overwritten trainings.
            saved_global_step = -1

    except Exception:
        print('Something went wrong while restoring checkpoint. '
              'Training will be terminated to avoid accidentally '
              'overwriting the previous model.')
        raise

    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    reader.start_threads(sess)


    step = None
    last_saved_step = saved_global_step
    try:
        for step in range(saved_global_step + 1, train_params['num_steps']):
            waveform, pianoroll = sess.run([data_batch[0], data_batch[1]])
            feed_dict = {input_data : waveform, input_labels : pianoroll}
            # Reload switches from file on each step
            with open(RUNTIME_SWITCHES, 'r') as f:
                switch = json.load(f)

            start_time = time.time()
            if switch['store_meta'] and step % switch['store_every'] == 0:
                # Slow run that stores extra information for debugging.
                print('Storing metadata')
                run_options = tf.RunOptions(
                    trace_level=tf.RunOptions.FULL_TRACE)
                summary, loss_value, _ = sess.run(
                    [summaries, loss, optim],
                    feed_dict=feed_dict,
                    options=run_options,
                    run_metadata=run_metadata)
                writer.add_summary(summary, step)
                writer.add_run_metadata(run_metadata,
                                        'step_{:04d}'.format(step))
                tl = timeline.Timeline(run_metadata.step_stats)
                timeline_path = os.path.join(logdir, 'timeline.trace')
                with open(timeline_path, 'w') as f:
                    f.write(tl.generate_chrome_trace_format(show_memory=True))
            else:
                summary, loss_value, _ = sess.run([summaries, loss, optim],
                                                  feed_dict=feed_dict)
                writer.add_summary(summary, step)

            duration = time.time() - start_time
            print('step {:d} - loss = {:.3f}, ({:.3f} sec/step)'
                  .format(step, loss_value, duration))

            if step % switch['checkpoint_every'] == 0:
                save(saver, sess, logdir, step)
                last_saved_step = step

            # Evaluate model performance on validation data
            if step % switch['evaluate_every'] == 0:
                if switch['histograms']:
                    hist_summary = sess.run(histograms)
                    writer.add_summary(hist_summary, step)
                print('evaluating...')
                stats = 0, 0, 0, 0, 0, 0
                est = np.empty([0, model_params['output_channels']])
                ref = np.empty([0, model_params['output_channels']])

                b_data, b_labels, b_cntr = (
                    np.empty((0, args.sample_size + receptive_field - 1, 1)),
                    np.empty((0, model_params['output_channels'])),
                    args.batch_size)

                # if (batch_size * sample_size > valid_data) single_pass() again
                while est.size == 0: # and ref.size == 0 and sum(stats) == 0 ...

                    for data, labels in reader.single_pass(
                        sess, args.data_dir_valid):

                        # cumulate batch
                        if b_cntr > 1:
                            b_data, b_labels, decr = cumulateBatch(
                                data, labels, b_data, b_labels)
                            b_cntr -= decr
                            continue
                        elif args.batch_size > 1:
                            b_data, b_labels, decr = cumulateBatch(
                                data, labels, b_data, b_labels)
                            if not decr:
                                continue
                            data = b_data
                            labels = b_labels
                            # reset batch cumulation variables
                            b_data, b_labels, b_cntr = (
                                np.empty((
                                    0, args.sample_size + receptive_field - 1, 1
                                )),
                                np.empty((0, model_params['output_channels'])),
                                args.batch_size)

                        predictions = sess.run(
                            probs, feed_dict={input_data : data})
                        # Aggregate sums for metrics calculation
                        stats_chunk = calc_stats(
                            predictions, labels, args.threshold)
                        stats = tuple([sum(x) for x in zip(stats, stats_chunk)])
                        est = np.append(est, predictions, axis=0)
                        ref = np.append(ref, labels, axis=0)

                metrics = calc_metrics(None, None, None, stats=stats)
                write_metrics(metrics, metric_summaries, metric_value,
                              writer, step, sess)
                trainer.check(metrics['f1_measure'])

                # Render evaluation results
                if switch['log_image'] or switch['log_sound']:
                    sub_fac = int(model_params['audio_sr']/switch['midi_sr'])
                    est = roll_subsample(est.T, sub_fac)
                    ref = roll_subsample(ref.T, sub_fac)
                if switch['log_image']:
                    write_images(est, ref, switch['midi_sr'], args.threshold,
                                 (8, 6), images_summary, images_buffer,
                                 writer, step, sess)
                if switch['log_sound']:
                    write_audio(est, ref, switch['midi_sr'],
                                model_params['audio_sr'], 0.007,
                                audio_summary, audio_data,
                                writer, step, sess)

    except KeyboardInterrupt:
        # Introduce a line break after ^C is displayed so save message
        # is on its own line.
        print()
    finally:
        if step is not None and step > last_saved_step:
            save(saver, sess, logdir, step)
        coord.request_stop()
        coord.join(threads)
        flush_n_close(writer, sess)
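
`calc_stats` aggregates count sums that `calc_metrics(None, None, None, stats=stats)` turns into scores. The exact layout of the six-tuple is not visible here, but precision/recall/F1 from aggregated true/false positive/negative counts would follow the usual pattern (illustrative sketch, names are assumptions):

def metrics_from_counts(tp, fp, fn):
    """Frame-level precision/recall/F1 from aggregated counts (sketch only)."""
    precision = tp / (tp + fp) if (tp + fp) else 0.0
    recall = tp / (tp + fn) if (tp + fn) else 0.0
    f1_measure = (2 * precision * recall / (precision + recall)
                  if (precision + recall) else 0.0)
    return {'precision': precision, 'recall': recall, 'f1_measure': f1_measure}
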
Code example #20
File: main.py Project: YuanshuaiHuang/dacl
def train(train_loader, model, criterion, optimizer, epoch, cfg):
    losses = {
        'softmax': AverageMeter(),
        'center': AverageMeter(),
        'total': AverageMeter()
    }
    accs = AverageMeter()
    y_pred, y_true, y_scores = [], [], []

    # switch to train mode
    model.train()

    with tqdm(total=int(len(train_loader.dataset) /
                        cfg['batch_size'])) as pbar:
        for i, (images, target) in enumerate(train_loader):

            images = images.to(device)
            target = target.to(device)

            # compute output
            feat, output, A = model(images)
            l_softmax = criterion['softmax'](output, target)
            l_center = criterion['center'](feat, A, target)
            l_total = l_softmax + cfg['lamb'] * l_center

            # measure accuracy and record loss
            acc, pred = accuracy(output, target)
            losses['softmax'].update(l_softmax.item(), images.size(0))
            losses['center'].update(l_center.item(), images.size(0))
            losses['total'].update(l_total.item(), images.size(0))
            accs.update(acc.item(), images.size(0))

            # collect for metrics
            y_pred.append(pred)
            y_true.append(target)
            y_scores.append(output.data)

            # compute grads + opt step
            optimizer['softmax'].zero_grad()
            optimizer['center'].zero_grad()
            l_total.backward()
            optimizer['softmax'].step()
            optimizer['center'].step()

            # progressbar
            pbar.set_description(f'TRAINING [{epoch:03d}/{cfg["epochs"]}]')
            pbar.set_postfix({
                'L': losses["total"].avg,
                'Ls': losses["softmax"].avg,
                'Lsc': losses["center"].avg,
                'acc': accs.avg
            })
            pbar.update(1)

    metrics = calc_metrics(y_pred, y_true, y_scores)
    progress = (f'[-] TRAIN [{epoch:03d}/{cfg["epochs"]}] | '
                f'L={losses["total"].avg:.4f} | '
                f'Ls={losses["softmax"].avg:.4f} | '
                f'Lsc={losses["center"].avg:.4f} | '
                f'acc={accs.avg:.4f} | '
                f'rec={metrics["rec"]:.4f} | '
                f'f1={metrics["f1"]:.4f} | '
                f'aucpr={metrics["aucpr"]:.4f} | '
                f'aucroc={metrics["aucroc"]:.4f}')
    print(progress)
    write_log(losses, accs.avg, metrics, epoch, tag='train')
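
`calc_metrics(y_pred, y_true, y_scores)` must produce the `rec`, `f1`, `aucpr` and `aucroc` keys read above. A plausible scikit-learn-based sketch (an assumption about this repo, including the binary-task and positive-class-in-column-1 details):

import torch
from sklearn.metrics import (average_precision_score, f1_score,
                             recall_score, roc_auc_score)

def calc_metrics(y_pred, y_true, y_scores):
    y_pred = torch.cat(y_pred).cpu().numpy()
    y_true = torch.cat(y_true).cpu().numpy()
    # assume binary classification with the positive class in column 1
    y_prob = torch.cat(y_scores).softmax(dim=1)[:, 1].cpu().numpy()
    return {
        'rec': recall_score(y_true, y_pred),
        'f1': f1_score(y_true, y_pred),
        'aucpr': average_precision_score(y_true, y_prob),
        'aucroc': roc_auc_score(y_true, y_prob),
    }
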
Code example #21
    def _run_MCMC(self, fixed, moving, var_params_q_v):
        data_loss, reg_loss = self.losses['data']['loss'], self.losses['reg'][
            'loss']

        fixed = {
            k: v.expand(self.no_chains, *v.shape[1:])
            for k, v in fixed.items()
        }
        moving = {
            k: v.expand(self.no_chains, *v.shape[1:])
            for k, v in moving.items()
        }
        self.__SGLD_init(var_params_q_v)

        no_samples = self.no_chains * self.no_samples_MCMC // self.log_period_MCMC
        samples = torch.zeros([no_samples, 3,
                               *self.dims])  # samples used in the evaluation
        test_sample_idx = 0

        self.logger.info(f'\nNO. CHAINS: {self.no_chains}, BURNING IN...')

        for sample_no in range(
                1, self.no_iters_burn_in + self.no_samples_MCMC + 1):
            if sample_no < self.no_iters_burn_in and sample_no % self.log_period_MCMC == 0:
                self.logger.info(
                    f'burn-in sample no. {sample_no}/{self.no_iters_burn_in}')
            """
            stochastic gradient Langevin dynamics
            """

            loss_terms, output, aux = self._SGLD_transition(
                fixed, moving, data_loss, reg_loss)

            if sample_no == self.no_iters_burn_in:
                self.logger.info('ENDED BURNING IN')
            """
            outputs
            """

            with torch.no_grad():
                self.writer.set_step(sample_no)

                if self.no_samples_MCMC < 1e4 or (
                        sample_no - 1
                ) % 100 == 0:  # NOTE (DG): the logs are too large otherwise
                    # model parameters
                    for idx in range(data_loss.no_components):
                        self.metrics.update(f'MCMC/GMM/scale_{idx}',
                                            data_loss.scales[idx].item())
                        self.metrics.update(f'MCMC/GMM/proportion_{idx}',
                                            data_loss.proportions[idx].item())

                    if reg_loss.learnable:
                        if reg_loss.__class__.__name__ == 'RegLoss_LogNormal':
                            self.metrics.update('MCMC/reg/loc',
                                                reg_loss.loc.item())
                            self.metrics.update('MCMC/reg/scale',
                                                reg_loss.scale.item())
                        elif reg_loss.__class__.__name__ == 'RegLoss_L2':
                            self.metrics.update(
                                'MCMC/reg/w_reg',
                                reg_loss.log_w_reg.exp().item())

                    # losses
                    total_loss = sum(loss_terms['data']) + sum(
                        loss_terms['reg'])
                    self.metrics.update('MCMC/avg_loss',
                                        total_loss.item() / self.no_chains)

                    for idx in range(self.no_chains):
                        self.metrics.update(f'MCMC/chain_{idx}/data_term',
                                            loss_terms['data'][idx].item())
                        self.metrics.update(f'MCMC/chain_{idx}/reg_term',
                                            loss_terms['reg'][idx].item())
                        self.metrics.update(f'MCMC/chain_{idx}/VD/alpha',
                                            aux['alpha'][idx].item())
                        self.metrics.update(f'MCMC/chain_{idx}/reg/energy',
                                            aux['reg_energy'][idx].item())

                if sample_no > self.no_iters_burn_in:
                    if sample_no % self.log_period_MCMC == 0 or sample_no == self.no_samples_MCMC:
                        self.writer.set_step(sample_no - self.no_iters_burn_in)

                        displacement, transformation = output[
                            'displacement'], output['transformation']
                        im_moving_warped = output['im_moving_warped']
                        seg_moving_warped = self.registration_module(
                            moving['seg'], transformation)

                        ASD, DSC = calc_metrics(fixed['seg'],
                                                seg_moving_warped,
                                                self.structures_dict,
                                                self.im_spacing,
                                                no_samples=self.no_chains)
                        no_non_diffeomorphic_voxels, log_det_J = calc_no_non_diffeomorphic_voxels(
                            transformation, self.diff_op)

                        v_curr_state_smoothed = output['curr_state']
                        v_norm, displacement_norm = calc_norm(
                            v_curr_state_smoothed), calc_norm(displacement)

                        for idx in range(self.no_chains):
                            samples[test_sample_idx] = displacement[
                                idx].detach().cpu()
                            test_sample_idx += 1

                            for structure_idx, structure in enumerate(
                                    self.structures_dict):
                                ASD_val, DSC_val = ASD[idx][
                                    structure_idx], DSC[idx][structure_idx]
                                self.metrics.update(
                                    f'MCMC/chain_{idx}/ASD/{structure}',
                                    ASD_val)
                                self.metrics.update(
                                    f'MCMC/chain_{idx}/DSC/{structure}',
                                    DSC_val)

                            no_non_diffeomorphic_voxels_chain = no_non_diffeomorphic_voxels[
                                idx]
                            self.metrics.update(
                                f'MCMC/chain_{idx}/no_non_diffeomorphic_voxels',
                                no_non_diffeomorphic_voxels_chain)

                            if no_non_diffeomorphic_voxels_chain > 0.001 * self.no_voxels:
                                self.logger.info(
                                    f'chain {idx}, sample {sample_no}: '
                                    f'detected {no_non_diffeomorphic_voxels_chain} voxels where '
                                    f'the sampled transformation is not diffeomorphic; exiting..'
                                )
                                exit()

                            residuals = aux['residuals'][idx]

                            # tensorboard
                            log_sample(self.writer, idx, im_moving_warped,
                                       v_norm, displacement_norm, log_det_J)
                            log_hist_res(self.writer,
                                         residuals,
                                         data_loss,
                                         model='MCMC',
                                         chain_no=idx)

                            # .nii.gz/.vtk
                            save_sample(self.save_dirs,
                                        self.im_spacing,
                                        sample_no,
                                        im_moving_warped,
                                        displacement,
                                        log_det_J,
                                        'MCMC',
                                        chain_no=idx)

        self.logger.info(
            '\ncalculating sample standard deviation of the displacement..')

        mean, std_dev = calc_posterior_statistics(samples)
        log_displacement_mean_and_std_dev(self.writer, mean, std_dev, 'MCMC')
        save_displacement_mean_and_std_dev(self.logger, self.save_dirs,
                                           self.im_spacing, mean, std_dev,
                                           moving['mask'], 'MCMC')
        """
        speed
        """

        no_samples_MCMC_speed_test = 100
        start = datetime.now()

        for sample_no in range(1, no_samples_MCMC_speed_test + 1):
            _, output, _ = self._SGLD_transition(fixed, moving, data_loss,
                                                 reg_loss)
            seg_moving_warped = self.registration_module(
                moving['seg'], output['transformation'])

        stop = datetime.now()
        MCMC_sampling_speed = self.no_chains * no_samples_MCMC_speed_test / (
            stop - start).total_seconds()
        self.logger.info(
            f'\nMCMC sampling speed: {MCMC_sampling_speed:.2f} samples/sec')
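
`calc_norm` above reduces a velocity/displacement field to a scalar magnitude image for logging. A minimal sketch, assuming `(N, 3, D, H, W)` fields (hypothetical, not the repository's code):

import torch

def calc_norm(field):
    """Voxel-wise L2 norm of an (N, 3, D, H, W) vector field -> (N, 1, D, H, W)."""
    return field.pow(2).sum(dim=1, keepdim=True).sqrt()
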
Code example #22
    clf = RandomForestClassifier(n_estimators=200,
                                 max_depth=100,
                                 random_state=SEED)

    pipeline = Pipeline([('feature_extractor', combined), ('classifier', clf)])

    bench = EstimatorPerformance(pipeline)

    # training
    bench.fit(x_train, y_train)

    # predict
    predict = bench.predict(x_test)

    # calc metrics
    utils.calc_metrics(y_test, predict)

    # test for competition
    utils.test(pipeline)

    # importance
    # Sort the features in descending order of importance
    sorted_features_list = [
        f for (imp, f) in sorted(zip(clf.feature_importances_, features_list),
                                 reverse=True)
    ]
    feature_importance = pd.DataFrame({
        "feature": sorted_features_list,
        # the original snippet was truncated here; sorted importances complete it
        "importance": sorted(clf.feature_importances_, reverse=True)
    })
Code example #23
def find_best_algo(
    train_data: pd.DataFrame,
    target: Union[np.ndarray, pd.Series],
    alg_class,
    cross_val,
    params: List[Mapping],
    metric: str,
    random_state: int,
    early_stopping: int,
) -> Tuple[float, dict, list]:
    """"Find best algorithm

    Args:
        train_data: training data
        target: target values
        alg_class: algorithm class with fit and early stopping support
        cross_val: sklearn cross validation class object
        params: list of parameters dicts
        metric: optimization metric name
        random_state: random seed for algorithm
        early_stopping: number of early stopping rounds

    Returns:
        best_score (float): best metric score
        best_params (dict): best algorithm parameters
        best_alg_list (list): list of trained on validation splits algorithms
    """
    best_params = None
    best_score = None
    best_alg_list = None

    for param in tqdm(params):
        real_values = []
        predictions = []
        alg_list = []
        param["random_state"] = random_state

        for train_index, test_index in cross_val.split(train_data):
            alg = alg_class(**param)

            X_train, X_test = train_data.iloc[train_index], train_data.iloc[
                test_index]
            y_train, y_test = (
                target.iloc[train_index].values,
                target.iloc[test_index].values,
            )

            if param.get("boosting_type", None) != "dart" and param.get(
                    "boosting_type", None):
                alg.fit(
                    X_train,
                    y_train,
                    early_stopping_rounds=early_stopping,
                    eval_set=[(X_test, y_test)],
                    verbose=False,
                )
            else:
                alg.fit(X_train, y_train)

            alg_list.append(alg)
            predictions.append(alg.predict(X_test))
            real_values.append(y_test)

        metrics_df = calc_metrics(
            real_values, predictions,
            [mean_absolute_error, mean_squared_error, rmse])

        if best_score is None or metrics_df[metric].mean() < best_score:
            print("new best score {}+-{:.2f}".format(metrics_df[metric].mean(),
                                                     metrics_df[metric].std()))
            best_score = metrics_df[metric].mean()
            best_params = param
            best_alg_list = alg_list

    return best_score, best_params, best_alg_list
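
`calc_metrics(real_values, predictions, [...])` is used as if it returns a DataFrame with one row per CV fold and one column per metric. A sketch consistent with that usage (the `rmse` helper and the function-name column naming are assumptions):

import numpy as np
import pandas as pd
from sklearn.metrics import mean_squared_error

def rmse(y_true, y_pred):
    return np.sqrt(mean_squared_error(y_true, y_pred))

def calc_metrics(real_values, predictions, metric_fns):
    """One row per validation split, one column per metric function."""
    rows = [{fn.__name__: fn(y_true, y_pred) for fn in metric_fns}
            for y_true, y_pred in zip(real_values, predictions)]
    return pd.DataFrame(rows)
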
Code example #24
File: test.py Project: lmartak/amt-wavenet
def main():
    args = get_arguments()

    if (args.logdir is not None and os.path.isdir(args.logdir)):
        logdir = args.logdir
    else:
        print('Argument --logdir=\'{}\' is not (but should be) '
              'a path to valid directory.'.format(args.logdir))
        return

    with open(args.model_params, 'r') as f:
        model_params = json.load(f)
    with open(RUNTIME_SWITCHES, 'r') as f:
        switch = json.load(f)

    receptive_field = WaveNetModel.calculate_receptive_field(
        model_params['filter_width'],
        model_params['dilations'],
        model_params['initial_filter_width'])

    # Create coordinator.
    coord = tf.train.Coordinator()

    # Create data loader.
    with tf.name_scope('create_inputs'):
        reader = WavMidReader(data_dir=args.data_dir_test,
                              coord=coord,
                              audio_sample_rate=model_params['audio_sr'],
                              receptive_field=receptive_field,
                              velocity=args.velocity,
                              sample_size=args.sample_size,
                              queues_size=(100, 100*BATCH_SIZE))

    # Create model.
    net = WaveNetModel(
        batch_size=BATCH_SIZE,
        dilations=model_params['dilations'],
        filter_width=model_params['filter_width'],
        residual_channels=model_params['residual_channels'],
        dilation_channels=model_params['dilation_channels'],
        skip_channels=model_params['skip_channels'],
        output_channels=model_params['output_channels'],
        use_biases=model_params['use_biases'],
        initial_filter_width=model_params['initial_filter_width'])

    input_data = tf.placeholder(dtype=tf.float32,
                                shape=(BATCH_SIZE, None, 1))
    input_labels = tf.placeholder(dtype=tf.float32,
                                  shape=(BATCH_SIZE, None,
                                         model_params['output_channels']))

    _, probs = net.loss(input_data=input_data,
                        input_labels=input_labels,
                        pos_weight=1.0,
                        l2_reg_str=None)

    # Set up session
    sess = tf.Session(config=tf.ConfigProto(log_device_placement=False))
    init = tf.global_variables_initializer()
    sess.run(init)

    # Saver for storing checkpoints of the model.
    saver = tf.train.Saver(var_list=tf.trainable_variables())

    try:
        load(saver, sess, logdir)

    except Exception:
        print('Something went wrong while restoring checkpoint.')
        raise

    try:
        stats = 0, 0, 0, 0, 0, 0
        est = np.empty([model_params['output_channels'], 0])
        ref = np.empty([model_params['output_channels'], 0])
        sub_fac = int(model_params['audio_sr']/switch['midi_sr'])
        for data, labels in reader.single_pass(sess,
                                               args.data_dir_test):

            predictions = sess.run(probs, feed_dict={input_data : data})
            # Aggregate sums for metrics calculation
            stats_chunk = calc_stats(predictions, labels, args.threshold)
            stats = tuple([sum(x) for x in zip(stats, stats_chunk)])
            est = np.append(est, roll_subsample(predictions.T, sub_fac), axis=1)
            ref = np.append(ref, roll_subsample(labels.T, sub_fac, b=True),
                            axis=1)

        metrics = calc_metrics(None, None, None, stats=stats)
        write_metrics(metrics, None, None, None, None, None, logdir=logdir)

        # Save subsampled data for further arbitrary evaluation
        np.save(logdir+'/est.npy', est)
        np.save(logdir+'/ref.npy', ref)

        # Render evaluation results
        figsize=(int(args.plot_scale*est.shape[1]/switch['midi_sr']),
                 int(args.plot_scale*model_params['output_channels']/12))
        if args.media:
            write_images(est, ref, switch['midi_sr'],
                         args.threshold, figsize,
                         None, None, None, 0, None,
                         noterange=(21, 109),
                         legend=args.plot_legend,
                         logdir=logdir)
            write_audio(est, ref, switch['midi_sr'],
                        model_params['audio_sr'], 0.007,
                        None, None, None, 0, None, logdir=logdir)

    except KeyboardInterrupt:
        # Introduce a line break after ^C is displayed so save message
        # is on its own line.
        print()
    finally:
        coord.request_stop()
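
`roll_subsample(roll, factor, b=...)` downsamples a piano roll along time by an integer factor; its implementation is not shown. A sketch matching the call sites (the mean-pooling choice and the meaning of `b` are assumptions):

import numpy as np

def roll_subsample(roll, factor, b=False):
    """Pool a (pitches, frames) roll along time by `factor` (sketch only).

    Mean-pools probabilities; with b=True (binary labels), a pooled frame
    is active if any source frame was.
    """
    pitches, frames = roll.shape
    frames -= frames % factor  # drop the ragged tail
    pooled = roll[:, :frames].reshape(pitches, -1, factor).mean(axis=2)
    return (pooled > 0).astype(roll.dtype) if b else pooled
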
Code example #25
def trainGAN():
    req_data = request.get_json()
    size_viz = req_data['visualization_size']
    nb_batches = req_data['nb_batches']
    train_more_Discriminator = req_data['train_more_Discriminator']
    train_more_Generator = req_data['train_more_Generator']
    unrolling_step = req_data['unrolling_step']
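    # NOTE: D_Loss, G_Loss, KL_div, JS_div and unrolling_real_batch are used
    # below but never defined in this snippet; they are presumably
    # module-level lists shared across requests.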
    necessary_elements['flip'] = necessary_elements['flip'] or False
    necessary_elements['smooth'] = necessary_elements['smooth'] or False
    D_Loss_real_min = []
    D_Loss_real_mean = []
    D_Loss_real_max = []
    D_Loss_fake_min = []
    D_Loss_fake_mean = []
    D_Loss_fake_max = []
    fake_generated = []
    track_convergence_DS = []
    Precision = []
    Recall = []
    F1_score = []
    #Inception_Score = []
    generated_bytes = None
    real_bytes = None
    necessary_elements['discriminator'].to(torch.device(necessary_elements['deviceDiscriminator']))
    necessary_elements['generator'].to(torch.device(necessary_elements['deviceGenerator']))
    necessary_elements['training'] = True
    best_KL = 30
    worst_KL = 0
    start_time = time.time()
    for i in range(nb_batches):
        if (unrolling_step != 0):
            unrollingIterator = iter(necessary_elements['dataloader'])
            for tmpIndex in range(unrolling_step):
                unrollingIterItem = next(unrollingIterator)
                if(len(unrollingIterItem) == 1):
                    unrolling_real_batch_element = unrollingIterItem[0]
                else: unrolling_real_batch_element,_ = unrollingIterItem
                if (necessary_elements['model_discriminator'] != 'DCGAN'):
                    unrolling_real_batch_element = unrolling_real_batch_element.view(unrolling_real_batch_element.shape[0], necessary_elements['channels']*necessary_elements['img_size']*necessary_elements['img_size'])
                else: unrolling_real_batch_element = unrolling_real_batch_element.view(unrolling_real_batch_element.shape[0], necessary_elements['channels'], necessary_elements['img_size'], necessary_elements['img_size'])
                unrolling_real_batch.append(unrolling_real_batch_element)

        if (necessary_elements['index_batch'] == 0) or (necessary_elements['index_batch'] == len(necessary_elements['dataloader'])-1):
            necessary_elements['index_batch'] = 0
            necessary_elements['loaderIterator'] = iter(necessary_elements['dataloader'])
            necessary_elements['epoch_number'] = necessary_elements['epoch_number'] + 1
        iterItem = next(necessary_elements['loaderIterator'])
        necessary_elements['index_batch'] = necessary_elements['index_batch'] + 1
        if(len(iterItem) == 1):
            real_batch = iterItem[0]
        else: real_batch,_ = iterItem
        if (necessary_elements['apply_occasional_flip'] and necessary_elements['index_batch'] % necessary_elements['occasional_flip'] == 0):
            necessary_elements['flip'] = True
        # train Discriminator:
        fake_batch = necessary_elements['generator'](necessary_elements['latentvector']).detach()
        real_batch, fake_batch = reshape(necessary_elements['model_discriminator'],real_batch, fake_batch,
                                        necessary_elements['batch_size'], necessary_elements['img_size'], necessary_elements['channels'])
        real_batch = real_batch.to(necessary_elements['deviceDiscriminator'])
        fake_batch = fake_batch.to(necessary_elements['deviceDiscriminator'])
        d_error, d_real, d_fake, gradient_penalty = train_discriminator(necessary_elements['optimizerD'], real_batch,
                                                    fake_batch, necessary_elements['discriminator'], 
                                                    necessary_elements['discriminator_loss_function'],
                                                    necessary_elements['deviceDiscriminator'],
                                                    necessary_elements['flip'], necessary_elements['smooth'],
                                                    necessary_elements['symmetric_labels'],
                                                    necessary_elements['apply_gp'], necessary_elements['lambda_gp'],
                                                    LVmodel_generator['model_generator'], necessary_elements['clip_d'],
                                                    necessary_elements['apply_clip_d'], necessary_elements['apply_divide_d_cost'])
        D_Loss.append(d_error.item())
        d_real_squeezed = torch.squeeze(d_real)
        d_fake_squeezed = torch.squeeze(d_fake)
        d_real_squeezed = d_real_squeezed.data.cpu()
        d_fake_squeezed = d_fake_squeezed.data.cpu()
        D_Loss_real_mean.append(torch.mean(d_real_squeezed).item())
        D_Loss_real_min.append(torch.min(d_real_squeezed).item())
        D_Loss_real_max.append(torch.max(d_real_squeezed).item())
        D_Loss_fake_mean.append(torch.mean(d_fake_squeezed).item())
        D_Loss_fake_min.append(torch.min(d_fake_squeezed).item())
        D_Loss_fake_max.append(torch.max(d_fake_squeezed).item())
        calc_metrics(d_real_squeezed, d_fake_squeezed, Precision, Recall, F1_score)
        #when we want to train D more:
        if (train_more_Discriminator > 0):
            for index in range(train_more_Discriminator):
                fake_batch = necessary_elements['generator'](necessary_elements['latentvector']).detach()
                _, fake_batch = reshape(necessary_elements['model_discriminator'],real_batch, fake_batch,
                                            necessary_elements['batch_size'], necessary_elements['img_size'], necessary_elements['channels'])
                fake_batch = fake_batch.to(necessary_elements['deviceDiscriminator'])
                d_error, d_real, d_fake, gradient_penalty = train_discriminator(necessary_elements['optimizerD'], real_batch,
                                                            fake_batch, necessary_elements['discriminator'], 
                                                            necessary_elements['discriminator_loss_function'],
                                                            necessary_elements['deviceDiscriminator'],
                                                            necessary_elements['flip'], necessary_elements['smooth'],
                                                            necessary_elements['symmetric_labels'],
                                                            necessary_elements['apply_gp'], necessary_elements['lambda_gp'],
                                                            LVmodel_generator['model_generator'], necessary_elements['clip_d'],
                                                            necessary_elements['apply_clip_d'], necessary_elements['apply_divide_d_cost'])
                D_Loss.append(d_error.item())
                d_real_squeezed = torch.squeeze(d_real)
                d_fake_squeezed = torch.squeeze(d_fake)
                d_real_squeezed = d_real_squeezed.data.cpu()
                d_fake_squeezed = d_fake_squeezed.data.cpu()
                D_Loss_real_mean.append(torch.mean(d_real_squeezed).item())
                D_Loss_real_min.append(torch.min(d_real_squeezed).item())
                D_Loss_real_max.append(torch.max(d_real_squeezed).item())
                D_Loss_fake_mean.append(torch.mean(d_fake_squeezed).item())
                D_Loss_fake_min.append(torch.min(d_fake_squeezed).item())
                D_Loss_fake_max.append(torch.max(d_fake_squeezed).item())                                           
        
        #train Generator:
        fake_generated = necessary_elements['generator'](necessary_elements['latentvector'])
        _, fake_generated = reshape(necessary_elements['model_discriminator'],real_batch, fake_generated,
                                        necessary_elements['batch_size'], necessary_elements['img_size'], necessary_elements['channels'])
        g_error = train_generator(necessary_elements['optimizerG'], fake_generated, 
                                necessary_elements['discriminator'], necessary_elements['generator_loss_function'], 
                                necessary_elements['deviceGenerator'], necessary_elements['deviceDiscriminator'],
                                necessary_elements['flip'], necessary_elements['smooth'],
                                necessary_elements['symmetric_labels'],
                                unrolling_step, necessary_elements['optimizerD'],
                                unrolling_real_batch, necessary_elements['discriminator_loss_function'],
                                real_batch, necessary_elements['apply_feature_matching'],
                                necessary_elements['batch_size'])
        G_Loss.append(g_error.item())
        kl_div_item = (F.kl_div(fake_generated, real_batch)).item()
        KL_div.append(abs(kl_div_item))
        if (abs(kl_div_item) < best_KL):
            best_KL = abs(kl_div_item)
            best_generated = fake_generated
        if (abs(kl_div_item) > worst_KL):
            worst_KL = abs(kl_div_item)
            worst_generated = fake_generated
        tmpJS = 0.5 * (real_batch + fake_generated)
        JS_div.append(abs((0.5*(F.kl_div(real_batch, tmpJS) + F.kl_div(fake_generated, tmpJS))).item()))

        #when we want to train G more:
        if (train_more_Generator > 0):
            for index in range(train_more_Generator):
                fake_generated = necessary_elements['generator'](necessary_elements['latentvector'])
                _, fake_generated = reshape(necessary_elements['model_discriminator'],real_batch, fake_generated,
                                            necessary_elements['batch_size'], necessary_elements['img_size'], necessary_elements['channels'])
                g_error = train_generator(necessary_elements['optimizerG'], fake_generated, 
                                        necessary_elements['discriminator'], necessary_elements['generator_loss_function'], 
                                        necessary_elements['deviceGenerator'], necessary_elements['deviceDiscriminator'],
                                        necessary_elements['flip'], necessary_elements['smooth'],
                                        necessary_elements['symmetric_labels'],
                                        unrolling_step, necessary_elements['optimizerD'],
                                        unrolling_real_batch, necessary_elements['discriminator_loss_function'],
                                        real_batch, necessary_elements['apply_feature_matching'],
                                        necessary_elements['batch_size'])
                G_Loss.append(g_error.item())
                KL_div.append(abs((F.kl_div(fake_generated, real_batch)).item()))
                tmpJS = 0.5 * (real_batch + fake_generated)
                JS_div.append(abs((0.5*(F.kl_div(real_batch, tmpJS) + F.kl_div(fake_generated, tmpJS))).item()))
        if (necessary_elements['apply_occasional_flip'] and necessary_elements['index_batch'] % necessary_elements['occasional_flip'] == 0):
            necessary_elements['flip'] = False
        track_convergence_DS.extend([(F.kl_div(real_batch, fake_generated)).item(),
                                    g_error.item(), d_error.item(),
                                    torch.min(d_real_squeezed).item(), torch.max(d_real_squeezed).item(),
                                    torch.min(d_fake_squeezed).item(), torch.max(d_fake_squeezed).item()])
    # loop done:
    result_elements['d_error'] = d_error.tolist()
    result_elements['d_real'] = d_real.tolist()
    result_elements['d_fake'] = d_fake.tolist()
    # 2D data:
    best_js_sample = 0
    fake_sample = worst_generated[0]
    real_sample = real_batch[0]
    for real_sample_item in real_batch :
        tmpJS_sample = 0.5 * (real_sample_item + fake_sample)
        js_div_item_sample = (0.5*(F.kl_div(real_sample_item, tmpJS_sample) + F.kl_div(fake_sample, tmpJS_sample))).item()
        if (js_div_item_sample > best_js_sample):
            best_js_sample = js_div_item_sample
            real_sample = real_sample_item

    real_sample, fake_sample = rgb_to_gray(real_sample, fake_sample, necessary_elements['img_size'], necessary_elements['channels'])

    result_elements['pca_real2D'] = pca_real2D.fit_transform(real_sample.data.cpu())
    result_elements['pca_generated2D'] = pca_generated2D.fit_transform(fake_sample.data.cpu())
    # 3D data:
    result_elements['pca_real3D'] = pca_real3D.fit_transform(real_sample.data.cpu())
    result_elements['pca_generated3D'] = pca_generated3D.fit_transform(fake_sample.data.cpu())
    
    result_elements['g_error'] = g_error.tolist()
    tmpFakeGenerated = best_generated[:size_viz * size_viz]
    tmpWorstGenerated = worst_generated[:size_viz * size_viz]
    tmpFakeGenerated = vector_to_image(tmpFakeGenerated, size_viz * size_viz).data.cpu()
    tmpWorstGenerated = vector_to_image(tmpWorstGenerated, size_viz * size_viz).data.cpu()
    if type(tmpFakeGenerated) == np.ndarray:
        tmpFakeGenerated = torch.from_numpy(tmpFakeGenerated)
    if type(tmpWorstGenerated) == np.ndarray:
        tmpWorstGenerated = torch.from_numpy(tmpWorstGenerated)
    gridGenerated = vutils.make_grid(tmpFakeGenerated, nrow=size_viz, normalize=True, scale_each=True)
    gridWorstGenerated = vutils.make_grid(tmpWorstGenerated, nrow=size_viz, normalize=True, scale_each=True)
    tmpRealImages = real_batch[:size_viz * size_viz]
    tmpRealImages = vector_to_image(tmpRealImages, size_viz * size_viz).data.cpu()
    if type(tmpRealImages) == np.ndarray:
        tmpRealImages = torch.from_numpy(tmpRealImages)
    gridReal = vutils.make_grid(tmpRealImages, nrow=size_viz, normalize=True, scale_each=True)  
    tmpFakeGenerated = np.transpose(gridGenerated.cpu(), (1, 2, 0))
    strIO = BytesIO()
    imsave(strIO, tmpFakeGenerated, plugin='pil', format_str='png')
    strIO.seek(0)
    generated_bytes = base64.b64encode(strIO.getvalue())

    tmpWorstGenerated = np.transpose(gridWorstGenerated.cpu(), (1, 2, 0))
    strIO = BytesIO()
    imsave(strIO, tmpWorstGenerated, plugin='pil', format_str='png')
    strIO.seek(0)
    worst_generated_bytes = base64.b64encode(strIO.getvalue())

    tmpRealImages = np.transpose(gridReal.cpu(), (1, 2, 0))
    strIO = BytesIO()
    imsave(strIO, tmpRealImages, plugin='pil', format_str='png')
    strIO.seek(0)
    real_bytes = base64.b64encode(strIO.getvalue())
    end_time = time.time()
    elapsed_time = end_time - start_time

    if result_elements is not None:
        return jsonify(
            d_error= D_Loss[(len(D_Loss) - 40):],
            g_error= G_Loss[(len(G_Loss) - 40):],
            size_generated_images= fake_generated[:size_viz * size_viz].shape,
            d_Loss_real_min= D_Loss_real_min,
            d_Loss_real_mean= D_Loss_real_mean,
            d_Loss_real_max= D_Loss_real_max,
            d_Loss_fake_min= D_Loss_fake_min,
            d_Loss_fake_mean= D_Loss_fake_mean,
            d_Loss_fake_max= D_Loss_fake_max,
            generated_bytes= generated_bytes.decode('ascii'),
            worst_generated_bytes= worst_generated_bytes.decode('ascii'),
            real_bytes= real_bytes.decode('ascii'),
            real_2d= [result_elements['pca_real2D'][:, 0].tolist() , result_elements['pca_real2D'][:, 1].tolist()],
            fake_2d= [result_elements['pca_generated2D'][:, 0].tolist() , result_elements['pca_generated2D'][:, 1].tolist() ],
            real_3d= [result_elements['pca_real3D'][:, 0].tolist() , result_elements['pca_real3D'][:, 1].tolist(), result_elements['pca_real3D'][:, 2].tolist()],
            fake_3d= [result_elements['pca_generated3D'][:, 0].tolist() , result_elements['pca_generated3D'][:, 1].tolist(), result_elements['pca_generated3D'][:, 2].tolist()],
            kl_div= KL_div[(len(KL_div) - 40):],
            js_div= JS_div[(len(JS_div) - 40):],
            precision= Precision,
            recall= Recall,
            f1_score= F1_score,
            training= necessary_elements['training'],
            track_convergence_DS= track_convergence_DS,
            elapsed_time= elapsed_time,
            index_batch= necessary_elements['index_batch'],
            epoch_number= necessary_elements['epoch_number'],
            status= 200,
            mimetype='application/json'
        )
    else:
        return app.response_class(
                response = json.dumps(result_elements),
                status = 400,
                mimetype='application/json'
                    )
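
Here `calc_metrics(d_real_squeezed, d_fake_squeezed, Precision, Recall, F1_score)` appends classification scores in place. A hypothetical sketch that thresholds the discriminator outputs at 0.5 and treats real images as the positive class:

def calc_metrics(d_real, d_fake, precision_hist, recall_hist, f1_hist, thr=0.5):
    """Sketch: score the discriminator as a real-vs-fake classifier."""
    tp = (d_real > thr).sum().item()   # reals classified as real
    fn = d_real.numel() - tp           # reals classified as fake
    fp = (d_fake > thr).sum().item()   # fakes classified as real
    precision = tp / (tp + fp) if (tp + fp) else 0.0
    recall = tp / (tp + fn) if (tp + fn) else 0.0
    f1 = (2 * precision * recall / (precision + recall)
          if (precision + recall) else 0.0)
    precision_hist.append(precision)
    recall_hist.append(recall)
    f1_hist.append(f1)
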