Example #1
def test_epoch(epoch, experiment):
    testloaders, testsets = experiment.create_test_dataloaders()
    use_cuda = experiment.use_cuda
    net = experiment.net
    summaries = experiment.summaries
    criterion = experiment.criterion

    net.eval()
    utils.set_random_seeds(1234)

    with torch.no_grad():
        for i, (testloader, testname) in enumerate(testloaders):
            stats = get_stats()
            print("Testing on {}".format(testname))
            for batch_idx, input_set in enumerate(testloader):
                experiment.step = epoch * len(experiment.trainloader) + int(
                    batch_idx / len(testloader) * len(experiment.trainloader))
                experiment.iter = batch_idx
                torch.cuda.empty_cache()
                inputs, targets = input_set
                if use_cuda:
                    inputs = inputs.cuda()
                    targets = targets.cuda()
                # inputs, targets = experiment.data_preprocessing(inputs)
                # inputs, targets = Variable(inputs, requires_grad=False), Variable(targets, requires_grad=False)
                pred = torch.clamp(net(inputs), 0.0, 1.0)
                batch_loss = criterion(pred, targets)
                loss = batch_loss.mean()
                stats["loss"].update(loss.data)
                psnr_iter = metrics.psnr(pred, targets, maxval=1).mean().data
                ssim_iter = metrics.ssim(pred, targets)

                stats["psnr"].update(psnr_iter, pred.size(0))
                stats["ssim"].update(ssim_iter.data, pred.size(0))

                progress_bar(
                    batch_idx, len(testloader),
                    'Loss: %.5f | PSNR: %.2f | SSIM: %.3f' %
                    (stats["loss"].avg, stats["psnr"].avg, stats["ssim"].avg))

                # save predicted image
                learned_img = Image.fromarray(
                    (255 * pred[0, 0].cpu().data.numpy()).astype(np.uint8))
                filename = os.path.join(
                    './n3net-results', testsets[0][i].at(batch_idx).split(
                        '/home/pacole2/Projects/datasets/DeepLesionTestPreprocessed/miniStudies/'
                    )[1])
                directory = os.path.dirname(filename)
                if not os.path.exists(directory):
                    os.makedirs(directory)
                learned_img.save(filename)

                del pred, inputs, targets

            add_summary(experiment, summaries, testname + "/epoch", epoch)
            for k, stat in stats.items():
                add_summary(experiment, summaries, testname + "/" + k,
                            stat.avg)
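The helpers get_stats, add_summary and progress_bar come from the surrounding project and are not shown. A minimal sketch of a running-statistics meter compatible with how stats is used in these snippets (.update(val, n), .avg, .ema, .name); the decay value is an assumption:

class Meter:
    # Running mean plus an exponential moving average, matching the
    # attributes the snippets read: .name, .avg and .ema.
    def __init__(self, name, ema_decay=0.99):  # decay value is assumed
        self.name = name
        self.ema_decay = ema_decay
        self.sum = 0.0
        self.count = 0
        self.ema = 0.0

    def update(self, val, n=1):
        val = float(val)
        self.sum += val * n
        self.count += n
        self.ema = self.ema_decay * self.ema + (1.0 - self.ema_decay) * val

    @property
    def avg(self):
        return self.sum / max(self.count, 1)


def get_stats():
    return {key: Meter(key) for key in ("loss", "psnr", "ssim")}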
Example #2
def train_mixup(nbEpochs=1):
    best_test_acc = 0
    train_size = len(training_generator.dataset)
    test_size = len(eval_generator.dataset)
    for epoch in range(nbEpochs):
        train_loss = 0.0
        train_acc = 0.0
        cnn.train()
        for index_batch, (inputs, labels) in enumerate(training_generator):
            inputs, labels = inputs.to(device), labels.long().to(device)
            inputs, lbl_a, lbl_b, lam = mixup_data(inputs, labels, alpha)
            #?? from torch.autograd import Variable
            #inputs, lbl_a, lbl_b = map(Variable, (inputs, lbl_a, lbl_b))

            optimizer.zero_grad()
            outputs = cnn(inputs)
            #loss = criterion(outputs, labels)
            loss = mixup_criterion(criterion, outputs, lbl_a, lbl_b, lam)
            _, predicted = torch.max(outputs.data, 1)
            #train_acc += (predicted == labels).sum().item()
            train_acc += (lam * (predicted == lbl_a).sum().item() + (1 - lam) *
                          (predicted == lbl_b).sum().item())
            loss.backward()
            optimizer.step()
            train_loss += loss.item()
            progress_bar(index_batch, len(training_generator))
        train_loss /= train_size
        train_acc /= train_size
        print("Train at Epoch:", epoch, " loss:", train_loss, " accuracy:",
              100.0 * train_acc)

        test_loss = 0.0
        test_acc = 0.0
        cnn.eval()
        with torch.no_grad():
            for index_batch, (inputs, labels) in enumerate(eval_generator):
                inputs, labels = inputs.to(device), labels.long().to(device)
                outputs = cnn(inputs)
                loss = criterion(outputs, labels)
                _, predicted = torch.max(outputs.data, 1)
                test_acc += (predicted == labels).sum().item()
                test_loss += loss.item()
                progress_bar(index_batch, len(eval_generator))
        test_loss /= test_size
        test_acc /= test_size
        print("Test at Epoch:", epoch, " loss:", test_loss, " accuracy:",
              100.0 * test_acc)

        lr = linear_adjust_learning_rate(epoch)
        save_result.append(
            [epoch, lr, train_loss, train_acc, test_loss, test_acc])

        if epoch > 30 and test_acc > best_test_acc:
            best_test_acc = test_acc
            #Add save model
            save_model("Dorfer2", str(epoch))
Example #3
def bootstrap_for_secondary(func2, block, samples, show_progressbar, *args):
  """Bootstrap for secondary observables.
  
  Every element of *arg is a list of two element of the form
  args[i]=[func_i, vec_i]
  and the final result is 
  func2(<func_0(vec_0)>, ..,<func_n(vec_n)>) 
  with blocksize "block" for blocking
  and "samples" resampling
  show_progressbar: if =1 show the progressbar
  """

  if not isinstance(block, int):
    print("ERROR: blocksize has to be an integer!")
    sys.exit(1)

  if block<1:
    print("ERROR: blocksize has to be positive!")
    sys.exit(1)


  secondary_samples = np.empty(samples, dtype=float)  # np.float was removed in NumPy 1.24

  for sample in range(samples):
    if show_progressbar==1:
      pb.progress_bar(sample, samples)

    primary_samples=[]

    numblocks=int(len(args[0][1])/block)
    end =  block * numblocks

    resampling = np.random.randint(0,numblocks,size=numblocks) 

    for arg in args:
      func_l, vec_l = arg

      # cut vec_l to a length that is a multiple of "block" and apply "func_l"
      data=func_l(vec_l[:end])  

      #block
      block_sum_data=bs.blocksum(data, block)/float(block)

      #sample average
      tmp = np.average([block_sum_data[i] for i in resampling])  

      primary_samples.append(tmp)

    aux=func2(primary_samples)
    secondary_samples[sample]=aux

  ris=np.mean(secondary_samples)
  err=np.std(secondary_samples, ddof=1)

  return ris, err
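A hypothetical usage sketch: estimating <x^2> - <x>^2 with its bootstrap error on synthetic data (bs.blocksum and pb.progress_bar are provided by the surrounding modules):

import numpy as np

data = np.random.normal(size=4096)
ris, err = bootstrap_for_secondary(
  lambda means: means[0] - means[1]**2,  # func2(<x^2>, <x>)
  16,                                    # blocksize
  500,                                   # number of bootstrap samples
  0,                                     # no progress bar
  [lambda v: v**2, data],                # args[0] = [func_0, vec_0]
  [lambda v: v, data])                   # args[1] = [func_1, vec_1]
print(ris, err)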
Example #4
def train_epoch(model,
                criterion,
                optimizer,
                train_loader,
                device=torch.device('cuda'),
                dtype=torch.float,
                collector=None):
    model.train()
    train_loss = 0

    for batch_idx, batch_data in enumerate(train_loader):
        input, target, extra = (batch_data['input'], batch_data['target'],
                                batch_data['extra'])

        input = input.to(device, dtype)

        if isinstance(target, torch.Tensor):
            target = target.to(device, dtype)
        elif isinstance(target, dict):
            for k in target:
                if isinstance(target[k], torch.Tensor):
                    target[k] = target[k].to(device, dtype)

        #print('solver target[heatmap]: ', target['heatmap'].dtype)

        optimizer.zero_grad()
        output = model(input)
        loss = criterion(output, target)
        loss.backward()
        optimizer.step()

        if collector is None:
            train_loss += loss.item()
            progress_bar(batch_idx, len(train_loader),
                         'Loss: {0:.4e}'.format(train_loss / (batch_idx + 1)))
            #print('loss: {0: .4e}'.format(train_loss/(batch_idx+1)))
        else:
            model.eval()
            with torch.no_grad():
                extra['batch_idx'] = batch_idx
                extra['loader_len'] = len(train_loader)
                extra['batch_avg_loss'] = loss.item()
                collector({
                    'model': model,
                    'input': input,
                    'target': target,
                    'output': output,
                    'extra': extra
                })

            # Keep train mode
            model.train()
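collector can be any callable accepting the dict assembled above; a minimal sketch that logs the running loss instead of drawing the progress bar:

def print_collector(payload):
    # Keys as filled in by train_epoch above.
    extra = payload['extra']
    print('batch {}/{}: loss {:.4e}'.format(
        extra['batch_idx'] + 1, extra['loader_len'], extra['batch_avg_loss']))

# train_epoch(model, criterion, optimizer, train_loader, collector=print_collector)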
Example #5
def test_epoch(epoch, experiment):
    testloaders = experiment.create_test_dataloaders()
    use_cuda = experiment.use_cuda
    net = experiment.net
    summaries = experiment.summaries
    criterion = experiment.criterion

    net.eval()
    utils.set_random_seeds(1234)

    with torch.no_grad():
        for testloader, testname in testloaders:
            stats = get_stats()
            print("Testing on {}".format(testname))
            for batch_idx, inputs in enumerate(testloader):
                experiment.step = epoch * len(experiment.trainloader) + int(
                    batch_idx / len(testloader) * len(experiment.trainloader))
                experiment.iter = batch_idx
                torch.cuda.empty_cache()
                if use_cuda:
                    inputs = inputs.cuda()
                inputs, targets = experiment.data_preprocessing(inputs)

                # CLAMP values to [0,1] after adding noise
                inputs = torch.clamp(inputs, min=0, max=1)

                inputs = Variable(inputs, requires_grad=False)
                targets = Variable(targets, requires_grad=False)
                pred = net(inputs)
                batch_loss = criterion(pred, targets)
                loss = batch_loss.mean()
                stats["loss"].update(loss.data)
                psnr_iter = metrics.psnr(pred, targets, maxval=1).mean().data
                ssim_iter = metrics.ssim(pred, targets)

                stats["psnr"].update(psnr_iter, pred.size(0))
                stats["ssim"].update(ssim_iter.data, pred.size(0))

                progress_bar(
                    batch_idx, len(testloader),
                    'Loss: %.5f | PSNR: %.2f | SSIM: %.3f' %
                    (stats["loss"].avg, stats["psnr"].avg, stats["ssim"].avg))

                del pred, inputs, targets

            add_summary(experiment, summaries, testname + "/epoch", epoch)
            for k, stat in stats.items():
                add_summary(experiment, summaries, testname + "/" + k,
                            stat.avg)
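metrics.psnr is not shown; a per-image sketch of the standard definition PSNR = 10 * log10(maxval^2 / MSE), consistent with the .mean() over the batch applied above:

import torch

def psnr(pred, target, maxval=1.0):
    # Mean squared error per image, averaged over all non-batch dimensions.
    mse = ((pred - target) ** 2).flatten(1).mean(dim=1)
    return 10.0 * torch.log10(maxval ** 2 / mse)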
Example #6
def train(nbEpochs=1):
    best_test_acc = 0
    train_size = len(training_generator.dataset)
    test_size = len(eval_generator.dataset)
    for epoch in range(nbEpochs):
        train_loss = 0.0
        train_acc = 0.0
        cnn.train()
        for index_batch, (inputs, labels) in enumerate(training_generator):
            inputs, labels = inputs.to(device), labels.long().to(device)
            optimizer.zero_grad()
            outputs = cnn(inputs)
            loss = criterion(outputs, labels)
            _, predicted = torch.max(outputs.data, 1)
            train_acc += (predicted == labels).sum().item()
            loss.backward()
            optimizer.step()
            train_loss += loss.item()
            progress_bar(index_batch, len(training_generator))
        train_loss /= train_size
        train_acc /= train_size
        print("Train at Epoch:", epoch, " loss:", train_loss, " accuracy:",
              100.0 * train_acc)

        test_loss = 0.0
        test_acc = 0.0
        cnn.eval()
        with torch.no_grad():
            for index_batch, (inputs, labels) in enumerate(eval_generator):
                inputs, labels = inputs.to(device), labels.long().to(device)
                outputs = cnn(inputs)
                loss = criterion(outputs, labels)
                _, predicted = torch.max(outputs.data, 1)
                test_acc += (predicted == labels).sum().item()
                test_loss += loss.item()
                progress_bar(index_batch, len(eval_generator))
        test_loss /= test_size
        test_acc /= test_size
        print("Test at Epoch:", epoch, " loss:", test_loss, " accuracy:",
              100.0 * test_acc)

        lr = linear_adjust_learning_rate(epoch)
        save_result.append(
            [epoch, lr, train_loss, train_acc, test_loss, test_acc])

        if epoch > 30 and test_acc > best_test_acc:
            best_test_acc = test_acc
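linear_adjust_learning_rate is defined elsewhere; a hypothetical sketch consistent with its use here (updates the module-level optimizer and returns the new rate), assuming a linear decay to zero over max_epochs:

def linear_adjust_learning_rate(epoch, base_lr=0.1, max_epochs=100):
    # Linear decay from base_lr at epoch 0 down to 0 at max_epochs (values assumed).
    lr = base_lr * max(0.0, 1.0 - float(epoch) / max_epochs)
    for group in optimizer.param_groups:
        group['lr'] = lr
    return lr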
Example #7
def val_epoch(model,
              criterion,
              val_loader,
              device=torch.device('cuda'),
              dtype=torch.float,
              collector=None):
    model.eval()
    val_loss = 0

    with torch.no_grad():
        for batch_idx, batch_data in enumerate(val_loader):
            input, target, extra = (batch_data['input'], batch_data['target'],
                                    batch_data['extra'])

            input = input.to(device, dtype)

            if isinstance(target, torch.Tensor):
                target = target.to(device, dtype)
            elif isinstance(target, dict):
                for k in target:
                    if isinstance(target[k], torch.Tensor):
                        target[k] = target[k].to(device, dtype)

            output = model(input)
            loss = criterion(output, target)

            if collector is None:
                val_loss += loss.item()
                progress_bar(
                    batch_idx, len(val_loader),
                    'Loss: {0:.4e}'.format(val_loss / (batch_idx + 1)))
                #print('loss: {0: .4e}'.format(val_loss/(batch_idx+1)))
            else:
                extra['batch_idx'] = batch_idx
                extra['loader_len'] = len(val_loader)
                extra['batch_avg_loss'] = loss.item()
                collector({
                    'model': model,
                    'input': input,
                    'target': target,
                    'output': output,
                    'extra': extra
                })

                # Keep eval mode
                model.eval()
Example #8
async def refresh_applist(dryrun, skip, from_scratch=False, max_apps=None):
    local_applist = await steam.get_applist_from_local()
    if (skip):
        return local_applist

    async with web.Session(limit_per_host=20) as webSession:
        foreign_applist = await steam.get_applist_from_server(
            webSession, max_apps)
        styledprint.print_info('Apps in server:', len(foreign_applist))
        styledprint.print_info('Apps in cache at start:', len(local_applist))
        calname = 'refresh_applist'
        try:
            tasks = []
            for name in foreign_applist:
                for app in foreign_applist[name]:
                    if ((not from_scratch) and (name in local_applist)
                            and (app in local_applist[name])):
                        continue
                    appid, typ = app
                    link = steam.get_store_link(appid, typ)
                    f = asyncio.ensure_future(
                        steam.get_page(link, name, webSession))
                    f.add_done_callback(
                        functools.partial(poolsubmit, calname,
                                          get_newgame_info, name, appid, typ,
                                          tasks))
                    tasks.append(f)

            if (len(tasks)):
                styledprint.print_info('async tasks:')
                await asyncio.gather(progressbar.progress_bar(tasks))
        except Exception as e:
            styledprint.print_error(
                'Error happened while running the async loop:', e)
            styledprint.print_error(traceback.format_exc())
        fs = parallelism.wait_calname(calname)
        for f in fs:
            ext_applist = f.result()
            merge_applists(local_applist, ext_applist)
        styledprint.print_info(
            'Apps in cache at end (duplicate names not in the count):',
            len(local_applist))

        if (not dryrun):
            logging.debug('not dryrun so saving local_applist to disk')
            await steam.save_applist_to_local(local_applist)

    games = utils.DictCaseInsensitive()
    for game in local_applist:
        if (not game.lower().endswith('demo')):
            games[game] = local_applist[game]
    styledprint.print_info('Apps in cleaned cache:', len(games))
    return games
Example #9
def timer(seconds: int, prefix: str) -> None:
    """ Simplify usage of progress_bar since many of the options are going to be the same throughout """
    remaining = seconds

    while True:
        progress = remaining / seconds * 100

        progress_bar(
            progress=progress,
            length=30,
            complete=0,
            msg_complete="Requesting new data...",
            msg_prefix=prefix +
            f' ({time.strftime("%M:%S", time.gmtime(remaining))} remaining): ',
            suppress_nl=True,
        )
        time.sleep(1)

        remaining -= 1

        if remaining < 0:
            break
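For example, counting down 90 seconds before the next poll:

timer(90, prefix='Next refresh')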
Example #10
def start_loop(subGames, options, cachedgames, steamgames, winedb,
               cleansteamgames, cleanwinedb, urlsmapping, cpuCount):
    asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    webSession = web.Session(limit_per_host=((20 / cpuCount) + 1))
    tasks = []
    for name in subGames:
        game = subGames[name]
        tasks.append(
            asyncio.ensure_future(get_game_info(options, game, cachedgames,
                                                steamgames, winedb,
                                                cleansteamgames, cleanwinedb,
                                                name, urlsmapping, webSession),
                                  loop=loop))
    loop.run_until_complete(
        asyncio.gather(progressbar.progress_bar(tasks), loop=loop))
    loop.run_until_complete(webSession.close())
    return subGames
Example #11
async def get_ratings():
    URL = 'https://appdb.winehq.org/objectManager.php?sClass=application&sTitle=Browse+Applications&iappVersion-ratingOp0=5&sOrderBy=appName&bAscending=true&iItemsPerPage=200&sappVersion-ratingData0='
    ratings = utils.DictCaseInsensitive()

    async with web.Session(limit_per_host=200) as webSession:
        tasks = []
        for e in Rating:
            tasks.append(
                asyncio.ensure_future(get_forOneRating(URL, e.name,
                                                       webSession)))
        await asyncio.gather(progressbar.progress_bar(tasks))

    for task in tasks:
        apps = task.result()[0]
        rating = task.result()[1]
        for app in apps:
            if (app[0] in ratings):
                ratings[app[0]].append(WineApp(app[1], rating))
            else:
                ratings[app[0]] = [WineApp(app[1], rating)]

    return ratings
Example #12
    def fit(self, X):
        """Actual implementation of K-SVD algorithm.
        Args:
        - X: numpy 2d-array of dimensions
          (len(signal) = D.shape[0], n_samples)
        TODO: add a stopping condition like an epsilon
        (and return the corresponding number of iterations)
        """

        #Check whether data is coherent
        if self.D.shape[0] != X.shape[0]:
            raise TypeError("Supplied X matrix is not "
            "coherent with dictionary dimensions: you "
            "should have same number of lines for "
            "both the dictionary and the input data ")

        #ProgressBar setup
        print "Training dictionary over {} iterations".format(self.n_iter)
        progress = progressbar.progress_bar(self.n_iter)

        #self.n_iter iterations
        for it in range(self.n_iter):

            #Step 1: Compute sparse representation of X
            #given current dictionary D
            gamma = orthogonal_mp(self.D, X, n_nonzero_coefs=self.K,
                                  precompute=self.precompute)

            #Step 2: Adjust dictionary D and sparse
            #representation gamma at the same time
            #column by column
            for j in range(self.D.shape[1]):
                
                #Compute I = {indices of the signals in X
                #whose representations use jth column of D
                I = self.find_indices(gamma, j)
                
                #If one column is not used, it won't be until
                #the algorithm actually stops, which is a shame.
                #So, we use a heuristic: we set the values of the
                #column to the worst represented column of the
                #X matrix
                if I == []:
                    #find worst represented column in X
                    d = self.worst_represented(gamma, X)
                    #normalize
                    d = d/np.linalg.norm(d)
                    #set D column to d
                    self.D[:,j] = d
                    #jump to the next column optimization
                    continue

                #Set D_j to zero
                self.D[:,j] = np.zeros_like(self.D[:,j])

                #From now, we use a certain number of tricks
                #explained in [1] to accelerate the (therefore
                #approximate) K-SVD algorithm
                #TODO: try to understand better... -> maybe we could
                #solve the equations in the report ;)
                g = gamma[j,:][I].T
                d = X[:,I].dot(g) - self.D.dot(gamma[:,I].dot(g))
                if d.sum() != 0:
                    d = d/np.linalg.norm(d)
                g = (X[:,I].T).dot(d) - ((self.D.dot(gamma[:,I])).T).dot(d)

                #Store new values
                self.D[:,j] = d
                gamma[j,:][I] = g.T

            #Update progress bar
            progress.update(it)

        print('   Done!')
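A hypothetical usage sketch; the enclosing class is not shown, so its name (KSVD here) and constructor signature are assumptions based on the attributes fit uses (D, K, n_iter, precompute):

import numpy as np

rng = np.random.RandomState(0)
D0 = rng.randn(64, 128)
D0 /= np.linalg.norm(D0, axis=0)                     # unit-norm atoms
X = rng.randn(64, 1000)                              # 1000 signals of length 64
model = KSVD(D=D0, K=5, n_iter=20, precompute=True)  # assumed constructor
model.fit(X)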
Example #13
def train_epoch(experiment):
    use_cuda = experiment.use_cuda
    net = experiment.net
    optimizer = experiment.optimizer
    summaries = experiment.summaries
    criterion = experiment.criterion
    epoch = experiment.epoch

    lr = experiment.base_lr * experiment.learning_rate_decay(epoch)
    for group in experiment.optimizer.param_groups:
        group['lr'] = lr
    print('\nEpoch: %d, Learning rate: %f, Expdir %s' %
          (epoch, lr, experiment.expname))

    net.train()

    stats = get_stats()

    trainloader = experiment.trainloader
    for batch_idx, inputs in enumerate(trainloader):
        experiment.epoch_frac = float(batch_idx) / len(trainloader)
        experiment.step = epoch * len(trainloader) + batch_idx
        experiment.iter = batch_idx
        '''
        B = inputs.shape[0]
        for b in range(B):
            max_v = torch.max(inputs[b, :, :, :])
            inputs[b, :,:,:] *= 1.0/max_v
        '''

        if use_cuda:
            inputs = inputs.cuda()
        optimizer.zero_grad()
        inputs, targets = experiment.data_preprocessing(inputs)

        #np.save('/mnt/Lab-Kellman/Share/temp/inputs.npy', inputs.cpu().detach().numpy())
        #np.save('/mnt/Lab-Kellman/Share/temp/targets.npy', targets.cpu().detach().numpy())

        inputs, targets = Variable(inputs, requires_grad=False), Variable(
            targets, requires_grad=False)

        pred = net(inputs)
        batch_loss = criterion(pred, targets)

        loss = batch_loss.mean()
        psnr_iter = metrics.psnr(pred, targets,
                                 maxval=torch.max(targets)).mean().data
        ssim_iter = metrics.ssim(pred, targets)

        loss_v = loss.data

        stats["loss"].update(loss.data, pred.size(0))
        stats["psnr"].update(psnr_iter, pred.size(0))
        stats["ssim"].update(ssim_iter.data, pred.size(0))

        loss.backward()
        del loss
        optimizer.step()

        if batch_idx % 10 == 0:
            experiment.writer.add_scalars(
                'train/psnr',
                {'psnr': stats["psnr"].ema, 'loss': stats["loss"].ema},
                epoch * len(trainloader) + batch_idx)

            progress_bar(
                batch_idx, len(trainloader),
                'Batch: %05d | Loss: %.5f | PSNR: %.2f | SSIM: %.3f' %
                (batch_idx, stats["loss"].ema, stats["psnr"].ema,
                 stats["ssim"].ema))

        if batch_idx % (len(trainloader) // 20) == 0:
            #progress_bar(batch_idx, len(trainloader),"")
            #print("Batch {:05d}, ".format(batch_idx), end='')
            #for k,stat in stats.items():
            #    print("{}: {:.4f}, ".format(stat.name, stat.avg), end='')
            #print("")

            dump_dir = '/mnt/Lab-Kellman/RawData/MachinLearning_Labelled_data/denoising/perf_training_record'
            fname = 'inputs_epoch_%d__batch_%d.npy' % (epoch, batch_idx)
            np.save(os.path.join(dump_dir, fname),
                    inputs.detach().cpu().numpy())
            fname = 'targets_epoch_%d__batch_%d.npy' % (epoch, batch_idx)
            np.save(os.path.join(dump_dir, fname),
                    targets.detach().cpu().numpy())
            fname = 'pred_epoch_%d__batch_%d.npy' % (epoch, batch_idx)
            np.save(os.path.join(dump_dir, fname), pred.detach().cpu().numpy())

    stop = (lr == 0)
    progress_bar(
        batch_idx, len(trainloader), 'Loss: %.5f | PSNR: %.2f | SSIM: %.3f' %
        (stats["loss"].avg, stats["psnr"].avg, stats["ssim"].avg))

    # test the network

    #add_summary(experiment, summaries, "train/epoch", epoch)
    #for k,stat in stats.items():
    #    add_summary(experiment, summaries, "train/" + k, stat.avg)
    print("")

    return stop
Example #14
def train_epoch(experiment):
    use_cuda = experiment.use_cuda
    net = experiment.net
    optimizer = experiment.optimizer
    summaries = experiment.summaries
    criterion = experiment.criterion
    epoch = experiment.epoch

    lr = experiment.base_lr * experiment.learning_rate_decay(epoch)
    for group in experiment.optimizer.param_groups:
        group['lr'] = lr
    print('\nEpoch: %d, Learning rate: %f, Expdir %s' %
          (epoch, lr, experiment.expname))

    net.train()

    stats = get_stats()

    trainloader = experiment.trainloader
    for batch_idx, inputs in enumerate(trainloader):
        experiment.epoch_frac = float(batch_idx) / len(trainloader)
        experiment.step = epoch * len(trainloader) + batch_idx
        experiment.iter = batch_idx
        if use_cuda:
            inputs = inputs.cuda()
        optimizer.zero_grad()
        inputs, targets = experiment.data_preprocessing_Blind_SFM(inputs)
        inputs, targets = Variable(inputs, requires_grad=False), Variable(
            targets, requires_grad=False)

        pred = net(inputs)
        batch_loss = criterion(pred, targets)

        loss = batch_loss.mean()
        psnr_iter = metrics.psnr(pred, targets, maxval=1).mean().data
        ssim_iter = metrics.ssim(pred, targets)

        stats["loss"].update(loss.data, pred.size(0))
        stats["psnr"].update(psnr_iter, pred.size(0))
        stats["ssim"].update(ssim_iter.data, pred.size(0))

        loss.backward()
        del loss
        optimizer.step()

        if batch_idx % (len(trainloader) // 10) == 0:
            progress_bar(batch_idx, len(trainloader), "")
            print("Batch {:05d}, ".format(batch_idx), end='')
            for k, stat in stats.items():
                print("{}: {:.4f}, ".format(stat.name, stat.avg), end='')
            print("")
        progress_bar(
            batch_idx, len(trainloader),
            'Loss: %.5f | PSNR: %.2f | SSIM: %.3f' %
            (stats["loss"].ema, stats["psnr"].ema, stats["ssim"].ema))

    stop = (lr == 0)
    progress_bar(
        batch_idx, len(trainloader), 'Loss: %.5f | PSNR: %.2f | SSIM: %.3f' %
        (stats["loss"].avg, stats["psnr"].avg, stats["ssim"].avg))

    add_summary(experiment, summaries, "train/epoch", epoch)
    for k, stat in stats.items():
        add_summary(experiment, summaries, "train/" + k, stat.avg)
    print("")

    return stop
Example #15
def sample(draws, step, start=None, trace=None, tune=None, progressbar=True, model=None, random_seed=None):
    """
    Draw a number of samples using the given step method.
    Multiple step methods are supported via a compound step method.
    Returns the trace.

    Parameters
    ----------

    draws : int
        The number of samples to draw
    step : function
        A step function
    start : dict
        Starting point in parameter space (or partial point)
        Defaults to trace.point(-1) if there is a trace provided and
        model.test_point if not (defaults to empty dict)
    trace : NpTrace or list
        Either a trace of past values or a list of variables to track
        (defaults to None)
    tune : int
        Number of iterations to tune, if applicable (defaults to None)
    progressbar : bool
        Flag for progress bar
    model : Model (optional if in `with` context)

    """
    model = modelcontext(model)
    draws = int(draws)
    seed(random_seed)

    if start is None:
        start = {}

    if isinstance(trace, NpTrace) and len(trace) > 0:

        trace_point = trace.point(-1)
        trace_point.update(start)
        start = trace_point

    else:

        test_point = model.test_point.copy()
        test_point.update(start)
        start = test_point

        if not isinstance(trace, NpTrace):
            if trace is None:
                trace = model.named_vars
            try:
                trace = NpTrace(trace.values())
            except AttributeError:
                trace = NpTrace(list(trace))

    try:
        step = step_methods.CompoundStep(step)
    except TypeError:
        pass

    point = Point(start, model=model)

    progress = progress_bar(draws)

    for i in range(draws):
        if (i == tune):
            step = stop_tuning(step)
        point = step.step(point)
        trace = trace.record(point)
        if progressbar:
            progress.update(i)

    return trace
Example #16
def integrator_yerr(x, y, dy, xmin, xmax, samples, spline_order,
                    plot_spline=1, plot_band=1, plot_distribution=1,
                    save_figs=0, show_progressbar=1):
    """
   Compute the integral of the data in the range [xmin, xmax] using a spline
   interpolation of order "spline_order" and using "samples" bootstrap samples to
   evaluate the errors.

   plot_spline: if =1 plot the optimal fit together with the data
   plot_band: if =1 plot the 1std band together with data
   plot_distribtion: if =1 plot the bootstrapped distributions of the parameters
   save_figs: if =1 save the figures in png insted of displaying them
   show_progressbar: if =1 show the progressbar

   return the estimated value of the integral, its error and 
   the bootstrap samples of the integral.
   """

    mask = ((x <= xmax) & (x >= xmin))
    x = x[mask]
    y = y[mask]
    dy = dy[mask]

    band_size = 1000

    data_length = len(x)

    # array to store the bootstrapped results
    boot_sample = np.empty(samples, dtype=float)  # np.float was removed in NumPy 1.24

    if plot_band == 1:
        x_band = np.linspace(xmin, xmax, band_size)
        boot_band = np.empty((band_size, samples), dtype=float)

    for i in range(samples):
        if show_progressbar == 1:
            pb.progress_bar(i, samples)

        # bootstrap sample
        booty = y + np.random.normal(0, dy, data_length)

        # spline interpolation
        s = interp.UnivariateSpline(x, booty, k=spline_order, s=0)

        # integrate the spline interpolation
        boot_sample[i] = s.integral(xmin, xmax)

        if plot_band == 1:
            boot_band[:, i] = s(x_band)

    # optimal parameters and errors
    ris = np.mean(boot_sample)
    err = np.std(boot_sample, ddof=1)

    if plot_spline == 1:
        x_aux = np.linspace(xmin, xmax, 1000)
        s = interp.UnivariateSpline(x, y, k=spline_order, s=0)
        y_aux = s(x_aux)

        plt.figure('Plot of the spline interpolation')
        plt.xlim(0.9 * xmin, 1.1 * xmax)
        plt.errorbar(x, y, yerr=dy, fmt='ob', ms=5)
        plt.plot(x_aux, y_aux, 'r-')

        if plot_band == 1:
            band_mean = np.mean(boot_band, axis=1)
            band_std = np.std(boot_band, axis=1)
            plt.plot(x_band, band_mean + band_std, 'g-')
            plt.plot(x_band, band_mean - band_std, 'g-')

        if save_figs == 1:
            plt.savefig('spline.png')
        else:
            plt.show()

    if plot_distribution == 1:
        plt.figure('Bootstrapped distribution of the integral')
        plt.xlabel('value of the integral')
        plt.ylabel('distribution histogram')
        plt.hist(boot_sample, bins='auto')

        if save_figs == 1:
            plt.savefig('boot_integral.png')
        else:
            plt.show()

    return ris, err, boot_sample
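A hypothetical usage sketch: integrating noisy samples of sin(x) over [0, pi], with all plotting disabled:

import numpy as np

x = np.linspace(0.0, np.pi, 40)
dy = np.full_like(x, 0.01)
y = np.sin(x) + np.random.normal(0, dy)
ris, err, boot = integrator_yerr(x, y, dy, 0.0, np.pi, samples=200,
                                 spline_order=3, plot_spline=0, plot_band=0,
                                 plot_distribution=0, show_progressbar=0)
print('integral = %f +/- %f (exact value: 2)' % (ris, err))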
Example #17
    def fit(self, X):
        """Actual implementation of K-SVD algorithm.
        Args:
        - X: numpy 2d-array of dimensions
          (len(signal) = D.shape[0], n_samples)
        TODO: add a stopping condition like an epsilon
        (and return the corresponding number of iterations)
        """

        #Check whether data is coherent
        if self.D.shape[0] != X.shape[0]:
            raise TypeError("Supplied X matrix is not "
                            "coherent with dictionary dimensions: you "
                            "should have same number of lines for "
                            "both the dictionary and the input data ")

        #ProgressBar setup
        print "Training dictionary over {} iterations".format(self.n_iter)
        progress = progressbar.progress_bar(self.n_iter)

        #self.n_iter iterations
        for it in range(self.n_iter):

            #Step 1: Compute sparse representation of X
            #given current dictionary D
            gamma = orthogonal_mp(self.D,
                                  X,
                                  n_nonzero_coefs=self.K,
                                  precompute=self.precompute)

            #Step 2: Adjust dictionary D and sparse
            #representation gamma at the same time
            #column by column
            for j in range(self.D.shape[1]):

                #Compute I = {indices of the signals in X
                #whose representations use jth column of D
                I = self.find_indices(gamma, j)

                #If one column is not used, it won't be until
                #the algorithm actually stops, which is a shame.
                #So, we use a heuristic: we set the values of the
                #column to the worst represented column of the
                #X matrix
                if I == []:
                    #find worst represented column in X
                    d = self.worst_represented(gamma, X)
                    #normalize
                    d = d / np.linalg.norm(d)
                    #set D column to d
                    self.D[:, j] = d
                    #jump to the next column optimization
                    continue

                #Set D_j to zero
                self.D[:, j] = np.zeros_like(self.D[:, j])

                #From now, we use a certain number of tricks
                #explained in [1] to accelerate the (therefore
                #approximate) K-SVD algorithm
                #TODO: try to understand better... -> maybe we could
                #solve the equations in the report ;)
                g = gamma[j, :][I].T
                d = X[:, I].dot(g) - self.D.dot(gamma[:, I].dot(g))
                if d.sum() != 0:
                    d = d / np.linalg.norm(d)
                g = (X[:, I].T).dot(d) - ((self.D.dot(gamma[:, I])).T).dot(d)

                #Store new values
                self.D[:, j] = d
                gamma[j, :][I] = g.T

            #Update progress bar
            progress.update(it)

        print('   Done!')
Example #18
def sample(draws, step, start=None, trace=None, tune=None, progressbar=True, model=None, random_seed=None):
    """
    Draw a number of samples using the given step method.
    Multiple step methods are supported via a compound step method.
    Returns the trace (also on KeyboardInterrupt).

    Parameters
    ----------

    draws : int
        The number of samples to draw
    step : function
        A step function
    start : dict
        Starting point in parameter space (or partial point)
        Defaults to trace.point(-1) if there is a trace provided and
        model.test_point if not (defaults to empty dict)
    trace : NpTrace or list
        Either a trace of past values or a list of variables to track
        (defaults to None)
    tune : int
        Number of iterations to tune, if applicable (defaults to None)
    progressbar : bool
        Flag for progress bar
    model : Model (optional if in `with` context)

    """
    model = modelcontext(model)
    draws = int(draws)
    seed(random_seed)

    if start is None:
        start = {}

    if isinstance(trace, NpTrace) and len(trace) > 0:

        trace_point = trace.point(-1)
        trace_point.update(start)
        start = trace_point

    else:

        test_point = model.test_point.copy()
        test_point.update(start)
        start = test_point

        if not isinstance(trace, NpTrace):
            if trace is None:
                trace = model.named_vars
            try:
                trace = NpTrace(trace.values())
            except AttributeError:
                trace = NpTrace(list(trace))

    try:
        step = step_methods.CompoundStep(step)
    except TypeError:
        pass

    point = Point(start, model=model)

    progress = progress_bar(draws)

    try:
        for i in range(draws):
            if (i == tune):
                step = stop_tuning(step)
            point = step.step(point)
            trace = trace.record(point)
            if progressbar:
                progress.update(i)
    except KeyboardInterrupt:
        pass
    finally:
        return trace
Example #19
def fit_with_yerr(x, y, dy, xmin, xmax, func, params, samples,
                  stop_param=1.0e-15, plot_fit=1, plot_band=1,
                  plot_residuals=1, plot_distribution=1, save_figs=0,
                  show_progressbar=1):
   """
   Perform a fit to data on [xmin, xmax] with the function func(x, param),
   using "samples" bootstrap samples to evaluate the errors.

   stop_param: stopping parameter for the least square regression
   plot_fit: if =1 plot the optimal fit together with the data
   plot_band: if =1 plot the 1std band together with data
   plot_residuals: if =1 plot residuals after convergence
   plot_distribtion: if =1 plot the bootstrapped distributions of the parameters
   save_figs: if =1 save the figures in png insted of displaying them
   show_progressbar: if =1 show the progressbar

   return the optimal vales of the parameters, their errors, 
   the value of chi^2,the number of dof, the p-value
   and the bootstrap samples of the parameters.
   """

   mask = ((x<=xmax) & (x>=xmin))
   x=x[mask]
   y=y[mask]
   dy=dy[mask]

   band_size=1000

   data_length=len(x)
 
   # array to store the bootstrapped results 
   boot_sample = np.empty((len(params), samples), dtype=float)  # np.float was removed in NumPy 1.24

   if plot_band==1:
     x_band=np.linspace(xmin, xmax, band_size)
     boot_band = np.empty((band_size, samples), dtype=float)
     
   for i in range(samples): 
     if show_progressbar==1:
       pb.progress_bar(i, samples)

     # bootstrap sample
     booty=y+np.random.normal(0, dy, data_length) 

     # least square regression
     ris = opt.leastsq(_residuals_yerr, params, ftol=stop_param, args=(x, booty, dy, func))
     boot_sample[:,i]=ris[0]

     if plot_band==1:
       boot_band[:,i]=func(x_band, ris[0])

   # optimal parameters and errors
   ris=np.mean(boot_sample, axis=1)
   err=np.std(boot_sample, axis=1, ddof=1)

   # auxiliary stuff
   opt_res=_residuals_yerr(ris, x, y, dy, func)
   chi2=np.sum(opt_res*opt_res)
   dof=data_length - len(params)
   pvalue=1.0 - stats.chi2.cdf(chi2, dof)


   if plot_fit==1:
     x_aux=np.linspace(xmin, xmax, 1000)
     y_aux=func(x_aux, ris)

     plt.figure('Best fit (chi2/dof=%.4f/%d=%f)' % (chi2, dof, chi2/dof))
     plt.xlim(0.9*xmin, 1.1*xmax)
     plt.errorbar(x, y, yerr=dy, fmt='ob', ms=5)
     plt.plot(x_aux,y_aux,'r-')
      
     if plot_band==1:
       band_mean=np.mean(boot_band, axis=1)
       band_std=np.std(boot_band, axis=1)
       plt.plot(x_band, band_mean + band_std,'g-')
       plt.plot(x_band, band_mean - band_std,'g-')

     if save_figs==1:
       plt.savefig('fit.png')
     else:
       plt.show()

   if plot_residuals==1:
     x_aux=np.linspace(xmin, xmax, 1000)
     y_aux=np.ones(len(x_aux))

     plt.figure('Residuals')
     plt.xlim(0.9*xmin, 1.1*xmax)
     plt.errorbar(x, opt_res, yerr=1, fmt='ob', ms=5)
     plt.plot(x_aux, -2*y_aux, 'g:')
     plt.plot(x_aux, -y_aux, 'r--')
     plt.plot(x_aux, 0*y_aux, 'r-')
     plt.plot(x_aux, y_aux, 'r--')
     plt.plot(x_aux, 2*y_aux, 'g:')
     
     if save_figs==1:
       plt.savefig('residuals.png')
     else:
       plt.show()

   if plot_distribution==1:
     for i in range(len(params)):
       plt.figure('Bootstrapped distribution of param[%d]' % i)
       plt.xlabel('param[%d] values' % i)
       plt.ylabel('distribution histogram')
       plt.hist(boot_sample[i], bins='auto')
       
       if save_figs==1:
         plt.savefig('param'+str(i)+'.png')
       else:
         plt.show()

   return ris, err, chi2, dof, pvalue, boot_sample
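A hypothetical usage sketch: fitting noisy linear data with func(x, p) = p[0] + p[1]*x, with all plotting disabled (_residuals_yerr and pb.progress_bar come from the surrounding module):

import numpy as np

def linear(x, p):
    return p[0] + p[1] * x

x = np.linspace(0.0, 10.0, 30)
dy = np.full_like(x, 0.1)
y = 1.0 + 2.0 * x + np.random.normal(0, dy)
ris, err, chi2, dof, pvalue, boot = fit_with_yerr(
    x, y, dy, 0.0, 10.0, linear, [0.0, 1.0], 200,
    plot_fit=0, plot_band=0, plot_residuals=0,
    plot_distribution=0, show_progressbar=0)
print(ris, err, chi2 / dof, pvalue)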