Example #1
    def output_valstats(self,
                        sess,
                        summary_writer,
                        step,
                        batch_x,
                        batch_y,
                        name,
                        store_img=True):
        prediction, loss, avg_psnr = sess.run(
            [self.net.recons, self.net.valid_loss, self.net.valid_avg_psnr],
            feed_dict={
                self.net.x: batch_x,
                self.net.y: batch_y,
                self.net.keep_prob: 1.,
                self.net.phase: False
            })

        self.record_summary(summary_writer, 'valid_loss', loss, step)
        self.record_summary(summary_writer, 'valid_avg_psnr', avg_psnr, step)

        logging.info(
            "Validation Statistics, validation loss= {:.4f}, Avg PSNR= {:.4f}".
            format(loss, avg_psnr))

        util.save_mat(prediction, "%s/%s.mat" % (self.prediction_path, name))

        if store_img:
            util.save_img(prediction[0, ...],
                          "%s/%s_img.tif" % (self.prediction_path, name))
Example #2
def test2():
    """Test save_mat"""
    Ms = [matrix(1.0, (3, 2)), spmatrix(1.0, range(3), range(3))]
    names = ['M1', 'M2']
    util.save_mat(Ms, names, 'test')
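The helper exercised throughout this page, `util.save_mat`, appears with two signatures: a single array plus an output path (Examples #1 and #3) and a list of cvxopt matrices plus names (this test). A minimal sketch of the single-array form, assuming a scipy backend; the project's actual helper may differ:

import numpy as np
import scipy.io

def save_mat(data, path):
    # Write the array to a MATLAB-compatible .mat file under a fixed key.
    # The key name 'data' is an assumption; the real helper may use another.
    scipy.io.savemat(path, {'data': np.asarray(data)})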
Example #3
def bcredEst(dObj,
             rObj,
             num_patch=16,
             patch_size=40,
             pad=None,
             numIter=100,
             step=1,
             useNoise=True,
             verbose=False,
             is_save=False,
             save_path='bcred_intermediate_results',
             xtrue=None,
             xinit=None):
    """
    Block Coordinate Regularization by Denoising (BCRED)
    
    ### INPUT:
    dObj       ~ the data fidelity term, measurement/forward model
    rObj       ~ the regularizer term
    num_patch  ~ the number of blocks assigned (patches must not overlap)
    patch_size ~ the spatial size of a patch (block)
    pad        ~ the pad size for block-wise denoising; set to None to use the full denoiser
    numIter    ~ the total number of iterations
    step       ~ the step-size
    useNoise   ~ true if the CNN predicts noise; false if it predicts the clean image
    verbose    ~ if true, print info for each iteration
    is_save    ~ if true, save the reconstruction at each iteration
    save_path  ~ the save path used when is_save is true
    xtrue      ~ the ground truth image, for tracking purposes
    xinit      ~ the initial value of x

    ### OUTPUT:
    x     ~ the reconstruction produced by the algorithm
    outs  ~ detailed per-iteration information (loss, gradient norm, snr, time)
    
    """

    ########### HELPER FUNCTION ###########

    evaluateSnr = lambda xtrue, x: 20 * np.log10(
        np.linalg.norm(xtrue.flatten('F')) / np.linalg.norm(
            xtrue.flatten('F') - x.flatten('F')))

    ########### INITIALIZATION ###########

    # initialize save folder
    if is_save:
        abs_save_path = os.path.abspath(save_path)
        if os.path.exists(save_path):
            print("Removing '{:}'".format(abs_save_path))
            shutil.rmtree(abs_save_path, ignore_errors=True)
        # make new path
        print("Allocating '{:}'".format(abs_save_path))
        os.makedirs(abs_save_path)

    # initialize info data
    xtrueSet = xtrue is not None
    snr = []  # always defined so the 'snr' key in outs exists even without xtrue

    loss = []
    dist = []
    timer = []

    # initialize variables
    if xinit is None:
        xinit = np.zeros(dObj.sigSize, dtype=np.float32)
    x = xinit
    xnext = x
    x_patches = util.extract_nonoverlap_patches(x, num_patch, patch_size)
    xnext_patches = x_patches

    # helper variables
    # guard against the default pad=None: patch_size + 2 * pad would raise a
    # TypeError, and without padding the denoiser sees the raw patch size
    denoise_size = patch_size if pad is None else patch_size + 2 * pad
    p, pfull = rObj.init(num_patch, denoise_size)  # dual variables for TV
    res = dObj.res(x)  # compute the residual Ax-y for xinit

    ########### BC-RED (EPOCH) ############

    for indIter in range(numIter):

        # randomize order of patches
        patchInd = np.random.permutation(num_patch)

        # calculate full gradient (g = Sx)
        gfull_data, dcost = dObj.grad(x)
        gfull_robj, pfull = rObj.red(x,
                                     step,
                                     pfull,
                                     useNoise=useNoise,
                                     extend_p=None)
        gfull_tot = gfull_data + gfull_robj

        # calculate the loss (kept for comparison with PROX-TV)
        obj = dcost + rObj.eval(x)

        # cost[indIter] = data
        loss.append(obj)
        dist.append(np.linalg.norm(gfull_tot.flatten('F'))**2)
        if xtrueSet:
            snr.append(evaluateSnr(xtrue, x))

        # set up a timer
        timeStart = time.time()

        ## Inner Loop ##
        for i in range(num_patch):

            # extract patch
            patch_idx = patchInd[i]
            cur_patch = x_patches[patch_idx, :, :]

            # get gradient of data-fit for the extracted block
            g_data = dObj.gradBloc(res, patch_idx)

            # denoise the block with padding & get the full gradient G
            if pad is None:
                g_robj, p[patch_idx, ...] = rObj.red(x,
                                                     step,
                                                     p[patch_idx, ...],
                                                     useNoise=useNoise,
                                                     extend_p=None)
                g_robj_patch = util.extract_padding_patches(g_robj,
                                                            patch_idx,
                                                            extend_p=0)
            else:
                padded_patch = util.extract_padding_patches(x,
                                                            patch_idx,
                                                            extend_p=pad)
                g_robj_patch, p[patch_idx, ...] = rObj.red(padded_patch,
                                                           step,
                                                           p[patch_idx, ...],
                                                           useNoise=useNoise,
                                                           extend_p=pad)

            g_tot = g_data + g_robj_patch

            # update the selected block
            xnext_patches[patch_idx, :, :] = cur_patch - step * g_tot
            xnext = util.putback_nonoverlap_patches(xnext_patches)

            # update
            res = res - step * dObj.fmultPatch(g_tot, patch_idx)
            x = xnext
            x_patches = xnext_patches

        # end of the timer
        timeEnd = time.time() - timeStart
        timer.append(timeEnd)

        ########### LOG INFO ###########

        # save & print
        if is_save:
            util.save_mat(
                xnext, abs_save_path + '/iter_{}_mat.mat'.format(indIter + 1))
            util.save_img(
                xnext, abs_save_path + '/iter_{}_img.tif'.format(indIter + 1))

        if verbose:
            if xtrueSet:
                print('[bcredEst: ' + str(indIter + 1) + '/' + str(numIter) +
                      ']' + ' [||Gx_k||^2/||Gx_0||^2: %.5e]' %
                      (dist[indIter] / dist[0]) + ' [snr: %.2f]' %
                      (snr[indIter]))
            else:
                print('[bcredEst: ' + str(indIter + 1) + '/' + str(numIter) +
                      ']' + ' [||Gx_k||^2/||Gx_0||^2: %.5e]' %
                      (dist[indIter] / dist[0]))

        # summarize outs (dist is normalized by its initial value)
        outs = {'dist': np.array(dist) / dist[0], 'loss': loss, 'snr': snr, 'time': timer}

    return x, outs
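bcredEst leans on `util.extract_nonoverlap_patches` and `util.putback_nonoverlap_patches` to split the image into blocks and reassemble it. A self-contained numpy sketch of what such helpers could look like, assuming a square image tiled row-major by square patches (the project's helpers may support other layouts):

import numpy as np

def extract_nonoverlap_patches(x, num_patch, patch_size):
    # Tile a square image into num_patch non-overlapping square patches,
    # returned with shape (num_patch, patch_size, patch_size).
    per_side = int(np.sqrt(num_patch))
    assert per_side * per_side == num_patch
    patches = np.empty((num_patch, patch_size, patch_size), dtype=x.dtype)
    for k in range(num_patch):
        r, c = divmod(k, per_side)
        patches[k] = x[r * patch_size:(r + 1) * patch_size,
                       c * patch_size:(c + 1) * patch_size]
    return patches

def putback_nonoverlap_patches(patches):
    # Inverse of extract_nonoverlap_patches for the same row-major tiling.
    num_patch, patch_size, _ = patches.shape
    per_side = int(np.sqrt(num_patch))
    x = np.empty((per_side * patch_size,) * 2, dtype=patches.dtype)
    for k in range(num_patch):
        r, c = divmod(k, per_side)
        x[r * patch_size:(r + 1) * patch_size,
          c * patch_size:(c + 1) * patch_size] = patches[k]
    return x

# round-trip check: a 160x160 image split into 16 blocks of 40x40
img = np.random.rand(160, 160).astype(np.float32)
assert np.allclose(putback_nonoverlap_patches(
    extract_nonoverlap_patches(img, 16, 40)), img)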
Example #4
def redEst(dObj,
           rObj,
           numIter=100,
           step=1,
           accelerate=False,
           mode='RED',
           useNoise=True,
           verbose=False,
           is_save=False,
           save_path='red_intermediate_results',
           xtrue=None,
           xinit=None):
    """
    Regularization by Denoising (RED)
    
    ### INPUT:
    dObj       ~ data fidelity term, measurement/forward model
    rObj       ~ regularizer term
    numIter    ~ total number of iterations
    accelerate ~ use APGM if true, PGM otherwise
    mode       ~ 'RED', 'PROX', or 'GRAD' update
    useNoise   ~ true if the CNN predicts noise; false if it predicts the clean image
    step       ~ step-size
    verbose    ~ if true, print info for each iteration
    is_save    ~ if true, save the reconstruction at each iteration
    save_path  ~ the save path used when is_save is true
    xtrue      ~ the ground truth image, for tracking purposes
    xinit      ~ initialization of x (zero if not given)

    ### OUTPUT:
    x     ~ the reconstruction produced by the algorithm
    outs  ~ detailed per-iteration information (loss, gradient norm, snr, time)

    """

    ########### HELPER FUNCTION ###########

    evaluateSnr = lambda xtrue, x: 20 * np.log10(
        np.linalg.norm(xtrue.flatten('F')) / np.linalg.norm(
            xtrue.flatten('F') - x.flatten('F')))

    ########### INITIALIZATION ###########

    # initialize save folder
    if is_save:
        abs_save_path = os.path.abspath(save_path)
        if os.path.exists(save_path):
            print("Removing '{:}'".format(abs_save_path))
            shutil.rmtree(abs_save_path, ignore_errors=True)
        # make new path
        print("Allocating '{:}'".format(abs_save_path))
        os.makedirs(abs_save_path)

    # initialize info data
    xtrueSet = xtrue is not None
    snr = []  # always defined so the 'snr' key in outs exists even without xtrue

    loss = []
    dist = []
    timer = []

    # initialize variables
    if xinit is None:
        xinit = np.zeros(dObj.sigSize, dtype=np.float32)
    x = xinit
    s = x  # gradient update
    t = 1.  # controls acceleration
    p, pfull = rObj.init(1, dObj.sigSize[0])  # dual variable for TV
    p = p[0]

    ########### RED (MAIN LOOP) ###########

    for indIter in range(numIter):
        timeStart = time.time()
        # get gradient
        g, _ = dObj.grad(s)
        if mode == 'RED':
            g_robj, p = rObj.red(s, step, p, useNoise=useNoise, extend_p=None)
            xnext = s - step * (g + g_robj)
        elif mode == 'PROX':
            xnext, p = rObj.prox(np.clip(s - step * g, 0, np.inf), step,
                                 p)  # clip to [0, inf]
        elif mode == 'GRAD':
            xnext = s - step * g
        else:
            raise ValueError("Unknown mode option: {}".format(mode))

        timeEnd = time.time() - timeStart

        ########### LOG INFO ###########

        # calculate full gradient for convergence plot
        gfull, dfull = dObj.grad(x)
        if mode == 'RED':
            g_robj, pfull = rObj.red(x,
                                     step,
                                     pfull,
                                     useNoise=useNoise,
                                     extend_p=None)
            Px = x - step * (gfull + g_robj)
            # Gx
            diff = np.linalg.norm(gfull.flatten('F') + g_robj.flatten('F'))**2
            obj = dfull + rObj.eval(x)
        elif mode == 'PROX':
            Px, pfull = rObj.prox(np.clip(x - step * gfull, 0, np.inf), step,
                                  pfull)
            # x-Px
            diff = np.linalg.norm(x.flatten('F') - Px.flatten('F'))**2
            obj = dfull + rObj.eval(x)
        elif mode == 'GRAD':
            # x - Px
            Px = x - step * gfull  # use the full gradient at x, not the one at s
            diff = np.linalg.norm(x.flatten('F') - Px.flatten('F'))**2
            obj = dfull
        else:
            raise ValueError("Unknown mode option: {}".format(mode))

        # acceleration
        if accelerate:
            tnext = 0.5 * (1 + np.sqrt(1 + 4 * t * t))
        else:
            tnext = 1
        s = xnext + ((t - 1) / tnext) * (xnext - x)

        # output info
        # cost[indIter] = data
        loss.append(obj)
        dist.append(diff)
        timer.append(timeEnd)
        # evaluateTol(x, xnext)
        if xtrueSet:
            snr.append(evaluateSnr(xtrue, x))

        # update
        t = tnext
        x = xnext

        # save & print
        if is_save:
            util.save_mat(
                xnext, abs_save_path + '/iter_{}_mat.mat'.format(indIter + 1))
            util.save_img(
                xnext, abs_save_path + '/iter_{}_img.tif'.format(indIter + 1))

        if verbose:
            if xtrueSet:
                print('[redEst: ' + str(indIter + 1) + '/' + str(numIter) +
                      ']' + ' [||Gx_k||^2/||Gx_0||^2: %.5e]' %
                      (dist[indIter] / dist[0]) + ' [snr: %.2f]' %
                      (snr[indIter]))
            else:
                print('[redEst: ' + str(indIter + 1) + '/' + str(numIter) +
                      ']' + ' [||Gx_k||^2/||Gx_0||^2: %.5e]' %
                      (dist[indIter] / dist[0]))

        # summarize outs (dist is normalized by its initial value)
        outs = {'dist': np.array(dist) / dist[0], 'loss': loss, 'snr': snr, 'time': timer}

    return x, outs
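When accelerate is true, redEst uses the classic FISTA momentum sequence t_{k+1} = (1 + sqrt(1 + 4 t_k^2)) / 2 together with the extrapolation s = x_{k+1} + ((t_k - 1) / t_{k+1}) (x_{k+1} - x_k). A toy demonstration of the same update rule on a least-squares problem; every name here is illustrative, not from the project:

import numpy as np

# minimize 0.5 * ||A x - y||^2 with accelerated gradient descent,
# using the same t-sequence and extrapolation as redEst above
A = np.array([[3.0, 1.0], [1.0, 2.0]])
y = np.array([1.0, 1.0])
step = 1.0 / np.linalg.norm(A.T @ A, 2)  # 1 / Lipschitz constant of the gradient

x = np.zeros(2)
s = x
t = 1.0
for _ in range(50):
    g = A.T @ (A @ s - y)                         # gradient at the extrapolated point
    xnext = s - step * g                          # plain gradient step
    tnext = 0.5 * (1 + np.sqrt(1 + 4 * t * t))    # FISTA t-sequence
    s = xnext + ((t - 1) / tnext) * (xnext - x)   # momentum extrapolation
    t, x = tnext, xnext

print(x, np.linalg.solve(A, y))  # the two should agree to several digits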
Example #6
    def train(self,
              data_provider,
              output_path,
              valid_provider,
              valid_size,
              training_iters=100,
              epochs=1000,
              dropout=0.75,
              display_step=1,
              save_epoch=50,
              restore=False,
              write_graph=False,
              prediction_path='validation'):
        """
        Launches the training process

        :param data_provider: callable returning training and verification data
        :param output_path: path where to store checkpoints
        :param valid_provider: data provider for the validation dataset
        :param valid_size: batch size for validation provider
        :param training_iters: number of training mini-batch iterations per epoch
        :param epochs: number of epochs
        :param dropout: dropout keep probability
        :param display_step: number of steps between stats output
        :param save_epoch: number of epochs between intermediate checkpoint saves
        :param restore: flag if previous model should be restored
        :param write_graph: flag if the computation graph should be written as a protobuf file to the output path
        :param prediction_path: path where to save predictions on each epoch
        """

        # initialize the training process.
        init = self._initialize(training_iters, output_path, restore,
                                prediction_path)

        # create output path
        directory = os.path.join(output_path, "final/")
        if not os.path.exists(directory):
            os.makedirs(directory)

        save_path = os.path.join(directory, "model.cpkt")
        if epochs == 0:
            return save_path

        with tf.Session() as sess:
            if write_graph:
                tf.train.write_graph(sess.graph_def, output_path, "graph.pb",
                                     False)

            sess.run(init)

            if restore:
                ckpt = tf.train.get_checkpoint_state(output_path)
                if ckpt and ckpt.model_checkpoint_path:
                    self.net.restore(sess, ckpt.model_checkpoint_path)

            summary_writer = tf.summary.FileWriter(output_path,
                                                   graph=sess.graph)
            logging.info("Start optimization")

            # select validation dataset
            valid_x, valid_y = valid_provider(valid_size, fix=True)
            util.save_mat(valid_y,
                          "%s/%s.mat" % (self.prediction_path, 'origin_y'))
            util.save_mat(valid_x,
                          "%s/%s.mat" % (self.prediction_path, 'origin_x'))

            for epoch in range(epochs):
                total_loss = 0
                # batch_x, batch_y = data_provider(self.batch_size)
                for step in range((epoch * training_iters),
                                  ((epoch + 1) * training_iters)):
                    batch_x, batch_y = data_provider(self.batch_size)
                    # Run optimization op (backprop)
                    _, loss, lr, avg_psnr = sess.run(
                        [
                            self.optimizer, self.net.loss,
                            self.learning_rate_node, self.net.avg_psnr
                        ],
                        feed_dict={
                            self.net.x: batch_x,
                            self.net.y: batch_y,
                            self.net.keep_prob: dropout,
                            self.net.phase: True
                        })

                    if step % display_step == 0:
                        logging.info(
                            "Iter {:} (before training on the batch) Minibatch loss= {:.4f}, Minibatch avg PSNR= {:.4f}"
                            .format(step, loss, avg_psnr))
                        self.output_minibatch_stats(sess, summary_writer, step,
                                                    batch_x, batch_y)

                    total_loss += loss

                    self.record_summary(summary_writer, 'training_loss', loss,
                                        step)
                    self.record_summary(summary_writer, 'training_avg_psnr',
                                        avg_psnr, step)

                # output statistics for epoch
                self.output_epoch_stats(epoch, total_loss, training_iters, lr)
                self.output_valstats(sess,
                                     summary_writer,
                                     step,
                                     valid_x,
                                     valid_y,
                                     "epoch_%s" % epoch,
                                     store_img=True)

                if epoch % save_epoch == 0:
                    directory = os.path.join(output_path,
                                             "{}_cpkt/".format(step))
                    if not os.path.exists(directory):
                        os.makedirs(directory)
                    path = os.path.join(directory, "model.cpkt")
                    self.net.save(sess, path)

                save_path = self.net.save(sess, save_path)

            logging.info("Optimization Finished!")

            return save_path
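train expects `data_provider` and `valid_provider` to be callables returning a `(batch_x, batch_y)` pair, with the validation provider also accepting fix=True for a deterministic batch. A minimal sketch of a compatible provider backed by in-memory numpy arrays; the shapes are illustrative and the real providers may differ:

import numpy as np

class ArrayProvider:
    # Serves random mini-batches from in-memory arrays; fix=True returns a
    # deterministic slice, matching the valid_provider(valid_size, fix=True)
    # call above.
    def __init__(self, x, y):
        self.x, self.y = x, y

    def __call__(self, n, fix=False):
        if fix:
            idx = np.arange(n)
        else:
            idx = np.random.randint(0, self.x.shape[0], size=n)
        return self.x[idx], self.y[idx]

provider = ArrayProvider(np.random.rand(100, 32, 32, 1),
                         np.random.rand(100, 32, 32, 1))
batch_x, batch_y = provider(8)             # random training batch
valid_x, valid_y = provider(16, fix=True)  # repeatable validation batch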
Example #7
def apgmEst(dObj,
            rObj,
            numIter=100,
            step=100,
            accelerate=True,
            stochastic=False,
            mini_batch=None,
            verbose=False,
            is_save=True,
            save_path='result',
            xtrue=None):
    """
    Plug-and-Play APGM with switch for PGM and SPGM
    
    ### INPUT:
    dObj       ~ data fidelity term, measurement/forward model
    rObj       ~ regularizer term
    numIter    ~ total number of iterations
    accelerate ~ use APGM if true, PGM otherwise
    stochastic ~ use SPGM (stochastic gradients) if true
    mini_batch ~ the size of each mini-batch (required when stochastic is true)
    step       ~ step-size
    verbose    ~ if true, print info for each iteration
    is_save    ~ if true, save the reconstruction at each iteration
    save_path  ~ the save path used when is_save is true
    xtrue      ~ the ground truth image, for tracking purposes

    ### OUTPUT:
    x     ~ the reconstruction produced by the algorithm
    outs  ~ detailed per-iteration information (dist, relative change, snr, time)

    """

    ##### HELPER FUNCTION #####

    evaluateSnr = lambda xtrue, x: 20 * np.log10(
        np.linalg.norm(xtrue.flatten('F')) / np.linalg.norm(
            xtrue.flatten('F') - x.flatten('F')))
    evaluateTol = lambda x, xnext: np.linalg.norm(
        x.flatten('F') - xnext.flatten('F')) / np.linalg.norm(x.flatten('F'))

    ##### INITIALIZATION #####

    # initialize save folder (guarded by is_save so the folder is not wiped
    # when saving is disabled)
    if is_save:
        abs_save_path = os.path.abspath(save_path)
        if os.path.exists(save_path):
            print("Removing '{:}'".format(abs_save_path))
            shutil.rmtree(abs_save_path, ignore_errors=True)
        # make new path
        print("Allocating '{:}'".format(abs_save_path))
        os.makedirs(abs_save_path)

    # initialize measurement mask
    if stochastic:
        totNum = dObj.y.shape[0]
        idx = 0
        keepIdx = np.zeros(mini_batch, dtype=np.int32)
        for j in range(mini_batch):
            keepIdx[j] = idx
            idx = idx + int(totNum / mini_batch)

    # initialize info data
    xtrueSet = xtrue is not None
    snr = []  # always defined so the 'snr' key in outs exists even without xtrue

    dist = []
    timer = []
    relativeChange = []

    # initialize variables
    xinit = np.zeros(dObj.sigSize, dtype=np.float32)
    # outs = struct(xtrueSet)
    x = xinit
    s = x  # gradient update
    t = 1.  # controls acceleration
    p = rObj.init()  # dual variable for TV
    pfull = rObj.init()  # dual variable for TV

    ##### MAIN LOOP #####

    for indIter in range(numIter):
        timeStart = time.time()
        if stochastic:
            # get gradient
            g, _, keepIdx = dObj.gradStoc(s, keepIdx)
            # denoise
            xnext, p = rObj.prox(np.clip(s - step * g, 0, np.inf), step,
                                 p)  # clip to [0, inf]
        else:
            # get gradient
            g, _ = dObj.grad(s)
            # denoise
            xnext, p = rObj.prox(np.clip(s - step * g, 0, np.inf), step,
                                 p)  # clip to [0, inf]

        # calculate full gradient
        if stochastic:
            gfull, _ = dObj.grad(s)
            Px, pfull = rObj.prox(np.clip(s - step * gfull, 0, np.inf), step,
                                  pfull)
        else:
            Px = xnext

        # if indIter == 0:
        #     outs.dist0 = np.linalg.norm(x.flatten('F') - Px.flatten('F'))^2

        # acceleration
        if accelerate:
            tnext = 0.5 * (1 + np.sqrt(1 + 4 * t * t))
        else:
            tnext = 1
        s = xnext + ((t - 1) / tnext) * (xnext - x)

        # output info
        # cost[indIter] = data
        dist.append(np.linalg.norm(x.flatten('F') - Px.flatten('F'))**2)
        timer.append(time.time() - timeStart)
        if indIter == 0:
            relativeChange.append(np.inf)
        else:
            relativeChange.append(evaluateTol(x, xnext))
        # evaluateTol(x, xnext)
        if xtrueSet:
            snr.append(evaluateSnr(xtrue, x))

        # update
        t = tnext
        x = xnext

        # save & print
        if is_save:
            util.save_mat(
                xnext, abs_save_path + '/iter_{}_mat.mat'.format(indIter + 1))
            util.save_img(
                xnext, abs_save_path + '/iter_{}_img.tif'.format(indIter + 1))

        if verbose:
            if xtrueSet:
                print('[apgmEst: ' + str(indIter + 1) + '/' + str(numIter) +
                      ']' + '[tols: %.5e]' % (relativeChange[indIter]) +
                      '[||x-Px||^2: %.5e]' % (dist[indIter]) + '[step: %.1e]' %
                      (step) + '[time: %.1f]' % (np.sum(timer)) +
                      '[snr: %.2f]' % (snr[indIter]))
            else:
                print('[apgmEst: ' + str(indIter + 1) + '/' + str(numIter) +
                      ']' + '[tols: %.5e]' % (relativeChange[indIter]) +
                      '[||x-Px||^2: %.5e]' % (dist[indIter]) + '[step: %.1e]' %
                      (step) + '[time: %.1f]' % (np.sum(timer)))

        # summarize outs
        outs = {
            'dist': dist,
            'time': timer,
            'relativeChange': relativeChange,
            'snr': snr
        }

    return x, outs
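Each estimator on this page defines the same evaluateSnr lambda: SNR in dB is 20 * log10(||xtrue|| / ||xtrue - x||). A quick self-contained check of that formula; the inputs are illustrative:

import numpy as np

def evaluate_snr(xtrue, x):
    # SNR in dB; the Fortran-order flatten mirrors the lambdas above,
    # although the flattening order does not affect the norms.
    err = np.linalg.norm(xtrue.flatten('F') - x.flatten('F'))
    return 20 * np.log10(np.linalg.norm(xtrue.flatten('F')) / err)

xtrue = np.ones((8, 8))
x = xtrue + 0.01 * np.ones((8, 8))  # 1% uniform error
print(evaluate_snr(xtrue, x))       # exactly 40 dB for this case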
Example #8
def uniform_block_process(x_blocks, dist, timer, snr, global_count,              # share_able variables
                          cpu_idx, gpu_idx, data_obj, rglr_obj,                  # cpu, gpu & objects
                          step, is_noise, pad, minibatch_size, max_global_iter,  # algorithmic parameters
                          logging, verbose, is_save, save_every,                 # logging parameters
                          is_conv, conv_every, save_path, xtrue) -> None:
    #### CPU Core Assignment ####
    process = mp.current_process()
    os.system("taskset -p -c %d %d" % (cpu_idx, process.pid))

    #### Random Seed ####
    np.random.seed(cpu_idx)

    ##### Main Loop ####
    # initialize some variables
    rglr_obj.init(gpu_idx=gpu_idx)

    while global_count.value < max_global_iter:

        # select block randomly at uniform
        block_idx = np.random.randint(data_obj.num_blocks)

        # 1st read
        xtilde_blocks = read_x_list(x_blocks)
        xtilde_block = xtilde_blocks[block_idx, :, :]

        # get the block gradient of data-fit
        g_data_block = data_obj.gradStoc_block(xtilde_block, block_idx, minibatch_size)
        if pad == 'full':
            xtilde = util.putback_nonoverlap_patches(xtilde_blocks)
            g_rglr = rglr_obj.red(xtilde, is_noise=is_noise, extend_p=0)
            g_rglr_block = util.extract_nonoverlap_patches(g_rglr, 
                    data_obj.num_blocks, data_obj.block_size)[block_idx,:,:]
        else:
            g_rglr_block = rglr_obj.red(xtilde_block, is_noise=is_noise, extend_p=pad)  # pad removed

        # compute the overall gradient g_tot
        g_tot_block = g_data_block + g_rglr_block

        # update the selected block & update global memory
        # upload new x to the global memory
        x_blocks[block_idx] = x_blocks[block_idx] - step * g_tot_block
        if logging:
            xlog = util.putback_nonoverlap_patches(read_x_list(x_blocks))
        with global_count.get_lock():
            local_count = np.copy(global_count.value)  # record local count for logging
            global_count.value += 1  # update global count

        # record final finishing time
        if local_count < max_global_iter:
            timer[local_count] = time.time() - timer[local_count]

        #### Log Info ####
        if logging and local_count < max_global_iter:
            if is_conv and (local_count+1) % conv_every == 0:
                # calculate full gradient (g = Sx)
                g_full_data = data_obj.grad(xtilde_blocks)
                g_full_rglr = rglr_obj.red(xtilde_blocks, is_noise=is_noise)
                g_full_tot = g_full_data + g_full_rglr
                dist[local_count] = np.linalg.norm(g_full_tot.flatten('F')) ** 2

            if snr is not None:
                snr[local_count] = evaluateSnr(xtrue, xlog)

            # save & print
            if is_save and (local_count+1) % save_every == 0:
                util.save_mat(xlog, save_path + '/iter_{}_mat.mat'.format(local_count + 1))
                util.save_img(xlog, save_path + '/iter_{}_img.tif'.format(local_count + 1))

            np.set_printoptions(precision=3)
            if verbose and snr is not None:
                print(
                    f"[uniform_block_async: {local_count + 1}/{max_global_iter}] [Process: {process.name} {process.pid}] "
                    f"[Step-size: {step:.{4}}] [||Gx_k||^2: {dist[local_count]:.{4}}] [SNR: {snr[local_count]:.{4}}] "
                    f"[Time: {timer[local_count]:.{4}}]", flush=True)
            elif verbose:
                print(
                    f"[uniform_block_async: {local_count + 1}/{max_global_iter}] [Process: {process.name} {process.pid}] "
                    f"[Step-size: {step:.{4}}] [||Gx_k||^2: {dist[local_count]:.{4}}] "
                    f"[Timer: {timer[local_count]:.{4}}]", flush=True)
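The worker above coordinates through shared state: global_count is a multiprocessing.Value whose get_lock() guards the read-and-increment, so each process claims a unique iteration index for logging. A minimal sketch of that pattern in isolation; the names are illustrative:

import multiprocessing as mp

def worker(global_count, max_iter):
    while True:
        with global_count.get_lock():
            local_count = global_count.value  # snapshot for this worker
            if local_count >= max_iter:
                return
            global_count.value += 1           # claim this iteration index
        # ... do one block update, then log under local_count ...

if __name__ == '__main__':
    count = mp.Value('i', 0)
    procs = [mp.Process(target=worker, args=(count, 20)) for _ in range(4)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()
    print(count.value)  # 20: each index was claimed exactly once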