Exemple #1
0
	def eval(self, sess, var):
		"""Run one pass over the validation data and accumulate loss / reciprocal rank.

		Python 2 code (xrange, integer division).

		Args:
			sess: open tf.Session.
			var: dict of graph tensors ('cost', 'rr_sum', 'x', 'y').

		Returns:
			(total_loss, total_rr, total_v1_rr, total_v2_rr): sums over all
			validation examples; the caller divides by counts to get means.
			v1/v2 split the validation batches into first and second half.
		"""
		batch_size = 10000
		
		# Number of mini-batches needed to cover the validation set.
		total_batch = int(np.ceil(len(self.val_data) / float(batch_size)))


		total_loss = 0.0
		total_rr = 0.0
		total_count = 0
		total_v1_rr = 0.0
		total_v2_rr = 0.0
		pbar = pb.ProgressBar(widgets=["[VALID] ", pb.DynamicMessage('loss'), " ", pb.DynamicMessage('mrr'), " ", pb.FileTransferSpeed(unit="batchs"), pb.Percentage(), pb.Bar(), pb.Timer(), " ", pb.ETA()], maxval=total_batch).start()

		for i in xrange(total_batch):
			batch = self.next_batch(batch_size, dtype="valid")
			# batch[:, :, 0] is the label column, the rest are features.
			loss, rr = sess.run([var['cost'], var['rr_sum']], feed_dict={var['y']:batch[:, :, 0], var['x']:batch[:, :, 1:]})
			total_loss += loss
			total_rr += rr
			total_count += len(batch)
			# First half of the batches feeds v1, second half v2 (py2 int division).
			if i < total_batch / 2:
				total_v1_rr += rr
			else:
				total_v2_rr += rr
			pbar.update(i, loss=total_loss/total_count, mrr=total_rr/total_count)
		pbar.finish()

		return total_loss, total_rr, total_v1_rr, total_v2_rr
Exemple #2
0
    def train(self, generator, num_train, num_epochs, lr=1e-4):
        """Train this model with RMSprop on an MSE objective.

        Args:
            generator: iterator yielding ``[x, y]`` tensor batches.
            num_train: batches drawn from the generator per epoch.
            num_epochs: number of epochs (also the progress bar's max_value).
            lr: RMSprop learning rate.
        """

        opt = torch.optim.RMSprop(self.parameters(), lr=lr)

        widgets = [
            progressbar.ETA(),
            progressbar.Bar(), ' ',
            progressbar.DynamicMessage('Loss'), ' ',
            progressbar.DynamicMessage('Epoch')
        ]
        with progressbar.ProgressBar(max_value=num_epochs,
                                     widgets=widgets) as bar:
            for n in range(num_epochs):
                tot_loss = 0

                for i in range(num_train):

                    [x, y] = next(generator)

                    opt.zero_grad()
                    pred = self.forward(x)
                    # Optimized loss: elementwise MSE over the batch.
                    loss = torch.mean((pred - y)**2)
                    loss.backward()
                    opt.step()
                    # NOTE(review): the displayed 'Loss' is the squared difference
                    # of the batch *means*, not the optimized MSE above — confirm
                    # this is intentional.
                    tot_loss += torch.mean(
                        (torch.mean(pred) -
                         torch.mean(y))**2).cpu().detach().item()

                bar.update(n,
                           Loss=tot_loss / num_train,
                           Epoch=str(n + 1) + '/' + str(num_epochs))
Exemple #3
0
    def _make_progressbar(self, N):
        """Return a progress bar configured for an optimization run of N iterations.

        When ``self.max_ind_shift`` is set, the bar additionally displays a
        shift-normalized objective value.

        Args:
            N: total number of iterations (drives the counter and the ETA).

        Returns:
            A configured ``progressbar.ProgressBar`` with ``max_value=N``.
        """
        # Fix: the original duplicated the entire widget list in both branches;
        # build the common list once and conditionally insert the extra widget.
        widgets = [
            ' ',
            progressbar.DynamicMessage('ObjectiveFn'),
            ' Iteration: ',
            ' ',
            progressbar.Counter(),
            '/%d' % N,
            ' ',
            progressbar.AdaptiveETA(),
        ]

        if self.max_ind_shift is not None:
            # Insert ' ' + normalized-objective widget right after 'ObjectiveFn',
            # matching the original widget order.
            widgets.insert(2, progressbar.DynamicMessage('ObjectiveFn_Normalized'))
            widgets.insert(2, ' ')

        return progressbar.ProgressBar(widgets=widgets, max_value=N)
Exemple #4
0
def test_dynamic_message_widget():
    """Exercise DynamicMessage widgets with numeric, string, and None payloads."""
    widgets = [
        ' [', progressbar.Timer(), '] ',
        progressbar.Bar(),
        ' (', progressbar.ETA(), ') ',
        progressbar.DynamicMessage('loss'),
        progressbar.DynamicMessage('text'),
        progressbar.DynamicMessage('error', precision=None),
    ]

    bar = progressbar.ProgressBar(widgets=widgets, max_value=1000)
    bar.start()
    for i in range(0, 200, 5):
        time.sleep(0.1)
        bar.update(i + 1, loss=.5, text='spam', error=1)

    # Push a few non-string / boolean payloads through the message widgets.
    for payload in (dict(text=None), dict(text=False), dict(text=True, error='a')):
        i += 1
        bar.update(i, **payload)
    bar.finish()
Exemple #5
0
    def _setup_progressbar(self, epochs, metrics, validation=False):
        """Build a progress bar with one DynamicMessage per tracked metric.

        Args:
            epochs: total epoch count (bar's max_value).
            metrics: iterable of metric names to display.
            validation: when True, also add a 'val_<metric>' message per metric.

        Returns:
            A configured ``pb.ProgressBar``.
        """
        widgets = [pb.SimpleProgress(), '    ']
        for metric in metrics:
            widgets += [pb.DynamicMessage(metric), '   ']

        if validation:
            for metric in metrics:
                widgets += [pb.DynamicMessage('val_' + metric), '   ']

        # Fix: the original passed the misspelled kwarg ``term_with``, so the
        # intended 60-column terminal width never took effect.
        progress = pb.ProgressBar(max_value=epochs, widgets=widgets,
                                  term_width=60)
        return progress
Exemple #6
0
def make_widget():
    """Return a ProgressBar exposing LOSS / PREC / REC dynamic messages."""
    layout = [
        progressbar.Percentage(), ' ',
        progressbar.SimpleProgress(), ' ',
        progressbar.Bar(left='[', right=']'), ' ',
        progressbar.ETA(), ' ',
        progressbar.DynamicMessage('LOSS'), ' ',
        progressbar.DynamicMessage('PREC'), ' ',
        progressbar.DynamicMessage('REC'),
    ]
    return progressbar.ProgressBar(widgets=layout)
Exemple #7
0
def train_epoch(model, epoch, optimizer, use_cuda, train_dataset,
                loss_function):
    """Train ``model`` for one epoch, tracking per-batch loss and accuracy.

    Args:
        model: torch module producing class scores.
        epoch: epoch index (display only).
        optimizer: torch optimizer over the model's parameters.
        use_cuda: move each batch to GPU when True.
        train_dataset: iterable of (data, target) batches (a DataLoader-like
            object; also provides ``.dataset`` for the final count).
        loss_function: callable (output, target) -> scalar loss tensor.

    Returns:
        (mean_loss, mean_accuracy, correct_count, dataset_size).
    """
    widgets = [
        "Epoch {}: ".format(epoch),
        progressbar.Percentage(),
        progressbar.FormatLabel(' (batch %(value)d/%(max_value)d) '),
        ' ==stats==> ',
        progressbar.DynamicMessage("loss"), ', ',
        progressbar.DynamicMessage("accuracy"), ', ',
        progressbar.ETA()
    ]
    progress_bar = progressbar.ProgressBar(
        widgets=widgets, max_value=len(train_dataset)).start()
    batches = len(train_dataset)
    # Per-batch stats, filled in by index below.
    losses = np.zeros(batches)
    accuracies = np.zeros(batches)
    correct = 0

    for batch_idx, (data, target) in enumerate(train_dataset):
        if use_cuda:
            data, target = data.cuda(), target.cuda()

        optimizer.zero_grad()
        #MODEL OUTPUT
        output = model(data)

        loss = loss_function(output, target)
        # loss = F.nll_loss(output, target)

        # UPDATE PARAMETERS
        loss.backward()
        optimizer.step()

        # ESTIMATE BATCH LOSS AND ACCURACY
        pred = output.data.max(
            1, keepdim=True)[1]  # get the index of the max log-probability
        matches = pred.eq(target.data.view_as(pred)).cpu()
        correct += matches.sum()
        accuracies[batch_idx] = matches.float().mean().item()
        losses[batch_idx] = loss.cpu().item()

        # UPDATE UI
        # Only every 20th batch, to keep terminal output cheap; the shown
        # loss/accuracy are running means over all batches so far.
        if batch_idx % 20 == 0:
            progress_bar.update(batch_idx + 1,
                                loss=losses[:batch_idx + 1].mean(),
                                accuracy=accuracies[:batch_idx + 1].mean())

    progress_bar.finish()
    return losses.mean(), accuracies.mean(), correct, len(
        train_dataset.dataset)
Exemple #8
0
    def widgets(self):
        """Return the widget list for the progress bar, building it lazily once."""
        if self._widgets:
            return self._widgets

        # Width leaves room for "cur/total" with phases_digits digits each.
        digits = len(str(len(self.phases)))
        phase = progressbar.DynamicMessage('phase', width=1 + 2 * digits)
        score = progressbar.DynamicMessage('score', width=4)
        self._widgets = [
            ' [', phase, '] ',
            progressbar.Bar(), ' [', score, '] ',
            progressbar.Timer(),
        ]
        return self._widgets
Exemple #9
0
def create_progress_bar(dynamic_msg=None):
    """Build a batch-progress bar, optionally tracking one dynamic measure.

    Usage::

        bar = create_progress_bar('loss')
        L = []
        for i in bar(iterable):
            ...
            L.append(...)

        bar.dynamic_messages['loss'] = np.mean(L)

    :param dynamic_msg: name of the measure being monitored (loss, accuracy,
        etc.), or None for no dynamic message.
    :return: a ProgressBar() object.
    """
    parts = ['[batch ', progressbar.SimpleProgress(), '] ',
             progressbar.Bar(), ' (', progressbar.ETA(), ') ']
    if dynamic_msg is not None:
        parts.append(progressbar.DynamicMessage(dynamic_msg))
    return progressbar.ProgressBar(widgets=parts)
Exemple #10
0
async def fetch(app):
    """Refresh info for every repository in the registry, concurrently.

    Marks ``app.job_running`` while working; shows a CLI progress bar when
    ``app.cli`` is set. One ``process_repository`` task is spawned per repo;
    the tasks pull work items from a shared queue.
    """
    app.job_running = True

    loop = asyncio.get_event_loop()
    async with aiohttp.ClientSession(loop=loop) as session:
        repositories = await app.reg.retrieve_repositories(session)
        # Pre-fill a work queue; each spawned task consumes one item.
        progress_queue = asyncio.Queue(loop=loop)
        for repo in repositories:
            progress_queue.put_nowait(repo)

        logging.info("Fetching the info for %d repositories", len(repositories))

        if app.cli:
            widgets = [progressbar.DynamicMessage("image"), progressbar.Bar(),
                       progressbar.Percentage(), ' [', progressbar.Timer(), '] ']
            app.bar = progressbar.ProgressBar(maxval=len(repositories), widgets=widgets)
            app.bar.start()
            app.count = 1
        # Fresh session for the worker tasks; one task per repository.
        async with aiohttp.ClientSession(loop=loop) as session:
            tasks = [(process_repository(app, session, progress_queue)) for repo in repositories]
            await asyncio.gather(*tasks)

        if app.cli:
            app.bar.finish()
            # NOTE(review): job_running is reset here AND unconditionally at the
            # end below — the second reset makes this one redundant; confirm.
            app.job_running = False
        else: 
            logging.info("Finished updating repositories.")

        app.job_running = False
Exemple #11
0
def dynamic_message():
    """Demo: track a running minimum via DynamicMessage widgets."""
    # Use progressbar.DynamicMessage to keep track of some parameter(s) during
    # your calculations
    widgets = [
        progressbar.Percentage(),
        progressbar.Bar(),
        progressbar.DynamicMessage('loss'),
        progressbar.DynamicMessage('username', width=12, precision=12),
    ]
    with progressbar.ProgressBar(max_value=100, widgets=widgets) as bar:
        min_so_far = 1
        for i in range(100):
            val = random.random()
            if val < min_so_far:
                min_so_far = val
            # NOTE(review): '******' looks like a redacted format string — a
            # literal with no conversion specifier used with ``% i`` raises
            # TypeError at runtime; restore the original format (e.g. '%d').
            bar.update(i, loss=min_so_far, username='******' % i)
Exemple #12
0
    def run_unbounded(self, energy_ratio_limit):
        """Step the simulation until the relative energy change drops below a limit.

        Args:
            energy_ratio_limit: stop once |dE / E_last| <= this value.

        Raises:
            Warning: if the energy ever increases between steps.
        """
        # Seed last_energy / energy_ratio from existing history when available;
        # otherwise start from the single stored state with ratio 1.
        if self.data.length > 1:
            last_energy, new_energy = self.calc_energy(
                self.data[-2].data), self.calc_energy(self.data[-1].data)
            energy_ratio = np.abs((new_energy - last_energy) / last_energy)
        else:
            last_energy, energy_ratio = self.calc_energy(self.data[-1].data), 1

        i = self.data.length
        bar = progressbar.ProgressBar(widgets=[
            progressbar.AnimatedMarker(), ' ',
            progressbar.Counter('%(value)05d'), ' ',
            progressbar.DynamicMessage('energy_ratio'), ' ',
            progressbar.Timer()
        ],
                                      max_value=progressbar.UnknownLength)
        while energy_ratio > energy_ratio_limit:
            self.data.push((i + 1) * self.time_params.d * self.save_every)
            begin, end = self.data[-2].data, self.data[-1].data
            self.step_kernel(begin, end)
            new_energy = self.calc_energy(end)
            if new_energy > last_energy:
                # NOTE(review): raising the Warning *class* aborts the run rather
                # than warning — confirm warnings.warn was not intended.
                raise Warning('Energy has increased')
            energy_ratio, last_energy = np.abs(
                (new_energy - last_energy) / last_energy), new_energy
            i += 1
            bar.update(i * self.save_every, energy_ratio=energy_ratio)
Exemple #13
0
def gmm_bic(X, n_clusters=8, initialization=5):
    '''Gaussian Mixture of Models with BIC score.

    Fits GMMs with 1..n_clusters-1 components and keeps the one with the
    lowest BIC.

    Args:
        X: data array of shape (n_samples, n_features).
        n_clusters: exclusive upper bound on the component counts tried.
        initialization: number of initializations (n_init) per GMM fit.

    Returns:
        The fitted GMM with the lowest BIC, or None when no model was
        fitted (n_clusters <= 1).
    '''
    # widget to print while computing mixture of gaussians
    widgets = [ '  ', pb.Percentage(), ' | ', pb.Timer(), pb.Bar(), pb.ETA(),
                ' | ', pb.DynamicMessage('Neurons')]

    # initialize bic score (np.inf: np.infty was removed in NumPy 2.0)
    lowest_bic = np.inf
    bic = []
    best_gmm = None  # fix: avoid NameError when the loop body never runs

    # fix: dropped a dead `bar = pb.ProgressBar()` that was immediately
    # shadowed by the context manager below
    with pb.ProgressBar(max_value=n_clusters, widgets=widgets) as bar:
        # try different number of clusters
        for k in range(1, n_clusters):
            gmm=GMM(n_components=k, n_init=initialization)
            gmm.fit(X)
            bic.append(gmm.bic(X))
            # keep if the score is the lowest so far
            if bic[-1] < lowest_bic:
                lowest_bic = bic[-1]
                best_gmm = gmm
                bar.update(k, Neurons=k)
            else:
                bar.update(k)

    return best_gmm
Exemple #14
0
def main(_):
    """Build the bilinear-product model, train it, and dump losses to JSON.

    Reads all configuration from ``FLAGS`` (validation scale, max_steps,
    log_path). Writes sampled training losses to ``FLAGS.log_path``.
    """
    input_a, input_b = get_inputs()

    with tf.variable_scope(
            'model',
            initializer=tf.random_normal_initializer(stddev=1 / 10)) as scope:

        targets = combine_inputs(input_a, input_b)

        model_outputs = bilinear_product(input_a, input_b)
        print('got model')
        # Optional validation branch: reuse the trained variables on inputs
        # scaled by FLAGS.validation.
        if FLAGS.validation != 0.0:
            scope.reuse_variables()
            vinput_a = input_a * FLAGS.validation
            vinput_b = input_b * FLAGS.validation
            vtargets = combine_inputs(vinput_a, vinput_b)
            valid_outputs = bilinear_product(vinput_a, vinput_b)
            valid_loss = mse(vtargets, valid_outputs)
        else:
            valid_loss = None

    loss_op = mse(model_outputs, targets)
    train_op = get_train_step(loss_op)

    print('Got model with {} params'.format(count_trainable_params()))

    bar = progressbar.ProgressBar(widgets=[
        '[',
        progressbar.Percentage(), '] ', '(๑•̀ㅁ•́๑)✧',
        progressbar.Bar(marker='✧', left='', right=''), '(',
        progressbar.DynamicMessage('loss'), ')', '(',
        progressbar.AdaptiveETA(), ')'
    ],
                                  redirect_stdout=True)

    sess = tf.Session()
    sess.run(tf.initialize_all_variables())
    losses = []
    with sess.as_default():
        bar.start(FLAGS.max_steps)
        for step in range(FLAGS.max_steps):
            a, b, batch_loss, _ = sess.run(
                [input_a, input_b, loss_op, train_op])
            bar.update(step, loss=batch_loss)
            # Sample the loss every 50 steps for the JSON log.
            if (step + 1) % 50 == 0:
                losses.append([step, repr(batch_loss)])
            # print('a: {}'.format(a))
            # print('b: {}'.format(b))
            # import time; time.sleep(1)
        bar.finish()

        # print(sess.run(tf.reduce_sum(tf.add_n(tf.trainable_variables()))))
        if valid_loss is not None:
            vloss, = sess.run([valid_loss])
            print('Validation loss: {}'.format(vloss))
            print('   (inputs scaled by {})'.format(FLAGS.validation))
        # print(sess.run(tf.trainable_variables()))
        with open(FLAGS.log_path, 'w') as fp:
            json.dump(losses, fp)
def test_all_widgets_max_width(max_width, term_width):
    """All widgets must render empty exactly when max_width is set below term_width."""
    kw = dict(max_width=max_width)
    widgets = [
        progressbar.Timer(**kw),
        progressbar.ETA(**kw),
        progressbar.AdaptiveETA(**kw),
        progressbar.AbsoluteETA(**kw),
        progressbar.DataSize(**kw),
        progressbar.FileTransferSpeed(**kw),
        progressbar.AdaptiveTransferSpeed(**kw),
        progressbar.AnimatedMarker(**kw),
        progressbar.Counter(**kw),
        progressbar.Percentage(**kw),
        progressbar.FormatLabel('%(value)d', **kw),
        progressbar.SimpleProgress(**kw),
        progressbar.Bar(**kw),
        progressbar.ReverseBar(**kw),
        progressbar.BouncingBar(**kw),
        progressbar.FormatCustomText('Custom %(text)s', dict(text='text'), **kw),
        progressbar.DynamicMessage('custom', **kw),
        progressbar.CurrentTime(**kw),
    ]
    bar = progressbar.ProgressBar(widgets=widgets, term_width=term_width)
    bar.update(0)
    bar.update()

    for rendered in bar._format_widgets():
        if max_width and max_width < term_width:
            assert rendered == ''
        else:
            assert rendered != ''
Exemple #16
0
	def train(self):
		"""Train the model for ``self.args.epochs`` epochs and save a checkpoint.

		Python 2 code (xrange, ``print >>``). Loads training data, builds the
		graph via ``self.add_model()``, runs mini-batch SGD, evaluates on the
		validation set after each epoch, and saves to ``self.args.model``.
		"""
		self.load_data(train=True)
		with tf.Graph().as_default():
			print >> sys.stderr, "add model"
			var = self.add_model()

			saver = tf.train.Saver()

			# config = tf.ConfigProto(allow_soft_placement=True, log_device_placement=True)
			config = tf.ConfigProto(allow_soft_placement=True)
			config.gpu_options.allow_growth = True
			# config.gpu_options.per_process_gpu_memory_fraction = 0.6

			sess = tf.Session(config = config)

			sess.run(tf.initialize_all_variables())

			# Batches per epoch, rounding up so the tail of the data is used.
			total_batch = int(np.ceil(len(self.train_data) / float(self.args.batch)))

			for epoch in xrange(self.args.epochs):

				total_loss = 0.0
				total_rr = 0.0
				total_count = 0
				pbar = pb.ProgressBar(widgets=["[TRAIN] ", pb.DynamicMessage('loss'), " ", pb.DynamicMessage('mrr'), " ", pb.FileTransferSpeed(unit="batchs"), pb.Percentage(), pb.Bar(), pb.Timer(), " ", pb.ETA()], maxval=total_batch).start()

				for i in xrange(total_batch):
					batch = self.next_batch(self.args.batch)
					# batch[:, :, 0] is the label column, the rest are features.
					_, loss, rr = sess.run([var['opt'], var['cost'], var['rr_sum']], feed_dict={var['y']:batch[:, :, 0], var['x']:batch[:, :, 1:]})
					total_loss += loss
					total_rr += rr
					total_count += len(batch)
					pbar.update(i, loss=total_loss/total_count, mrr=total_rr/total_count)
				pbar.finish()

				# v_loss, v_rr = sess.run([var['cost'], var['rr_sum']], feed_dict={var['y']:self.val_data[:, :, 0], var['x']:self.val_data[:, :, 1:]})
				v_loss, v_rr, v1_rr, v2_rr = self.eval(sess, var)

				print >> sys.stderr, \
					"Epoch {}:\n  tr_cross_entropy: {}, tr_mrr: {}\n  v_cross_entropy: {}, v_mrr: {}\n  v1_rr: {}, v2_rr: {}".format(\
						epoch, total_loss / len(self.train_data), total_rr / len(self.train_data), \
						v_loss / len(self.val_data), v_rr / len(self.val_data), \
						v1_rr / (len(self.val_data)/2), v2_rr / (len(self.val_data)/2) )

			print >> sys.stderr, "save model"
			save_path = saver.save(sess, self.args.model)
			print >> sys.stderr, "save model in path", save_path
Exemple #17
0
def get_progressbar():
    """Return a ProgressBar with a kaomoji marker and an 'nll' dynamic message."""
    widgets = [
        '[',
        progressbar.Percentage(), '] ', '(>₃>)「 ',
        progressbar.Bar(marker='~', left='', right='|'), ' (',
        progressbar.DynamicMessage('nll'), ')', '{',
        progressbar.ETA(), '}',
    ]
    return progressbar.ProgressBar(widgets=widgets, redirect_stdout=True)
def create_progress_bar(dynamic_msg=None):
    """Return a batch ProgressBar; append a DynamicMessage when requested."""
    bar_parts = [' [batch ', progressbar.SimpleProgress(), '] ',
                 progressbar.Bar(),
                 ' (', progressbar.ETA(), ') ']
    if dynamic_msg is not None:
        bar_parts += [progressbar.DynamicMessage(dynamic_msg)]
    return progressbar.ProgressBar(widgets=bar_parts)
Exemple #19
0
 def __init__(self,
              epoch=20,
              batch_size=128,
              learning_rate=0.003,
              device=torch.device("cuda"),
              pos_weight=100.0,
              model=None):
     """Store training hyper-parameters, the weighted BCE loss, the model,
     and the progress-bar widget layout."""
     self.epoch = epoch
     self.batch_size = batch_size
     self.learning_rate = learning_rate
     self.device = device
     # Positive-class weighting for the (presumably imbalanced) BCE objective.
     weight = torch.Tensor([pos_weight]).to(device)
     self.loss = nn.BCEWithLogitsLoss(pos_weight=weight)
     self.model = model.to(device)
     self.widget = [
         pb.DynamicMessage("epoch"), ' ',
         pb.Percentage(), ' ',
         pb.ETA(), ' ',
         pb.Timer(), ' ',
         pb.DynamicMessage("lossT"), ' ',
         pb.DynamicMessage("lossV"),
     ]
    def create_progress_bar(self, dynamic_msg=None):
        """Return a batch ProgressBar, or a NullBar when progress display is off."""
        widgets = [' [batch ', progressbar.SimpleProgress(), '] ',
                   progressbar.Bar(),
                   ' (', progressbar.ETA(), ') ']
        if dynamic_msg is not None:
            widgets.append(progressbar.DynamicMessage(dynamic_msg))

        # Guard clause: honor the global 'show_progress' switch (defaults on).
        if not self.config_global.get('show_progress', True):
            return progressbar.NullBar()
        return progressbar.ProgressBar(widgets=widgets)
def test_dynamic_message_widget():
    """Drive a bar to 200 steps with a constant 'loss' dynamic message."""
    bar = progressbar.ProgressBar(
        widgets=[' [',
                 progressbar.Timer(), '] ',
                 progressbar.Bar(), ' (',
                 progressbar.ETA(), ') ',
                 progressbar.DynamicMessage('loss')],
        max_value=1000)
    bar.start()
    for step in range(0, 200, 5):
        bar.update(step + 1, loss=.5)
    bar.finish()
Exemple #22
0
def make_progressbar(max_value, show_output):
    """Build a timed ProgressBar; append a 'loss' message when show_output is set."""
    parts = [progressbar.Timer(format='Time: %(elapsed)s'),
             ' |',
             progressbar.Percentage(),
             progressbar.Bar(),
             ' ',
             progressbar.ETA()]
    if show_output:
        parts += [' | ', progressbar.DynamicMessage('loss')]
    return progressbar.ProgressBar(max_value=max_value, widgets=parts)
Exemple #23
0
 def __init__(self, description, total):
     """Track losses while displaying a ProgressBar of ``total`` steps.

     Args:
         description: human-readable label for this run (stored only).
         total: max_value for the underlying ProgressBar.
     """
     self.losses = []
     self.description = description
     # Fix: dropped the unused widget list (it was never passed to the bar),
     # the unused `import tqdm` / `import sys`, the commented-out tqdm bar,
     # and the duplicate `self.losses = []` assignment.
     self.bar = progressbar.ProgressBar(max_value=total)
     self.current = 1
Exemple #24
0
    def eval(self, sess, var):
        """Run one pass over the validation data, summing loss and accuracy.

        Args:
            sess: open tf.Session.
            var: dict of graph tensors ('cost', 'acc_sum', 'x', 'y', 'keep_prob').

        Returns:
            (total_loss, total_acc_sum): sums over all validation examples;
            the caller divides by counts to get means.
        """
        batch_size = 200

        # Batches needed to cover the validation set, rounding up.
        total_batch = int(np.ceil(len(self.val_data) / float(batch_size)))

        total_loss = 0.0
        total_acc_sum = 0.0
        total_count = 0

        pbar = pb.ProgressBar(widgets=[
            "[VALID] ",
            pb.DynamicMessage('loss'), " ",
            pb.DynamicMessage('acc'), " ",
            pb.FileTransferSpeed(unit="batchs"),
            pb.Percentage(),
            pb.Bar(),
            pb.Timer(), " ",
            pb.ETA()
        ],
                              maxval=total_batch).start()
        for i in xrange(total_batch):
            batchx, batchy = self.next_batch(batch_size, dtype="valid")
            # keep_prob=1.0: dropout disabled during evaluation.
            loss, acc_sum = sess.run([var['cost'], var['acc_sum']],
                                     feed_dict={
                                         var['x']: batchx,
                                         var['y']: batchy,
                                         var['keep_prob']: 1.0
                                     })
            total_loss += loss
            total_acc_sum += acc_sum
            total_count += len(batchx)
            pbar.update(i,
                        loss=total_loss / total_count,
                        acc=total_acc_sum / total_count)
        pbar.finish()

        return total_loss, total_acc_sum
Exemple #25
0
    def train(self, batch_iterator, epochs=100, learning_rate=1e-3):
        """Train this model with plain SGD over ``batch_iterator``.

        Args:
            batch_iterator: iterable of (processed_count, X_batch) pairs,
                exposing ``batches_cnt`` (total batches per epoch).
            epochs: number of passes over the iterator.
            learning_rate: SGD learning rate.
        """
        optimizer = optim.SGD(self.parameters(), lr=learning_rate)
        total_batches_cnt = batch_iterator.batches_cnt
        pbar = progressbar.ProgressBar(
            maxval=total_batches_cnt,
            widgets=[
                progressbar.DynamicMessage('Epoch'),  # Static text
                ', ',
                progressbar.DynamicMessage('progress'),
                '%, ',
                progressbar.AdaptiveETA(),
                ', ',
                progressbar.DynamicMessage('loss'),
                ', ',
            ])
        for epoch in range(epochs):
            # Restart the bar each epoch; updates are keyed by processed count.
            pbar.start()
            training_stat = {'Epoch': epoch + 1, 'progress': 0}
            for processed, X_batch in batch_iterator:
                self.zero_grad()
                X_batch_var = make_var(X_batch)
                y_predicted = self(X_batch_var)
                # Target is derived from the prediction + raw batch by the model.
                y_true = self.target(y_predicted, X_batch)

                loss = self.loss_function(y_predicted, y_true)
                loss.backward()
                optimizer.step()

                progress = 100.0 * processed / total_batches_cnt
                training_stat.update({
                    'progress': progress,
                    # NOTE(review): ``loss.data[0]`` is pre-0.4 PyTorch; modern
                    # versions require ``loss.item()``.
                    'loss': loss.data[0]
                })
                pbar.update(processed, **training_stat)
            pbar.finish()
Exemple #26
0
    def show_progressbar(self):
        """Create the progress UI: ipywidgets in a notebook, progressbar in a TTY."""
        if not is_ipython():
            import progressbar

            self._pbar_tui = progressbar.ProgressBar(
                widgets = [
                    ' ', progressbar.Percentage(),
                    ' ', progressbar.SimpleProgress(format='%(value).2f of %(max_value).2f secs'),
                    ' ', progressbar.Bar('=', fill='.'),
                    ' ', progressbar.DynamicMessage('elapsed'),
                    ' ', progressbar.ETA(format='eta: %(eta_seconds).2f', ),
                    ' ', progressbar.DynamicMessage('rtfactor')
                ],
                max_value = self.final_time,
                redirect_stdout = True
            )
            return

        # Notebook path: ipywidgets progress bar plus a status line.
        from ipywidgets import FloatProgress, HTML
        from IPython.display import display

        self._pbar_widget = FloatProgress(min = 0, max = self.final_time)
        self._pbar_widget_text = HTML(value = 'Simulation start is pending...')
        display(self._pbar_widget_text, self._pbar_widget)
Exemple #27
0
def make_progressbar(max_value: int):
    """Start and return a ProgressBar reporting perplexity ('pplx') and speed."""
    layout = [
        progressbar.DynamicMessage('pplx'),
        ', ',
        progressbar.FileTransferSpeed(unit='it', prefixes=['']),
        ', ',
        progressbar.SimpleProgress(),
        ',',
        progressbar.Percentage(),
        ' ',
        progressbar.Bar(),
        ' ',
        progressbar.AdaptiveETA(),
    ]
    bar = progressbar.ProgressBar(max_value=max_value, widgets=layout)
    return bar.start()
    def train(self, model, train_loader, epoch):
        """Train ``model`` for ``epoch`` epochs over ``train_loader`` and return it.

        Uses ``self.optimizer`` and ``self.loss_func``; prints per-epoch
        accuracy/loss and shows a progress bar per epoch. Written for an old
        PyTorch (Variable, ``loss.data[0]``) and assumes CUDA is available.
        """
        # appearance of the progress bar
        widgets = [
            '  ',
            pbar.Percentage(),
            ' ',
            pbar.Bar(marker='█', left='|', right='|'),
            ' ',
            pbar.ETA(),
            " | ",
            pbar.DynamicMessage('loss'),
            '   '
        ]
        bar = pbar.ProgressBar(widgets=widgets, initial_value=1, max_value=len(train_loader))

        # start training
        for epoch_id in range(epoch):
            print('[Epoch %d/%d]' % (epoch_id+1, epoch))
            train_losses = []
            train_acc = []
            bar.start()
            for i, (inputs, targets) in enumerate(train_loader, 1):
                inputs, targets = Variable(inputs).cuda(), Variable(targets).cuda() 
                self.optimizer.zero_grad()
                outputs = model(inputs)
                loss = 0
                # Per-sample loss over 23 classes, reshaping each sample's output
                # to (num_positions, 23) before the criterion.
                for s in range(inputs.size()[0]):
                    #loss += criterion(outputs[s].view(23, -1).transpose(1, 0), targets[s].view(-1))
                    loss += self.loss_func(outputs[s].view(23, -1).transpose(1, 0), targets[s].view(-1))
                loss.backward()
                self.optimizer.step()
                _, preds = torch.max(outputs, 1)
                # Negative targets are treated as "ignore" positions for accuracy.
                targets_mask = targets >= 0
                train_acc.append(np.mean((preds == targets)[targets_mask].data.cpu().numpy()))
                train_losses.append(loss.data[0] / inputs.size()[0])
                bar.update(i, loss=train_losses[-1])
            
            bar.finish()
            # NOTE(review): this prints the *last batch's* acc/loss, not the
            # epoch mean — confirm that is intended.
            print('[Epoch %d/%d] TRAIN acc/loss: %.3f/%.3f\n' % (epoch_id + 1,
                                                               epoch,
                                                               train_acc[-1],
                                                               train_losses[-1]))

        return model
Exemple #29
0
def create_progress_bar(dynamic_msg=None):
    # Taken from Andreas Rueckle.
    # usage:
    #   bar = _create_progress_bar('loss')
    #   L = []
    #   for i in bar(iterable):
    #       ...
    #       L.append(...)
    #
    #   bar.dynamic_messages['loss'] = np.mean(L)
    base_widgets = [' [batch ',
                    progressbar.SimpleProgress(), '] ',
                    progressbar.Bar(), ' (',
                    progressbar.ETA(), ') ']
    if dynamic_msg is not None:
        base_widgets = base_widgets + [progressbar.DynamicMessage(dynamic_msg)]
    return progressbar.ProgressBar(widgets=base_widgets)
Exemple #30
0
    def loss_widgets(msg=''):
        """Generate default progress-bar widgets including loss information.

        Args:
            msg: a string shown at the beginning of the bar.

        Return:
            widgets: list of items for the progress bar widgets.
        """
        return [
            msg,
            progressbar.Percentage(),
            ' ',
            progressbar.Bar(marker='#', left='[', right=']'),
            ' ',
            progressbar.DynamicMessage('loss', width=10, precision=7),
            ' ',
            progressbar.ETA(),
        ]